repo_name: string (lengths 8–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
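Each record below carries the five fields above: repo_name is a plain string, while hexsha, file_path, code and apis are parallel sequences with one entry per file (code holds the full file text with newlines escaped, and apis lists the library calls found in that file). As a hedged illustration only — this dump does not state its on-disk serialization — the sketch below assumes the records are stored one JSON object per line (JSON Lines); the file name records.jsonl and the helper iter_records are hypothetical, not part of the dataset.

import json

def iter_records(path):
    # Hypothetical reader: assumes one JSON object per line, with the fields
    # repo_name, hexsha, file_path, code and apis listed in the schema above.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    for record in iter_records("records.jsonl"):  # example path, not part of the dump
        print(record["repo_name"])
        # hexsha, file_path and apis are aligned per file within the record.
        for sha, file_path, apis in zip(record["hexsha"], record["file_path"], record["apis"]):
            print(" ", sha[:12], file_path, "->", ", ".join(apis))
        break  # inspect only the first record

Run against a JSON Lines export of the rows below, this would print, for example, the first record's repository name followed by its file path and the numpy calls detected in it.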
LukasK13/ESBO-ETC
[ "d1db999f1670f2777c5227d79629d421f03e5393" ]
[ "tests/sensor/test_PixelMask.py" ]
[ "from unittest import TestCase\nfrom esbo_etc.classes.sensor.PixelMask import PixelMask\nimport numpy as np\nimport astropy.units as u\n\n\nclass TestPixelMask(TestCase):\n def setUp(self):\n self.mask = PixelMask(np.array([10, 8]) << u.pix, 6.5 * u.um, center_offset=np.array([0.2, 0.5]) << u.pix)\n\n def test___new__(self):\n self.assertTrue((self.mask.view(np.ndarray) == np.zeros((8, 10))).all())\n self.assertEqual(self.mask.center_ind, [3.5, 4.5])\n self.assertEqual(self.mask.psf_center_ind, [4.0, 4.7])\n self.assertEqual(self.mask.pixel_geometry, [8 * u.pix, 10 * u.pix])\n\n def test_createPhotometricAperture(self):\n # circle\n self.mask.createPhotometricAperture(\"circle\", 2.3 * u.pix)\n res = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n self.assertTrue((self.mask.view(np.ndarray) == res).all())\n\n self.setUp()\n self.mask.createPhotometricAperture(\"circle\", 2.6 * u.pix, np.array([-0.5, 0.8]) << u.pix)\n res = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 0., 0., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 0., 0., 0.],\n [0., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 0., 0., 0., 0.]])\n self.assertTrue((self.mask.view(np.ndarray) == res).all())\n\n # square\n self.setUp()\n self.mask.createPhotometricAperture(\"square\", 2.3 * u.pix)\n res = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n self.assertTrue((self.mask.view(np.ndarray) == res).all())\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
lori94/DSCI_522_Group_404
[ "e8177bd7fa388dcada94bcb9c2f6e69dc0227591" ]
[ "src/model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n'''This script finds the best parameters for SVC and LGR models and fits the data to these two models and outputs the classification images and the classification reports as the csv documents.\n\nUsage: src/model.py --data_input=<data_input> --result_output=<result_output> \n\nArguments:\n--data_input=<data_input> The path for all the clean data\n--result_output=<result_output> The path where to store the csv data\n'''\nimport numpy as np\nimport pandas as pd\nfrom docopt import docopt\nfrom sklearn.model_selection import RandomizedSearchCV\n#from sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\n#from plot_classifier import plot_classifier\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report\nimport lightgbm as lgb\n\nopt = docopt(__doc__)\n\ndef get_model_results(X, y, X_train, y_train, X_test, y_test, result_output):\n \n parameters_svc = {'C':np.logspace(-3,3,7), 'gamma':np.logspace(-4,2,7)}\n pd.DataFrame(parameters_svc).to_csv(result_output + '/hyper_parameters.csv')\n svc = SVC()\n svc_opt = RandomizedSearchCV(svc, parameters_svc, cv=5, iid=False, n_iter = 25)\n # svc_opt.fit(X_train, y_train)\n # train_score_svc = svc_opt.score(X_train,y_train)\n # test_score_svc= svc_opt.score(X_test,y_test)\n #svc_opt = GridSearchCV(svc, parameters_svc, cv=5, iid=False)\n \n svc_opt.fit(X_train.to_numpy(), y_train.to_numpy().ravel())\n train_score_svc = svc_opt.score(X_train.to_numpy(),y_train.to_numpy().ravel())\n test_score_svc = svc_opt.score(X_test.to_numpy(),y_test.to_numpy().ravel())\n parameters_lgr = {'C':np.logspace(-3,3,7)}\n \n lgr = LogisticRegression()\n\n #lgr_opt = GridSearchCV(lgr, parameters_lgr, cv=5, iid=False)\n lgr_opt = RandomizedSearchCV(lgr, parameters_lgr, cv=5, iid=False, n_iter = 25)\n\n lgr_opt.fit(X_train.to_numpy(), y_train.to_numpy().ravel())\n train_score_lgr = lgr_opt.score(X_train.to_numpy(),y_train.to_numpy().ravel())\n test_score_lgr = lgr_opt.score(X_test.to_numpy(),y_test.to_numpy().ravel())\n \n lgbm = lgb.LGBMClassifier()\n lgbm.fit(X_train.to_numpy(),y_train.to_numpy().ravel())\n train_score_lgbm = lgbm.score(X_train.to_numpy(),y_train.to_numpy().ravel())\n test_score_lgbm = lgbm.score(X_test.to_numpy(),y_test.to_numpy().ravel())\n \n data = {'Train accuracy':[train_score_svc, train_score_lgr, train_score_lgbm], 'Validation accuracy':[test_score_svc, test_score_lgr,test_score_lgbm], 'Best parameters':[svc_opt.best_params_,lgr_opt.best_params_, 'NA']}\n accuracy_df = pd.DataFrame(data, index = ['SVC','LGR','LGBM'])\n accuracy_df.to_csv(result_output+'/accuracy.csv')\n \n predictions_svc = svc_opt.predict(X_test)\n predictions_lgr = lgr_opt.predict(X_test)\n predictions_lgbm = lgbm.predict(X_test)\n svc_report = pd.DataFrame(classification_report(y_test, predictions_svc, output_dict=True))\n lgr_report = pd.DataFrame(classification_report(y_test, predictions_lgr, output_dict=True))\n lgbm_report = pd.DataFrame(classification_report(y_test, predictions_lgbm, output_dict=True))\n svc_report.to_csv(result_output+'/svc_classification_report.csv')\n lgr_report.to_csv(result_output+'/lgr_classification_report.csv')\n lgbm_report.to_csv(result_output+'/lgbm_classification_report.csv')\n \n try:\n pd.read_csv(result_output+'/svc_classification_report.csv')\n pd.read_csv(result_output+'/lgr_classification_report.csv')\n pd.read_csv(result_output+'/lgbm_classification_report.csv')\n \n except: \n raise 
Exception(\"result doesn't save successfully\")\n \n return svc_opt, lgr_opt, lgbm\n\ndef main(data_input, result_output):\n X_train = pd.read_csv(data_input+'/X_train_clean.csv')\n y_train = pd.read_csv(data_input+'/y_train.csv',usecols = [\"Target\"])\n X_test = pd.read_csv(data_input+'/X_test_clean.csv')\n y_test = pd.read_csv(data_input+'/y_test.csv',usecols = [\"Target\"])\n X = pd.read_csv(data_input+'/X_original.csv')\n y = pd.read_csv(data_input+'/y_original.csv')\n svc_opt, lgr_opt, lgbm = get_model_results(X, y, X_train, y_train, X_test, y_test, result_output)\n plt.figure(figsize=(18,3))\n # model = [svc_opt, lgr_opt]\n # for i in range(2):\n # plt.subplot(1,4,i+1)\n # classifier = model[i]\n # plot_classifier(X,y,classifier,ax=plt.gca())\n # plt.savefig(result_output+'classifier_plot.png')\nif __name__ == \"__main__\":\n main(opt[\"--data_input\"], opt[\"--result_output\"])\n\n" ]
[ [ "sklearn.model_selection.RandomizedSearchCV", "sklearn.svm.SVC", "sklearn.metrics.classification_report", "pandas.read_csv", "matplotlib.pyplot.figure", "pandas.DataFrame", "sklearn.linear_model.LogisticRegression", "numpy.logspace" ] ]
naegawa/Aup2wav_dataset
[ "bc22ce50704497f2d496da13b00cebae0083bbef" ]
[ "audacity.py" ]
[ "#!/usr/bin/env python\n# 2017 kojima changed https://github.com/davidavdav/audacity.py (c) 2016 David A. van Leeuwen\n# \n#\n# \nimport xml.etree.ElementTree as ET\nimport wave, os, numpy, struct\n\nclass Aup:\n\tdef __init__(self, aupfile):\n\t\tfqpath = os.path.join(os.path.curdir, aupfile)\n\t\tdir = os.path.dirname(fqpath)\n\t\txml = open(aupfile)\n\t\tself.tree = ET.parse(xml)\n\t\tself.root = self.tree.getroot()\n\t\tself.rate = float(self.root.attrib[\"rate\"])\n\t\tns = {\"ns\":\"http://audacity.sourceforge.net/xml/\"}\n\t\tself.project = self.root.attrib[\"projname\"]\n\t\tself.files = []\n\t\tself.labels= []\n\t\tfor channel, wavetrack in enumerate(self.root.findall(\"ns:wavetrack\", ns)):\n\t\t\ttrack_channel=wavetrack.attrib[\"channel\"]\n\t\t\taufiles = []\n\t\t\tfor b in wavetrack.iter(\"{%s}simpleblockfile\" % ns[\"ns\"]):\n\t\t\t\tfilename = b.attrib[\"filename\"]\n\t\t\t\td1 = filename[0:3]\n\t\t\t\td2 = \"d\" + filename[3:5]\n\t\t\t\tfile = os.path.join(dir, self.project, d1, d2, filename)\n\t\t\t\tif not os.path.exists(file):\n\t\t\t\t\traise IOError(\"File missing in %s: %s\" % (self.project, file))\n\t\t\t\telse:\n\t\t\t\t\taufiles.append((file, int(b.attrib[\"len\"]),track_channel))\n\t\t\tself.files.append(aufiles)\n\t\tfor channel, labeltrack in enumerate(self.root.findall(\"ns:labeltrack\", ns)):\n\t\t\tlabelset=[]\n\t\t\tfor b in labeltrack.iter(\"{%s}label\" % ns[\"ns\"]):\n\t\t\t\tl=(float(b.attrib[\"t\"]), float(b.attrib[\"t1\"]), b.attrib[\"title\"])\n\t\t\t\tlabelset.append(l)\n\t\t\tself.labels.append(labelset)\n\n\t\tself.nchannels = len(self.files)\n\t\tself.aunr = -1\n\n\tdef open(self, channel):\n\t\tif not (0 <= channel < self.nchannels):\n\t\t\traise ValueError(\"Channel number out of bounds\")\n\t\tself.channel = channel\n\t\tself.aunr = 0\n\t\tself.offset = 0\n\t\treturn self\n\n\tdef close(self):\n\t\tself.aunr = -1\n\n\t## a linear search (not great)\n\tdef seek(self, pos):\n\t\tif self.aunr < 0:\n\t\t\traise IOError(\"File not opened\")\n\t\ts = 0\n\t\ti = 0\n\t\tlength = 0\n\t\tfor i, f in enumerate(self.files[self.channel]):\n\t\t\ts += f[1]\n\t\t\tif s > pos:\n\t\t\t\tlength = f[1]\n\t\t\t\tbreak\n\t\tif pos >= s:\n\t\t\traise EOFError(\"Seek past end of file\")\n\t\tself.aunr = i\n\t\tself.offset = pos - s + length\n\n\tdef read(self):\n\t\tif self.aunr < 0:\n\t\t\traise IOError(\"File not opened\")\n\t\twhile self.aunr < len(self.files[self.channel]):\n\t\t\twith open(self.files[self.channel][self.aunr][0],\"rb\") as fd:\n\t\t\t\ttrack_ch=self.files[self.channel][self.aunr][2]\n\t\t\t\tif track_ch==\"0\":\n\t\t\t\t\tfd.seek((self.offset - self.files[self.channel][self.aunr][1]) * 2, 2)\n\t\t\t\telse:\n\t\t\t\t\tfd.seek((self.offset - self.files[self.channel][self.aunr][1]) * 4, 2)\n\t\t\t\tdata = fd.read()\n\t\t\t\tyield data,track_ch\n\t\t\tself.aunr += 1\n\t\t\tself.offset = 0\n\n\tdef __enter__(self):\n\t\treturn self\n\n\tdef __exit__(self, exc_type, exc_val, exc_tb):\n\t\tself.close()\n\n\tdef get_labels(self, channel):\n\t\tif not (0 <= channel <len(self.labels)):\n\t\t\traise ValueError(\"Channel number out of bounds\")\n\t\treturn self.labels[channel]\n\n\tdef towav(self, filename, channel, start=0, stop=None):\n\t\twav = wave.open(filename, \"w\")\n\t\twav.setnchannels(1)\n\t\twav.setsampwidth(2)\n\t\twav.setframerate(self.rate)\n\t\tscale = 1 << 15\n\t\tif stop:\n\t\t\tlength = int(self.rate * (stop - start)) ## number of samples to extract\n\t\twith self.open(channel) as fd: #fd=self\n\t\t\tfd.seek(int(self.rate * 
start))\n\t\t\tfor data,track_ch in fd.read():\n\t\t\t\tif track_ch==\"0\":\n\t\t\t\t\tshorts = numpy.frombuffer(data, numpy.short)\n\t\t\t\telse:\n\t\t\t\t\tshorts = numpy.short(numpy.clip(numpy.frombuffer(data, numpy.float32) * scale, -scale, scale-1))\n\t\t\t\tif stop and len(shorts) > length:\n\t\t\t\t\tshorts = shorts[range(length)]\n\t\t\t\tformat = \"<\" + str(len(shorts)) + \"h\"\n\t\t\t\twav.writeframesraw(struct.pack(format, *shorts))\n\t\t\t\tif stop:\n\t\t\t\t\tlength -= len(shorts)\n\t\t\t\t\tif length <= 0:\n\t\t\t\t\t\tbreak\n\t\t\twav.writeframes(bytes(b'')) ## sets length in wavfile\n\t\twav.close()\n" ]
[ [ "numpy.frombuffer" ] ]
ClaireDelplancke/SIRF-Contribs
[ "130223d9bc11991eadcd11f9b715aea34c4842fd" ]
[ "src/Python/sirf/contrib/kcl/user_dePierroMap_real_data.py" ]
[ "'''User implemented De Pierro MAPEM reconstruction\nReal data implementation of De Pierro MAPEM, using a Bowsher weighted quadratic\npenalty. The guidance image (here a T1-weighted MR image) must be pre-aligned \nto the PET image and sampled on the same image grid.\nImplemented by Sam Ellis (13th Feb 2019)\n\nUsage:\n dePierroMap_eg [--help | options]\n\nOptions:\n -f <file>, --file=<file> raw data file\n [default: my_forward_projection.hs]\n -p <path>, --path=<path> path to data files, defaults to data/examples/PET\n subfolder of SIRF root folder\n -s <subs>, --subs=<subs> number of subsets [default: 12]\n -i <siter>, --subiter=<siter> number of sub-iterations [default: 24]\n -e <engn>, --engine=<engn> reconstruction engine [default: STIR]\n'''\n\n## CCP PETMR Synergistic Image Reconstruction Framework (SIRF)\n## Copyright 2015 - 2017 Rutherford Appleton Laboratory STFC\n## Copyright 2015 - 2017 University College London.\n##\n## This is software developed for the Collaborative Computational\n## Project in Positron Emission Tomography and Magnetic Resonance imaging\n## (http://www.ccppetmr.ac.uk/).\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n## http://www.apache.org/licenses/LICENSE-2.0\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n\n__version__ = '0.1.0'\n\ndef my_dePierroMap(image, obj_fun, beta, filter, weights, sensitivity_image):\n \n # Check that weights are normalised\n if (np.abs(np.sum(weights,axis=1)-1)>1.0e-6).any():\n raise ValueError(\"Weights should sum to 1 for each voxel\")\n \n # Create OSEM reconstructor\n print('Setting up reconstruction object')\n OSEM_reconstructor = OSMAPOSLReconstructor()\n OSEM_reconstructor.set_objective_function(obj_fun) \n OSEM_reconstructor.set_num_subsets(21)\n OSEM_reconstructor.set_num_subiterations(21*10)\n OSEM_reconstructor.set_up(image)\n num_subiterations = OSEM_reconstructor.get_num_subiterations()\n \n current_image = image.clone()\n\n for iter in range(1,num_subiterations + 1):\n print('\\n------------- Subiteration %d' % iter) \n\n # clear the temp files from the current working directory (vital when\n # reconstructing real datasets with many iterations)\n if np.mod(iter,5) == 0:\n os.system('rm *.hv *.hs *.v *.s *.ahv')\n \n # Calculate imageReg and return as an array\n imageReg_array = dePierroReg(current_image.as_array(),weights)\n \n # OSEM image update\n OSEM_reconstructor.update(current_image)\n imageEM_array = current_image.as_array()\n \n # Final image update\n imageUpdated_array = dePierroUpdate \\\n (imageEM_array, imageReg_array, beta, sensitivity_image.as_array())\n \n # Fill image and truncate to cylindrical field of view \n current_image.fill(imageUpdated_array)\n filter.apply(current_image)\n \n image_out = current_image.clone()\n return image_out\n\n\ndef dePierroUpdate(imageEM, imageReg, beta, sensImg):\n \n delta = 1e-6*abs(sensImg).max()\n sensImg[sensImg < delta] = delta # avoid division by zero\n beta_j = beta/sensImg\n \n b_j = 1 - beta_j*imageReg\n \n numer = (2*imageEM)\n denom = ((b_j**2 + 4*beta_j*imageEM)**0.5 + b_j)\n \n delta = 1e-6*abs(denom).max()\n denom[denom < delta] = delta # 
avoid division by zero\n \n imageUpdated = numer/denom\n \n return imageUpdated\n\ndef dePierroReg(image,weights):\n \n # get size and vectorise image for indexing \n imSize = image.shape\n imageVec = image.reshape(-1,1,order='F').flatten('F')\n \n # get the neigbourhoods of each voxel\n weightsSize = weights.shape\n w = int(round(weightsSize[1]**(1.0/3))) # side length of neighbourhood\n nhoodInd = neighbourExtract(imSize,w)\n nhoodIndVec = nhoodInd.reshape(-1,1,order='F').flatten('F')\n \n # retrieve voxel intensities for neighbourhoods \n resultVec = np.float32(imageVec[nhoodIndVec])\n result = resultVec.reshape(nhoodInd.shape,order='F')\n \n # compute xreg\n try:\n imageReg = 0.5*np.sum(weights*(result + np.float32(image).reshape(-1,1,order='F')),axis=1)\n except:\n tmpVar = 1; \n imageReg = imageReg.reshape(imSize,order='F')\n \n return imageReg\n\ndef neighbourExtract(imageSize,w):\n # Adapted from Prior class \n n = imageSize[0]\n m = imageSize[1]\n h = imageSize[2]\n wlen = 2*np.floor(w/2)\n widx = xidx = yidx = np.arange(-wlen/2,wlen/2+1)\n\n if h==1:\n zidx = [0]\n nN = w*w\n else:\n zidx = widx\n nN = w*w*w\n \n Y,X,Z = np.meshgrid(np.arange(0,m), np.arange(0,n), np.arange(0,h)) \n N = np.zeros([n*m*h, nN],dtype='int32')\n l = 0\n for x in xidx:\n Xnew = setBoundary(X + x,n)\n for y in yidx:\n Ynew = setBoundary(Y + y,m)\n for z in zidx:\n Znew = setBoundary(Z + z,h)\n N[:,l] = ((Xnew + (Ynew)*n + (Znew)*n*m)).reshape(-1,1).flatten('F')\n l += 1\n return N\n \ndef setBoundary(X,n):\n # Boundary conditions for neighbourExtract\n # Adapted from Prior class\n idx = X<0\n X[idx] = X[idx] + n\n idx = X>n-1\n X[idx] = X[idx] - n\n return X.flatten('F')\n\n# %%\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nfrom pUtilities import show_2D_array\nfrom pSTIR import *\nimport numpy as np\nimport Prior as pr\n\ndata_path = '/media/sf_SIRF_data/sino_rawdata_100/'\n#data_path='/home/sirfuser/data/NEMA'\nprint('Finding files in %s' % data_path)\n\nnum_subsets = 12\n \n# set filenames \n# input files\nsino_file = 'my_data_sino.hs'\nnorm_file = 'my_data_norm.hs'\nattn_file = 'my_data_mumap.hv'\nrand_file = 'my_data_rand.hs'\nmr_file = 'my_data_MR_SIRF.hv'\n\n# output goes to files\nmsg_red = MessageRedirector('info.txt', 'warn.txt', 'error.txt')\n\nacq_data = AcquisitionData(data_path + sino_file)\n\n#%%\n\n# copy the acquisition data into a Python array\nacq_array = acq_data.as_array()\nprint('acquisition data dimensions: %dx%dx%d' % acq_array.shape)\n# use a slice number for display that is appropriate for the NEMA phantom\nz = 71\nshow_2D_array('Acquisition data', acq_array[z,:,:])\n\n# create acquisition model\nacq_model = AcquisitionModelUsingRayTracingMatrix()\nacq_model.set_num_tangential_LORs(10);\n\n#%% Correction sinograms\nnorm_file = 'data-norm.n.hdr'\nasm_norm = AcquisitionSensitivityModel(data_path + norm_file)\nacq_model.set_acquisition_sensitivity(asm_norm)\n\n# ---------------- taken from the example-----------------------------------\nattn_image = ImageData(data_path + attn_file)\nattn_acq_model = AcquisitionModelUsingRayTracingMatrix()\nattn_acq_model.set_num_tangential_LORs(10)\nasm_attn = AcquisitionSensitivityModel(attn_image, attn_acq_model)\n\n# temporary fix pending attenuation offset fix in STIR:\n# converting attenuation into 'bin efficiency'\nasm_attn.set_up(acq_data)\nattn_factors = AcquisitionData(acq_data)\nattn_factors.fill(1.0)\nprint('applying attenuation (please wait, may take a while)...')\nasm_attn.unnormalise(attn_factors)\nasm_attn = 
AcquisitionSensitivityModel(attn_factors)\nasm = AcquisitionSensitivityModel(asm_norm, asm_attn)\nacq_model.set_acquisition_sensitivity(asm)\n# --------------------------------------------------------------------------\n\n# randoms\nrandoms = AcquisitionData(data_path + rand_file)\nrandoms_array=randoms.as_array()\nshow_2D_array('randoms',randoms_array[z,:,:])\nacq_model.set_background_term(randoms)\n\n# MR guidance\nmr_image = ImageData(data_path + mr_file)\nmr_array = mr_image.as_array()\nshow_2D_array('MR image',mr_array[45,110:220,115:225])\n\n\n#%%\n# define objective function to be maximized as\n# Poisson logarithmic likelihood (with linear model for mean)\nobj_fun = make_Poisson_loglikelihood(acq_data)\nobj_fun.set_acquisition_model(acq_model)\n\n#%%\n\n# create initial image estimate from one iteration of MLEM\nrecon_init = OSMAPOSLReconstructor()\nrecon_init.set_objective_function(obj_fun)\n \nrecon_init.set_num_subsets(1)\nrecon_init.set_num_subiterations(1)\nnxny = (344, 344, 127)\ninitial_image = acq_data.create_uniform_image(1.0, nxny)\n\nimage=initial_image\n\nrecon_init.set_up(image)\n\nrecon_init.set_current_estimate(image)\n\nrecon_init.process()\n\nimage = recon_init.get_current_estimate()\n\n\n# %% bit more prep\n\n# create filter that zeroes the image outside a cylinder of the same\n# diameter as the image xy-section size\nfilter = TruncateToCylinderProcessor()\n\n# filter image estimate to FOV\nfilter.apply(image)\n\n# get the full sensitivity image\nobj_fun2 = make_Poisson_loglikelihood(acq_data)\nobj_fun2.set_acquisition_model(acq_model)\nobj_fun2.set_num_subsets(1)\nobj_fun2.set_up(image)\nsensitivity_image = obj_fun2.get_subset_sensitivity(0)\n\n\n# %% guided reconstruction\n\n# create a Prior for computing Bowsher weights\nmyPrior = pr.Prior(sensitivity_image.as_array().shape)\nweights = myPrior.BowshserWeights(mr_array,7)\nweights = np.float32(weights/7.0)\n\nimage_guided = my_dePierroMap(image, obj_fun, 50000, filter, weights, sensitivity_image)\nimage_array_guided = image_guided.as_array()\nshow_2D_array('Reconstructed guided', image_array_guided[45,110:220,115:225])\n\nimage_guided.write('output_images/image_guided.v')\n\n## %% OSEM reconstruction (beta = 0)\n#\n#image_OSEM = my_dePierroMap(image, obj_fun, 0, filter, weights, sensitivity_image)\n#image_array_OSEM = image_OSEM.as_array()\n#show_2D_array('Reconstructed OSEM image', image_array_OSEM[45,110:220,115:225])\n#\n#image_OSEM.write('output_images/image_OSEM.v')\n#\n## %% unguided reconstruction\n#\n## uniform weights\n#weights = np.ones([image.as_array().size,27],dtype='float')\n#weights = np.float32(weights/27.0)\n#\n#image_unguided = my_dePierroMap(image, obj_fun, 50000, filter, weights, sensitivity_image)\n#image_array_unguided = image_unguided.as_array()\n#show_2D_array('Reconstructed unguided', image_array_unguided[45,110:220,115:225])\n#\n#image_unguided.write('output_images/image_unguided.v')\n\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.floor", "numpy.float32", "numpy.mod", "numpy.arange" ] ]
hojeong3709/RL
[ "a1c6eab8c3e7f2487e527fe68658d13eea5334af" ]
[ "lecture/1. policy-iteration/environment.py" ]
[ "import tkinter as tk\nfrom tkinter import Button\nimport time\nimport numpy as np\nfrom PIL import ImageTk, Image\n\nPhotoImage = ImageTk.PhotoImage\nUNIT = 100 # 픽셀 수\nHEIGHT = 5 # 그리드월드 세로\nWIDTH = 5 # 그리드월드 가로\nTRANSITION_PROB = 1 #상태 변환 확률 값\nPOSSIBLE_ACTIONS = [0, 1, 2, 3] # 상, 하, 좌, 우\nACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)] # 좌표로 나타낸 행동\nREWARDS = []\n\n\nclass GraphicDisplay(tk.Tk):\n def __init__(self, agent):\n super(GraphicDisplay, self).__init__()\n self.title('Policy Iteration')\n self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT + 50))\n self.texts = [] #v값 표시\n self.arrows = []#진행 방향 표시\n self.env = Env()\n self.agent = agent #policyIteration\n self.evaluation_count = 0 #정책 평가 버튼 누른 횟 수\n self.improvement_count = 0#정책 발전 버튼 누른 횟 수\n self.is_moving = 0 #현재 move 중 인지를 나타내는 값\n (self.up, self.down, self.left, self.right), self.shapes = self.load_images()\n self.canvas = self._build_canvas()\n self.text_reward(2, 2, \"R : 1.0\")\n self.text_reward(1, 2, \"R : -1.0\")\n self.text_reward(2, 1, \"R : -1.0\")\n\n def _build_canvas(self):\n canvas = tk.Canvas(self, bg='white',\n height=HEIGHT * UNIT,\n width=WIDTH * UNIT)\n # 버튼 초기화\n iteration_button = Button(self, text=\"Evaluate\",\n command=self.evaluate_policy)\n iteration_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.13, HEIGHT * UNIT + 10,\n window=iteration_button)\n policy_button = Button(self, text=\"Improve\",\n command=self.improve_policy)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.37, HEIGHT * UNIT + 10,\n window=policy_button)\n policy_button = Button(self, text=\"move\", command=self.move_by_policy)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.62, HEIGHT * UNIT + 10,\n window=policy_button)\n policy_button = Button(self, text=\"reset\", command=self.reset)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.87, HEIGHT * UNIT + 10,\n window=policy_button)\n\n # 그리드 생성\n for col in range(0, WIDTH * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = col, 0, col, HEIGHT * UNIT\n canvas.create_line(x0, y0, x1, y1)\n for row in range(0, HEIGHT * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = 0, row, HEIGHT * UNIT, row\n canvas.create_line(x0, y0, x1, y1)\n\n # 캔버스에 이미지 추가\n self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])\n canvas.create_image(250, 150, image=self.shapes[1])\n canvas.create_image(150, 250, image=self.shapes[1])\n canvas.create_image(250, 250, image=self.shapes[2])\n\n canvas.pack()\n\n return canvas\n\n def load_images(self):\n up = PhotoImage(Image.open(\"../img/up.png\").resize((13, 13)))\n right = PhotoImage(Image.open(\"../img/right.png\").resize((13, 13)))\n left = PhotoImage(Image.open(\"../img/left.png\").resize((13, 13)))\n down = PhotoImage(Image.open(\"../img/down.png\").resize((13, 13)))\n rectangle = PhotoImage(Image.open(\"../img/rectangle.png\").resize((65, 65)))\n triangle = PhotoImage(Image.open(\"../img/triangle.png\").resize((65, 65)))\n circle = PhotoImage(Image.open(\"../img/circle.png\").resize((65, 65)))\n return (up, down, left, right), (rectangle, triangle, circle)\n\n def reset(self):\n if self.is_moving == 0:\n self.evaluation_count = 0\n self.improvement_count = 0\n for i in self.texts:\n self.canvas.delete(i)\n\n for i in self.arrows:\n self.canvas.delete(i)\n self.agent.value_table = [[0.0] * WIDTH for _ in range(HEIGHT)]\n 
self.agent.policy_table = ([[[0.25, 0.25, 0.25, 0.25]] * WIDTH\n for _ in range(HEIGHT)])\n self.agent.policy_table[2][2] = []\n x, y = self.canvas.coords(self.rectangle)\n self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)\n\n def text_value(self, row, col, contents, font='Helvetica', size=10,\n style='normal', anchor=\"nw\"):\n origin_x, origin_y = 85, 70\n x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\n font = (font, str(size), style)\n text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\n font=font, anchor=anchor)\n return self.texts.append(text)\n\n def text_reward(self, row, col, contents, font='Helvetica', size=10,\n style='normal', anchor=\"nw\"):\n origin_x, origin_y = 5, 5\n x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\n font = (font, str(size), style)\n text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\n font=font, anchor=anchor)\n return self.texts.append(text)\n\n def rectangle_move(self, action):\n base_action = np.array([0, 0])\n location = self.find_rectangle()\n self.render()\n if action == 0 and location[0] > 0: # 상\n base_action[1] -= UNIT\n elif action == 1 and location[0] < HEIGHT - 1: # 하\n base_action[1] += UNIT\n elif action == 2 and location[1] > 0: # 좌\n base_action[0] -= UNIT\n elif action == 3 and location[1] < WIDTH - 1: # 우\n base_action[0] += UNIT\n # move agent\n self.canvas.move(self.rectangle, base_action[0], base_action[1])\n\n #진행 중인 사각형 위치를 반환 ex) 2,3\n def find_rectangle(self):\n temp = self.canvas.coords(self.rectangle)\n x = (temp[0] / 100) - 0.5\n y = (temp[1] / 100) - 0.5\n return int(y), int(x)\n\n def move_by_policy(self):\n if self.improvement_count != 0 and self.is_moving != 1:\n self.is_moving = 1\n\n x, y = self.canvas.coords(self.rectangle) #좌상단 우하단 좌표\n self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)#사각형은 x,y 좌표 만큼 이동\n\n x, y = self.find_rectangle()\n while len(self.agent.policy_table[x][y]) != 0:\n self.after(100,\n self.rectangle_move(self.agent.get_action([x, y])))\n x, y = self.find_rectangle()\n self.is_moving = 0\n\n def draw_one_arrow(self, col, row, policy):\n if col == 2 and row == 2:\n return\n\n if policy[0] > 0: # up\n origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.up))\n if policy[1] > 0: # down\n origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.down))\n if policy[2] > 0: # left\n origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.left))\n if policy[3] > 0: # right\n origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.right))\n\n def draw_from_policy(self, policy_table):\n for i in range(HEIGHT):\n for j in range(WIDTH):\n self.draw_one_arrow(i, j, policy_table[i][j])\n\n def print_value_table(self, value_table):\n for i in range(WIDTH):\n for j in range(HEIGHT):\n self.text_value(i, j, value_table[i][j])\n\n def render(self):\n time.sleep(0.1)\n self.canvas.tag_raise(self.rectangle)\n self.update()\n\n def evaluate_policy(self):\n self.evaluation_count += 1\n for i in self.texts:\n self.canvas.delete(i)\n self.agent.policy_evaluation()\n self.print_value_table(self.agent.value_table)\n\n def improve_policy(self):\n self.improvement_count += 1\n for i in self.arrows:\n self.canvas.delete(i)\n 
self.agent.policy_improvement()\n self.draw_from_policy(self.agent.policy_table)\n\n\nclass Env:\n def __init__(self):\n self.transition_probability = TRANSITION_PROB\n self.width = WIDTH\n self.height = HEIGHT\n self.reward = [[0] * WIDTH for _ in range(HEIGHT)] #보상은 0으로 초기화\n self.possible_actions = POSSIBLE_ACTIONS # 4방향으로 초기화\n self.reward[2][2] = 1 # (2,2) 좌표 동그라미 위치에 보상 1\n self.reward[1][2] = -1 # (1,2) 좌표 세모 위치에 보상 -1\n self.reward[2][1] = -1 # (2,1) 좌표 세모 위치에 보상 -1\n self.all_state = [] #모든 상태값 저장\n\n #모든 상태값을 저장한다.\n for x in range(WIDTH):\n for y in range(HEIGHT):\n state = [x, y]\n self.all_state.append(state)\n\n def get_reward(self, state, action):\n next_state = self.state_after_action(state, action)\n return self.reward[next_state[0]][next_state[1]]#[상태][행동]\n\n def state_after_action(self, state, action_index):\n action = ACTIONS[action_index]\n return self.check_boundary([state[0] + action[0], state[1] + action[1]])\n\n #범위를 벗어나는 지 확인\n @staticmethod\n def check_boundary(state):\n state[0] = (0 if state[0] < 0 else WIDTH - 1\n if state[0] > WIDTH - 1 else state[0])\n state[1] = (0 if state[1] < 0 else HEIGHT - 1\n if state[1] > HEIGHT - 1 else state[1])\n return state\n\n def get_transition_prob(self, state, action):\n return self.transition_probability\n\n def get_all_states(self):\n return self.all_state\n" ]
[ [ "numpy.array" ] ]
ZhuokunYao/smoke
[ "d524fbe43b1aba6078c25d9aca7924b71a635e1d" ]
[ "smoke/layers/utils.py" ]
[ "import torch\nfrom torch.nn import functional as F\n\n\ndef sigmoid_hm(hm_features, training=False):\n x = hm_features.sigmoid_()\n if training:\n x = x.clamp(min=1e-4, max=1 - 1e-4)\n return x\n\n\ndef nms_hm(heat_map, kernel=3):\n pad = (kernel - 1) // 2\n hmax = F.max_pool2d(heat_map,\n kernel_size=(kernel, kernel),\n stride=1,\n padding=pad)\n eq_index = torch.floor(heat_map - hmax) + 1.0\n\n return heat_map * eq_index\n\n\ndef select_topk(heat_map, K=100):\n '''\n Args:\n heat_map: heat_map in [N, C, H, W]\n K: top k samples to be selected\n score: detection threshold\n\n Returns:\n\n '''\n batch, cls, height, width = heat_map.size()\n\n # First select topk scores in all classes and batchs\n # [N, C, H, W] -----> [N, C, H*W]\n heat_map = heat_map.view(batch, cls, -1)\n # Both in [N, C, K] top K of each class, K each class\n topk_scores_all, topk_inds_all = torch.topk(heat_map, K)\n\n # topk_inds_all = topk_inds_all % (height * width) # todo: this seems redudant\n # [N, C, K]\n topk_ys = (topk_inds_all / width).float()\n topk_xs = (topk_inds_all % width).float()\n\n assert isinstance(topk_xs, torch.cuda.FloatTensor)\n assert isinstance(topk_ys, torch.cuda.FloatTensor)\n\n # Select topK examples across channel\n # [N, C, K] -----> [N, C*K]\n topk_scores_all = topk_scores_all.view(batch, -1)\n # Both in [N, K]\n topk_scores, topk_inds = torch.topk(topk_scores_all, K)\n topk_clses = (topk_inds / K).float()\n\n assert isinstance(topk_clses, torch.cuda.FloatTensor)\n\n # First expand it as 3 dimension\n topk_inds_all = _gather_feat(topk_inds_all.view(batch, -1, 1), topk_inds).view(batch, K)\n topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_inds).view(batch, K)\n topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_inds).view(batch, K)\n # bs,50 bs,50 bs,50 bs,50 bs,50\n return topk_scores, topk_inds_all, topk_clses, topk_ys, topk_xs\n\n\ndef _gather_feat(feat, ind):\n '''\n Select specific indexs on featuremap\n Args:\n feat: all results in 3 dimensions\n ind: positive index\n\n Returns:\n\n '''\n channel = feat.size(-1)\n ind = ind.unsqueeze(-1).expand(ind.size(0), ind.size(1), channel)\n feat = feat.gather(1, ind)\n\n return feat\n# batch: 100\n# index: bs, 100, 2\n# feature_maps: bs, 9 * 6, h, w\n# target_cls: bs, 100\n# cls_num: 6\ndef select_point_of_interest(batch, index, feature_maps, target_cls, cls_num):\n '''\n Select POI(point of interest) on feature map\n Args:\n batch: batch size\n index: in point format or index format\n feature_maps: regression feature map in [N, C, H, W]\n\n Returns:\n\n '''\n w = feature_maps.shape[3]\n # bs, 100, 2\n if len(index.shape) == 3:\n index = index[:, :, 1] * w + index[:, :, 0]\n # bs, 100\n index = index.view(batch, -1)\n \n # [N, 9 * 6, H, W] -----> [N, H, W, 9 * 6]\n feature_maps = feature_maps.permute(0, 2, 3, 1).contiguous()\n channel = feature_maps.shape[-1]\n # [N, H, W, C] -----> [N, H*W, C]\n feature_maps = feature_maps.view(batch, -1, channel)\n # expand index in channels\n # bs, 100, C\n index = index.unsqueeze(-1).repeat(1, 1, channel)\n # select specific features bases on POIs\n feature_maps = feature_maps.gather(1, index.long()) # bs, 100, 9 * 6\n\n feature_maps = feature_maps.view(batch, feature_maps.shape[1], cls_num, -1) # bs, 100, 6, 9\n cls_index = target_cls.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,feature_maps.shape[-1]) # bs, 100, 1, 9\n\n feature_maps = feature_maps.gather(2, cls_index.long()).squeeze(2) # bs, 100, 9\n\n return feature_maps\n" ]
[ [ "torch.topk", "torch.floor", "torch.nn.functional.max_pool2d" ] ]
aviolinist/EEE
[ "032e2029815229875048cc92dd7da24ff3f71e93" ]
[ "codes/lib/position.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\nimport numpy as np\n\n__all__ = ['position']\n\ndef position(row=1, col=1, num=1,\n left=0.125, right=0.9, bottom=0.1, top=0.9,\n hspace=0.1, vspace=None, wspace=None,\n width=None, height=None,\n sortcol=False, golden=False, inversegolden=False,\n figsize=(1.,1.)):\n \"\"\"\n Gives positions of subplots.\n To be used with add_axes instead of subplot.\n\n All dimensions are fractions of the figure width or height.\n Figure and subplot spaces are the same as for figure.subplotparams\n except for hspace and vspace, which are halved.\n\n If the figsize keyword is given, a rectangular section of the figure\n will be used.\n\n\n Definition\n ----------\n def position(row=1, col=1, num=1,\n left=0.125, right=0.9, bottom=0.1, top=0.9,\n hspace=0.1, vspace=None, wspace=None,\n width=None, height=None,\n sortcol=False, golden=False, inversegolden=False,\n figsize=(1.,1.)):\n\n\n Optional Input\n --------------\n row number of subplot rows (default 1)\n col number of subplot columns (default 1)\n num subplot number (default 1)\n left left border of plot (default 0.125)\n right right border of plot (default 0.9)\n bottom bottom border of plot (default 0.1)\n top top border of plot (default 0.9)\n hspace space between columns (default 0.1)\n vspace space between rows (default 0.1)\n wspace historical, same as vspace; will be overwritten by vspace\n width prescribe width of plots (default None)\n height prescribe height of plots (default None)\n sortcol fill columns then rows (default False)\n golden golden ratio of width/height = (1+sqrt(5))/2\n (default False)\n inversegolden golden ratio of height/width\n (overwritten by golden) (default False)\n figsize (width, height) of figure as given by e.g.\n matplotlib.rcParams['figure.figsize'].\n Scales everything to rectangular section\n (default (1,1))\n\n\n Output\n ------\n position array with [left, bottom, width, height)\n to be used with fig.add_axes.\n\n\n Examples\n --------\n # Use, for example, as follows\n # fig1 = figure(1)\n # sub1 = fig1.add_axes(position(2,2,1))\n # sub2 = fig1.add_axes(position(2,2,2))\n\n # if you want to have a true rectangle\n # figsize = matplotlib.rcParams['figure.figsize']\n # sub = fig1.add_axes(position(1,1,1,figsize=figsize,left=0.1))\n\n # if you want to have a true golden ratio\n # sub = fig1.add_axes(position(1,1,1,figsize=figsize,golden=True))\n\n # Doctest examples\n >>> from autostring import astr\n >>> print(astr(position(2,2,1),3,pp=True))\n ['0.125' '0.550' '0.338' '0.350']\n >>> print(astr(position(2,2,1,sortcol=True),3,pp=True))\n ['0.125' '0.550' '0.338' '0.350']\n >>> print(astr(position(2,2,1,golden=True),3,pp=True))\n ['0.125' '0.409' '0.338' '0.209']\n >>> print(astr(position(2,2,1,inversegolden=True),3,pp=True))\n ['0.125' '0.550' '0.216' '0.350']\n >>> print(astr(position(2,2,1,golden=True,sortcol=True),3,pp=True))\n ['0.125' '0.409' '0.338' '0.209']\n >>> print(astr(position(2,2,1,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.000' '0.500' '0.500' '0.500']\n >>> print(astr(position(2,2,2,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.500' '0.500' '0.500' '0.500']\n >>> print(astr(position(2,2,3,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.000' '0.000' '0.500' '0.500']\n >>> print(astr(position(2,2,4,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.500' '0.000' '0.500' '0.500']\n >>> 
print(astr(position(2,2,1,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.000' '0.309' '0.500' '0.309']\n >>> print(astr(position(2,2,2,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.500' '0.309' '0.500' '0.309']\n >>> print(astr(position(2,2,3,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.000' '0.000' '0.500' '0.309']\n >>> print(astr(position(2,2,4,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.500' '0.000' '0.500' '0.309']\n >>> figsize=[8,11]\n >>> print(astr(position(2,2,1,golden=True,sortcol=True,figsize=figsize),3,pp=True))\n ['0.125' '0.324' '0.338' '0.152']\n >>> print(astr(position(2,2,1,figsize=figsize,left=0.1),3,pp=True))\n ['0.100' '0.427' '0.350' '0.255']\n >>> print(astr(position(2,2,1,figsize=figsize,left=0.1,golden=True),3,pp=True))\n ['0.100' '0.330' '0.350' '0.157']\n\n\n License\n -------\n This file is part of the JAMS Python package, distributed under the MIT License.\n\n Copyright (c) 2009-2016 Matthias Cuntz - mc (at) macu (dot) de\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n\n History\n -------\n Written, MC, Aug 2009\n Modified, MC, Feb 2013 - ported to Python 3\n MC, Jul 2013 - vspace, wspace obsolete\n MC, Apr 2014 - assert\n ST, Feb 2016 - added height and width\n \"\"\"\n #\n # Check\n nplots = row*col\n assert num <= nplots, 'num > number of plots: '+str(num)+' > '+str(nplots)\n assert right-left > 0., 'right > left: '+str(right)+' > '+str(left)\n assert top-bottom > 0., 'top < bottom: '+str(top)+' < '+str(bottom)\n if vspace != None:\n ivspace = vspace\n elif wspace != None:\n ivspace = wspace\n else:\n ivspace = 0.1\n #\n # Scaling to figsize\n scalex = figsize[1]/float(max(figsize))\n scaley = figsize[0]/float(max(figsize))\n #\n # width, height\n if width is None:\n dx = (right-left-(col-1)*hspace)/col\n else:\n dx = width\n if height is None:\n dy = (top-bottom-(row-1)*ivspace)/row\n else:\n dy = height\n #\n # golden ratio\n ratio = (1.+np.sqrt(5.))/2.\n if golden:\n width = dx\n height = dx / ratio\n checkheight = (top-bottom-row*height) - (row-1)*ivspace\n if checkheight < 0.:\n height = dy\n width = dy * ratio\n checkwidth = (right-left-col*width) - (col-1)*hspace\n if checkwidth < 0.:\n raise ValueError('golden ratio does not work. 
Have to recode.')\n else:\n if inversegolden:\n height = dy\n width = dy / ratio\n checkwidth = (right-left-col*width) - (col-1)*hspace\n if checkwidth < 0.:\n width = dx\n height = dx * ratio\n checkheight = (top-bottom-row*height) - (row-1)*ivspace\n if checkheight < 0.:\n raise ValueError('inverse golden ratio does not work. Have to recode.')\n else:\n width = dx\n height = dy\n #\n # order row/colmn, column/row\n if sortcol:\n irow = (num-1) % row\n icol = (num-1) // row\n else:\n irow = (num-1) // col\n icol = (num-1) % col\n #\n # position\n pos = np.empty(4)\n pos[0] = left + icol*(width+hspace) *scalex\n pos[1] = bottom + (row-1-irow)*(height+ivspace) *scaley\n pos[2] = width *scalex\n pos[3] = height *scaley\n #\n return pos\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)\n\n" ]
[ [ "numpy.sqrt", "numpy.empty" ] ]
ConeyLiu/oap-raydp
[ "3fe728f01dbb6494d94c4abd65bc9aacff771080" ]
[ "python/raydp/spark/torch/dataset.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom collections.abc import Iterable\nfrom typing import Any, List, Optional\n\nimport numpy as np\nimport pandas\nimport torch\nfrom torch.utils.data import Dataset, DistributedSampler\n\nfrom raydp.spark.context import save_to_ray\nfrom raydp.spark.resource_manager.exchanger import SharedDataset\nfrom raydp.spark.utils import BLOCK_SIZE_BIT, divide_blocks\n\n\nclass _Dataset(Dataset):\n def __init__(self,\n feature_columns: List[str] = None,\n feature_shapes: Optional[List[Any]] = None,\n feature_types: Optional[List[torch.dtype]] = None,\n label_column: str = None,\n label_type: Optional[torch.dtype] = None):\n \"\"\"\n :param feature_columns: the feature columns in df\n :param feature_shapes: the each feature shape that need to return when loading this\n dataset. If it is not None, it's size must match the size of feature_columns.\n If it is None, we guess all are scalar value and return all as a tensor when\n loading this dataset.\n :param feature_types: the feature types. All will be casted into torch.float by default\n :param label_column: the label column in df\n :param label_type: the label type. 
It will be casted into torch.float by default.\n \"\"\"\n super(_Dataset, self).__init__()\n self._feature_columns = feature_columns\n self._feature_shapes = feature_shapes\n self._feature_types = feature_types\n self._label_column = label_column\n self._label_type = label_type\n\n self._feature_tensor = None\n self._label_tensor = None\n\n def _check_and_convert(self):\n # convert to list for convenience\n if not isinstance(self._feature_columns, List):\n self._feature_columns = [self._feature_columns]\n\n if self._feature_shapes:\n if not isinstance(self._feature_shapes, list):\n self._feature_shapes = [self._feature_shapes]\n\n assert len(self._feature_columns) == len(self._feature_shapes), \\\n \"The feature_shapes size must match the feature_columns\"\n for i in range(len(self._feature_shapes)):\n if not isinstance(self._feature_shapes[i], Iterable):\n self._feature_shapes[i] = [self._feature_shapes[i]]\n\n if self._feature_types:\n if not isinstance(self._feature_types, list):\n self._feature_types = [self._feature_types]\n\n assert len(self._feature_columns) == len(self._feature_types), \\\n \"The feature_types size must match the feature_columns\"\n for i in range(len(self._feature_types)):\n assert all(isinstance(dtype, torch.dtype) for dtype in self._feature_types), \\\n \"All value in feature_types should be torch.dtype instance\"\n\n if not self._feature_shapes and self._feature_types:\n assert all(dtype == self._feature_types[0] for dtype in self._feature_types), \\\n \"All dtypes should be same when feature_shapes doesn't provide\"\n\n if not self._feature_types:\n self._feature_types = [torch.float] * len(self._feature_columns)\n\n if not self._label_type:\n self._label_type = torch.float\n\n def _convert_to_tensor(self, df):\n if self._feature_shapes:\n tensors = []\n for col, shape, dtype in zip(self._feature_columns, self._feature_shapes,\n self._feature_types):\n column = df[col].values\n if column.dtype == np.object:\n if isinstance(column[0], np.ndarray):\n column = np.stack(column)\n elif isinstance(column[0], (list, tuple)):\n column = list(column)\n else:\n raise Exception(\n f\"Column {col}'s type: {type(column[0])} is not supported. 
It must \"\n \"be numpy built in type or numpy object of (ndarray, list, tuple)\")\n\n t = torch.as_tensor(column, dtype=dtype)\n if shape != [0]:\n t = t.view(*(-1, *shape))\n tensors.append(t)\n self._feature_tensor = tensors\n else:\n feature_columns = (self._feature_columns if\n len(self._feature_columns) > 1 else self._feature_columns[0])\n feature_df = df[feature_columns].values\n t = torch.as_tensor(feature_df, dtype=self._feature_types[0])\n self._feature_tensor = [t]\n\n label_df = df[self._label_column].values\n self._label_tensor = torch.as_tensor(label_df, dtype=self._label_type)\n\n def _get_next(self, index):\n label = self._label_tensor[index]\n features = [tensor[index] for tensor in self._feature_tensor]\n return (*features, label)\n\n\nclass RayDataset(_Dataset):\n \"\"\"\n Store Spark DataFrame or koalas.DataFrame into ray object store and wrap into a torch\n Dataset which could be used by torch DataLoader.\n \"\"\"\n def __init__(self,\n df: Any = None,\n feature_columns: List[str] = None,\n feature_shapes: Optional[List[Any]] = None,\n feature_types: Optional[List[torch.dtype]] = None,\n label_column: str = None,\n label_type: Optional[torch.dtype] = None):\n \"\"\"\n :param df: Spark DataFrame or Koalas.DataFrame\n \"\"\"\n super(RayDataset, self).__init__(feature_columns, feature_shapes,\n feature_types, label_column, label_type)\n self._unresolved_shared_dataset: SharedDataset = None\n self._resolved_shared_dataset: SharedDataset = None\n self._previous_block_index = -1\n\n self._check_and_convert()\n\n if df is not None:\n self._unresolved_shared_dataset = save_to_ray(df)\n\n def _resolve_with_indices(self,\n indices: List[int],\n plasma_store_socket_name: Optional[str]):\n resolved_shared_dataset = self._unresolved_shared_dataset.subset(indices)\n resolved_shared_dataset.set_plasma_store_socket_name(plasma_store_socket_name)\n resolved_shared_dataset.resolve()\n self._resolved_shared_dataset = resolved_shared_dataset\n\n def __getitem__(self, index):\n block_index = index >> BLOCK_SIZE_BIT\n block_inner_index = (block_index << BLOCK_SIZE_BIT) ^ index\n if block_index != self._previous_block_index:\n self._previous_block_index = block_index\n df = self._resolved_shared_dataset[block_index]\n self._convert_to_tensor(df)\n return self._get_next(block_inner_index)\n\n def __len__(self):\n \"\"\"Get the total size\"\"\"\n return self._unresolved_shared_dataset.total_size()\n\n def block_sizes(self) -> List[int]:\n \"\"\"Get the block sizes\"\"\"\n return self._unresolved_shared_dataset.partition_sizes()\n\n @classmethod\n def _custom_deserialize(cls,\n data_set: SharedDataset,\n feature_columns: List[str],\n feature_shapes: List[Any],\n feature_types: List[torch.dtype],\n label_column: str,\n label_type: torch.dtype):\n instance = cls(\n None, feature_columns, feature_shapes, feature_types, label_column, label_type)\n instance._unresolved_shared_dataset = data_set\n return instance\n\n def __reduce__(self):\n return (RayDataset._custom_deserialize,\n (self._unresolved_shared_dataset, self._feature_columns, self._feature_shapes,\n self._feature_types, self._label_column, self._label_type))\n\n\nclass BlockSetSampler(DistributedSampler):\n \"\"\"\n A distributed sampler for BlockSet.\n\n We will shuffle the blocks order and then shuffle the block inner if shuffle is set to True.\n \"\"\"\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, init_lazy=True):\n assert isinstance(dataset, RayDataset)\n self._args = (dataset, num_replicas, rank, 
shuffle)\n self._inited = False\n\n self._block_indices = None\n self._selected_indices = None\n\n if not init_lazy:\n self._init_lazy()\n\n def _init_lazy(self):\n \"\"\"\n This is a workaround because of ray sgd call initialize the data creator before of\n setup distributed components.\n \"\"\"\n if not self._inited:\n super(BlockSetSampler, self).__init__(*self._args)\n self._split_blocks()\n self._inited = True\n\n def _split_blocks(self):\n block_indexes, packed_selected_indexes = divide_blocks(\n self.dataset.block_sizes(), self.num_replicas, self.rank, self.shuffle)\n self._block_indices = block_indexes\n self._selected_indices = packed_selected_indexes\n\n def resolve(self, plasma_store_socket_name: Optional[str] = None):\n \"\"\"Manually trigger the underlying object transfer.\"\"\"\n self._init_lazy()\n self.dataset._resolve_with_indices(self._block_indices,\n plasma_store_socket_name)\n\n @property\n def block_indices(self):\n return self._block_indices\n\n def __iter__(self):\n self.resolve()\n # deterministically shuffle based on epoch\n np.random.seed(self.epoch)\n block_indices = list(range(len(self._block_indices)))\n if self.shuffle:\n np.random.shuffle(block_indices)\n\n indices = []\n for index in block_indices:\n tmp = self._selected_indices[index]\n tmp = np.copy(tmp)\n if self.shuffle:\n np.random.shuffle(tmp)\n indices += tmp.tolist()\n\n return iter(indices)\n\n def __len__(self):\n # if we use `if sampler` to determine whether the sampler is None,\n # it will call this method. This can be happened when the BlockSetSampler\n # used in the evaluation in ray TorchTrainer.\n self._init_lazy()\n return self.num_samples\n\n\nclass PandasDataset(_Dataset):\n \"\"\"\n A pandas dataset which support feature columns with different shapes.\n \"\"\"\n def __init__(self,\n df: pandas.DataFrame = None,\n feature_columns: List[str] = None,\n feature_shapes: Optional[List[Any]] = None,\n feature_types: Optional[List[torch.dtype]] = None,\n label_column: str = None,\n label_type: Optional[torch.dtype] = None):\n \"\"\"\n :param df: pandas DataFrame\n \"\"\"\n super(PandasDataset, self).__init__(feature_columns, feature_shapes,\n feature_types, label_column, label_type)\n self._check_and_convert()\n\n self._size = len(df)\n self._convert_to_tensor(df)\n\n def __getitem__(self, index):\n return self._get_next(index)\n\n def __len__(self):\n return self._size\n" ]
[ [ "numpy.random.shuffle", "torch.as_tensor", "numpy.random.seed", "numpy.copy", "numpy.stack" ] ]
2anchao/NTS_NET
[ "3b0a58616cb4b44699a11541eac2777556169812" ]
[ "core/model.py" ]
[ "from torch import nn\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nfrom core import resnet\r\nimport numpy as np\r\nfrom core.anchors import generate_default_anchor_maps, hard_nms\r\nfrom config import Config as cfg\r\n\r\n\r\nclass ProposalNet(nn.Module):\r\n \"\"\"\r\n Navigator Network\r\n \"\"\"\r\n def __init__(self, in_channel=2048, inner_channel=128, out_channels=[6, 6, 9]):\r\n super(ProposalNet, self).__init__()\r\n self.down1 = nn.Conv2d(in_channel, inner_channel, 3, 1, 1)\r\n self.down2 = nn.Conv2d(inner_channel, inner_channel, 3, 2, 1)\r\n self.down3 = nn.Conv2d(inner_channel, inner_channel, 3, 2, 1)\r\n self.ReLU = nn.ReLU()\r\n\r\n self.tidy1 = nn.Conv2d(inner_channel, out_channels[0], 1, 1, 0) #32倍, 6 Anchor Box\r\n self.tidy2 = nn.Conv2d(inner_channel, out_channels[1], 1, 1, 0) #64倍, 6 Anchor Box\r\n self.tidy3 = nn.Conv2d(inner_channel, out_channels[2], 1, 1, 0) ##128倍, 9 Anchor Box\r\n\r\n def forward(self, x):\r\n batch_size = x.size(0)\r\n d1 = self.ReLU(self.down1(x)) # 32倍下采样, 14x14\r\n d2 = self.ReLU(self.down2(d1)) # 64倍下采样, 7x7\r\n d3 = self.ReLU(self.down3(d2)) # 128倍下采样, 4x4\r\n\r\n t1 = self.tidy1(d1).view(batch_size, -1)\r\n t2 = self.tidy2(d2).view(batch_size, -1)\r\n t3 = self.tidy3(d3).view(batch_size, -1)\r\n #一个像素点对应一个Anchor Box的得分\r\n return torch.cat((t1, t2, t3), dim=1)\r\n\r\n\r\nclass attention_net(nn.Module):\r\n def __init__(self, topN=4, num_class=196, fc_channel=2048, pad_side=224):\r\n super(attention_net, self).__init__()\r\n self.pretrained_model = resnet.resnet50(pretrained=True)\r\n self.pretrained_model.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.pretrained_model.fc = nn.Linear(fc_channel, num_class)\r\n self.proposal_net = ProposalNet()\r\n self.topN = topN\r\n #concat_net就是Scrutinizer Network\r\n self.concat_net = nn.Linear(fc_channel * (cfg.CAT_NUM + 1), num_class)\r\n #partcls_net就是Teacher Network\r\n self.partcls_net = nn.Linear(fc_channel, num_class)\r\n _, edge_anchors, _ = generate_default_anchor_maps()\r\n self.pad_side = pad_side\r\n #有padding操作,因为要抠图,所以anchor box坐标要更新\r\n self.edge_anchors = (edge_anchors + pad_side).astype(np.int)\r\n\r\n def forward(self, x):\r\n resnet_out, rpn_feature, feature = self.pretrained_model(x)\r\n x_pad = F.pad(x, (self.pad_side, self.pad_side, self.pad_side, self.pad_side), mode='constant', value=0)\r\n batch = x.size(0)\r\n # we will reshape rpn to shape: batch * nb_anchor\r\n rpn_score = self.proposal_net(rpn_feature.detach())\r\n all_cdds = [\r\n np.concatenate((x.reshape(-1, 1), self.edge_anchors.copy(), np.arange(0, len(x)).reshape(-1, 1)), axis=1)\r\n for x in rpn_score.data.cpu().numpy()]\r\n top_n_cdds = [hard_nms(x, topn=self.topN, iou_thresh=0.25) for x in all_cdds]\r\n top_n_cdds = np.array(top_n_cdds)\r\n top_n_index = top_n_cdds[:, :, -1].astype(np.int)\r\n top_n_index = torch.from_numpy(top_n_index).cuda()\r\n top_n_prob = torch.gather(rpn_score, dim=1, index=top_n_index)\r\n part_imgs = torch.zeros([batch, self.topN, 3, 224, 224]).cuda()\r\n for i in range(batch):\r\n for j in range(self.topN):\r\n [y0, x0, y1, x1] = top_n_cdds[i][j, 1:5].astype(np.int)\r\n part_imgs[i:i + 1, j] = F.interpolate(x_pad[i:i + 1, :, y0:y1, x0:x1], size=(224, 224), mode='bilinear',\r\n align_corners=True)\r\n part_imgs = part_imgs.view(batch * self.topN, 3, 224, 224)\r\n _, _, part_features = self.pretrained_model(part_imgs.detach())\r\n part_feature = part_features.view(batch, self.topN, -1)\r\n part_feature = part_feature[:, :cfg.CAT_NUM, 
...].contiguous()\r\n part_feature = part_feature.view(batch, -1)\r\n # concat_logits have the shape: B*num_class\r\n concat_out = torch.cat([part_feature, feature], dim=1)\r\n concat_logits = self.concat_net(concat_out)\r\n raw_logits = resnet_out\r\n # part_logits have the shape: B*N*num_class\r\n part_logits = self.partcls_net(part_features).view(batch, self.topN, -1)\r\n return [raw_logits, concat_logits, part_logits, top_n_index, top_n_prob]\r\n\r\n\r\ndef list_loss(logits, targets):\r\n temp = F.log_softmax(logits, -1)\r\n loss = [-temp[i][targets[i].item()] for i in range(logits.size(0))]\r\n #置信度越靠近1,Loss越靠近0\r\n return torch.stack(loss)\r\n\r\n\r\ndef ranking_loss(score, targets, proposal_num=cfg.PROPOSAL_NUM):\r\n loss = Variable(torch.zeros(1).cuda())\r\n batch_size = score.size(0)\r\n for i in range(proposal_num):\r\n targets_p = (targets > targets[:, i].unsqueeze(1)).type(torch.cuda.FloatTensor)\r\n pivot = score[:, i].unsqueeze(1)\r\n loss_p = (1 - pivot + score) * targets_p\r\n loss_p = torch.sum(F.relu(loss_p))\r\n loss += loss_p\r\n return loss / batch_size\r\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.stack", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.pad", "torch.zeros", "torch.gather", "torch.nn.functional.relu", "torch.from_numpy", "torch.nn.Conv2d", "numpy.array", "torch.nn.ReLU", "torch.cat", "torch.nn.functional.interpolate" ] ]
ProskuraPD/catboost
[ "d4593d4fbc8b8da66ff2d8b838578eba819d9d0d" ]
[ "catboost/python-package/catboost/core.py" ]
[ "import sys\nfrom copy import deepcopy\nfrom six import iteritems, string_types, integer_types\nimport os\nimport imp\nfrom collections import Iterable, Sequence, Mapping, MutableMapping\nimport warnings\nimport numpy as np\nimport ctypes\nimport platform\nimport tempfile\nfrom enum import Enum\nfrom operator import itemgetter\n\nif platform.system() == 'Linux':\n try:\n ctypes.CDLL('librt.so')\n except Exception:\n pass\n\ntry:\n from pandas import DataFrame, Series\nexcept ImportError:\n class DataFrame(object):\n pass\n\n class Series(object):\n pass\n\n\ndef get_so_paths(dir_name):\n dir_name = os.path.join(os.path.dirname(__file__), dir_name)\n list_dir = os.listdir(dir_name) if os.path.isdir(dir_name) else []\n return [os.path.join(dir_name, so_name) for so_name in list_dir if so_name.split('.')[-1] in ['so', 'pyd']]\n\n\ndef get_catboost_bin_module():\n if '_catboost' in sys.modules:\n return sys.modules['_catboost']\n so_paths = get_so_paths('./')\n for so_path in so_paths:\n try:\n loaded_catboost = imp.load_dynamic('_catboost', so_path)\n sys.modules['catboost._catboost'] = loaded_catboost\n return loaded_catboost\n except ImportError:\n pass\n import _catboost\n return _catboost\n\n\n_catboost = get_catboost_bin_module()\n_PoolBase = _catboost._PoolBase\n_CatBoost = _catboost._CatBoost\n_MetricCalcerBase = _catboost._MetricCalcerBase\n_cv = _catboost._cv\n_set_logger = _catboost._set_logger\n_reset_logger = _catboost._reset_logger\n_configure_malloc = _catboost._configure_malloc\nCatboostError = _catboost.CatboostError\n_metric_description_or_str_to_str = _catboost._metric_description_or_str_to_str\ncompute_wx_test = _catboost.compute_wx_test\nis_classification_objective = _catboost.is_classification_objective\nis_regression_objective = _catboost.is_regression_objective\n_PreprocessParams = _catboost._PreprocessParams\n_check_train_params = _catboost._check_train_params\n_MetadataHashProxy = _catboost._MetadataHashProxy\n_NumpyAwareEncoder = _catboost._NumpyAwareEncoder\nFeaturesData = _catboost.FeaturesData\n\n\nfrom contextlib import contextmanager # noqa E402\n\n\n_configure_malloc()\n_catboost._library_init()\n\nINTEGER_TYPES = (integer_types, np.integer)\nFLOAT_TYPES = (float, np.floating)\nSTRING_TYPES = (string_types,)\nARRAY_TYPES = (list, np.ndarray, DataFrame, Series)\n\n\n@contextmanager\ndef log_fixup():\n _set_logger(sys.stdout, sys.stderr)\n yield\n _reset_logger()\n\n\ndef _cast_to_base_types(value):\n # NOTE: Special case, avoiding new list creation.\n if isinstance(value, list):\n for index, element in enumerate(value):\n value[index] = _cast_to_base_types(element)\n return value\n if isinstance(value, ARRAY_TYPES[1:]):\n new_value = []\n for element in value:\n new_value.append(_cast_to_base_types(element))\n return new_value\n if isinstance(value, (Mapping, MutableMapping)):\n for key in list(value):\n value[key] = _cast_to_base_types(value[key])\n return value\n if isinstance(value, bool):\n return value\n if isinstance(value, INTEGER_TYPES):\n return int(value)\n if isinstance(value, FLOAT_TYPES):\n return float(value)\n return value\n\n\ndef metric_description_or_str_to_str(description):\n return _metric_description_or_str_to_str(description)\n\n\ndef _check_param_type(value, name, types, or_none=True):\n if not isinstance(value, types + ((type(None),) if or_none else ())):\n raise CatboostError('Parameter {} should have a type of {}, got {}'.format(name, [t.__class__.__name__ for t in types], type(value).__class__.__name__))\n\n\ndef 
_process_verbose(metric_period=None, verbose=None, logging_level=None, verbose_eval=None, silent=None):\n _check_param_type(metric_period, 'metric_period', (int,))\n _check_param_type(verbose, 'verbose', (bool, int))\n _check_param_type(logging_level, 'logging_level', (str,))\n _check_param_type(verbose_eval, 'verbose_eval', (bool, int))\n _check_param_type(silent, 'silent', (bool,))\n\n params = locals()\n exclusive_params = ['verbose', 'logging_level', 'verbose_eval', 'silent']\n at_most_one = sum([params[exclusive] is not None for exclusive in exclusive_params])\n if at_most_one > 1:\n raise CatboostError('Only one of parameters {} should be set'.format(exclusive_params.keys()))\n\n if verbose is None:\n if silent is not None:\n verbose = not silent\n elif verbose_eval is not None:\n verbose = verbose_eval\n if verbose is not None:\n logging_level = 'Verbose' if verbose else 'Silent'\n verbose = int(verbose)\n\n if isinstance(metric_period, int):\n if metric_period <= 0:\n raise CatboostError('metric_period should be positive.')\n if verbose is not None:\n if verbose % metric_period != 0:\n raise CatboostError('verbose should be a multiple of metric_period')\n\n return (metric_period, verbose, logging_level)\n\n\ndef enum_from_enum_or_str(enum_type, arg):\n if isinstance(arg, enum_type):\n return arg\n elif isinstance(arg, str):\n return enum_type[arg]\n else:\n raise Exception(\"can't create enum \" + str(enum_type) + \" from type \" + str(type(arg)))\n\n\nclass EFstrType(Enum):\n \"\"\"Calculate score for every feature.\"\"\"\n FeatureImportance = 0\n \"\"\"Calculate pairwise score between every feature.\"\"\"\n Interaction = 1\n \"\"\"Calculate SHAP Values for every object.\"\"\"\n ShapValues = 2\n\n\nclass Pool(_PoolBase):\n \"\"\"\n Pool used in CatBoost as data structure to train model from.\n \"\"\"\n\n def __init__(self, data, label=None, cat_features=None, column_description=None, pairs=None, delimiter='\\t',\n has_header=False, weight=None, group_id=None, group_weight=None, subgroup_id=None, pairs_weight=None, baseline=None,\n feature_names=None, thread_count=-1):\n \"\"\"\n Pool is a internal data structure that used by CatBoost.\n You can construct Pool from list, numpy.array, pandas.DataFrame, pandas.Series.\n\n Parameters\n ----------\n data : list or numpy.array or pandas.DataFrame or pandas.Series or FeaturesData or string\n Data source of Pool.\n If list or numpy.arrays or pandas.DataFrame or pandas.Series, giving 2 dimensional array like data.\n If FeaturesData - see FeaturesData description for details, 'cat_features' and 'feature_names'\n parameters must be equal to None in this case\n If string, giving the path to the file with data in catboost format.\n\n label : list or numpy.arrays or pandas.DataFrame or pandas.Series, optional (default=None)\n Label of the training data.\n If not None, giving 1 dimensional array like data with floats.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Must be None if 'data' parameter has FeatureData type\n\n column_description : string, optional (default=None)\n ColumnsDescription parameter.\n There are several columns description types: Label, Categ, Num, Auxiliary, DocId, Weight, Baseline, GroupId, Timestamp.\n All columns are Num as default, it's not necessary to specify\n this type of columns. 
Default Label column index is 0 (zero).\n If None, Label column is 0 (zero) as default, all data columns are Num as default.\n If string, giving the path to the file with ColumnsDescription in column_description format.\n\n pairs : list or numpy.array or pandas.DataFrame or string\n The pairs description.\n If list or numpy.arrays or pandas.DataFrame, giving 2 dimensional.\n The shape should be Nx2, where N is the pairs' count. The first element of pair is\n the index of winner object in training set. The second element of pair is\n the index of loser object in training set.\n If string, giving the path to the file with pairs description.\n\n delimiter : string, optional (default='\\t')\n Delimiter to use for separate features in file.\n Should be only one symbol, otherwise would be taken only the first character of the string.\n\n has_header : boolm optional (default=False)\n If True, read column names from first line.\n\n weight : list or numpy.array, optional (default=None)\n Weight for each instance.\n If not None, giving 1 dimensional array like data.\n\n group_id : list or numpy.array, optional (default=None)\n group id for each instance.\n If not None, giving 1 dimensional array like data.\n\n group_weight : list or numpy.array, optional (default=None)\n Group weight for each instance.\n If not None, giving 1 dimensional array like data.\n\n subgroup_id : list or numpy.array, optional (default=None)\n subgroup id for each instance.\n If not None, giving 1 dimensional array like data.\n\n pairs_weight : list or numpy.array, optional (default=None)\n Weight for each pair.\n If not None, giving 1 dimensional array like pairs.\n\n baseline : list or numpy.array, optional (default=None)\n Baseline for each instance.\n If not None, giving 2 dimensional array like data.\n\n feature_names : list, optional (default=None)\n Names for each given data_feature.\n Must be None if 'data' parameter has FeatureData type\n\n thread_count : int, optional (default=-1)\n Thread count to read data from file.\n Use only with reading data from file.\n If -1, then the number of threads is set to the number of cores.\n\n \"\"\"\n if data is not None:\n self._check_data_type(data, cat_features)\n self._check_data_empty(data)\n if pairs is not None and isinstance(data, STRING_TYPES) != isinstance(pairs, STRING_TYPES):\n raise CatboostError(\"data and pairs parameters should be the same types.\")\n if column_description is not None and not isinstance(data, STRING_TYPES):\n raise CatboostError(\"data should be the string type if column_description parameter is specified.\")\n if isinstance(data, STRING_TYPES):\n if any(v is not None for v in [cat_features, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names]):\n raise CatboostError(\"cat_features, weight, group_id, group_weight, subgroup_id, pairs_weight, \\\n baseline, feature_names should have the None type when the pool is read from the file.\")\n self._read(data, column_description, pairs, delimiter, has_header, thread_count)\n else:\n if isinstance(data, FeaturesData):\n if any(v is not None for v in [cat_features, feature_names]):\n raise CatboostError(\n \"cat_features, feature_names should have the None type when 'data' parameter \"\n \" has FeaturesData type\"\n )\n elif isinstance(data, np.ndarray):\n if (data.dtype == np.float32) and (cat_features is not None) and (len(cat_features) > 0):\n raise CatboostError(\n \"'data' is numpy array of np.float32, it means no categorical features,\"\n \" but 'cat_features' parameter 
specifies nonzero number of categorical features\"\n )\n\n self._init(data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names)\n super(Pool, self).__init__()\n\n def _check_files(self, data, column_description, pairs):\n \"\"\"\n Check files existence.\n \"\"\"\n if not os.path.isfile(data):\n raise CatboostError(\"Invalid data path='{}': file does not exist.\".format(data))\n if column_description is not None and not os.path.isfile(column_description):\n raise CatboostError(\"Invalid column_description path='{}': file does not exist.\".format(column_description))\n if pairs is not None and not os.path.isfile(pairs):\n raise CatboostError(\"Invalid pairs path='{}': file does not exist.\".format(pairs))\n\n def _check_delimiter(self, delimiter):\n if not isinstance(delimiter, STRING_TYPES):\n raise CatboostError(\"Invalid delimiter type={} : must be str().\".format(type(delimiter)))\n if len(delimiter) < 1:\n raise CatboostError(\"Invalid delimiter length={} : must be > 0.\".format(len(delimiter)))\n\n def _check_column_description_type(self, column_description):\n \"\"\"\n Check type of column_description parameter.\n \"\"\"\n if not isinstance(column_description, STRING_TYPES):\n raise CatboostError(\"Invalid column_description type={}: must be str().\".format(type(column_description)))\n\n def _check_cf_type(self, cat_features):\n \"\"\"\n Check type of cat_feature parameter.\n \"\"\"\n if not isinstance(cat_features, (list, np.ndarray)):\n raise CatboostError(\"Invalid cat_features type={}: must be list() or np.ndarray().\".format(type(cat_features)))\n\n def _check_cf_value(self, cat_features, features_count):\n \"\"\"\n Check values in cat_feature parameter. Must be int indices.\n \"\"\"\n for indx, feature in enumerate(cat_features):\n if not isinstance(feature, INTEGER_TYPES):\n raise CatboostError(\"Invalid cat_features[{}] = {} value type={}: must be int().\".format(indx, feature, type(feature)))\n if feature >= features_count:\n raise CatboostError(\"Invalid cat_features[{}] = {} value: must be < {}.\".format(indx, feature, features_count))\n\n def _check_pairs_type(self, pairs):\n \"\"\"\n Check type of pairs parameter.\n \"\"\"\n if not isinstance(pairs, (list, np.ndarray, DataFrame)):\n raise CatboostError(\"Invalid pairs type={}: must be list(), np.ndarray() or pd.DataFrame.\".format(type(pairs)))\n\n def _check_pairs_value(self, pairs):\n \"\"\"\n Check values in pairs parameter. 
Must be int indices.\n \"\"\"\n for pair_id, pair in enumerate(pairs):\n if (len(pair) != 2):\n raise CatboostError(\"Length of pairs[{}] isn't equal to 2.\".format(pair_id))\n for i, index in enumerate(pair):\n if not isinstance(index, INTEGER_TYPES):\n raise CatboostError(\"Invalid pairs[{}][{}] = {} value type={}: must be int().\".format(pair_id, i, index, type(index)))\n\n def _check_data_type(self, data, cat_features):\n \"\"\"\n Check type of data.\n \"\"\"\n if not isinstance(data, (STRING_TYPES, ARRAY_TYPES, FeaturesData)):\n raise CatboostError(\"Invalid data type={}: data must be list(), np.ndarray(), DataFrame(), Series(), FeaturesData or filename str().\".format(type(data)))\n\n def _check_data_empty(self, data):\n \"\"\"\n Check data is not empty (0 objects is ok).\n note: already checked if data is FeatureType, so no need to check again\n \"\"\"\n\n if isinstance(data, STRING_TYPES):\n if not data:\n raise CatboostError(\"Features filename is empty.\")\n elif isinstance(data, ARRAY_TYPES):\n data_shape = np.shape(data)\n if len(data_shape) == 1 and data_shape[0] > 0:\n if isinstance(data[0], Iterable):\n data_shape = tuple(data_shape + tuple([len(data[0])]))\n else:\n data_shape = tuple(data_shape + tuple([1]))\n if not len(data_shape) == 2:\n raise CatboostError(\"Input data has invalid shape: {}. Must be 2 dimensional\".format(data_shape))\n if data_shape[1] == 0:\n raise CatboostError(\"Input data must have at least one feature\")\n\n def _check_label_type(self, label):\n \"\"\"\n Check type of label.\n \"\"\"\n if not isinstance(label, ARRAY_TYPES):\n raise CatboostError(\"Invalid label type={}: must be array like.\".format(type(label)))\n\n def _check_label_empty(self, label):\n \"\"\"\n Check label is not empty.\n \"\"\"\n if len(label) == 0:\n raise CatboostError(\"Labels variable is empty.\")\n\n def _check_label_shape(self, label, samples_count):\n \"\"\"\n Check label length and dimension.\n \"\"\"\n if len(label) != samples_count:\n raise CatboostError(\"Length of label={} and length of data={} is different.\".format(len(label), samples_count))\n if isinstance(label[0], Iterable) and not isinstance(label[0], STRING_TYPES):\n if len(label[0]) > 1:\n raise CatboostError(\"Input label cannot have multiple values per row.\")\n\n def _check_baseline_type(self, baseline):\n \"\"\"\n Check type of baseline parameter.\n \"\"\"\n if not isinstance(baseline, ARRAY_TYPES):\n raise CatboostError(\"Invalid baseline type={}: must be array like.\".format(type(baseline)))\n\n def _check_baseline_shape(self, baseline, samples_count):\n \"\"\"\n Check baseline length and dimension.\n \"\"\"\n if len(baseline) != samples_count:\n raise CatboostError(\"Length of baseline={} and length of data={} are different.\".format(len(baseline), samples_count))\n if not isinstance(baseline[0], Iterable) or isinstance(baseline[0], STRING_TYPES):\n raise CatboostError(\"Baseline must be 2 dimensional data, 1 column for each class.\")\n try:\n if np.array(baseline).dtype not in (np.dtype('float'), np.dtype('float32'), np.dtype('int')):\n raise CatboostError()\n except CatboostError:\n raise CatboostError(\"Invalid baseline value type={}: must be float or int.\".format(np.array(baseline).dtype))\n\n def _check_weight_type(self, weight):\n \"\"\"\n Check type of weight parameter.\n \"\"\"\n if not isinstance(weight, ARRAY_TYPES):\n raise CatboostError(\"Invalid weight type={}: must be array like.\".format(type(weight)))\n\n def _check_weight_shape(self, weight, samples_count):\n \"\"\"\n Check weight 
length.\n \"\"\"\n if len(weight) != samples_count:\n raise CatboostError(\"Length of weight={} and length of data={} are different.\".format(len(weight), samples_count))\n if not isinstance(weight[0], (INTEGER_TYPES, FLOAT_TYPES)):\n raise CatboostError(\"Invalid weight value type={}: must be 1 dimensional data with int, float or long types.\".format(type(weight[0])))\n\n def _check_group_id_type(self, group_id):\n \"\"\"\n Check type of group_id parameter.\n \"\"\"\n if not isinstance(group_id, ARRAY_TYPES):\n raise CatboostError(\"Invalid group_id type={}: must be array like.\".format(type(group_id)))\n\n def _check_group_id_shape(self, group_id, samples_count):\n \"\"\"\n Check group_id length.\n \"\"\"\n if len(group_id) != samples_count:\n raise CatboostError(\"Length of group_id={} and length of data={} are different.\".format(len(group_id), samples_count))\n\n def _check_group_weight_type(self, group_weight):\n \"\"\"\n Check type of group_weight parameter.\n \"\"\"\n if not isinstance(group_weight, ARRAY_TYPES):\n raise CatboostError(\"Invalid group_weight type={}: must be array like.\".format(type(group_weight)))\n\n def _check_group_weight_shape(self, group_weight, samples_count):\n \"\"\"\n Check group_weight length.\n \"\"\"\n if len(group_weight) != samples_count:\n raise CatboostError(\"Length of group_weight={} and length of data={} are different.\".format(len(group_weight), samples_count))\n if not isinstance(group_weight[0], (FLOAT_TYPES)):\n raise CatboostError(\"Invalid group_weight value type={}: must be 1 dimensional data with float types.\".format(type(group_weight[0])))\n\n def _check_subgroup_id_type(self, subgroup_id):\n \"\"\"\n Check type of subgroup_id parameter.\n \"\"\"\n if not isinstance(subgroup_id, ARRAY_TYPES):\n raise CatboostError(\"Invalid subgroup_id type={}: must be array like.\".format(type(subgroup_id)))\n\n def _check_subgroup_id_shape(self, subgroup_id, samples_count):\n \"\"\"\n Check subgroup_id length.\n \"\"\"\n if len(subgroup_id) != samples_count:\n raise CatboostError(\"Length of subgroup_id={} and length of data={} are different.\".format(len(subgroup_id), samples_count))\n\n def _check_feature_names(self, feature_names, num_col=None):\n if num_col is None:\n num_col = self.num_col()\n if not isinstance(feature_names, Sequence):\n raise CatboostError(\"Invalid feature_names type={} : must be list\".format(type(feature_names)))\n if len(feature_names) != num_col:\n raise CatboostError(\"Invalid length feature_names={} : must be equal to number of columns in data={}\".format(len(feature_names), num_col))\n\n def _check_thread_count(self, thread_count):\n if not isinstance(thread_count, INTEGER_TYPES):\n raise CatboostError(\"Invalid thread_count type={} : must be int\".format(type(thread_count)))\n\n def slice(self, rindex):\n if not isinstance(rindex, ARRAY_TYPES):\n raise CatboostError(\"Invalid rindex type={} : must be list or numpy.array\".format(type(rindex)))\n slicedPool = Pool(None)\n slicedPool._take_slice(self, rindex)\n return slicedPool\n\n def set_pairs(self, pairs):\n self._check_pairs_type(pairs)\n if isinstance(pairs, DataFrame):\n pairs = pairs.values\n self._check_pairs_value(pairs)\n self._set_pairs(pairs)\n return self\n\n def set_feature_names(self, feature_names):\n self._check_feature_names(feature_names)\n self._set_feature_names(feature_names)\n return self\n\n def set_baseline(self, baseline):\n self._check_baseline_type(baseline)\n baseline = self._if_pandas_to_numpy(baseline)\n baseline = np.reshape(baseline, 
(self.num_row(), -1))\n self._check_baseline_shape(baseline, self.num_row())\n self._set_baseline(baseline)\n return self\n\n def set_weight(self, weight):\n self._check_weight_type(weight)\n weight = self._if_pandas_to_numpy(weight)\n self._check_weight_shape(weight, self.num_row())\n self._set_weight(weight)\n return self\n\n def set_group_id(self, group_id):\n self._check_group_id_type(group_id)\n group_id = self._if_pandas_to_numpy(group_id)\n self._check_group_id_shape(group_id, self.num_row())\n self._set_group_id(group_id)\n return self\n\n def set_group_weight(self, group_weight):\n self._check_group_weight_type(group_weight)\n group_weight = self._if_pandas_to_numpy(group_weight)\n self._check_group_weight_shape(group_weight, self.num_row())\n self._set_group_weight(group_weight)\n return self\n\n def set_subgroup_id(self, subgroup_id):\n self._check_subgroup_id_type(subgroup_id)\n subgroup_id = self._if_pandas_to_numpy(subgroup_id)\n self._check_subgroup_id_shape(subgroup_id, self.num_row())\n self._set_subgroup_id(subgroup_id)\n return self\n\n def set_pairs_weight(self, pairs_weight):\n self._check_weight_type(pairs_weight)\n pairs_weight = self._if_pandas_to_numpy(pairs_weight)\n self._check_weight_shape(pairs_weight, self.num_pairs())\n self._set_pairs_weight(pairs_weight)\n return self\n\n def _if_pandas_to_numpy(self, array):\n if isinstance(array, Series):\n array = array.values\n if isinstance(array, DataFrame):\n array = np.transpose(array.values)[0]\n return array\n\n def _read(self, pool_file, column_description, pairs, delimiter, has_header, thread_count):\n \"\"\"\n Read Pool from file.\n \"\"\"\n with log_fixup():\n self._check_files(pool_file, column_description, pairs)\n self._check_delimiter(delimiter)\n if column_description is None:\n column_description = ''\n else:\n self._check_column_description_type(column_description)\n if pairs is None:\n pairs = ''\n self._check_thread_count(thread_count)\n self._read_pool(pool_file, column_description, pairs, delimiter[0], has_header, thread_count)\n\n def _init(self, data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names):\n \"\"\"\n Initialize Pool from array like data.\n \"\"\"\n if isinstance(data, DataFrame):\n feature_names = list(data.columns)\n if isinstance(data, Series):\n data = data.values.tolist()\n if isinstance(data, FeaturesData):\n samples_count = data.get_object_count()\n features_count = data.get_feature_count()\n else:\n if len(np.shape(data)) == 1:\n data = np.expand_dims(data, 1)\n samples_count, features_count = np.shape(data)\n pairs_len = 0\n if label is not None:\n self._check_label_type(label)\n self._check_label_empty(label)\n label = self._if_pandas_to_numpy(label)\n self._check_label_shape(label, samples_count)\n if cat_features is not None:\n self._check_cf_type(cat_features)\n self._check_cf_value(cat_features, features_count)\n if pairs is not None:\n self._check_pairs_type(pairs)\n if isinstance(pairs, DataFrame):\n pairs = pairs.values\n self._check_pairs_value(pairs)\n pairs_len = np.shape(pairs)[0]\n if weight is not None:\n self._check_weight_type(weight)\n weight = self._if_pandas_to_numpy(weight)\n self._check_weight_shape(weight, samples_count)\n if group_id is not None:\n self._check_group_id_type(group_id)\n group_id = self._if_pandas_to_numpy(group_id)\n self._check_group_id_shape(group_id, samples_count)\n if group_weight is not None:\n self._check_group_weight_type(group_weight)\n group_weight = 
self._if_pandas_to_numpy(group_weight)\n self._check_group_weight_shape(group_weight, samples_count)\n if subgroup_id is not None:\n self._check_subgroup_id_type(subgroup_id)\n subgroup_id = self._if_pandas_to_numpy(subgroup_id)\n self._check_subgroup_id_shape(subgroup_id, samples_count)\n if pairs_weight is not None:\n self._check_weight_type(pairs_weight)\n pairs_weight = self._if_pandas_to_numpy(pairs_weight)\n self._check_weight_shape(pairs_weight, pairs_len)\n if baseline is not None:\n self._check_baseline_type(baseline)\n baseline = self._if_pandas_to_numpy(baseline)\n baseline = np.reshape(baseline, (samples_count, -1))\n self._check_baseline_shape(baseline, samples_count)\n if feature_names is not None:\n self._check_feature_names(feature_names, features_count)\n self._init_pool(data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names)\n\n\ndef _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description):\n train_pool = None\n if isinstance(X, Pool):\n train_pool = X\n if any(v is not None for v in [cat_features, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline]):\n raise CatboostError(\"cat_features, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline should have the None type when X has catboost.Pool type.\")\n if X.get_label() is None and X.num_pairs() == 0:\n raise CatboostError(\"Label in X has not initialized.\")\n if y is not None:\n raise CatboostError(\"Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool.\")\n elif isinstance(X, STRING_TYPES):\n train_pool = Pool(data=X, pairs=pairs, column_description=column_description)\n else:\n if y is None:\n raise CatboostError(\"y has not initialized in fit(): X is not catboost.Pool object, y must be not None in fit().\")\n train_pool = Pool(X, y, cat_features=cat_features, pairs=pairs, weight=sample_weight, group_id=group_id,\n group_weight=group_weight, subgroup_id=subgroup_id, pairs_weight=pairs_weight, baseline=baseline)\n return train_pool\n\n\ndef _clear_training_files(train_dir):\n for filename in ['catboost_training.json']:\n path = os.path.join(train_dir, filename)\n if os.path.exists(path):\n os.remove(path)\n\n\ndef _get_train_dir(params):\n return params.get('train_dir', 'catboost_info')\n\n\ndef _get_catboost_widget(train_dir):\n _clear_training_files(train_dir)\n try:\n from .widget import MetricVisualizer\n return MetricVisualizer(train_dir)\n except ImportError as e:\n warnings.warn(\"To draw plots in fit() method you should install ipywidgets and ipython\")\n raise ImportError(str(e))\n\n\n@contextmanager\ndef plot_wrapper(plot, params):\n if plot:\n widget = _get_catboost_widget(_get_train_dir(params))\n widget._run_update()\n try:\n yield\n finally:\n if plot:\n widget._stop_update()\n\n\n# the first element of the synonyms list is the canonical name\ndef _process_synonyms_group(synonyms, params):\n assert len(synonyms) > 1, 'there should be more than one synonym'\n\n value = None\n for synonym in synonyms:\n if synonym in params:\n if value is not None:\n raise CatboostError('only one of the parameters ' + (', '.join(synonyms)) + ' should be initialized.')\n value = params[synonym]\n del params[synonym]\n\n if value is not None:\n params[synonyms[0]] = value\n\n\ndef _process_synonyms(params):\n if 'objective' in params:\n params['loss_function'] = params['objective']\n del 
params['objective']\n\n if 'scale_pos_weight' in params:\n if 'loss_function' in params and params['loss_function'] != 'Logloss':\n raise CatboostError('scale_pos_weight is only supported for binary classification Logloss loss')\n if 'class_weights' in params:\n raise CatboostError('only one of parameters scale_pos_weight, class_weights should be initialized.')\n params['class_weights'] = [1.0, params['scale_pos_weight']]\n del params['scale_pos_weight']\n\n _process_synonyms_group(['learning_rate', 'eta'], params)\n _process_synonyms_group(['border_count', 'max_bin'], params)\n _process_synonyms_group(['depth', 'max_depth'], params)\n _process_synonyms_group(['rsm', 'colsample_bylevel'], params)\n _process_synonyms_group(['random_seed', 'random_state'], params)\n _process_synonyms_group(['l2_leaf_reg', 'reg_lambda'], params)\n _process_synonyms_group(['iterations', 'n_estimators', 'num_boost_round', 'num_trees'], params)\n _process_synonyms_group(['od_wait', 'early_stopping_rounds'], params)\n _process_synonyms_group(['custom_metric', 'custom_loss'], params)\n\n metric_period = None\n if 'metric_period' in params:\n metric_period = params['metric_period']\n del params['metric_period']\n\n verbose = None\n if 'verbose' in params:\n verbose = params['verbose']\n del params['verbose']\n\n logging_level = None\n if 'logging_level' in params:\n logging_level = params['logging_level']\n del params['logging_level']\n\n verbose_eval = None\n if 'verbose_eval' in params:\n verbose_eval = params['verbose_eval']\n del params['verbose_eval']\n\n silent = None\n if 'silent' in params:\n silent = params['silent']\n del params['silent']\n\n metric_period, verbose, logging_level = _process_verbose(metric_period, verbose, logging_level, verbose_eval, silent)\n\n if metric_period is not None:\n params['metric_period'] = metric_period\n if verbose is not None:\n params['verbose'] = verbose\n if logging_level is not None:\n params['logging_level'] = logging_level\n\n if 'used_ram_limit' in params:\n params['used_ram_limit'] = str(params['used_ram_limit'])\n\n\nclass _CatBoostBase(object):\n def __init__(self, params):\n self._init_params = params.copy() if params is not None else {}\n self._object = _CatBoost()\n\n def __getstate__(self):\n params = self._init_params.copy()\n test_evals = self._object._get_test_evals()\n if test_evals:\n params['_test_evals'] = test_evals\n if self.is_fitted():\n params['__model'] = self._serialize_model()\n for attr in ['_classes', '_feature_importance']:\n if getattr(self, attr, None) is not None:\n params[attr] = getattr(self, attr, None)\n return params\n\n def __setstate__(self, state):\n if '_object' not in dict(self.__dict__.items()):\n self._object = _CatBoost()\n if '_init_params' not in dict(self.__dict__.items()):\n self._init_params = {}\n if '__model' in state:\n self._deserialize_model(state['__model'])\n self._set_trained_model_attributes()\n del state['__model']\n if '_test_eval' in state:\n self._set_test_evals([state['_test_eval']])\n del state['_test_eval']\n if '_test_evals' in state:\n self._set_test_evals(state['_test_evals'])\n del state['_test_evals']\n for attr in ['_classes', '_feature_importance']:\n if attr in state:\n setattr(self, attr, state[attr])\n del state[attr]\n self._init_params.update(state)\n\n def __copy__(self):\n return self.__deepcopy__(None)\n\n def __deepcopy__(self, _):\n state = self.__getstate__()\n model = self.__class__()\n model.__setstate__(state)\n return model\n\n def copy(self):\n return self.__copy__()\n\n def 
is_fitted(self):\n return getattr(self, '_random_seed', None) is not None\n\n def _set_trained_model_attributes(self):\n setattr(self, '_random_seed', self._object._get_random_seed())\n setattr(self, '_learning_rate', self._object._get_learning_rate())\n setattr(self, '_tree_count', self._object._get_tree_count())\n\n def _train(self, train_pool, test_pool, params, allow_clear_pool):\n self._object._train(train_pool, test_pool, params, allow_clear_pool)\n self._set_trained_model_attributes()\n\n def _set_test_evals(self, test_evals):\n self._object._set_test_evals(test_evals)\n\n def get_test_eval(self):\n test_evals = self._object._get_test_evals()\n if len(test_evals) == 0:\n if self.is_fitted():\n raise CatboostError('The model was trained without eval set.')\n else:\n raise CatboostError('You should train the model first.')\n if len(test_evals) > 1:\n raise CatboostError(\"With multiple eval sets use 'get_test_evals()'\")\n test_eval = test_evals[0]\n return test_eval[0] if len(test_eval) == 1 else test_eval\n\n def get_test_evals(self):\n test_evals = self._object._get_test_evals()\n if len(test_evals) == 0:\n if self.is_fitted():\n raise CatboostError('The model was trained without eval set.')\n else:\n raise CatboostError('You should train the model first.')\n return test_evals\n\n def get_evals_result(self):\n return self._object._get_metrics_evals()\n\n def get_best_score(self):\n return self._object._get_best_score()\n\n def get_best_iteration(self):\n return self._object._get_best_iteration()\n\n def _get_float_feature_indices(self):\n return self._object._get_float_feature_indices()\n\n def _get_cat_feature_indices(self):\n return self._object._get_cat_feature_indices()\n\n def _base_predict(self, pool, prediction_type, ntree_start, ntree_end, thread_count, verbose):\n return self._object._base_predict(pool, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def _base_predict_multi(self, pool, prediction_type, ntree_start, ntree_end, thread_count, verbose):\n return self._object._base_predict_multi(pool, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def _staged_predict_iterator(self, pool, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose):\n return self._object._staged_predict_iterator(pool, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def _base_eval_metrics(self, pool, metrics_description, ntree_start, ntree_end, eval_period, thread_count, result_dir, tmp_dir):\n metrics_description_list = metrics_description if isinstance(metrics_description, list) else [metrics_description]\n return self._object._base_eval_metrics(pool, metrics_description_list, ntree_start, ntree_end, eval_period, thread_count, result_dir, tmp_dir)\n\n def _calc_fstr(self, fstr_type, pool, thread_count, verbose):\n \"\"\"returns (fstr_values, feature_ids).\"\"\"\n return self._object._calc_fstr(fstr_type.name, pool, thread_count, verbose)\n\n def _calc_ostr(self, train_pool, test_pool, top_size, ostr_type, update_method, importance_values_sign, thread_count, verbose):\n return self._object._calc_ostr(train_pool, test_pool, top_size, ostr_type, update_method, importance_values_sign, thread_count, verbose)\n\n def _base_shrink(self, ntree_start, ntree_end):\n self._object._base_shrink(ntree_start, ntree_end)\n self._set_trained_model_attributes()\n\n def _base_drop_unused_features(self):\n self._object._base_drop_unused_features()\n\n def _save_model(self, output_file, format, export_parameters, pool):\n 
import json\n if self.is_fitted():\n params_string = \"\"\n if export_parameters:\n params_string = json.dumps(export_parameters, cls=_NumpyAwareEncoder)\n\n self._object._save_model(output_file, format, params_string, pool)\n\n def _load_model(self, model_file, format):\n self._object._load_model(model_file, format)\n self._set_trained_model_attributes()\n for key, value in iteritems(self._get_params()):\n self._init_params[key] = value\n\n def _serialize_model(self):\n return self._object._serialize_model()\n\n def _deserialize_model(self, dump_model_str):\n self._object._deserialize_model(dump_model_str)\n\n def _sum_models(self, models_base, weights=None, ctr_merge_policy='IntersectingCountersAverage'):\n if weights is None:\n weights = [1.0 for _ in models_base]\n models_inner = [model._object for model in models_base]\n self._object._sum_models(models_inner, weights, ctr_merge_policy)\n setattr(self, '_random_seed', 0)\n setattr(self, '_learning_rate', 0)\n setattr(self, '_tree_count', self._object._get_tree_count())\n\n def _get_params(self):\n params = self._object._get_params()\n init_params = self._init_params.copy()\n for key, value in iteritems(init_params):\n if key not in params:\n params[key] = value\n return params\n\n def _is_classification_objective(self, loss_function):\n return isinstance(loss_function, str) and is_classification_objective(loss_function)\n\n def _is_regression_objective(self, loss_function):\n return isinstance(loss_function, str) and is_regression_objective(loss_function)\n\n def get_metadata(self):\n return self._object._get_metadata_wrapper()\n\n @property\n def metadata_(self):\n raise CatboostError(\"metadata_ property is not supported anymore, use get_metadata() method instead.\")\n\n @property\n def is_fitted_(self):\n raise CatboostError(\"is_fitted_ property is not supported anymore, use is_fitted() method instead.\")\n\n @property\n def tree_count_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return getattr(self, '_tree_count')\n\n @property\n def random_seed_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return getattr(self, '_random_seed')\n\n @property\n def learning_rate_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return getattr(self, '_learning_rate')\n\n @property\n def feature_names_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return self._object._get_feature_names()\n\n @property\n def evals_result_(self):\n return self.get_evals_result()\n\n @property\n def best_score_(self):\n return self.get_best_score()\n\n @property\n def best_iteration_(self):\n return self.get_best_iteration()\n\n\ndef _check_param_types(params):\n if not isinstance(params, (Mapping, MutableMapping)):\n raise CatboostError(\"Invalid params type={}: must be dict().\".format(type(params)))\n if 'ctr_description' in params:\n if not isinstance(params['ctr_description'], Sequence):\n raise CatboostError(\"Invalid ctr_description type={} : must be list of strings\".format(type(params['ctr_description'])))\n if 'custom_loss' in params:\n if isinstance(params['custom_loss'], STRING_TYPES):\n params['custom_loss'] = [params['custom_loss']]\n if not isinstance(params['custom_loss'], Sequence):\n raise CatboostError(\"Invalid `custom_loss` type={} : must be string or list of strings.\".format(type(params['custom_loss'])))\n if 'custom_metric' in params:\n if isinstance(params['custom_metric'], STRING_TYPES):\n 
params['custom_metric'] = [params['custom_metric']]\n if not isinstance(params['custom_metric'], Sequence):\n raise CatboostError(\"Invalid `custom_metric` type={} : must be string or list of strings.\".format(type(params['custom_metric'])))\n\n\ndef _params_type_cast(params):\n casted_params = {}\n for key, value in iteritems(params):\n value = _cast_to_base_types(value)\n casted_params[key] = value\n return casted_params\n\n\nclass CatBoost(_CatBoostBase):\n \"\"\"\n CatBoost model, that contains training, prediction and evaluation.\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"\n Initialize the CatBoost.\n\n Parameters\n ----------\n params : dict\n Parameters for CatBoost.\n If None, all params are set to their defaults.\n If dict, overriding parameters present in dict.\n \"\"\"\n super(CatBoost, self).__init__(params)\n\n def _fit(self, X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id,\n pairs_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot,\n column_description, verbose_eval, metric_period, silent, early_stopping_rounds,\n save_snapshot, snapshot_file, snapshot_interval):\n\n params = deepcopy(self._init_params)\n if params is None:\n params = {}\n\n _process_synonyms(params)\n\n if 'cat_features' in params:\n if isinstance(X, Pool):\n if set(X.get_cat_feature_indices()) != set(params['cat_features']):\n raise CatboostError(\"categorical features in the model are set to \" + str(params['cat_features']) +\n \" and train dataset categorical features are set to \" +\n str(X.get_cat_feature_indices()))\n elif isinstance(X, FeaturesData):\n raise CatboostError(\"Categorical features are set in the model. It is not allowed to use FeaturesData type for training dataset.\")\n else:\n if cat_features is not None and set(cat_features) != set(params['cat_features']):\n raise CatboostError(\"categorical features in the model are set to \" + str(params['cat_features']) +\n \". 
categorical features passed to fit function are set to \" + str(cat_features))\n cat_features = params['cat_features']\n del params['cat_features']\n\n metric_period, verbose, logging_level = _process_verbose(metric_period, verbose, logging_level, verbose_eval, silent)\n\n if metric_period is not None:\n params['metric_period'] = metric_period\n if logging_level is not None:\n params['logging_level'] = logging_level\n if verbose is not None:\n params['verbose'] = verbose\n if use_best_model is not None:\n params['use_best_model'] = use_best_model\n\n if early_stopping_rounds is not None:\n params['od_type'] = 'Iter'\n params['od_wait'] = early_stopping_rounds\n if 'od_pval' in params:\n del params['od_pval']\n\n if save_snapshot is not None:\n params['save_snapshot'] = save_snapshot\n\n if snapshot_file is not None:\n params['snapshot_file'] = snapshot_file\n\n if snapshot_interval is not None:\n params['snapshot_interval'] = snapshot_interval\n\n _check_param_types(params)\n params = _params_type_cast(params)\n _check_train_params(params)\n\n train_pool = _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description)\n if train_pool.is_empty_:\n raise CatboostError(\"X is empty.\")\n\n allow_clear_pool = not isinstance(X, Pool)\n\n eval_set_list = eval_set if isinstance(eval_set, list) else [eval_set]\n eval_sets = []\n eval_total_row_count = 0\n for eval_set in eval_set_list:\n if isinstance(eval_set, Pool):\n eval_sets.append(eval_set)\n eval_total_row_count += eval_sets[-1].num_row()\n if eval_sets[-1].num_row() == 0:\n raise CatboostError(\"Empty 'eval_set' in Pool\")\n elif isinstance(eval_set, STRING_TYPES):\n eval_sets.append(Pool(eval_set, column_description=column_description))\n eval_total_row_count += eval_sets[-1].num_row()\n if eval_sets[-1].num_row() == 0:\n raise CatboostError(\"Empty 'eval_set' in file {}\".format(eval_set))\n elif isinstance(eval_set, tuple):\n if len(eval_set) != 2:\n raise CatboostError(\"Invalid shape of 'eval_set': {}, must be (X, y).\".format(str(tuple(type(_) for _ in eval_set))))\n eval_sets.append(Pool(eval_set[0], eval_set[1], cat_features=train_pool.get_cat_feature_indices()))\n eval_total_row_count += eval_sets[-1].num_row()\n if eval_sets[-1].num_row() == 0:\n raise CatboostError(\"Empty 'eval_set' in tuple\")\n elif eval_set is None:\n if len(eval_set_list) > 1:\n raise CatboostError(\"Multiple eval set shall not contain None\")\n else:\n raise CatboostError(\"Invalid type of 'eval_set': {}, while expected Pool or (X, y) or filename, or list thereof.\".format(type(eval_set)))\n\n if self.get_param('use_best_model') and eval_total_row_count == 0:\n raise CatboostError(\"To employ param {'use_best_model': True} provide non-empty 'eval_set'.\")\n\n with log_fixup(), plot_wrapper(plot, self.get_params()):\n self._train(train_pool, eval_sets, params, allow_clear_pool)\n\n if (not self._object._has_leaf_weights_in_model()) and allow_clear_pool:\n train_pool = _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description)\n setattr(\n self,\n \"_feature_importance\",\n self.get_feature_importance(train_pool, EFstrType.FeatureImportance)\n )\n\n if 'loss_function' in params and self._is_classification_objective(params['loss_function']):\n setattr(self, \"_classes\", np.unique(train_pool.get_label()))\n return self\n\n def fit(self, X, y=None, cat_features=None, pairs=None, sample_weight=None, 
group_id=None,\n group_weight=None, subgroup_id=None, pairs_weight=None, baseline=None, use_best_model=None,\n eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,\n verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Fit the CatBoost model.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or catboost.FeaturesData\n or string.\n If not catboost.Pool or catboost.FeaturesData it must be 2 dimensional Feature matrix\n or string - file with dataset.\n\n Must be non-empty (contain > 0 objects)\n\n y : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Labels, 1 dimensional array like.\n Use only if X is not catboost.Pool.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Use only if X is not catboost.Pool and not catboost.FeaturesData\n\n pairs : list or numpy.array or pandas.DataFrame\n The pairs description.\n If list or numpy.arrays or pandas.DataFrame, giving 2 dimensional.\n The shape should be Nx2, where N is the pairs' count. The first element of pair is\n the index of winner object in training set. The second element of pair is\n the index of loser object in training set.\n\n sample_weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Instance weights, 1 dimensional array like.\n\n group_id : list or numpy.array, optional (default=None)\n group id for each instance.\n If not None, giving 1 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n group_weight : list or numpy.array, optional (default=None)\n Group weight for each instance.\n If not None, giving 1 dimensional array like data.\n\n subgroup_id : list or numpy.array, optional (default=None)\n subgroup id for each instance.\n If not None, giving 1 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n pairs_weight : list or numpy.array, optional (default=None)\n Weight for each pair.\n If not None, giving 1 dimensional array like pairs.\n\n baseline : list or numpy.array, optional (default=None)\n If not None, giving 2 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n use_best_model : bool, optional (default=None)\n Flag to use best model\n\n eval_set : catboost.Pool, or list of catboost.Pool, or list of (X, y) tuples, optional (default=None)\n Used as a validation set for early-stopping.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n silent : bool\n If silent is True, logging_level is set to Silent.\n If silent is False, logging_level is set to Verbose.\n\n verbose_eval : bool or int\n Synonym for verbose. 
Only one of these parameters should be set.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait parameter set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost\n \"\"\"\n return self._fit(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id,\n pairs_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot,\n column_description, verbose_eval, metric_period, silent, early_stopping_rounds,\n save_snapshot, snapshot_file, snapshot_interval)\n\n def _predict(self, data, prediction_type, ntree_start, ntree_end, thread_count, verbose):\n verbose = verbose or self.get_param('verbose')\n if verbose is None:\n verbose = False\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use predict(). Use fit() to train model. Then use predict().\")\n if data is None:\n raise CatboostError(\"Data to predict must be initialized\")\n is_data_single_object = False\n if not isinstance(data, Pool):\n if isinstance(data, ARRAY_TYPES):\n if not isinstance(data[0], ARRAY_TYPES):\n data = [data]\n is_data_single_object = True\n data = Pool(\n data=data,\n cat_features=self._get_cat_feature_indices() if not isinstance(data, FeaturesData) else None\n )\n if not isinstance(prediction_type, STRING_TYPES):\n raise CatboostError(\"Invalid prediction_type type={}: must be str().\".format(type(prediction_type)))\n if prediction_type not in ('Class', 'RawFormulaVal', 'Probability'):\n raise CatboostError(\"Invalid value of prediction_type={}: must be Class, RawFormulaVal or Probability.\".format(prediction_type))\n loss_function_type = self.get_param('loss_function')\n if loss_function_type is None:\n loss_function_type = self.get_param('objective')\n # TODO(kirillovs): very bad solution. 
user should be able to use custom multiclass losses\n if loss_function_type is not None and (loss_function_type == 'MultiClass' or loss_function_type == 'MultiClassOneVsAll'):\n return np.transpose(self._base_predict_multi(data, prediction_type, ntree_start, ntree_end, thread_count, verbose))\n predictions = np.array(self._base_predict(data, prediction_type, ntree_start, ntree_end, thread_count, verbose))\n if prediction_type == 'Probability':\n predictions = np.transpose([1 - predictions, predictions])\n if is_data_single_object:\n return predictions[0]\n return predictions\n\n def predict(self, data, prediction_type='RawFormulaVal', ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n or catboost.FeaturesData or single object\n Data to predict.\n\n prediction_type : string, optional (default='RawFormulaVal')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool, optional (default=False)\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single answer object prediction for single object\n \"\"\"\n return self._predict(data, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def _staged_predict(self, data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose):\n verbose = verbose or self.get_param('verbose')\n if verbose is None:\n verbose = False\n if not self.is_fitted() or self.tree_count_ is None:\n raise CatboostError(\"There is no trained model to use staged_predict(). Use fit() to train model. 
Then use staged_predict().\")\n if data is None:\n raise CatboostError(\"Data to predict must be initialized\")\n is_data_single_object = False\n if not isinstance(data, Pool):\n if isinstance(data, ARRAY_TYPES):\n if not isinstance(data[0], ARRAY_TYPES):\n data = [data]\n is_data_single_object = True\n data = Pool(\n data=data,\n cat_features=self._get_cat_feature_indices() if not isinstance(data, FeaturesData) else None\n )\n if not isinstance(prediction_type, STRING_TYPES):\n raise CatboostError(\"Invalid prediction_type type={}: must be str().\".format(type(prediction_type)))\n if prediction_type not in ('Class', 'RawFormulaVal', 'Probability'):\n raise CatboostError(\"Invalid value of prediction_type={}: must be Class, RawFormulaVal or Probability.\".format(prediction_type))\n if ntree_end == 0:\n ntree_end = self.tree_count_\n staged_predict_iterator = self._staged_predict_iterator(data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n loss_function = self.get_param('loss_function')\n if loss_function is None:\n loss_function = self.get_param('objective')\n while True:\n predictions = staged_predict_iterator.next()\n if loss_function is not None and (loss_function == 'MultiClass' or loss_function == 'MultiClassOneVsAll'):\n predictions = np.transpose(predictions)\n else:\n predictions = np.array(predictions[0])\n if prediction_type == 'Probability':\n predictions = np.transpose([1 - predictions, predictions])\n if is_data_single_object:\n predictions = predictions[0]\n yield predictions\n\n def staged_predict(self, data, prediction_type='RawFormulaVal', ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n prediction_type : string, optional (default='RawFormulaVal')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def get_cat_feature_indices(self):\n if not self.is_fitted():\n raise CatboostError(\"Model is not fitted\")\n return self._get_cat_feature_indices()\n\n def _eval_metrics(self, data, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir, plot):\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use predict(). 
Use fit() to train model. Then use predict().\")\n if not isinstance(data, Pool):\n raise CatboostError(\"Invalid data type={}, must be catboost.Pool.\".format(type(data)))\n if data.is_empty_:\n raise CatboostError(\"Data is empty.\")\n if not isinstance(metrics, ARRAY_TYPES) and not isinstance(metrics, STRING_TYPES):\n raise CatboostError(\"Invalid metrics type={}, must be list() or str().\".format(type(metrics)))\n if not all(map(lambda metric: isinstance(metric, string_types), metrics)):\n raise CatboostError(\"Invalid metric type: must be string().\")\n if tmp_dir is None:\n tmp_dir = tempfile.mkdtemp()\n\n with log_fixup(), plot_wrapper(plot, self.get_params()):\n metrics_score, metric_names = self._base_eval_metrics(data, metrics, ntree_start, ntree_end, eval_period, thread_count, _get_train_dir(self.get_params()), tmp_dir)\n\n return dict(zip(metric_names, metrics_score))\n\n def eval_metrics(self, data, metrics, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, tmp_dir=None, plot=False):\n \"\"\"\n Calculate metrics.\n\n Parameters\n ----------\n data : catboost.Pool\n Data to eval metrics.\n\n metrics : list of strings\n List of eval metrics.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n tmp_dir : string (default=None)\n The name of the temporary directory for intermediate results.\n If None, then the name will be generated.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n Returns\n -------\n prediction : dict: metric -> array of shape [(ntree_end - ntree_start) / eval_period]\n \"\"\"\n return self._eval_metrics(data, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir, plot)\n\n def create_metric_calcer(self, metrics, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, tmp_dir=None):\n \"\"\"\n Create batch metric calcer. Could be used to aggregate metric on several pools\n Parameters\n ----------\n Same as in eval_metrics except data\n Returns\n -------\n BatchMetricCalcer object\n\n Usage example\n -------\n # Large dataset is partitioned into parts [part1, part2]\n model.fit(params)\n batch_calcer = model.create_metric_calcer(['Logloss'])\n batch_calcer.add_pool(part1)\n batch_calcer.add_pool(part2)\n metrics = batch_calcer.eval_metrics()\n \"\"\"\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use predict(). Use fit() to train model. Then use predict().\")\n return BatchMetricCalcer(self._object, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir)\n\n @property\n def feature_importances_(self):\n feature_importances_ = getattr(self, \"_feature_importance\", None)\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use `feature_importances_`. Use fit() to train model. 
Then use `feature_importances_`.\")\n return np.array(feature_importances_)\n\n def get_feature_importance(self, data=None, fstr_type=EFstrType.FeatureImportance, prettified=False, thread_count=-1, verbose=False):\n \"\"\"\n Parameters\n ----------\n data : catboost.Pool or None\n Data to get feature importance.\n If type == Shap data is a dataset. For every object in this dataset feature importances will be calculated.\n If type == 'FeatureImportance', data is None or train dataset (in case if model was explicitly trained with flag store no leaf weights).\n\n fstr_type : EFStrType or string (deprecated, converted to EFstrType), optional\n (default=EFstrType.FeatureImportance)\n Possible values:\n - FeatureImportance\n Calculate score for every feature.\n - ShapValues\n Calculate SHAP Values for every object.\n - Interaction\n Calculate pairwise score between every feature.\n\n prettified : bool, optional (default=False)\n used only for FeatureImportance fstr_type\n change returned data format to the list of (feature_id, importance) pairs sorted by importance\n\n thread_count : int, optional (default=-1)\n Number of threads.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool or int\n If False, then evaluation is not logged. If True, then each possible iteration is logged.\n If a positive integer, then it stands for the size of batch N. After processing each batch, print progress\n and remaining time.\n\n\n Returns\n -------\n depends on fstr_type:\n - FeatureImportance with prettified=False (default)\n list of length [n_features] with feature_importance values (float) for feature\n - FeatureImportance with prettified=True\n list of length [n_features] with (feature_id (string), feature_importance (float)) pairs, sorted by feature_importance in descending order\n - ShapValues\n np.array of shape (n_objects, n_features + 1) with Shap values (float) for (object, feature).\n In case of multiclass the returned value is np.array of shape\n (n_objects, classes_count, n_features + 1). 
For each object it contains Shap values (float).\n Values are calculated for RawFormulaVal predictions.\n - Interaction\n list of length [n_features] of 3-element lists of (first_feature_index, second_feature_index, interaction_score (float))\n \"\"\"\n\n if not isinstance(verbose, bool) and not isinstance(verbose, int):\n raise CatboostError('verbose should be bool or int.')\n verbose = int(verbose)\n if verbose < 0:\n raise CatboostError('verbose should be non-negative.')\n\n fstr_type = enum_from_enum_or_str(EFstrType, fstr_type)\n empty_data_is_ok = (((fstr_type == EFstrType.FeatureImportance) and self._object._has_leaf_weights_in_model())\n or (fstr_type == EFstrType.Interaction))\n if not empty_data_is_ok:\n if not isinstance(data, Pool):\n raise CatboostError(\"Invalid metric type={}, must be catboost.Pool.\".format(type(data)))\n if data.is_empty_:\n raise CatboostError(\"data is empty.\")\n\n with log_fixup():\n fstr, feature_names = self._calc_fstr(fstr_type, data, thread_count, verbose)\n if fstr_type == EFstrType.FeatureImportance:\n feature_importances = [value[0] for value in fstr]\n if prettified:\n return sorted(zip(feature_names, feature_importances), key=itemgetter(1), reverse=True)\n else:\n return feature_importances\n if fstr_type == EFstrType.ShapValues:\n if isinstance(fstr[0][0], ARRAY_TYPES):\n return np.array([np.array([np.array([\n value for value in dimension]) for dimension in doc]) for doc in fstr])\n else:\n return np.array([np.array([value for value in doc]) for doc in fstr])\n elif fstr_type == EFstrType.Interaction:\n return [[int(row[0]), int(row[1]), row[2]] for row in fstr]\n\n def get_object_importance(self, pool, train_pool, top_size=-1, ostr_type='Average', update_method='SinglePoint', importance_values_sign='All', thread_count=-1, verbose=False):\n \"\"\"\n This is the implementation of the LeafInfluence algorithm from the following paper:\n https://arxiv.org/pdf/1802.06640.pdf\n\n Parameters\n ----------\n pool : Pool\n The pool for which you want to evaluate the object importances.\n\n train_pool : Pool\n The pool on which the model was trained.\n\n top_size : int (default=-1)\n Method returns the result of the top_size most important train objects.\n If -1, then the top size is not limited.\n\n ostr_type : string, optional (default='Average')\n Possible values:\n - Average (Method returns the mean train objects scores for all input objects)\n - PerObject (Method returns the train objects scores for every input object)\n\n importance_values_sign : string, optional (default='All')\n Method returns only Positive, Negative or All values.\n Possible values:\n - Positive\n - Negative\n - All\n\n update_method : string, optional (default='SinglePoint')\n Possible values:\n - SinglePoint\n - TopKLeaves (It is posible to set top size : TopKLeaves:top=2)\n - AllPoints\n Description of the update set methods are given in section 3.1.3 of the paper.\n\n thread_count : int, optional (default=-1)\n Number of threads.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool or int\n If False, then evaluation is not logged. If True, then each possible iteration is logged.\n If a positive integer, then it stands for the size of batch N. 
After processing each batch, print progress\n and remaining time.\n\n Returns\n -------\n object_importances : tuple of two arrays (indices and scores) of shape = [top_size]\n \"\"\"\n\n if not isinstance(verbose, bool) and not isinstance(verbose, int):\n raise CatboostError('verbose should be bool or int.')\n verbose = int(verbose)\n if verbose < 0:\n raise CatboostError('verbose should be non-negative.')\n\n with log_fixup():\n result = self._calc_ostr(train_pool, pool, top_size, ostr_type, update_method, importance_values_sign, thread_count, verbose)\n return result\n\n def shrink(self, ntree_end, ntree_start=0):\n \"\"\"\n Shrink the model.\n\n Parameters\n ----------\n ntree_end: int\n Leave the trees with indices from the interval [ntree_start, ntree_end) (zero-based indexing).\n ntree_start: int, optional (default=0)\n Leave the trees with indices from the interval [ntree_start, ntree_end) (zero-based indexing).\n \"\"\"\n if ntree_start > ntree_end:\n raise CatboostError(\"ntree_start should be less than ntree_end.\")\n self._base_shrink(ntree_start, ntree_end)\n\n def drop_unused_features(self):\n \"\"\"\n Drop unused features information from model\n \"\"\"\n self._base_drop_unused_features()\n\n def save_model(self, fname, format=\"cbm\", export_parameters=None, pool=None):\n \"\"\"\n Save the model to a file.\n\n Parameters\n ----------\n fname : string\n Output file name.\n format : string\n Either 'cbm' for catboost binary format, or 'coreml' to export into Apple CoreML format, or 'cpp' to export as C++ code, or 'python' to export as Python code.\n export_parameters : dict\n Parameters for CoreML export:\n * prediction_type : string - either 'probability' or 'raw'\n * coreml_description : string\n * coreml_model_version : string\n * coreml_model_author : string\n * coreml_model_license: string\n pool : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or catboost.FeaturesData\n Training pool.\n \"\"\"\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use save_model(). Use fit() to train model. 
Then use save_model().\")\n if not isinstance(fname, STRING_TYPES):\n raise CatboostError(\"Invalid fname type={}: must be str().\".format(type(fname)))\n if pool is not None and not isinstance(pool, Pool):\n pool = Pool(\n data=pool,\n cat_features=self._get_cat_feature_indices() if not isinstance(pool, FeaturesData) else None\n )\n self._save_model(fname, format, export_parameters, pool)\n\n def load_model(self, fname, format='catboost'):\n \"\"\"\n Load model from a file.\n\n Parameters\n ----------\n fname : string\n Input file name.\n \"\"\"\n if not isinstance(fname, STRING_TYPES):\n raise CatboostError(\"Invalid fname type={}: must be str().\".format(type(fname)))\n self._load_model(fname, format)\n return self\n\n def get_param(self, key):\n \"\"\"\n Get param value from CatBoost model.\n\n Parameters\n ----------\n key : string\n The key to get param value from.\n\n Returns\n -------\n value :\n The param value of the key, returns None if param do not exist.\n \"\"\"\n params = self.get_params()\n if params is None:\n return {}\n return params.get(key)\n\n def get_params(self, deep=True):\n \"\"\"\n Get all params from CatBoost model.\n\n Returns\n -------\n result : dict\n Dictionary of {param_key: param_value}.\n \"\"\"\n params = self._init_params.copy()\n if deep:\n return deepcopy(params)\n else:\n return params\n\n def set_params(self, **params):\n \"\"\"\n Set parameters into CatBoost model.\n\n Parameters\n ----------\n **params : key=value format\n List of key=value paris. Example: model.set_params(iterations=500, thread_count=2).\n \"\"\"\n for key, value in iteritems(params):\n self._init_params[key] = value\n return self\n\n\nclass CatBoostClassifier(CatBoost):\n \"\"\"\n Implementation of the scikit-learn API for CatBoost classification.\n\n Parameters\n ----------\n iterations : int, [default=500]\n Max count of trees.\n range: [1,+inf]\n learning_rate : float, [default=0.03]\n Step size shrinkage used in update to prevents overfitting.\n range: (0,1]\n depth : int, [default=6]\n Depth of a tree. All trees are the same depth.\n range: [1,+inf]\n l2_leaf_reg : int, [default=3]\n L2 regularization term on weights.\n range: [0,+inf]\n model_size_reg : float, [default=None]\n Model size regularization coefficient.\n range: [0,+inf]\n rsm : float, [default=None]\n Subsample ratio of columns when constructing each tree.\n range: (0,1]\n loss_function : string or object, [default='Logloss']\n The metric to use in training and also selector of the machine learning\n problem to solve. If string, then the name of a supported metric,\n optionally suffixed with parameter description.\n If object, it shall provide methods 'calc_ders_range' or 'calc_ders_multi'.\n border_count : int, [default=32]\n The number of partitions for Num features. Used in the preliminary calculation.\n range: (0,+inf]\n feature_border_type : string, [default='MinEntropy']\n Type of binarization target. Used only in Reggression tasks.\n Possible values:\n - 'Median'\n - 'UniformAndQuantiles'\n - 'GreedyLogSum'\n - 'MaxLogSum'\n - 'MinEntropy'\n fold_permutation_block_size : int, [default=1]\n To accelerate the learning.\n The recommended value is within [1, 256]. 
On small samples, must be set to 1.\n range: [1,+inf]\n od_pval : float, [default=None]\n Use overfitting detector to stop training when reaching a specified threshold.\n Can be used only with eval_set.\n range: [0,1]\n od_wait : int, [default=None]\n Number of iterations which overfitting detector will wait after new best error.\n od_type : string, [default=None]\n Type of overfitting detector which will be used in program.\n Posible values:\n - 'IncToDec'\n - 'Iter'\n For 'Iter' type od_pval must not be set.\n If None, then od_type=IncToDec.\n nan_mode : string, [default=None]\n Way to process nan-values.\n Possible values:\n - 'Forbidden' - raises an exception if there is nan value in dataset.\n - 'Min' - each nan float feature will be processed as minimum value from dataset.\n - 'Max' - each nan float feature will be processed as maximum value from dataset.\n If None, then nan_mode=Min.\n counter_calc_method : string, [default=None]\n The method used to calculate counters for dataset with Counter type.\n Possible values:\n - 'PrefixTest' - only objects up to current in the test dataset are considered\n - 'FullTest' - all objects are considered in the test dataset\n - 'SkipTest' - Objects from test dataset are not considered\n - 'Full' - all objects are considered for both learn and test dataset\n If None, then counter_calc_method=PrefixTest.\n leaf_estimation_iterations : int, [default=None]\n The number of steps in the gradient when calculating the values in the leaves.\n If None, then leaf_estimation_iterations=1.\n range: [1,+inf]\n leaf_estimation_method : string, [default='Gradient']\n The method used to calculate the values in the leaves.\n Possible values:\n - 'Newton'\n - 'Gradient'\n thread_count : int, [default=None]\n Number of parallel threads used to run CatBoost.\n If None, then the number of thread is set to the number of cores.\n range: [1,+inf]\n random_seed : int, [default=None]\n Random number seed.\n If None, used random number.\n range: [0,+inf]\n use_best_model : bool, [default=None]\n To limit the number of trees in predict() using information about the optimal value of the error function.\n Can be used only with eval_set.\n best_model_min_trees : int, [default=None]\n The minimal number of trees the best model should have.\n verbose: bool\n When set to True, logging_level is set to 'Verbose'.\n When set to False, logging_level is set to 'Silent'.\n silent: bool, synonym for verbose\n logging_level : string, [default='Verbose']\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n metric_period : int, [default=1]\n The frequency of iterations to print the information to stdout. 
The value should be a positive integer.\n simple_ctr: list of strings, [default=None]\n Binarization settings for categorical features.\n Format : see documentation\n Example: ['Borders:CtrBorderCount=5:Prior=0:Prior=0.5', 'BinarizedTargetMeanValue:TargetBorderCount=10:TargetBorderType=MinEntropy', ...]\n CTR types:\n CPU and GPU\n - 'Borders'\n - 'Buckets'\n CPU only\n - 'BinarizedTargetMeanValue'\n - 'Counter'\n GPU only\n - 'FloatTargetMeanValue'\n - 'FeatureFreq'\n Number_of_borders, binarization type, target borders and binarizations, priors are optional parametrs\n combinations_ctr: list of strings, [default=None]\n per_feature_ctr: list of strings, [default=None]\n ctr_leaf_count_limit : int, [default=None]\n The maximum number of leaves with categorical features.\n If the number of leaves exceeds the specified limit, some leaves are discarded.\n The leaves to be discarded are selected as follows:\n - The leaves are sorted by the frequency of the values.\n - The top N leaves are selected, where N is the value specified in the parameter.\n - All leaves starting from N+1 are discarded.\n This option reduces the resulting model size\n and the amount of memory required for training.\n Note that the resulting quality of the model can be affected.\n range: [1,+inf] (for zero limit use ignored_features)\n store_all_simple_ctr : bool, [default=None]\n Ignore categorical features, which are not used in feature combinations,\n when choosing candidates for exclusion.\n Use this parameter with ctr_leaf_count_limit only.\n max_ctr_complexity : int, [default=4]\n The maximum number of Categ features that can be combined.\n range: [0,+inf]\n has_time : bool, [default=False]\n To use the order in which objects are represented in the input data\n (do not perform a random permutation on the stages of converting\n the Categ features to Num and the choice of a tree structure).\n allow_const_label : bool, [default=False]\n To allow the constant label value in dataset.\n classes_count : int, [default=None]\n The upper limit for the numeric class label.\n Defines the number of classes for multiclassification.\n Only non-negative integers can be specified.\n The given integer should be greater than any of the target values.\n If this parameter is specified the labels for all classes in the input dataset\n should be smaller than the given value.\n If several of 'classes_count', 'class_weights', 'class_names' parameters are defined\n the numbers of classes specified by each of them must be equal.\n class_weights : list of floats, [default=None]\n Classes weights. The values are used as multipliers for the object weights.\n If None, all classes are supposed to have weight one.\n If several of 'classes_count', 'class_weights', 'class_names' parameters are defined\n the numbers of classes specified by each of them must be equal.\n class_names: list of strings, [default=None]\n Class names. 
Allows to redefine the default values for class labels (integer numbers).\n If several of 'classes_count', 'class_weights', 'class_names' parameters are defined\n the numbers of classes specified by each of them must be equal.\n one_hot_max_size : int, [default=None]\n Convert the feature to float\n if the number of different values that it takes exceeds the specified value.\n Ctrs are not calculated for such features.\n random_strength : float, [default=1]\n Score standard deviation multiplier.\n name : string, [default='experiment']\n The name that should be displayed in the visualization tools.\n ignored_features : list, [default=None]\n Indices of features that should be excluded when training.\n train_dir : string, [default=None]\n The directory in which you want to record generated in the process of learning files.\n custom_metric : string or list of strings, [default=None]\n To use your own metric function.\n custom_loss: alias to custom_metric\n eval_metric : string or object, [default=None]\n To optimize your custom metric in loss.\n bagging_temperature : float, [default=None]\n Controls intensity of Bayesian bagging. The higher the temperature the more aggressive bagging is.\n Typical values are in range [0, 1] (0 - no bagging, 1 - default).\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n fold_len_multiplier : float, [default=None]\n Fold length multiplier. Should be greater than 1\n used_ram_limit : string or number, [default=None]\n Set a limit on memory consumption (value like '1.2gb' or 1.2e9).\n WARNING: Currently this option affects CTR memory usage only.\n gpu_ram_part : float, [default=0.95]\n Fraction of the GPU RAM to use for training, a value from (0, 1].\n pinned_memory_size: int [default=None]\n Size of additional CPU pinned memory used for GPU learning,\n usually is estimated automatically, thus usually should not be set.\n allow_writing_files : bool, [default=True]\n If this flag is set to False, no files with different diagnostic info will be created during training.\n With this flag no snapshotting can be done. Plus visualisation will not\n work, because visualisation uses files that are created and updated during training.\n final_ctr_computation_mode : string, [default='Default']\n Possible values:\n - 'Default' - Compute final ctrs for all pools.\n - 'Skip' - Skip final ctr computation. WARNING: model without ctrs can't be applied.\n approx_on_full_history : bool, [default=False]\n If this flag is set to True, each approximated value is calculated using all the preceeding rows in the fold (slower, more accurate).\n If this flag is set to False, each approximated value is calculated using only the beginning 1/fold_len_multiplier fraction of the fold (faster, slightly less accurate).\n boosting_type : string, default value depends on object count and feature count in train dataset and on learning mode.\n Boosting scheme.\n Possible values:\n - 'Ordered' - Gives better quality, but may slow down the training.\n - 'Plain' - The classic gradient boosting scheme. 
May result in quality degradation, but does not slow down the training.\n task_type : string, [default=None]\n The calcer type used to train the model.\n Possible values:\n - 'CPU'\n - 'GPU'\n device_config : string, [default=None], deprecated, use devices instead\n devices : list or string, [default=None], GPU devices to use.\n String format is: '0' for 1 device or '0:1:3' for multiple devices or '0-3' for range of devices.\n List format is : [0] for 1 device or [0,1,3] for multiple devices.\n\n bootstrap_type : string, Bayesian, Bernoulli, Poisson.\n Default bootstrap is Bayesian.\n Poisson bootstrap is supported only on GPU.\n\n subsample : float, [default=None]\n Sample rate for bagging. This parameter can be used Poisson or Bernoully bootstrap types.\n\n dev_score_calc_obj_block_size: int, [default=5000000]\n CPU only. Size of block of samples in score calculation. Should be > 0\n Used only for learning speed tuning.\n Changing this parameter can affect results due to numerical accuracy differences\n\n max_depth : int, Synonym for depth.\n\n n_estimators : int, synonym for iterations.\n\n num_trees : int, synonym for iterations.\n\n num_boost_round : int, synonym for iterations.\n\n colsample_bylevel : float, synonym for rsm.\n\n random_state : int, synonym for random_seed.\n\n reg_lambda : float, synonym for l2_leaf_reg.\n\n objective : string, synonym for loss_function.\n\n eta : float, synonym for learning_rate.\n\n max_bin : float, synonym for border_count.\n\n scale_pos_weight : float, synonym for class_weights.\n Can be used only for binary classification. Sets weight multiplier for\n class 1 to scale_pos_weight value.\n\n metadata : dict, string to string key-value pairs to be stored in model metadata storage\n\n early_stopping_rounds : int\n Synonym for od_wait. 
Only one of these parameters should be set.\n\n cat_features : list of numpy.array of integer feature indices.\n \"\"\"\n def __init__(\n self,\n iterations=None,\n learning_rate=None,\n depth=None,\n l2_leaf_reg=None,\n model_size_reg=None,\n rsm=None,\n loss_function='Logloss',\n border_count=None,\n feature_border_type=None,\n fold_permutation_block_size=None,\n od_pval=None,\n od_wait=None,\n od_type=None,\n nan_mode=None,\n counter_calc_method=None,\n leaf_estimation_iterations=None,\n leaf_estimation_method=None,\n thread_count=None,\n random_seed=None,\n use_best_model=None,\n best_model_min_trees=None,\n verbose=None,\n silent=None,\n logging_level=None,\n metric_period=None,\n ctr_leaf_count_limit=None,\n store_all_simple_ctr=None,\n max_ctr_complexity=None,\n has_time=None,\n allow_const_label=None,\n classes_count=None,\n class_weights=None,\n class_names=None,\n one_hot_max_size=None,\n random_strength=None,\n name=None,\n ignored_features=None,\n train_dir=None,\n custom_loss=None,\n custom_metric=None,\n eval_metric=None,\n bagging_temperature=None,\n save_snapshot=None,\n snapshot_file=None,\n snapshot_interval=None,\n fold_len_multiplier=None,\n used_ram_limit=None,\n gpu_ram_part=None,\n pinned_memory_size=None,\n allow_writing_files=None,\n final_ctr_computation_mode=None,\n approx_on_full_history=None,\n boosting_type=None,\n simple_ctr=None,\n combinations_ctr=None,\n per_feature_ctr=None,\n ctr_description=None,\n task_type=None,\n device_config=None,\n devices=None,\n bootstrap_type=None,\n subsample=None,\n dev_score_calc_obj_block_size=None,\n max_depth=None,\n n_estimators=None,\n num_boost_round=None,\n num_trees=None,\n colsample_bylevel=None,\n random_state=None,\n reg_lambda=None,\n objective=None,\n eta=None,\n max_bin=None,\n scale_pos_weight=None,\n gpu_cat_features_storage=None,\n data_partition=None,\n metadata=None,\n early_stopping_rounds=None,\n cat_features=None\n ):\n params = {}\n not_params = [\"not_params\", \"self\", \"params\", \"__class__\"]\n for key, value in iteritems(locals().copy()):\n if key not in not_params and value is not None:\n params[key] = value\n\n super(CatBoostClassifier, self).__init__(params)\n\n @property\n def classes_(self):\n return getattr(self, \"_classes\", None)\n\n def fit(self, X, y=None, cat_features=None, sample_weight=None, baseline=None, use_best_model=None,\n eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,\n verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Fit the CatBoostClassifier model.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n If not catboost.Pool, 2 dimensional Feature matrix or string - file with dataset.\n\n y : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Labels, 1 dimensional array like.\n Use only if X is not catboost.Pool.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Use only if X is not catboost.Pool.\n\n sample_weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Instance weights, 1 dimensional array like.\n\n baseline : list or numpy.array, optional (default=None)\n If not None, giving 2 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n use_best_model : bool, optional (default=None)\n Flag to use best model\n\n 
eval_set : catboost.Pool or list, optional (default=None)\n A list of (X, y) tuple pairs to use as a validation set for\n early-stopping\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n silent : bool\n If silent is True, logging_level is set to Silent.\n If silent is False, logging_level is set to Verbose.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost\n \"\"\"\n\n params = self._init_params.copy()\n _process_synonyms(params)\n if 'loss_function' in params:\n self._check_is_classification_objective(params['loss_function'])\n\n self._fit(X, y, cat_features, None, sample_weight, None, None, None, None, baseline, use_best_model,\n eval_set, verbose, logging_level, plot, column_description, verbose_eval, metric_period,\n silent, early_stopping_rounds, save_snapshot, snapshot_file, snapshot_interval)\n return self\n\n def predict(self, data, prediction_type='Class', ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n prediction_type : string, optional (default='Class')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. 
This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool, optional (default=False)\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single object\n \"\"\"\n return self._predict(data, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def predict_proba(self, data, ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict class probability with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single object\n \"\"\"\n return self._predict(data, 'Probability', ntree_start, ntree_end, thread_count, verbose)\n\n def staged_predict(self, data, prediction_type='Class', ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n prediction_type : string, optional (default='Class')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. 
This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def staged_predict_proba(self, data, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict classification target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, 'Probability', ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def score(self, X, y=None):\n \"\"\"\n Calculate accuracy.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n Data to predict.\n y : list or numpy.array\n True labels.\n\n Returns\n -------\n accuracy : float\n \"\"\"\n if isinstance(X, Pool):\n if X.get_label() is None:\n raise CatboostError(\"Label in X has not initialized.\")\n if y is not None:\n raise CatboostError(\"Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool.\")\n y = X.get_label()\n elif y is None:\n raise CatboostError(\"y should be specified.\")\n correct = []\n y = np.array(y, dtype=np.int32)\n for i, val in enumerate(self.predict(X)):\n correct.append(1 * (y[i] == np.int32(val)))\n return np.mean(correct)\n\n def _check_is_classification_objective(self, loss_function):\n if isinstance(loss_function, str) and not self._is_classification_objective(loss_function):\n raise CatboostError(\"Invalid loss_function='{}': for classifier use \"\n \"Logloss, CrossEntropy, MultiClass, MultiClassOneVsAll or custom objective object\".format(loss_function))\n\n\nclass CatBoostRegressor(CatBoost):\n \"\"\"\n Implementation of the scikit-learn API for CatBoost regression.\n\n Parameters\n ----------\n Like in CatBoostClassifier, except loss_function, classes_count, class_names and class_weights\n\n loss_function : string, [default='RMSE']\n 'RMSE'\n 'MAE'\n 'Quantile:alpha=value'\n 'LogLinQuantile:alpha=value'\n 'Poisson'\n 'MAPE'\n 'Lq:q=value'\n \"\"\"\n def __init__(\n self,\n iterations=None,\n learning_rate=None,\n depth=None,\n l2_leaf_reg=None,\n model_size_reg=None,\n rsm=None,\n loss_function='RMSE',\n border_count=None,\n 
feature_border_type=None,\n fold_permutation_block_size=None,\n od_pval=None,\n od_wait=None,\n od_type=None,\n nan_mode=None,\n counter_calc_method=None,\n leaf_estimation_iterations=None,\n leaf_estimation_method=None,\n thread_count=None,\n random_seed=None,\n use_best_model=None,\n best_model_min_trees=None,\n verbose=None,\n silent=None,\n logging_level=None,\n metric_period=None,\n ctr_leaf_count_limit=None,\n store_all_simple_ctr=None,\n max_ctr_complexity=None,\n has_time=None,\n allow_const_label=None,\n one_hot_max_size=None,\n random_strength=None,\n name=None,\n ignored_features=None,\n train_dir=None,\n custom_metric=None,\n eval_metric=None,\n bagging_temperature=None,\n save_snapshot=None,\n snapshot_file=None,\n snapshot_interval=None,\n fold_len_multiplier=None,\n used_ram_limit=None,\n gpu_ram_part=None,\n pinned_memory_size=None,\n allow_writing_files=None,\n final_ctr_computation_mode=None,\n approx_on_full_history=None,\n boosting_type=None,\n simple_ctr=None,\n combinations_ctr=None,\n per_feature_ctr=None,\n ctr_description=None,\n task_type=None,\n device_config=None,\n devices=None,\n bootstrap_type=None,\n subsample=None,\n dev_score_calc_obj_block_size=None,\n max_depth=None,\n n_estimators=None,\n num_boost_round=None,\n num_trees=None,\n colsample_bylevel=None,\n random_state=None,\n reg_lambda=None,\n objective=None,\n eta=None,\n max_bin=None,\n gpu_cat_features_storage=None,\n data_partition=None,\n metadata=None,\n early_stopping_rounds=None,\n cat_features=None\n ):\n params = {}\n not_params = [\"not_params\", \"self\", \"params\", \"__class__\"]\n for key, value in iteritems(locals().copy()):\n if key not in not_params and value is not None:\n params[key] = value\n\n super(CatBoostRegressor, self).__init__(params)\n\n def fit(self, X, y=None, cat_features=None, sample_weight=None, baseline=None, use_best_model=None,\n eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,\n verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Fit the CatBoost model.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n If not catboost.Pool, 2 dimensional Feature matrix or string - file with dataset.\n\n y : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Labels, 1 dimensional array like.\n Use only if X is not catboost.Pool.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Use only if X is not catboost.Pool.\n\n sample_weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Instance weights, 1 dimensional array like.\n\n baseline : list or numpy.array, optional (default=None)\n If not None, giving 2 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n use_best_model : bool, optional (default=None)\n Flag to use best model\n\n eval_set : catboost.Pool or list, optional (default=None)\n A list of (X, y) tuple pairs to use as a validation set for\n early-stopping\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n silent : bool\n If silent is True, 
logging_level is set to Silent.\n If silent is False, logging_level is set to Verbose.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost\n \"\"\"\n\n params = deepcopy(self._init_params)\n _process_synonyms(params)\n if 'loss_function' in params:\n self._check_is_regressor_loss(params['loss_function'])\n\n return self._fit(X, y, cat_features, None, sample_weight, None, None, None, None, baseline,\n use_best_model, eval_set, verbose, logging_level, plot, column_description,\n verbose_eval, metric_period, silent, early_stopping_rounds,\n save_snapshot, snapshot_file, snapshot_interval)\n\n def predict(self, data, ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single object\n \"\"\"\n return self._predict(data, \"RawFormulaVal\", ntree_start, ntree_end, thread_count, verbose)\n\n def staged_predict(self, data, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. 
This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, \"RawFormulaVal\", ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def score(self, X, y=None):\n \"\"\"\n Calculate RMSE.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n Data to predict.\n y : list or numpy.array\n True labels.\n\n Returns\n -------\n RMSE : float\n \"\"\"\n if isinstance(X, Pool):\n if X.get_label() is None:\n raise CatboostError(\"Label in X has not initialized.\")\n if y is not None:\n raise CatboostError(\"Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool.\")\n y = X.get_label()\n elif y is None:\n raise CatboostError(\"y should be specified.\")\n error = []\n y = np.array(y, dtype=np.float64)\n for i, val in enumerate(self.predict(X)):\n error.append(pow(y[i] - val, 2))\n return np.sqrt(np.mean(error))\n\n def _check_is_regressor_loss(self, loss_function):\n if isinstance(loss_function, str) and not self._is_regression_objective(loss_function):\n raise CatboostError(\"Invalid loss_function='{}': for regressor use \"\n \"RMSE, MAE, Quantile, LogLinQuantile, Poisson, MAPE, Lq or custom objective object\".format(loss_function))\n\n\ndef train(pool=None, params=None, dtrain=None, logging_level=None, verbose=None, iterations=None,\n num_boost_round=None, evals=None, eval_set=None, plot=None, verbose_eval=None, metric_period=None,\n early_stopping_rounds=None, save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Train CatBoost model.\n\n Parameters\n ----------\n params : dict\n Parameters for CatBoost.\n If None, all params are set to their defaults.\n If dict, overriding parameters present in the dict.\n\n pool : catboost.Pool or tuple (X, y)\n Data to train on.\n\n iterations : int\n Number of boosting iterations. Can be set in params dict.\n\n evals : catboost.Pool or tuple (X, y)\n Synonym for eval_set. Only one of these parameters should be set.\n\n dtrain : catboost.Pool or tuple (X, y)\n Synonym for pool parameter. Only one of these parameters should be set.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n iterations : int\n Number of boosting iterations. Can be set in params dict.\n\n num_boost_round : int\n Synonym for iterations. 
Only one of these parameters should be set.\n\n eval_set : catboost.Pool or tuple (X, y) or list [(X, y)]\n Dataset for evaluation.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost class\n \"\"\"\n\n if params is None:\n raise CatboostError(\"params should be set.\")\n\n if dtrain is not None:\n if pool is None:\n pool = dtrain\n else:\n raise CatboostError(\"Only one of the parameters pool and dtrain should be set.\")\n\n if num_boost_round is not None:\n if iterations is None:\n iterations = num_boost_round\n else:\n raise CatboostError(\"Only one of the parameters iterations and num_boost_round should be set.\")\n if iterations is not None:\n params = deepcopy(params)\n params.update({\n 'iterations': iterations\n })\n\n if early_stopping_rounds is not None:\n params.update({\n 'od_type': 'Iter'\n })\n if 'od_pval' in params:\n del params['od_pval']\n params.update({\n 'od_wait': early_stopping_rounds\n })\n\n if evals is not None:\n if eval_set is not None:\n raise CatboostError('Only one of the parameters evals, eval_set should be set.')\n eval_set = evals\n\n model = CatBoost(params)\n model.fit(X=pool, eval_set=eval_set, logging_level=logging_level, plot=plot, verbose=verbose,\n verbose_eval=verbose_eval, metric_period=metric_period,\n early_stopping_rounds=early_stopping_rounds, save_snapshot=save_snapshot,\n snapshot_file=snapshot_file, snapshot_interval=snapshot_interval)\n return model\n\n\ndef cv(pool=None, params=None, dtrain=None, iterations=None, num_boost_round=None,\n fold_count=None, nfold=None, inverted=False, partition_random_seed=0, seed=None,\n shuffle=True, logging_level=None, stratified=False, as_pandas=True, metric_period=None,\n verbose=None, verbose_eval=None, plot=False, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None, iterations_batch_size=100):\n \"\"\"\n Cross-validate the CatBoost model.\n\n Parameters\n ----------\n pool : catboost.Pool\n Data to cross-validatte.\n\n params : dict\n Parameters for CatBoost.\n CatBoost has many of parameters, all have default values.\n If None, all params still defaults.\n If dict, overriding some (or all) params.\n\n dtrain : catboost.Pool or tuple (X, y)\n Synonym for pool parameter. Only one of these parameters should be set.\n\n iterations : int\n Number of boosting iterations. Can be set in params dict.\n\n num_boost_round : int\n Synonym for iterations. Only one of these parameters should be set.\n\n fold_count : int, optional (default=3)\n The number of folds to split the dataset into.\n\n nfold : int\n Synonym for fold_count.\n\n inverted : bool, optional (default=False)\n Train on the test fold and evaluate the model on the training folds.\n\n partition_random_seed : int, optional (default=0)\n Use this as the seed value for random permutation of the data.\n Permutation is performed before splitting the data for cross validation.\n Each seed generates unique data splits.\n\n seed : int, optional\n Synonym for partition_random_seed. 
This parameter is deprecated. Use\n partition_random_seed instead.\n If both parameters are initialised partition_random_seed parameter is\n ignored.\n\n shuffle : bool, optional (default=True)\n Shuffle the dataset objects before splitting into folds.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n stratified : bool, optional (default=False)\n Perform stratified sampling.\n\n as_pandas : bool, optional (default=True)\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return dict.\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n iterations_batch_size: int [default:100]\n Number of iterations to compute for each fold before aggregating results.\n\n Returns\n -------\n cv results : pandas.core.frame.DataFrame with cross-validation results\n columns are: test-error-mean test-error-std train-error-mean train-error-std\n \"\"\"\n if params is None:\n raise CatboostError(\"params should be set.\")\n\n params = deepcopy(params)\n _process_synonyms(params)\n\n metric_period, verbose, logging_level = _process_verbose(metric_period, verbose, logging_level, verbose_eval)\n\n if verbose is not None:\n params.update({\n 'verbose': verbose\n })\n\n if logging_level is not None:\n params.update({\n 'logging_level': logging_level\n })\n\n if metric_period is not None:\n params.update({\n 'metric_period': metric_period\n })\n\n if early_stopping_rounds is not None:\n params.update({\n 'od_type': 'Iter'\n })\n if 'od_pval' in params:\n del params['od_pval']\n params.update({\n 'od_wait': early_stopping_rounds\n })\n\n if dtrain is not None:\n if pool is None:\n pool = dtrain\n else:\n raise CatboostError(\"Only one of the parameters pool and dtrain should be set.\")\n\n if num_boost_round is not None:\n if iterations is None:\n iterations = num_boost_round\n else:\n raise CatboostError(\"Only one of the parameters iterations and num_boost_round should be set.\")\n\n if iterations is not None:\n params.update({\n 'iterations': iterations\n })\n\n if seed is not None:\n partition_random_seed = seed\n\n if save_snapshot is not None:\n params['save_snapshot'] = save_snapshot\n\n if snapshot_file is not None:\n params['snapshot_file'] = snapshot_file\n\n if snapshot_interval is not None:\n params['snapshot_interval'] = snapshot_interval\n\n if nfold is None and fold_count is None:\n fold_count = 3\n elif fold_count is None:\n fold_count = nfold\n else:\n assert nfold is None or nfold == fold_count\n\n with log_fixup(), plot_wrapper(plot, params):\n return _cv(params, pool, fold_count, inverted, 
partition_random_seed, shuffle, stratified,\n as_pandas, iterations_batch_size)\n\n\nclass BatchMetricCalcer(_MetricCalcerBase):\n\n def __init__(self, catboost, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir):\n super(BatchMetricCalcer, self).__init__(catboost)\n if tmp_dir is None:\n tmp_dir = tempfile.mkdtemp()\n delete_temp_dir_flag = True\n else:\n delete_temp_dir_flag = False\n\n if isinstance(metrics, str):\n metrics = [metrics]\n self._create_calcer(metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir, delete_temp_dir_flag)\n\n\ndef sum_models(models, weights=None, ctr_merge_policy='IntersectingCountersAverage'):\n result = CatBoost()\n result._sum_models(models, weights, ctr_merge_policy)\n return result\n" ]
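The CatBoost source stored in the code field above documents the model-object API: fit(), predict()/predict_proba(), staged_predict(), eval_metrics(), get_feature_importance(), and save_model()/load_model(). As a reading aid only, here is a minimal usage sketch of that API; it is not part of the harvested record, and the toy data, file name, and hyperparameter values are invented for illustration.

from catboost import CatBoostClassifier, Pool

# Tiny, made-up binary-classification problem (numeric features only).
X = [[1.0, 2.0], [2.0, 3.0], [3.0, 1.0], [4.0, 5.0], [5.0, 4.0], [6.0, 6.0]]
y = [0, 0, 0, 1, 1, 1]
train_pool = Pool(X, y)

model = CatBoostClassifier(iterations=20, depth=2, learning_rate=0.5,
                           loss_function='Logloss', verbose=False)
model.fit(train_pool)

print(model.predict(X))         # 'Class' predictions (majority vote)
print(model.predict_proba(X))   # per-class probabilities

# staged_predict() yields one prediction array every eval_period trees.
for staged in model.staged_predict(X, prediction_type='Probability', eval_period=10):
    print(staged.shape)

# eval_metrics() evaluates metrics on a Pool over tree intervals.
scores = model.eval_metrics(train_pool, metrics=['Logloss', 'AUC'])
print(scores['Logloss'][-1])

# Feature importances and model persistence, as documented above.
print(model.get_feature_importance(train_pool))
model.save_model('toy_model.cbm')
restored = CatBoostClassifier().load_model('toy_model.cbm')
print(restored.predict(X))

The same pattern applies to CatBoostRegressor, except that a regression loss such as RMSE is used and score() returns RMSE rather than accuracy.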
[ [ "numpy.transpose", "numpy.dtype", "numpy.reshape", "numpy.int32", "numpy.expand_dims", "numpy.shape", "numpy.array", "numpy.mean" ] ]
ygarrot/plantcv
[ "0e11c7f63d96a52487e01e3b67744aa8697eedb2" ]
[ "plantcv/plantcv/spectral_index/spectral_index.py" ]
[ "# Extract one of the predefined indices from a hyperspectral datacube\n\nimport os\nimport numpy as np\nimport cv2\nfrom plantcv.plantcv import params\nfrom plantcv.plantcv._debug import _debug\nfrom plantcv.plantcv import fatal_error\nfrom plantcv.plantcv import Spectral_data\nfrom plantcv.plantcv.transform import rescale\nfrom plantcv.plantcv.hyperspectral import _find_closest\n\n\ndef ndvi(hsi, distance=20):\n \"\"\"Normalized Difference Vegetation Index.\n\n NDVI = (R800 - R670) / (R800 + R670)\n\n The theoretical range for NDVI is [-1.0, 1.0]\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 670:\n # Obtain index that best represents NIR and red bands\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800 = (hsi.array_data[:, :, r800_index])\n r670 = (hsi.array_data[:, :, r670_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r670) / (r800 + r670)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"NDVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating NDVI. Try increasing distance.\")\n\n\ndef gdvi(hsi, distance=20):\n \"\"\"Green Difference Vegetation Index.\n\n GDVI = R800 - R550\n\n The theoretical range for GDVI is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 550:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r800 = (hsi.array_data[:, :, r800_index])\n r550 = (hsi.array_data[:, :, r550_index])\n # Naturally ranges from -1 to 1\n index_array_raw = r800 - r550\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"GDVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating GDVI. 
Try increasing distance.\")\n\n\ndef savi(hsi, distance=20):\n \"\"\"Soil Adjusted Vegetation Index.\n\n SAVI = (1.5 * (R800 - R680)) / (R800 + R680 + 0.5)\n\n The theoretical range for SAVI is [-1.2, 1.2].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 680:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r680_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 680)\n r800 = (hsi.array_data[:, :, r800_index])\n r680 = (hsi.array_data[:, :, r680_index])\n # Naturally ranges from -1.2 to 1.2\n index_array_raw = (1.5 * (r800 - r680)) / (r800 + r680 + 0.5)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"SAVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating SAVI. Try increasing distance.\")\n\n\ndef pri(hsi, distance=20):\n \"\"\"Photochemical Reflectance Index.\n\n PRI = (R531 - R570) / (R531 + R570)\n\n The theoretical range for PRI is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 570 and (float(hsi.min_wavelength) - distance) <= 531:\n # Obtain index that best approximates 570 and 531 nm bands\n r570_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 570)\n r531_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 531)\n r570 = (hsi.array_data[:, :, r570_index])\n r531 = (hsi.array_data[:, :, r531_index])\n index_array_raw = (r531 - r570) / (r531 + r570)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PRI\")\n fatal_error(\"Available wavelengths are not suitable for calculating PRI. Try increasing distance.\")\n\n\ndef ari(hsi, distance=20):\n \"\"\"Anthocyanin Reflectance Index.\n\n ARI = (1 / R550) - (1 / R700)\n\n The theoretical range for ARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 700 and (float(hsi.min_wavelength) - distance) <= 550:\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r550 = (hsi.array_data[:, :, r550_index])\n r700 = (hsi.array_data[:, :, r700_index])\n index_array_raw = (1 / r550) - (1 / r700)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"ARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating ARI. 
Try increasing distance.\")\n\n\ndef ci_rededge(hsi, distance=20):\n \"\"\"Chlorophyll Index Red Edge.\n\n CI_REDEDGE = (R800 / R700) - 1\n\n The theoretical range for CI_REDEDGE is [-1.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 700:\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r700 = (hsi.array_data[:, :, r700_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to inf\n index_array_raw = (r800 / r700) - 1\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"CI_REDEDGE\")\n fatal_error(\"Available wavelengths are not suitable for calculating CI_REDEDGE. Try increasing distance.\")\n\n\ndef cri550(hsi, distance=20):\n \"\"\"Carotenoid Reflectance Index 550.\n\n CRI550 = (1 / R510) - (1 / R550)\n\n The theoretical range for CRI550 is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 550 and (float(hsi.min_wavelength) - distance) <= 510:\n r510_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 510)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r510 = (hsi.array_data[:, :, r510_index])\n r550 = (hsi.array_data[:, :, r550_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (1 / r510) - (1 / r550)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"CRI510\")\n fatal_error(\"Available wavelengths are not suitable for calculating CRI510. Try increasing distance.\")\n\n\ndef cri700(hsi, distance=20):\n \"\"\"Carotenoid Reflectance Index 700.\n\n CRI700 = (1 / R510) - (1 / R700)\n\n The theoretical range for CRI700 is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 700 and (float(hsi.min_wavelength) - distance) <= 510:\n r510_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 510)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r510 = (hsi.array_data[:, :, r510_index])\n r700 = (hsi.array_data[:, :, r700_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (1 / r510) - (1 / r700)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"CRI700\")\n fatal_error(\"Available wavelengths are not suitable for calculating CRI700. 
Try increasing distance.\")\n\n\ndef egi(rgb_img):\n \"\"\"Excess Green Index.\n\n r = R / (R + G + B)\n g = G / (R + G + B)\n b = B / (R + G + B)\n EGI = 2g - r - b\n\n The theoretical range for EGI is (-1, 2).\n\n Inputs:\n rgb_img = Color image (np.array)\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param rgb_img: np.array\n :return index_array: np.array\n \"\"\"\n # Split the RGB image into component channels\n blue, green, red = cv2.split(rgb_img)\n # Calculate float32 sum of all channels\n total = red.astype(np.float32) + green.astype(np.float32) + blue.astype(np.float32)\n # Calculate normalized channels\n r = red.astype(np.float32) / total\n g = green.astype(np.float32) / total\n b = blue.astype(np.float32) / total\n index_array_raw = (2 * g) - r - b\n\n hsi = Spectral_data(array_data=None, max_wavelength=0, min_wavelength=0, max_value=255, min_value=0,\n d_type=np.uint8, wavelength_dict={}, samples=None, lines=None, interleave=None,\n wavelength_units=None, array_type=None, pseudo_rgb=None, filename=None, default_bands=None)\n\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"EGI\")\n\n\ndef evi(hsi, distance=20):\n \"\"\"Enhanced Vegetation index.\n\n EVI = (2.5 * (R800 - R670)) / (1 + R800 + (6 * R670) - (7.5 * R480))\n\n The theoretical range for EVI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 480:\n r480_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 480)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r480 = (hsi.array_data[:, :, r480_index])\n r670 = (hsi.array_data[:, :, r670_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (2.5 * (r800 - r670)) / (1 + r800 + (6 * r670) - (7.5 * r480))\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"EVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating EVI. 
Try increasing distance.\")\n\n\ndef mari(hsi, distance=20):\n \"\"\"Modified Anthocyanin Reflectance Index.\n\n MARI = ((1 / R550) - (1 / R700)) * R800\n\n The theoretical range for MARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 550:\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r550 = (hsi.array_data[:, :, r550_index])\n r700 = (hsi.array_data[:, :, r700_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -inf to inf\n index_array_raw = ((1 / r550) - (1 / r700)) * r800\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"MARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating MARI. Try increasing distance.\")\n\n\ndef mcari(hsi, distance=20):\n \"\"\"Modified Chlorophyll Absorption in Reflectance Index.\n\n MCARI = ((R700 - R670) - 0.2 * (R700 - R550)) * (R700 / R670)\n\n The theoretical range for MCARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 700 and (float(hsi.min_wavelength) - distance) <= 550:\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r550 = (hsi.array_data[:, :, r550_index])\n r670 = (hsi.array_data[:, :, r670_index])\n r700 = (hsi.array_data[:, :, r700_index])\n # Naturally ranges from -inf to inf\n index_array_raw = ((r700 - r670) - 0.2 * (r700 - r550)) * (r700 / r670)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"MCARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating MCARI. 
Try increasing distance.\")\n\n\ndef mtci(hsi, distance=20):\n \"\"\"MERIS Terrestrial Chlorophyll Index.\n\n MTCI = (R753.75 - R708.75) / (R708.75 - R681.25)\n\n The theoretical range for MTCI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 753.75 and (float(hsi.min_wavelength) - distance) <= 681.25:\n r681_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 681.25)\n r708_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 708.75)\n r753_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 753.75)\n r681 = (hsi.array_data[:, :, r681_index])\n r708 = (hsi.array_data[:, :, r708_index])\n r753 = (hsi.array_data[:, :, r753_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r753 - r708) / (r708 - r681)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"MTCI\")\n fatal_error(\"Available wavelengths are not suitable for calculating MTCI. Try increasing distance.\")\n\n\ndef ndre(hsi, distance=20):\n \"\"\"Normalized Difference Red Edge.\n\n NDRE = (R790 - R720) / (R790 + R720)\n\n The theoretical range for NDRE is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 790 and (float(hsi.min_wavelength) - distance) <= 720:\n r720_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 720)\n r790_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 790)\n r790 = (hsi.array_data[:, :, r790_index])\n r720 = (hsi.array_data[:, :, r720_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r790 - r720) / (r790 + r720)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"NDRE\")\n fatal_error(\"Available wavelengths are not suitable for calculating NDRE. 
Try increasing distance.\")\n\n\ndef psnd_chla(hsi, distance=20):\n \"\"\"Pigment Specific Normalized Difference for Chlorophyll a.\n\n PSND_CHLA = (R800 - R680) / (R800 + R680)\n\n The theoretical range for PSND_CHLA is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 680:\n r680_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 680)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r680 = (hsi.array_data[:, :, r680_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r680) / (r800 + r680)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSND_CHLA\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSND_CHLA. Try increasing distance.\")\n\n\ndef psnd_chlb(hsi, distance=20):\n \"\"\"Pigment Specific Normalized Difference for Chlorophyll b.\n\n PSND_CHLB = (R800 - R635) / (R800 + R635)\n\n The theoretical range for PSND_CHLB is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 635:\n r635_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 635)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r635 = (hsi.array_data[:, :, r635_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r635) / (r800 + r635)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSND_CHLB\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSND_CHLB. 
Try increasing distance.\")\n\n\ndef psnd_car(hsi, distance=20):\n \"\"\"Pigment Specific Normalized Difference for Caroteniods.\n\n PSND_CAR = (R800 - R470) / (R800 + R470)\n\n The theoretical range for PSND_CAR is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 470:\n r470_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 470)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r470 = (hsi.array_data[:, :, r470_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r470) / (r800 + r470)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSND_CAR\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSND_CAR. Try increasing distance.\")\n\n\ndef psri(hsi, distance=20):\n \"\"\"Plant Senescence Reflectance Index.\n\n PSRI = (R678 - R500) / R750\n\n The theoretical range for PSRI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 750 and (float(hsi.min_wavelength) - distance) <= 500:\n r500_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 500)\n r678_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 678)\n r750_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 750)\n r500 = (hsi.array_data[:, :, r500_index])\n r678 = (hsi.array_data[:, :, r678_index])\n r750 = (hsi.array_data[:, :, r750_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r678 - r500) / r750\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSRI\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSRI. 
Try increasing distance.\")\n\n\ndef pssr_chla(hsi, distance=20):\n \"\"\"Pigment Specific Simple Ratio for Chlorophyll a.\n\n PSSR_CHLA = R800 / R680\n\n The theoretical range for PSSR_CHLA is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 680:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r680_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 680)\n r800 = (hsi.array_data[:, :, r800_index])\n r680 = (hsi.array_data[:, :, r680_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r680\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSSR_CHLA\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSSR_CHLA. Try increasing distance.\")\n\n\ndef pssr_chlb(hsi, distance=20):\n \"\"\"Pigment Specific Simple Ratio for Chlorophyll b.\n\n PSSR_CHLB = R800 / R635\n\n The theoretical range for PSSR_CHLB is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 635:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r635_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 635)\n r800 = (hsi.array_data[:, :, r800_index])\n r635 = (hsi.array_data[:, :, r635_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r635\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSSR_CHLB\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSSR_CHLB. Try increasing distance.\")\n\n\ndef pssr_car(hsi, distance=20):\n \"\"\"Pigment Specific Simple Ratio for Caroteniods.\n\n PSSR_CAR = R800 / R470\n\n The theoretical range for PSSR_CAR is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 470:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r470_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 470)\n r800 = (hsi.array_data[:, :, r800_index])\n r470 = (hsi.array_data[:, :, r470_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r470\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSSR_CAR\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSSR_CAR. 
Try increasing distance.\")\n\n\ndef rgri(hsi, distance=20):\n \"\"\"Red/green ratio index (Gamon and Surfus, 1999)\n The theoretical range for RGRI is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 670 and (float(hsi.min_wavelength) - distance) <= 560:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r560_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 560)\n r670 = (hsi.array_data[:, :, r670_index])\n r560 = (hsi.array_data[:, :, r560_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r670 / r560\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"RGRI\")\n fatal_error(\"Available wavelengths are not suitable for calculating RGRI. Try increasing distance.\")\n\n\ndef rvsi(hsi, distance=20):\n \"\"\"Red-Edge Vegetation Stress Index.\n\n RVSI = ((R714 + R752) / 2) - R733\n\n The theoretical range for RVSI is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 752 and (float(hsi.min_wavelength) - distance) <= 714:\n r714_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 714)\n r733_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 733)\n r752_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 752)\n r714 = (hsi.array_data[:, :, r714_index])\n r733 = (hsi.array_data[:, :, r733_index])\n r752 = (hsi.array_data[:, :, r752_index])\n # Naturally ranges from -1 to 1\n index_array_raw = ((r714 + r752) / 2) - r733\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"RVSI\")\n fatal_error(\"Available wavelengths are not suitable for calculating RVSI. 
Try increasing distance.\")\n\n\ndef sipi(hsi, distance=20):\n \"\"\"Structure-Independent Pigment Index.\n\n SIPI = (R800 - R670) / (R800 - R480)\n\n The theoretical range for SIPI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 480:\n r480_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 480)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r445 = (hsi.array_data[:, :, r480_index])\n r670 = (hsi.array_data[:, :, r670_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r800 - r670) / (r800 - r445)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"SIPI\")\n fatal_error(\"Available wavelengths are not suitable for calculating SIPI. Try increasing distance.\")\n\n\ndef sr(hsi, distance=20):\n \"\"\"Simple Ratio.\n\n SR = R800 / R670\n\n The theoretical range for SR is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 670:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r670 = (hsi.array_data[:, :, r670_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r670\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"SR\")\n fatal_error(\"Available wavelengths are not suitable for calculating SR. 
Try increasing distance.\")\n\n\ndef vari(hsi, distance=20):\n \"\"\"Visible Atmospherically Resistant Index.\n\n VARI = (R550 - R670) / (R550 + R670 - R480)\n\n The theoretical range for VARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 670 and (float(hsi.min_wavelength) - distance) <= 480:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r480_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 480)\n r670 = (hsi.array_data[:, :, r670_index])\n r550 = (hsi.array_data[:, :, r550_index])\n r480 = (hsi.array_data[:, :, r480_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r550 - r670) / (r550 + r670 - r480)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"VARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating VARI. Try increasing distance.\")\n\n\ndef vi_green(hsi, distance=20):\n \"\"\"Vegetation Index using green bands.\n\n VIgreen = (R550 - R670) / (R550 + R670)\n\n The theoretical range for VI_GREEN is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 670 and (float(hsi.min_wavelength) - distance) <= 550:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r670 = (hsi.array_data[:, :, r670_index])\n r550 = (hsi.array_data[:, :, r550_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r550 - r670) / (r550 + r670)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"VI_GREEN\")\n fatal_error(\"Available wavelengths are not suitable for calculating VI_GREEN. Try increasing distance.\")\n\n\ndef wi(hsi, distance=20):\n \"\"\"Water Index.\n\n WI = R900 / R970\n\n The theoretical range for WI is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 970 and (float(hsi.min_wavelength) - distance) <= 900:\n r900_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 900)\n r970_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 970)\n r900 = (hsi.array_data[:, :, r900_index])\n r970 = (hsi.array_data[:, :, r970_index])\n # Naturally ranges from 0 to Inf\n index_array_raw = r900 / r970\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"WI\")\n fatal_error(\"Available wavelengths are not suitable for calculating WBI. 
Try increasing distance.\")\n\n\ndef _package_index(hsi, raw_index, method):\n \"\"\"Private function to package raw index array as a Spectral_data object.\n Inputs:\n hsi = hyperspectral data (Spectral_data object)\n raw_index = raw index array\n method = index method (e.g. NDVI)\n\n Returns:\n index = index image as a Spectral_data object.\n\n :params hsi: __main__.Spectral_data\n :params raw_index: np.array\n :params method: str\n :params index: __main__.Spectral_data\n \"\"\"\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Resulting array is float 32 from varying natural ranges, transform into uint8 for plotting\n all_positive = np.add(raw_index, 2 * np.ones(np.shape(raw_index)))\n scaled = rescale(all_positive)\n\n # Find array min and max values\n obs_max_pixel = float(np.nanmax(raw_index))\n obs_min_pixel = float(np.nanmin(raw_index))\n\n index = Spectral_data(array_data=raw_index, max_wavelength=0,\n min_wavelength=0, max_value=obs_max_pixel,\n min_value=obs_min_pixel, d_type=np.uint8,\n wavelength_dict={}, samples=hsi.samples,\n lines=hsi.lines, interleave=hsi.interleave,\n wavelength_units=hsi.wavelength_units,\n array_type=\"index_\" + method.lower(),\n pseudo_rgb=scaled, filename=hsi.filename, default_bands=None)\n\n # Restore debug mode\n params.debug = debug\n\n _debug(visual=index.pseudo_rgb,\n filename=os.path.join(params.debug_outdir, str(params.device) + method + \"_index.png\"))\n\n return index\n" ]
[ [ "numpy.shape", "numpy.nanmax", "numpy.nanmin" ] ]
jmagine/rf-selection
[ "ba9dcb5ca550916873ce68baa71da983f2dd4be5" ]
[ "sim/pid.py" ]
[ "'''*-----------------------------------------------------------------------*---\n Author: Jason Ma\n Date : Oct 18 2018\n TODO\n\n File Name : pid.py\n Description: TODO\n---*-----------------------------------------------------------------------*'''\n\nimport time\nimport matplotlib.animation as anim\nimport matplotlib.pyplot as plt\nimport threading\nimport math\nimport numpy as np\n\n'''[Global Vars]------------------------------------------------------------'''\nORIGIN_X = 0.0\nORIGIN_Y = 0.0\nC_R = 10\n\n#plt.autoscale(enable=True, axis=\"both\")\n\nfig = plt.figure()\nax = fig.add_subplot(2,1,1)\nax2 = fig.add_subplot(2,1,2)\nscat = ax.scatter([], [])\nax.set_xlim([-1 * C_R - 1, C_R + 1])\nax.set_ylim([-1 * C_R - 1, C_R + 1])\nscat.set_facecolors(['g', 'r'])\nscat.set_sizes([31, 31])\nprev_time = time.time()\nvel = np.array([0.0, 0.0])\n\nerrors = [0, 1]\nerror_plot, = ax2.plot([i for i in range(len(errors))], errors, color=\"g\")\n\nclass drone(): \n def __init__(self, p, vel):\n self.pos = np.array(p)\n self.v = np.array(vel)\n self.prev_error = np.zeros((2))\n self.integral = np.zeros((2))\n self.dt = 0.01\n self.kp = 0.8 * 2.0\n self.ki = 0\n self.kd = 0\n \n #self.ki = 2.0 * self.kp / 2.0\n #self.kd = self.kp * 2.0 / 8.0\n \n #self.ki = 2 * self.kp / 1.0 \n #self.kd = self.kp * 0.01 / 8\n \n def callback(self):\n pass\n\n def run(self, ref_pos, vx=None, vy=None):\n self.pos += self.v\n\n #print(self.integral)\n\n if vx:\n self.v[0] = vx\n \n if vy:\n self.v[1] = vy\n\n #compute PID output\n error = ref_pos - self.pos\n \n self.integral = self.integral * 0.99 + error * self.dt\n '''\n for i in range(2):\n if self.integral[i] > 1:\n self.integral[i] = 1\n elif self.integral[i] < -1:\n self.integral[i] = -1\n '''\n #print(self.integral)\n\n derivative = (error - self.prev_error) / self.dt\n \n for i in range(2):\n if derivative[i] > 0.1:\n derivative[i] = 0.1\n elif derivative[i] < -0.1:\n derivative[i] = -0.1\n self.prev_error = error\n pid_output = (self.kp * error) + (self.ki * self.integral) + (self.kd * derivative)\n print(self.pos, pid_output, self.kp * error, self.ki * self.integral, self.kd * derivative)\n #print(error[0])\n #errors.append(error[0])\n\n return pid_output\n\nd = drone([ORIGIN_X + C_R, ORIGIN_Y], [0.0, 0.0])\n\ndef dist(x1, y1, x2, y2):\n return ((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))**(1/2)\n\ndef dist(p1, p2):\n assert len(p1) == len(p2)\n dims = len(p1)\n \n total = 0\n for i in range(dims):\n total += (p2[i] - p1[i]) * (p2[i] - p1[i])\n\n return (total)**(1/2)\n\n#def pid_angle(x, y, ref_x, ref_y, d):\n# return math.atan(-1 * (C_R - dist(x, y, ORIGIN_X, ORIGIN_Y)) / d) + math.atan((y - ORIGIN_Y) / (x - ORIGIN_X)) + math.pi / 2\n\ndef ref(t):\n return np.array([ORIGIN_X + C_R * math.cos(t), ORIGIN_Y + C_R * math.sin(t)])\n\ndef update(i):\n global prev_time, vel\n #update reference point position\n curr_time = time.time()\n ref_point = ref(i / 25.0)\n #ref_x = ref_point[0]\n #ref_y = ref_point[1]\n out = d.run(ref_point)\n \n for i in range(2):\n if out[i] > 10 or out[i] < -10:\n out = out * 10 / out[i]\n\n #print(d.pos, out)\n\n d.v = out\n\n while time.time() - prev_time < d.dt:\n time.sleep(d.dt / 10)\n \n prev_time = time.time()\n #print the desired angle of drone\n #pid_ang = pid_angle(d.x, d.y, ref_point[0], ref_point[1], 0.05)\n #print(math.cos(pid_ang), math.sin(pid_ang))\n #d.run(math.cos(pid_ang), math.sin(pid_ang))\n \n scat.set_offsets([[ref_point[0], ref_point[1]], [d.pos[0], d.pos[1]]])\n\n errors.append(dist(ref_point, d.pos))\n 
error_plot.set_xdata([i for i in range(len(errors))])\n error_plot.set_ydata(errors)\n ax2.set_xlim([-1, len(errors) + 1])\n ax2.set_ylim([1, min(errors)])\n\ndef main():\n d = drone(ORIGIN_X + C_R, ORIGIN_Y, 1)\n \n\nif __name__ == '__main__':\n #main()\n a = anim.FuncAnimation(fig, update, range(1000), interval=1, blit=False, repeat=False)\n plt.show()\n\n\n" ]
[ [ "numpy.array", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.zeros" ] ]
sfillwo/stog
[ "b965c47c17472eea11ab63aab9aa738af7875f06" ]
[ "stog/modules/initializers.py" ]
[ "\"\"\"\nAdopted from AllenNLP:\n https://github.com/allenai/allennlp/blob/v0.6.1/allennlp/nn/initializers.py\n\nAn initializer is just a PyTorch function.\nHere we implement a proxy class that allows us\nto register them and supply any additional function arguments\n(for example, the ``mean`` and ``std`` of a normal initializer)\nas named arguments to the constructor.\nThe available initialization functions are\n* `\"normal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.normal_>`_\n* `\"uniform\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.uniform_>`_\n* `\"constant\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.constant_>`_\n* `\"eye\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.eye_>`_\n* `\"dirac\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.dirac_>`_\n* `\"xavier_uniform\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_uniform_>`_\n* `\"xavier_normal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_normal_>`_\n* `\"kaiming_uniform\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_uniform_>`_\n* `\"kaiming_normal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_normal_>`_\n* `\"orthogonal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.orthogonal_>`_\n* `\"sparse\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.sparse_>`_\n* :func:`\"block_orthogonal\" <block_orthogonal>`\n* :func:`\"uniform_unit_scaling\" <uniform_unit_scaling>`\n\"\"\"\nimport re\nimport math\nfrom typing import Callable, List, Tuple, Type, Iterable\nimport itertools\n\nimport torch\nimport torch.nn.init\n\nfrom stog.utils import logging\nfrom stog.utils.checks import ConfigurationError\n\nlogger = logging.init_logger() # pylint: disable=invalid-name\n\n\ndef uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = \"linear\"):\n \"\"\"\n An initaliser which preserves output variance for approximately gaussian\n distributed inputs. This boils down to initialising layers using a uniform\n distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where\n ``dim[0]`` is equal to the input dimension of the parameter and the ``scale``\n is a constant scaling factor which depends on the non-linearity used.\n See `Random Walk Initialisation for Training Very Deep Feedforward Networks\n <https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_\n for more information.\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n The tensor to initialise.\n nonlinearity : ``str``, optional (default = \"linear\")\n The non-linearity which is performed after the projection that this\n tensor is involved in. This must be the name of a function contained\n in the ``torch.nn.functional`` package.\n Returns\n -------\n The initialised tensor.\n \"\"\"\n size = 1.\n # Estimate the input size. 
This won't work perfectly,\n # but it covers almost all use cases where this initialiser\n # would be expected to be useful, i.e in large linear and\n # convolutional layers, as the last dimension will almost\n # always be the output size.\n for dimension in list(tensor.size())[:-1]:\n size *= dimension\n\n activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)\n max_value = math.sqrt(3 / size) * activation_scaling\n\n return tensor.uniform_(-max_value, max_value)\n\n\ndef block_orthogonal(tensor: torch.Tensor,\n split_sizes: List[int],\n gain: float = 1.0) -> None:\n \"\"\"\n An initializer which allows initializing model parameters in \"blocks\". This is helpful\n in the case of recurrent models which use multiple gates applied to linear projections,\n which can be computed efficiently if they are concatenated together. However, they are\n separate parameters which should be initialized independently.\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n A tensor to initialize.\n split_sizes : List[int], required.\n A list of length ``tensor.ndim()`` specifying the size of the\n blocks along that particular dimension. E.g. ``[10, 20]`` would\n result in the tensor being split into chunks of size 10 along the\n first dimension and 20 along the second.\n gain : float, optional (default = 1.0)\n The gain (scaling) applied to the orthogonal initialization.\n \"\"\"\n data = tensor.data\n sizes = list(tensor.size())\n if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):\n raise ConfigurationError(\"tensor dimensions must be divisible by their respective \"\n \"split_sizes. Found size: {} and split_sizes: {}\".format(sizes, split_sizes))\n indexes = [list(range(0, max_size, split))\n for max_size, split in zip(sizes, split_sizes)]\n # Iterate over all possible blocks within the tensor.\n for block_start_indices in itertools.product(*indexes):\n # A list of tuples containing the index to start at for this block\n # and the appropriate step size (i.e split_size[i] for dimension i).\n index_and_step_tuples = zip(block_start_indices, split_sizes)\n # This is a tuple of slices corresponding to:\n # tensor[index: index + step_size, ...]. This is\n # required because we could have an arbitrary number\n # of dimensions. The actual slices we need are the\n # start_index: start_index + step for each dimension in the tensor.\n block_slice = tuple([slice(start_index, start_index + step)\n for start_index, step in index_and_step_tuples])\n data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)\n\n\ndef zero(tensor: torch.Tensor) -> None:\n return tensor.data.zero_()\n\ndef lstm_hidden_bias(tensor: torch.Tensor) -> None:\n \"\"\"\n Initialize the biases of the forget gate to 1, and all other gates to 0,\n following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures\n \"\"\"\n # gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)\n tensor.data.zero_()\n hidden_size = tensor.shape[0] // 4\n tensor.data[hidden_size:(2 * hidden_size)] = 1.0\n" ]
[ [ "torch.nn.init.calculate_gain" ] ]
mesnardo/petibm-decoupledibpm
[ "675615a882cc8418b15a34e1100ccfb421f1d9d1" ]
[ "runs/cylinder2dRe40/scripts/plot_pressure_coefficients.py" ]
[ "\"\"\"Plot the surface pressure coefficient at final time step.\"\"\"\n\nfrom matplotlib import pyplot\nimport numpy\nimport pathlib\n\nimport petibmpy\n\nimport rodney\n\n\ndef get_pressure(simudir, timestep):\n name = 'p' # name of the field variable to load\n datadir = simudir / 'output'\n # Load the gridlines from file.\n filepath = datadir / 'grid.h5'\n x, y = petibmpy.read_grid_hdf5(filepath, name)\n # Load the field from file.\n filepath = datadir / f'{timestep:0>7}.h5'\n p = petibmpy.read_field_hdf5(filepath, name)\n return (x, y), p\n\n\ndef compute_surface_pressure_coefficient(p, x, y):\n # Define circle outside support region of delta function.\n N = 500\n dx = 1.5 / 90 # grid-spacing size in the uniform region\n R = 0.5 + 3 * dx # radius 3 cells away from real boundary\n theta = numpy.linspace(0.0, 2 * numpy.pi, num=N + 1)[:-1]\n xc, yc = 0.0, 0.0\n xb_ext, yb_ext = xc + R * numpy.cos(theta), yc + R * numpy.sin(theta)\n\n # Interpolate the field on extended boundary.\n pb = numpy.empty_like(xb_ext)\n for i, (xbi, ybi) in enumerate(zip(xb_ext, yb_ext)):\n pi = petibmpy.linear_interpolation(p, y, ybi)\n pb[i] = petibmpy.linear_interpolation(pi, x, xbi)\n\n # Compute the pressure coefficient.\n rho = 1.0 # fluid density\n U_inf = 1.0 # freestream speed\n p_inf = 0.0 # far-away pressure\n cp = (pb - p_inf) / (0.5 * rho * U_inf**2)\n return theta, cp\n\n\ndef split_lower_upper(theta, cp):\n mask = numpy.where((theta >= numpy.pi) & (theta < 2 * numpy.pi))[0]\n theta_lower = theta[mask] % numpy.pi\n cp_lower = cp[mask]\n mask = numpy.where((theta >= 0.0) & (theta < numpy.pi))[0]\n theta_upper = numpy.flip(numpy.pi - theta[mask])\n cp_upper = numpy.flip(cp[mask])\n return (dict(theta=theta_lower, cp=cp_lower),\n dict(theta=theta_upper, cp=cp_upper))\n\n\nargs = rodney.parse_command_line()\nmaindir = pathlib.Path(__file__).absolute().parents[1]\ntimestep = 5000 # final time-step index\n\nlabel1 = r'500 markers ($\\Delta s \\approx 0.38 \\Delta x$)'\nsimudir1 = maindir / '500_markers'\ngrid, p = get_pressure(simudir1, timestep)\ntheta, cp = compute_surface_pressure_coefficient(p, *grid)\nlower1, upper1 = split_lower_upper(theta, cp)\n\nlabel2 = r'189 markers ($\\Delta s \\approx \\Delta x$)'\nsimudir2 = maindir / '189_markers'\ngrid, p = get_pressure(simudir2, timestep)\ntheta, cp = compute_surface_pressure_coefficient(p, *grid)\nlower2, upper2 = split_lower_upper(theta, cp)\n\n# Plot the distribution of the surface pressure coefficient.\npyplot.rc('font', family='serif', size=14)\nfig, (ax1, ax2) = pyplot.subplots(ncols=2, figsize=(10.0, 4.0))\nax1.set_title(label1, fontsize=14)\nax1.set_xlabel(r'$\\theta$')\nax1.set_ylabel('$C_p$')\nax1.plot(numpy.degrees(lower1['theta']), lower1['cp'],\n label='lower surface')\nax1.plot(numpy.degrees(upper1['theta']), upper1['cp'],\n label='upper surface', linestyle='--')\n\nax2.set_title(label2, fontsize=14)\nax2.set_xlabel(r'$\\theta$')\nax2.set_ylabel('$C_p$')\nax2.plot(numpy.degrees(lower2['theta']), lower2['cp'],\n label='lower surface')\nax2.plot(numpy.degrees(upper2['theta']), upper2['cp'],\n label='upper surface', linestyle='--')\n\nif args.extra_data:\n # Load digitized values from Li et al. (2016).\n theta_li, cp_li = rodney.lietal2016_load_cp(40)\n ax1.scatter(theta_li, cp_li, label='Li et al. (2016)',\n c='black', marker='s', s=10)\n ax2.scatter(theta_li, cp_li, label='Li et al. 
(2016)',\n c='black', marker='s', s=10)\n\nfor ax in (ax1, ax2):\n ax.set_xlim(0.0, 180.0)\n ax.set_ylim(-1.5, 1.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\nax2.legend(frameon=False)\nfig.tight_layout()\n\nif args.save_figures:\n # Save the figure.\n figdir = maindir / 'figures'\n figdir.mkdir(parents=True, exist_ok=True)\n filepath = figdir / f'cp_{timestep:0>7}.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\nif args.show_figures:\n pyplot.show()\n" ]
[ [ "numpy.degrees", "matplotlib.pyplot.rc", "numpy.cos", "matplotlib.pyplot.subplots", "numpy.empty_like", "matplotlib.pyplot.show", "numpy.flip", "numpy.sin", "numpy.where", "numpy.linspace" ] ]
kristianeschenburg/parcellearning
[ "93811f7d11c1c5583d8f541c7629dbbaa1785304" ]
[ "parcellearning/fn/ops.py" ]
[ "import torch\nimport dgl\n\n\ndef cosine(nodes):\n \"\"\"\n Compute the cosine distance between all pairs of nodes adjacent on a source node.\n \"\"\"\n\n # ```m``` is a matrix of N nodes x E edges x F features\n # representing the messages incident on source nodes with E edges\n m = nodes.mailbox['m']\n\n N = m.shape[1]\n N = (N*(N-1))/2\n\n if m.ndim > 3:\n m = m.transpose(1,2)\n e = torch.matmul(m, m.transpose(2, 3))\n else:\n e = torch.matmul(m, m.transpose(1,2))\n\n e = torch.triu(e, diagonal=1).sum(-1).sum(-1)\n e = e/N\n\n return {'cos': e}\n" ]
[ [ "torch.triu" ] ]
jin530/pytorch-widedeep
[ "4ff1008ba6c62e64383267c924cfd3cf3cbc609c" ]
[ "tests/test_data_utils/test_du_deep_image.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\n\nfrom pytorch_widedeep.preprocessing import ImagePreprocessor\n\n\nfull_path = os.path.realpath(__file__)\npath = os.path.split(full_path)[0]\ndf = pd.DataFrame({\"galaxies\": [\"galaxy1.png\", \"galaxy2.png\"]})\nimg_col = \"galaxies\"\nimd_dir = os.path.join(path, \"images\")\nprocessor = ImagePreprocessor(img_col=img_col, img_path=imd_dir)\nX_imgs = processor.fit_transform(df)\n\n\n###############################################################################\n# There is not much to test here, since I only resize.\n###############################################################################\ndef test_sizes():\n img_width = X_imgs.shape[1]\n img_height = X_imgs.shape[2]\n assert np.all((img_width == processor.width, img_height == processor.height))\n" ]
[ [ "pandas.DataFrame", "numpy.all" ] ]
ethansimpson285/StarlingPy
[ "0afba0444b695c431227f4e28d6d3edde5b56af7" ]
[ "src/starlingpy/StarlingClass.py" ]
[ "from requests import get\nimport datetime\n\nimport pandas as pd \n\nfrom starlingpy.StarlingAPIs import Account_APIs\n\nBASE_PATH = \"https://api.starlingbank.com/api/v2/\"\n\n\nclass TransactionHistory:\n\n \"\"\"\n A history of transactions associated with the Starling account, between stipulated datetimes.\n Requires the StarlingAccount object to be passed\n \"\"\"\n\n\n def __init__(self,Account,**kwargs):\n\n self.associated_Account = Account \n output = Account.get_transactions(**kwargs)\n self.start_date , self.end_date = output[0] , output[1]\n self.transaction_List = output[2]\n self.full_Dataframe = self.generate_transaction_dataframe(**kwargs)\n self.summary_Dataframe = self.summary_transaction_dataframe()\n\n\n def generate_transaction_dataframe(self,**kwargs):\n\n \"\"\" Generates full transaction dataframe between dates \"\"\"\n\n df = pd.DataFrame(self.transaction_List)\n running_balance = self.generate_running_balance_list(self.transaction_List)\n df['Balance Before'] = running_balance[1:]\n df['Balance After'] = running_balance[:-1]\n df[\"transactionTime\"]= pd.to_datetime(df[\"transactionTime\"])\n\n return df\n\n \n def summary_transaction_dataframe(self):\n\n \"\"\" Generates an abridged summary dataframe for using with plotting macros \"\"\"\n\n df = self.full_Dataframe\n\n Amounts_df=df[\"amount\"].apply(pd.Series)\n dfN = pd.concat([Amounts_df,df[\"transactionTime\"],df[\"spendingCategory\"]],ignore_index=False,axis=1)\n pd.to_numeric(dfN['minorUnits'],downcast='float')\n dfN.loc[dfN['spendingCategory'].isin(['INCOME']),'minorUnits'] = -dfN[\"minorUnits\"]\n\n return dfN\n\n\n def generate_running_balance_list(self,transaction_list,**kwargs):\n\n \"\"\" Computes running balance based on totalEffectiveBalance field \"\"\"\n\n balance = self.associated_Account.get_balance()[\"totalEffectiveBalance\"][\"minorUnits\"]\n running_balance = [balance]\n for trans in transaction_list:\n amount = trans[\"amount\"][\"minorUnits\"] \n if trans[\"spendingCategory\"]=='INCOME': amount = -amount\n balance += amount\n running_balance.append(balance)\n \n return running_balance\n\n\n def discrete_time_summary(self,time_block):\n\n # Get the range of times\n rng = pd.date_range(self.start_date, self.end_date, freq=time_block)\n\n # Split the summary_Dataframe by the time blcok\n g=self.summary_Dataframe.groupby(pd.Grouper(key='transactionTime', freq=time_block))\n dfs = [group for _,group in g]\n\n summary_dict = {}\n\n for i,T_df in enumerate(dfs):\n\n if T_df.empty:\n continue\n\n m = T_df['spendingCategory'] != 'INCOME'\n out_df, in_df = T_df[m], T_df[~m]\n\n total_expend = out_df['minorUnits'].sum()/1e2\n total_income = in_df['minorUnits'].sum()/1e2\n if total_income < 0: total_income = -total_income\n\n summary_dict[rng[i]] = {'outgoings': out_df , 'incomings': in_df, 'expenditure': total_expend,'income':total_income} \n \n return pd.DataFrame(summary_dict).T\n\n\n \n\n\n\nclass StarlingAccount:\n\n \"\"\"\n Class which aligns to Starling bank account, with relevant attributes for that bank account, \n Class methods for extracting information from the \n \"\"\"\n\n def fetch(self,url):\n r = get(url,headers=self.headers)\n r.raise_for_status()\n return r.json()\n\n def access_account_details(self):\n url = BASE_PATH + \"accounts\"\n return self.fetch(url)\n\n def __init__(self,PAT,**kwargs):\n self.PAT = PAT\n self.headers = {\"Authorization\": \"Bearer \" + PAT}\n self.requests_object = self.access_account_details()\n self.account_details = 
self.requests_object['accounts'][0]\n self.accountUid = self.account_details['accountUid']\n self.defaultCategory = self.account_details['defaultCategory']\n\n def get_balance(self):\n url = BASE_PATH + Account_APIs[\"Account Balance\"].format(self.accountUid)\n return self.fetch(url)\n\n def show_balance(self):\n tEF = self.get_balance()[\"totalEffectiveBalance\"]\n print(str( tEF[\"minorUnits\"]/1e2) + \" \" + tEF[\"currency\"])\n\n\n def get_recurring_payments(self):\n url = BASE_PATH + Account_APIs[\"Recurring Payments\"].format(self.accountUid)\n return self.fetch(url) \n\n\n def get_feed(self):\n url = BASE_PATH + Account_APIs[\"Feed\"].format(self.accountUid,self.defaultCategory)\n return self.fetch(url) \n\n\n def get_payees(self):\n url = BASE_PATH + Account_APIs[\"Payees\"]\n return self.fetch(url) \n\n\n def get_transactions(self,**kwargs):\n start_date = kwargs[\"start_date\"] if \"start_date\" in kwargs else (datetime.datetime.now()-datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\") + \"T00:00:00Z\"\n end_date = kwargs[\"end_date\"] if \"end_date\" in kwargs else datetime.datetime.now().strftime(\"%Y-%m-%d\") + \"T00:00:00Z\"\n url = BASE_PATH + Account_APIs[\"Transactions Between\"].format(self.accountUid,self.defaultCategory,start_date,end_date)\n return start_date,end_date,self.fetch(url)['feedItems']\n\n\n\n\n\n\n\n \n\n\n\n\n\n" ]
[ [ "pandas.date_range", "pandas.to_numeric", "pandas.DataFrame", "pandas.to_datetime", "pandas.Grouper", "pandas.concat" ] ]
RichardOkubo/DS-Scripts
[ "adf3845802b52e8901d381ffff60f9c1276dabe1" ]
[ "machine-learning-algorithms/reducao-rbm.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.pipeline import Pipeline\n\nbase = datasets.load_digits()\nprevisores = np.asarray(base.data, 'float32')\nclasse = base.target\n\nnormalizador = MinMaxScaler(feature_range=(0,1))\nprevisores = normalizador.fit_transform(previsores)\n\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size = 0.2, random_state=0)\n\nrbm = BernoulliRBM(random_state = 0)\nrbm.n_iter = 25\nrbm.n_components = 50\nnaive_rbm = GaussianNB()\nclassificador_rbm = Pipeline(steps = [('rbm', rbm), ('naive', naive_rbm)])\nclassificador_rbm.fit(previsores_treinamento, classe_treinamento)\n\nplt.figure(figsize=(20,20))\nfor i, comp in enumerate(rbm.components_):\n plt.subplot(10, 10, i + 1)\n plt.imshow(comp.reshape((8,8)), cmap=plt.cm.gray_r)\n plt.xticks(())\n plt.yticks(())\nplt.show()\n\nprevisoes_rbm = classificador_rbm.predict(previsores_teste)\nprecisao_rbm = metrics.accuracy_score(previsoes_rbm, classe_teste)\n\nnaive_simples = GaussianNB()\nnaive_simples.fit(previsores_treinamento, classe_treinamento)\nprevisoes_naive = naive_simples.predict(previsores_teste)\nprecisao_naive = metrics.accuracy_score(previsoes_naive, classe_teste)" ]
[ [ "matplotlib.pyplot.xticks", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.figure", "numpy.asarray", "sklearn.neural_network.BernoulliRBM", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "sklearn.datasets.load_digits", "sklearn.pipeline.Pipeline", "matplotlib.pyplot.yticks", "sklearn.model_selection.train_test_split", "sklearn.naive_bayes.GaussianNB" ] ]
mmezic/netket
[ "629e885212d981d7748d155310abca4a1f9d5481" ]
[ "test/jax/test_vjp_batched.py" ]
[ "import pytest\n\nimport jax\nimport netket as nk\nimport numpy as np\nfrom functools import partial\n\n\[email protected](\"jit\", [False, True])\[email protected](\"batch_size\", [None, 16, 10000, 1000000])\[email protected](\"return_forward\", [False, True])\[email protected](\"batch_argnums\", [1, (1,)])\[email protected](\"nondiff_argnums\", [1, (1,)])\ndef test_vjp_batched(batch_size, jit, return_forward, batch_argnums, nondiff_argnums):\n @partial(jax.vmap, in_axes=(None, 0))\n def f(p, x):\n return jax.lax.log(p.dot(jax.lax.sin(x)))\n\n k = jax.random.split(jax.random.PRNGKey(123), 4)\n p = jax.random.uniform(k[0], shape=(8,))\n X = jax.random.uniform(k[2], shape=(10000, 8))\n w = jax.random.uniform(k[3], shape=(10000,))\n\n vjp_fun_batched = nk.jax.vjp_batched(\n f,\n p,\n X,\n batch_argnums=batch_argnums,\n batch_size=batch_size,\n nondiff_argnums=nondiff_argnums,\n return_forward=return_forward,\n )\n y_expected, vjp_fun = jax.vjp(f, p, X)\n\n if jit:\n vjp_fun_batched = jax.jit(vjp_fun_batched)\n vjp_fun = jax.jit(vjp_fun)\n\n res_expected = vjp_fun(w)[:1]\n\n if return_forward:\n y, res = vjp_fun_batched(w)\n np.testing.assert_allclose(y, y_expected)\n else:\n res = vjp_fun_batched(w)\n\n np.testing.assert_allclose(res, res_expected)\n" ]
[ [ "numpy.testing.assert_allclose" ] ]
FSEC-Photovoltaics/pvrpm-lcoe
[ "dbe0bb30ffa1041ec004f84c57aac44f47bdf6d2" ]
[ "pvrpm/core/simulation.py" ]
[ "import os\nimport time\nimport warnings\nimport multiprocessing as mp\nfrom typing import List\n\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime\nfrom tqdm import tqdm\n\nfrom pvrpm.core.enums import ConfigKeys as ck\nfrom pvrpm.core.case import SamCase\nfrom pvrpm.core.components import Components\nfrom pvrpm.core.utils import summarize_dc_energy, component_degradation\nfrom pvrpm.core.logger import logger\n\n\ndef cf_interval(alpha: float, std: float, num_samples: int) -> float:\n \"\"\"\n Calculates the two tails margin of error given the desired input. The margin of error is the value added and subtracted by the sample mean to obtain the confidence interval\n\n Sample sizes less then equal to 30 use t score, greater then 30 use z score\n\n Args:\n alpha (float): The significance level for the interval\n std (float): The standard deviation of the data\n num_samples (int): The number of samples in the data\n\n Returns:\n float: The margin of error\n \"\"\"\n # two tails\n alpha = alpha / 2\n\n if num_samples > 30:\n score = stats.norm.ppf(alpha)\n else:\n score = stats.t.ppf(1 - alpha, num_samples - 1)\n\n return score * std / np.sqrt(num_samples)\n\n\ndef simulate_day(case: SamCase, comp: Components, day: int):\n \"\"\"\n Updates and increments the simulation by a day, performing all neccesary component updates.\n\n Args:\n case (:obj:`SamCase`): The current Sam Case of the simulation\n comp (:obj:`Components`): The components class containing all the outputs for this simulation\n day (int): Current day in the simulation\n \"\"\"\n # static monitoring starts the day, if available. This is updated independently of component levels\n comp.update_indep_monitor(day)\n\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n\n df = comp.comps[c]\n # if component can't fail, just continue\n if case.config[c][ck.CAN_FAIL]:\n # decrement time to failures for operational modules\n # fail components when their time has come\n comp.update_fails(c, day)\n\n # update monitoring\n comp.update_monitor(c, day)\n\n if case.config[c][ck.CAN_REPAIR]:\n # repair components when they are done and can be repaired\n comp.update_repairs(c, day)\n\n if case.config[c].get(ck.WARRANTY, None):\n df[\"time_left_on_warranty\"] -= 1\n\n # availability\n if c == ck.GRID:\n # for the grid only, the availability is based on the full 24-hour day.\n df.loc[df[\"state\"] == 0, \"avail_downtime\"] += 24\n else:\n # else, use the sun hours for this day\n df.loc[df[\"state\"] == 0, \"avail_downtime\"] += case.daylight_hours[day % 365]\n\n # module can still degrade even if it cant fail\n if case.config[c].get(ck.DEGRADE, None):\n df[\"days_of_degradation\"] += 1\n df[\"degradation_factor\"] = [\n component_degradation(case.config[c][ck.DEGRADE] / 365, d) for d in df[\"days_of_degradation\"]\n ]\n\n\ndef run_system_realization(\n case: SamCase, seed: bool = False, realization_num: int = 0, progress_bar: bool = False, debug: int = 0,\n) -> Components:\n \"\"\"\n Run a full realization for calculating costs\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n seed (bool, Optional): Whether to seed the random number generator, for multiprocessing\n realization_num (int, Optional): Current realization number, used for multiprocessing\n progress_bar (bool, Optional): Whether to display progress bar during the 
realization\n debug (int, Optional): Whether to save simulation state every `debug` days (0 to turn off)\n\n Returns:\n :obj:`Components`: The components object which contains all the data for this realization\n \"\"\"\n if seed:\n np.random.seed()\n\n # data storage\n comp = Components(case)\n lifetime = case.config[ck.LIFETIME_YRS]\n\n if case.config[ck.TRACKING]:\n comp.tracker_power_loss_factor[0] = 1\n comp.tracker_availability[0] = 1\n\n # initial timestep\n comp.module_degradation_factor[0] = comp.current_degradation()\n comp.dc_power_availability[0] = comp.dc_availability()\n comp.ac_power_availability[0] = comp.ac_availability()\n\n if progress_bar:\n iterator = tqdm(\n range(1, lifetime * 365),\n ascii=True,\n desc=f\"Running realization {realization_num}\",\n unit=\"day\",\n position=mp.current_process()._identity[0],\n leave=False,\n )\n else:\n logger.info(f\"Running realization {realization_num}...\")\n iterator = range(1, lifetime * 365)\n\n for i in iterator:\n # calculate new labor rate each year\n if i == 1 or i % 365 == 0:\n year = np.floor(i / 365)\n inflation = np.power(1 + case.config[ck.INFLATION] / 100, year)\n comp.update_labor_rates(case.config[ck.LABOR_RATE] * inflation)\n # Decided to remove since it doesnt make sense for only trackers to rise with inflation and not\n # all other failures. Plus, this was broken.\n # need to store original cost of tracker failures for each failure and increase based on that cost\n # also need to take in concurrent failures\n # if case.config[ck.TRACKING]:\n # for fail in case.config[ck.TRACKER][ck.FAILURE].keys():\n # case.config[ck.TRACKER][ck.FAILURE][fail][ck.COST] *= inflation\n\n # save state if debugging\n if debug > 0 and i % debug == 0:\n state_dict = comp.snapshot()\n folder = f\"debug_day_{i}\"\n save_path = os.path.join(case.config[ck.RESULTS_FOLDER], folder)\n os.makedirs(save_path, exist_ok=True)\n for key, val in state_dict.items():\n val.to_csv(os.path.join(save_path, f\"{key}_state.csv\"), index=True)\n\n # timestep is applied each day\n simulate_day(case, comp, i)\n\n if case.config[ck.TRACKING]:\n comp.tracker_availability[i], comp.tracker_power_loss_factor[i] = comp.tracker_power_loss(i)\n\n comp.module_degradation_factor[i] = comp.current_degradation()\n comp.dc_power_availability[i] = comp.dc_availability()\n comp.ac_power_availability[i] = comp.ac_availability()\n\n # create same performance adjustment tables for avail, degradation, tracker losses\n if case.config[ck.TRACKING]:\n daily_dc_loss = 100 * (\n 1 - (comp.dc_power_availability * comp.module_degradation_factor * comp.tracker_power_loss_factor)\n )\n else:\n daily_dc_loss = 100 * (1 - (comp.dc_power_availability * comp.module_degradation_factor))\n\n daily_ac_loss = 100 * (1 - comp.ac_power_availability)\n\n case.value(\"en_dc_lifetime_losses\", 1)\n case.value(\"dc_lifetime_losses\", list(daily_dc_loss))\n\n case.value(\"en_ac_lifetime_losses\", 1)\n case.value(\"ac_lifetime_losses\", list(daily_ac_loss))\n\n o_m_yearly_costs = np.zeros(lifetime)\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n\n comp_yearly_cost = np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1)\n o_m_yearly_costs += comp_yearly_cost\n\n case.value(\"om_fixed\", list(o_m_yearly_costs))\n\n case.simulate()\n\n # add the results of the simulation to the components class and return\n comp.timeseries_dc_power = case.output(\"dc_net\")\n comp.timeseries_ac_power = case.value(\"gen\")\n comp.lcoe = case.output(\"lcoe_real\")\n comp.npv = 
case.get_npv()\n\n # remove the first element from cf_energy_net because it is always 0, representing year 0\n comp.annual_energy = np.array(case.output(\"cf_energy_net\")[1:])\n\n # more results, for graphing and what not\n try:\n comp.tax_cash_flow = case.output(\"cf_after_tax_cash_flow\")\n except AttributeError:\n comp.tax_cash_flow = case.output(\"cf_pretax_cashflow\")\n\n for loss in ck.losses:\n try:\n comp.losses[loss] = case.output(loss)\n except:\n comp.losses[loss] = 0\n\n return comp\n\n\ndef gen_results(case: SamCase, results: List[Components]) -> List[pd.DataFrame]:\n \"\"\"\n Generates results for the given SAM case and list of component objects containing the results of each realization.\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n results (:obj:`list(Components)`): List of component objects that contain the results for each realization\n\n Returns:\n :obj:`list(pd.DataFrame)`: List of dataframes containing the results.\n\n Note:\n The order of the returned dataframes is:\n - Summary Results\n - Degradation Results\n - DC Power\n - AC Power\n - Yearly Costs\n \"\"\"\n summary_index = [\"Base Case\"]\n summary_data = {\"lcoe\": [case.base_lcoe], \"npv\": [case.base_npv]}\n lifetime = case.config[ck.LIFETIME_YRS]\n p_vals = [99, 95, 90, 75, 50, 10]\n\n # ac energy\n cumulative_ac_energy = np.cumsum(case.base_annual_energy)\n\n for i in range(int(lifetime)):\n summary_data[f\"annual_ac_energy_{i+1}\"] = [case.base_annual_energy[i]]\n # split up so the order of columns is nicer\n for i in range(int(lifetime)):\n summary_data[f\"cumulative_ac_energy_{i+1}\"] = [cumulative_ac_energy[i]]\n\n # dc energy\n for i in range(len(case.base_dc_energy)):\n summary_data[f\"dc_energy_{i+1}\"] = [case.base_dc_energy[i]]\n\n # TODO: also, need to clean this up, i just use dictionaries and fill in blanks for base case, but this can be much cleaner\n # per realization results\n day_index = np.arange(lifetime * 365) + 1\n timeseries_index = np.arange(len(results[0].timeseries_dc_power))\n year_index = np.arange(lifetime) + 1\n yearly_cost_index = []\n degradation_data = {}\n timeseries_dc_data = {}\n timeseries_ac_data = {}\n yearly_cost_data = {}\n yearly_fail_data = {}\n for i, comp in enumerate(results):\n # daily degradation\n degradation_data[f\"Realization {i+1}\"] = comp.module_degradation_factor\n\n # power\n timeseries_dc_data[f\"Realization {i+1}\"] = comp.timeseries_dc_power\n timeseries_ac_data[f\"Realization {i+1}\"] = comp.timeseries_ac_power\n\n # yearly cost and total fails for each component\n yearly_cost_index.append(f\"Realization {i+1}\")\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n if c not in yearly_cost_data:\n yearly_cost_data[c] = []\n if c not in yearly_fail_data:\n yearly_fail_data[c] = []\n\n yearly_cost_data[c] += list(np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1))\n # add total fails per year for each failure mode for this component level\n total_fails = np.zeros(lifetime * 365)\n for f in comp.summarize_failures(c).values():\n total_fails += f\n yearly_fail_data[c] += list(np.sum(np.reshape(total_fails, (lifetime, 365)), axis=1))\n\n # summary\n summary_index.append(f\"Realization {i+1}\")\n summary_data[\"lcoe\"] += [comp.lcoe]\n summary_data[\"npv\"] += [comp.npv]\n\n # ac energy\n # remove the first element from cf_energy_net because it is always 0, representing year 0\n cumulative_ac_energy = np.cumsum(comp.annual_energy)\n\n for i in range(int(lifetime)):\n 
summary_data[f\"annual_ac_energy_{i+1}\"] += [comp.annual_energy[i]]\n summary_data[f\"cumulative_ac_energy_{i+1}\"] += [cumulative_ac_energy[i]]\n\n # dc energy\n dc_energy = summarize_dc_energy(comp.timeseries_dc_power, lifetime)\n for i in range(len(dc_energy)):\n summary_data[f\"dc_energy_{i+1}\"] += [dc_energy[i]]\n\n # calculate total failures, availability, mttr, mtbf, etc\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n\n if f\"{c}_total_failures\" not in summary_data:\n summary_data[f\"{c}_total_failures\"] = [None] # no failures for base case\n if f\"{c}_mtbf\" not in summary_data:\n summary_data[f\"{c}_mtbf\"] = [None]\n if f\"{c}_mttr\" not in summary_data:\n summary_data[f\"{c}_mttr\"] = [None]\n if f\"{c}_mttd\" not in summary_data:\n summary_data[f\"{c}_mttd\"] = [None]\n\n if case.config[c][ck.CAN_FAIL]:\n sum_fails = comp.comps[c][\"cumulative_failures\"].sum()\n summary_data[f\"{c}_total_failures\"] += [sum_fails]\n for fail in case.config[c].get(ck.FAILURE, {}).keys():\n if f\"{c}_failures_by_type_{fail}\" not in summary_data:\n summary_data[f\"{c}_failures_by_type_{fail}\"] = [None]\n summary_data[f\"{c}_failures_by_type_{fail}\"] += [comp.comps[c][f\"failure_by_type_{fail}\"].sum()]\n\n # partial failures\n for fail in case.config[c].get(ck.PARTIAL_FAIL, {}).keys():\n if f\"{c}_failures_by_type_{fail}\" not in summary_data:\n summary_data[f\"{c}_failures_by_type_{fail}\"] = [None]\n summary_data[f\"{c}_failures_by_type_{fail}\"] += [comp.comps[c][f\"failure_by_type_{fail}\"].sum()]\n\n # if the component had no failures, set everything here and continue\n if sum_fails == 0:\n summary_data[f\"{c}_mtbf\"] += [lifetime * 365]\n summary_data[f\"{c}_mttr\"] += [0]\n summary_data[f\"{c}_mttd\"] += [0]\n else:\n # mean time between failure\n summary_data[f\"{c}_mtbf\"] += [lifetime * 365 * case.config[c][ck.NUM_COMPONENT] / sum_fails]\n\n # mean time to repair\n if case.config[c][ck.CAN_REPAIR]:\n # take the number of fails minus whatever components have not been repaired by the end of the simulation to get the number of repairs\n sum_repairs = sum_fails - len(comp.comps[c].loc[(comp.comps[c][\"state\"] == 0)])\n if sum_repairs > 0:\n summary_data[f\"{c}_mttr\"] += [comp.total_repair_time[c] / sum_repairs]\n else:\n summary_data[f\"{c}_mttr\"] += [0]\n else:\n summary_data[f\"{c}_mttr\"] += [0]\n\n # mean time to detection (mean time to acknowledge)\n if (\n case.config[c][ck.CAN_MONITOR]\n or case.config[c].get(ck.COMP_MONITOR, None)\n or case.config[c].get(ck.INDEP_MONITOR, None)\n ):\n # take the number of fails minus the components that have not been repaired and also not be detected by monitoring\n mask = (comp.comps[c][\"state\"] == 0) & (comp.comps[c][\"time_to_detection\"] > 1)\n sum_monitor = sum_fails - len(comp.comps[c].loc[mask])\n if sum_monitor > 0:\n summary_data[f\"{c}_mttd\"] += [comp.total_monitor_time[c] / sum_monitor]\n else:\n summary_data[f\"{c}_mttd\"] += [0]\n else:\n summary_data[f\"{c}_mttd\"] += [0]\n else:\n # mean time between failure\n summary_data[f\"{c}_total_failures\"] += [0]\n summary_data[f\"{c}_mtbf\"] += [lifetime * 365]\n summary_data[f\"{c}_mttr\"] += [0]\n summary_data[f\"{c}_mttd\"] += [0]\n\n # availability\n if f\"{c}_availability\" not in summary_data:\n summary_data[f\"{c}_availability\"] = [None]\n summary_data[f\"{c}_availability\"] += [\n (\n 1\n - (comp.comps[c][\"avail_downtime\"].sum() / (lifetime * case.annual_daylight_hours))\n / case.config[c][ck.NUM_COMPONENT]\n )\n ]\n\n # generate 
dataframes\n summary_results = pd.DataFrame(index=summary_index, data=summary_data)\n summary_results.index.name = \"Realization\"\n # reorder columns for summary results\n reorder = list(summary_results.columns[0:2]) # lcoe and npv\n reorder += list(summary_results.columns[lifetime * 3 + 2 :]) # failures and avail\n reorder += list(summary_results.columns[2 : lifetime * 3 + 2]) # energy\n summary_results = summary_results[reorder]\n\n degradation_results = pd.DataFrame(index=day_index, data=degradation_data)\n dc_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_dc_data)\n ac_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_ac_data)\n dc_power_results.index.name = \"Hour\"\n ac_power_results.index.name = \"Hour\"\n degradation_results.index.name = \"Day\"\n\n cost_index = pd.MultiIndex.from_product([yearly_cost_index, year_index], names=[\"Realization\", \"Year\"])\n yearly_cost_results = pd.DataFrame(index=cost_index, data=yearly_cost_data)\n yearly_cost_results[\"total\"] = yearly_cost_results.sum(axis=1)\n\n # fails per year, same multi index as cost\n yearly_fail_results = pd.DataFrame(index=cost_index, data=yearly_fail_data)\n yearly_fail_results[\"total\"] = yearly_fail_results.sum(axis=1)\n\n stats_append = []\n summary_no_base = summary_results.iloc[1:]\n min = summary_no_base.min()\n min.name = \"min\"\n stats_append.append(min)\n\n max = summary_no_base.max()\n max.name = \"max\"\n stats_append.append(max)\n\n mean = summary_no_base.mean()\n mean.name = \"mean\"\n stats_append.append(mean)\n\n median = summary_no_base.median()\n median.name = \"median\"\n stats_append.append(median)\n\n std = summary_no_base.std()\n std.name = \"stddev\"\n stats_append.append(std)\n\n conf_interval = case.config[ck.CONF_INTERVAL]\n conf_int = cf_interval(1 - (conf_interval / 100), std, case.config[ck.NUM_REALIZATION])\n\n lower_conf = mean - conf_int\n lower_conf.name = f\"{conf_interval}% lower confidence interval of mean\"\n stats_append.append(lower_conf)\n\n upper_conf = mean + conf_int\n upper_conf.name = f\"{conf_interval}% upper confidence interval of mean\"\n stats_append.append(upper_conf)\n\n # p test, which is using the ppf of the normal distribituion with our calculated mean and std. 
We use scipy's functions for this\n # see https://help.helioscope.com/article/141-creating-a-p50-and-p90-with-helioscope\n for p in p_vals:\n values = []\n # calculate the p value for every column\n for m, s in zip(mean, std):\n if s != 0: # for columns with no STDDEV\n values.append(stats.norm.ppf((1 - p / 100), loc=m, scale=s))\n else:\n values.append(None)\n # save results\n values = pd.Series(values, index=mean.index)\n values.name = f\"P{p}\"\n stats_append.append(values)\n\n # since pandas wants to depercate append, gotta convert series into dataframes\n summary_results = pd.concat([summary_results, *[s.to_frame().transpose() for s in stats_append]])\n\n return [\n summary_results,\n degradation_results,\n dc_power_results,\n ac_power_results,\n yearly_cost_results,\n yearly_fail_results,\n ]\n\n\ndef graph_results(case: SamCase, results: List[Components], save_path: str = None) -> None:\n \"\"\"\n Generate graphs from a list of Component objects from each realization\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n results (:obj:`list(Components)`): List of component objects that contain the results for each realization\n save_path (str, Optional): Path to save graphs to, if provided\n \"\"\"\n lifetime = case.config[ck.LIFETIME_YRS]\n colors = [\n \"r\",\n \"g\",\n \"b\",\n \"c\",\n \"m\",\n \"y\",\n \"k\",\n \"tab:orange\",\n \"tab:brown\",\n \"lime\",\n \"tab:gray\",\n \"indigo\",\n \"navy\",\n \"pink\",\n \"coral\",\n \"yellow\",\n \"teal\",\n \"fuchsia\",\n \"palegoldenrod\",\n \"darkgreen\",\n ]\n # base case data to compare to\n base_losses = case.base_losses\n base_load = np.array(case.base_load) if case.base_load is not None else None\n base_ac_energy = np.array(case.base_ac_energy)\n base_annual_energy = np.array(case.base_annual_energy)\n base_tax_cash_flow = np.array(case.base_tax_cash_flow)\n\n # parse data\n avg_ac_energy = np.zeros(len(case.base_ac_energy)) # since length is variable based on frequency of weather file\n avg_annual_energy = np.zeros(lifetime)\n avg_losses = np.zeros(len(ck.losses))\n avg_tax_cash_flow = np.zeros(lifetime + 1) # add 1 for year 0\n avg_failures = np.zeros((len(ck.component_keys), lifetime * 365)) # 7 types of components\n\n # computing the average across every realization\n for comp in results:\n avg_ac_energy += np.array(comp.timeseries_ac_power)\n avg_annual_energy += np.array(comp.annual_energy)\n avg_losses += np.array(list(comp.losses.values()))\n avg_tax_cash_flow += np.array(comp.tax_cash_flow)\n for i, c in enumerate(ck.component_keys):\n if not case.config.get(c, None):\n continue\n for f in comp.summarize_failures(c).values():\n avg_failures[i] += f\n\n # monthly and annual energy\n avg_ac_energy /= len(results)\n avg_annual_energy /= len(results)\n avg_losses /= len(results)\n avg_tax_cash_flow /= len(results)\n avg_failures /= len(results)\n\n # sum up failures to be per year\n avg_failures = np.sum(np.reshape(avg_failures, (len(ck.component_keys), lifetime, 365)), axis=2)\n # determine the frequency of the data, same as frequncy of supplied weather file\n total = int(len(avg_ac_energy) / lifetime)\n if total == 8760:\n freq = 1\n else:\n freq = 0\n while total > 8760:\n freq += 1\n total /= freq\n\n avg_ac_energy = np.reshape(avg_ac_energy[0::freq], (lifetime, 8760)) # yearly energy by hour\n avg_ac_energy = np.sum(avg_ac_energy, axis=0) / lifetime # yearly energy average\n avg_ac_energy = np.reshape(avg_ac_energy, (365, 24)) # day energy by hour\n avg_day_energy_by_hour = 
avg_ac_energy.copy() # copy for heatmap yearly energy generation\n avg_ac_energy = np.sum(avg_ac_energy, axis=1) # energy per day\n\n base_ac_energy = np.reshape(base_ac_energy[0::freq], (lifetime, 8760))\n base_ac_energy = np.sum(base_ac_energy, axis=0) / lifetime\n base_ac_energy = np.reshape(base_ac_energy, (365, 24))\n base_day_energy_by_hour = base_ac_energy.copy() # copy for heatmap yearly energy generation\n base_ac_energy = np.sum(base_ac_energy, axis=1)\n\n # daily load, load is the same between realizations and base\n if base_load is not None:\n base_load = np.reshape(base_load, (365, 24))\n base_load = np.sum(base_load, axis=1)\n\n avg_losses = {k: v for k, v in zip(ck.losses, avg_losses)} # create losses dictionary\n\n # calculate per month energy averaged across every year on every realization\n current_month = datetime(datetime.utcnow().year, 1, 1)\n # relative deltas allow dynamic month lengths such that each month has the proper number of days\n delta = relativedelta(months=1)\n start = 0\n monthly_energy = {}\n monthly_load = {}\n base_monthly_energy = {}\n for _ in range(12):\n month = current_month.strftime(\"%b\")\n num_days = ((current_month + delta) - current_month).days # number of days in this month\n\n monthly_energy[month] = np.sum(avg_ac_energy[start : start + num_days])\n base_monthly_energy[month] = np.sum(base_ac_energy[start : start + num_days])\n\n if base_load is not None:\n monthly_load[month] = np.sum(base_load[start : start + num_days])\n\n current_month += delta\n start += num_days\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n ax1.bar(list(monthly_energy.keys()), list(monthly_energy.values()))\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Month\")\n ax1.set_ylabel(\"kWh\")\n\n ax2.bar(list(monthly_energy.keys()), list(base_monthly_energy.values()))\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Month\")\n ax2.set_ylabel(\"kWh\")\n\n fig.suptitle(\"Monthly Energy Production\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Average Monthly Energy Production.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # graph the monthly energy against the monthly load\n if base_load is not None:\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n ind = np.arange(len(monthly_energy))\n ax1.bar(ind - 0.2, list(monthly_energy.values()), width=0.4, label=\"AC Energy\")\n ax1.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color=\"tab:gray\", label=\"Electricity Load\")\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Month\")\n ax1.set_xticks(ind)\n ax1.set_xticklabels(labels=list(monthly_energy.keys()))\n ax1.set_ylabel(\"kWh\")\n\n ax2.bar(ind - 0.2, list(base_monthly_energy.values()), width=0.4)\n ax2.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color=\"tab:gray\")\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Month\")\n ax2.set_xticks(ind)\n ax2.set_xticklabels(labels=list(monthly_energy.keys()))\n ax2.set_ylabel(\"kWh\")\n\n fig.legend()\n fig.suptitle(\"Monthly Energy and Load\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Average Monthly Energy and Load.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n # add 1 to have years 1->25\n ax1.bar(np.arange(lifetime) + 1, 
avg_annual_energy)\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Year\")\n ax1.set_ylabel(\"kWh\")\n\n ax2.bar(np.arange(lifetime) + 1, base_annual_energy)\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Year\")\n ax2.set_ylabel(\"kWh\")\n\n fig.suptitle(\"Annual Energy Production\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Average Annual Energy Production.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # this helper function just makes it easier since the base case requires this as well\n def gen_loss_data(losses):\n # losses\n loss_data = {\n \"POA front-side shading loss\": losses[\"annual_poa_shading_loss_percent\"],\n \"POA front-side soiling loss\": losses[\"annual_poa_soiling_loss_percent\"],\n \"POA front-side reflection (IAM) loss\": losses[\"annual_poa_cover_loss_percent\"],\n \"DC module deviation from STC\": losses[\"annual_dc_module_loss_percent\"],\n \"DC inverter MPPT clipping loss\": losses[\"annual_dc_mppt_clip_loss_percent\"],\n \"DC mismatch loss\": losses[\"annual_dc_mismatch_loss_percent\"],\n \"DC diodes and connections loss\": losses[\"annual_dc_diodes_loss_percent\"],\n \"DC wiring loss\": losses[\"annual_dc_wiring_loss_percent\"],\n \"DC tracking loss\": losses[\"annual_dc_tracking_loss_percent\"],\n \"DC nameplate loss\": losses[\"annual_dc_nameplate_loss_percent\"],\n \"DC power optimizer loss\": losses[\"annual_dc_optimizer_loss_percent\"],\n \"DC performance adjustment loss\": losses[\"annual_dc_perf_adj_loss_percent\"],\n \"AC inverter power clipping loss\": losses[\"annual_ac_inv_clip_loss_percent\"],\n \"AC inverter power consumption loss\": losses[\"annual_ac_inv_pso_loss_percent\"],\n \"AC inverter night tare loss\": losses[\"annual_ac_inv_pnt_loss_percent\"],\n \"AC inverter efficiency loss\": losses[\"annual_ac_inv_eff_loss_percent\"],\n \"AC wiring loss\": losses[\"ac_loss\"],\n \"Transformer loss percent\": losses[\"annual_xfmr_loss_percent\"],\n \"AC performance adjustment loss\": losses[\"annual_ac_perf_adj_loss_percent\"],\n \"AC transmission loss\": losses[\"annual_transmission_loss_percent\"],\n }\n\n return loss_data\n\n loss_data = gen_loss_data(avg_losses)\n base_loss_data = gen_loss_data(base_losses)\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n for i, (k, c) in enumerate(zip(sorted(list(loss_data.keys())), colors)):\n ax1.bar(i, loss_data[k], width=0.3, color=c, label=k)\n ax2.bar(i, base_loss_data[k], width=0.3, color=c)\n\n ax1.set_title(\"Realization Average\")\n ax2.set_title(\"Base Case\")\n\n # remove x axis labels\n ax1.xaxis.set_visible(False)\n ax2.xaxis.set_visible(False)\n\n ax1.set_ylabel(\"Percent\")\n ax2.set_ylabel(\"Percent\")\n\n fig.legend(bbox_to_anchor=(0.8, 0.0, 0.5, 0.5))\n fig.suptitle(\"Annual Energy Loss\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Annual Energy Loss.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # heatmap of ac energy averaged throughout each year\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n # calculate the min/max value of the base case and realizations for coloring consistency\n vmin = np.amin([np.amin(avg_day_energy_by_hour), np.amin(base_day_energy_by_hour)])\n vmax = np.amax([np.amax(avg_day_energy_by_hour), np.amax(base_day_energy_by_hour)])\n\n # transpose so the x axis is day\n cb = ax1.pcolormesh(\n 
np.arange(365), np.arange(24), avg_day_energy_by_hour.T, cmap=\"plasma\", vmin=vmin, vmax=vmax, shading=\"auto\"\n )\n ax2.pcolormesh(\n np.arange(365), np.arange(24), base_day_energy_by_hour.T, cmap=\"plasma\", vmin=vmin, vmax=vmax, shading=\"auto\"\n )\n\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Day\")\n ax1.set_ylabel(\"Hour\")\n\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Day\")\n ax2.set_ylabel(\"Hour\")\n\n fig.suptitle(\"Yearly System Power Generated (kW)\")\n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([1, 0.15, 0.05, 0.7])\n fig.colorbar(cb, cax=cbar_ax)\n\n with warnings.catch_warnings(): # matplotlib sucks\n warnings.simplefilter(\"ignore\")\n fig.tight_layout()\n\n if save_path:\n plt.savefig(os.path.join(save_path, \"Yearly System Power Generated.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n ax1.bar(np.arange(lifetime + 1), avg_tax_cash_flow)\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Year\")\n ax1.set_ylabel(\"USD\")\n\n ax2.bar(np.arange(lifetime + 1), base_tax_cash_flow)\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Year\")\n ax2.set_ylabel(\"USD\")\n\n # determine if stored value is pretax or after tax cash flow, depending on financial model\n flow = None\n try:\n case.output(\"cf_after_tax_cash_flow\")\n flow = \"After\"\n except AttributeError:\n case.output(\"cf_pretax_cashflow\")\n flow = \"Pre\"\n\n fig.suptitle(f\"{flow} Tax Cash Flow for System Lifetime\")\n fig.tight_layout()\n if save_path:\n plt.savefig(\n os.path.join(save_path, f\"{flow} Tax Cash Flow for System Lifetime.png\"), bbox_inches=\"tight\", dpi=200\n )\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # box plot for lcoe\n lcoe = np.array([comp.lcoe for comp in results])\n plt.boxplot(lcoe, vert=True, labels=[\"LCOE\"])\n plt.title(\"LCOE Box Plot for Realizations\")\n plt.ylabel(\"LCOE (cents/kWh)\")\n plt.tight_layout()\n\n if save_path:\n plt.savefig(os.path.join(save_path, \"LCOE Box Plot.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # number of failures per component per year averaged across the realizations\n for i, c in enumerate(ck.component_keys):\n if not case.config.get(c, None) or np.count_nonzero(avg_failures[i]) == 0:\n continue\n plt.plot(np.arange(lifetime) + 1, avg_failures[i], marker=\"o\", markersize=5, color=colors[i])\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of Failures\")\n plt.title(f\"Number of failures for {c} per year\")\n plt.tight_layout()\n if save_path:\n plt.savefig(\n os.path.join(save_path, f\"{c.capitalize()} Failures Per Year.png\"), bbox_inches=\"tight\", dpi=200\n )\n else:\n plt.show()\n\n plt.close()\n\n # plot total number of failures\n plt.plot(\n np.arange(lifetime) + 1, np.sum(avg_failures, axis=0).T, label=\"total\", marker=\"o\", markersize=5, color=\"lime\"\n )\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of Failures\")\n plt.title(f\"Total number of failures per year\")\n plt.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, f\"Total Failures Per Year.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close()\n\n\ndef pvrpm_sim(\n case: SamCase,\n save_results: bool = False,\n save_graphs: bool = False,\n progress_bar: bool = False,\n debug: int = 0,\n threads: int = 1,\n) -> List[Components]:\n \"\"\"\n Run the PVRPM simulation on a specific case. 
Results will be saved to the folder specified in the configuration.\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n save_results (bool, Optional): Whether to save output csv results\n save_graphs (bool, Optional): Whether to save output graphs\n progress_bar (bool, Optional): Whether to display progress bar for each realization\n debug (int, Optional): Whether to save simulation state every `debug` days (0 to turn off)\n threads (int, Optional): Number of threads to use for paralizing realizations\n\n Returns:\n :obj:`list(Components)`: Returns the list of results Component objects for each realization\n \"\"\"\n # tqdm multiprocessing setup\n mp.freeze_support() # for Windows support\n tqdm.set_lock(mp.RLock()) # for managing output contention\n\n save_path = case.config[ck.RESULTS_FOLDER]\n lifetime = case.config[ck.LIFETIME_YRS]\n if threads <= -1:\n threads = mp.cpu_count()\n elif threads == 0:\n threads = 1\n\n logger.info(\"Running base case simulation...\")\n start = time.time()\n case.base_case_sim()\n logger.info(\"Base case simulation took: {:.2f} seconds\".format(time.time() - start))\n\n # realize what we are doing in life\n results = []\n args = [(case, True, i + 1, progress_bar, debug) for i in range(case.config[ck.NUM_REALIZATION])]\n with mp.Pool(threads, initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),)) as p:\n results = p.starmap(run_system_realization, args)\n\n logger.info(\"Generating results...\")\n\n # gen all those results\n (\n summary_results,\n degradation_results,\n dc_power_results,\n ac_power_results,\n yearly_cost_results,\n yearly_fail_results,\n ) = gen_results(case, results,)\n\n # finally, graph results\n if save_graphs:\n graph_results(case, results, save_path=save_path)\n logger.info(f\"Graphs saved to {save_path}\")\n else:\n graph_results(case, results)\n\n # save results\n if save_results:\n summary_results.to_csv(os.path.join(save_path, \"PVRPM_Summary_Results.csv\"), index=True)\n degradation_results.to_csv(os.path.join(save_path, \"Daily_Degradation.csv\"), index=True)\n dc_power_results.to_csv(os.path.join(save_path, \"Timeseries_DC_Power.csv\"), index=True)\n ac_power_results.to_csv(os.path.join(save_path, \"Timeseries_AC_Power.csv\"), index=True)\n yearly_cost_results.to_csv(os.path.join(save_path, \"Yearly_Costs_By_Component.csv\"), index=True)\n yearly_fail_results.to_csv(os.path.join(save_path, \"Yearly_Failures_By_Component.csv\"), index=True)\n logger.info(f\"Results saved to {save_path}\")\n\n return results\n" ]
[ [ "numpy.sum", "pandas.Series", "scipy.stats.norm.ppf", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "matplotlib.pyplot.ylabel", "numpy.amax", "matplotlib.pyplot.boxplot", "numpy.reshape", "matplotlib.pyplot.title", "numpy.zeros", "scipy.stats.t.ppf", "pandas.MultiIndex.from_product", "matplotlib.pyplot.subplots", "numpy.count_nonzero", "numpy.arange", "numpy.power", "matplotlib.pyplot.close", "numpy.array", "numpy.cumsum", "pandas.DataFrame", "numpy.floor", "numpy.amin", "matplotlib.pyplot.show", "numpy.sqrt", "matplotlib.pyplot.xlabel" ] ]
tempcyc/networkx
[ "cae83ba501c242567cb2454f97f851898276f06e" ]
[ "networkx/linalg/laplacianmatrix.py" ]
[ "\"\"\"Laplacian matrix of graphs.\n\"\"\"\n# Copyright (C) 2004-2013 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n__author__ = \"\\n\".join(['Aric Hagberg <[email protected]>',\n 'Pieter Swart ([email protected])',\n 'Dan Schult ([email protected])',\n 'Alejandro Weinstein <[email protected]>'])\n__all__ = ['laplacian_matrix',\n 'normalized_laplacian_matrix',\n 'directed_laplacian_matrix']\n\n@not_implemented_for('directed')\ndef laplacian_matrix(G, nodelist=None, weight='weight'):\n \"\"\"Return the Laplacian matrix of G.\n\n The graph Laplacian is the matrix L = D - A, where\n A is the adjacency matrix and D is the diagonal matrix of node degrees.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n L : SciPy sparse matrix\n The Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges weights are summed.\n\n See Also\n --------\n to_numpy_matrix\n normalized_laplacian_matrix\n \"\"\"\n import scipy.sparse\n if nodelist is None:\n nodelist = G.nodes()\n A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,\n format='csr')\n n,m = A.shape\n diags = A.sum(axis=1)\n D = scipy.sparse.spdiags(diags.flatten(), [0], m, n, format='csr')\n return D - A\n\n@not_implemented_for('directed')\ndef normalized_laplacian_matrix(G, nodelist=None, weight='weight'):\n r\"\"\"Return the normalized Laplacian matrix of G.\n\n The normalized graph Laplacian is the matrix\n\n .. math::\n\n NL = D^{-1/2} L D^{-1/2}\n\n where `L` is the graph Laplacian and `D` is the diagonal matrix of\n node degrees.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n L : NumPy matrix\n The normalized Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges weights are summed.\n See to_numpy_matrix for other options.\n\n If the Graph contains selfloops, D is defined as diag(sum(A,1)), where A is\n the adjencency matrix [2]_.\n\n See Also\n --------\n laplacian_matrix\n\n References\n ----------\n .. [1] Fan Chung-Graham, Spectral Graph Theory,\n CBMS Regional Conference Series in Mathematics, Number 92, 1997.\n .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized\n Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,\n March 2007.\n \"\"\"\n import scipy\n import scipy.sparse\n if nodelist is None:\n nodelist = G.nodes()\n A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,\n format='csr')\n # the convention for normalized Laplacian is to not count self loops\n # twice in the diagonal. 
So we remove one here.\n for n,_ in G.selfloop_edges():\n A[n,n] -= 1\n n,m = A.shape\n diags = A.sum(axis=1).flatten()\n D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')\n L = D - A\n with scipy.errstate(divide='ignore'):\n diags_sqrt = 1.0/scipy.sqrt(diags)\n diags_sqrt[scipy.isinf(diags_sqrt)] = 0\n DH = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format='csr')\n return DH.dot(L.dot(DH))\n\n###############################################################################\n# Code based on\n# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/\n\n@not_implemented_for('undirected')\n@not_implemented_for('multigraph')\ndef directed_laplacian_matrix(G, nodelist=None, weight='weight',\n walk_type=None, alpha=0.95):\n r\"\"\"Return the directed Laplacian matrix of G.\n\n The graph directed Laplacian is the matrix\n\n .. math::\n\n L = I - (\\Phi^{1/2} P \\Phi^{-1/2} + \\Phi^{-1/2} P^T \\Phi^{1/2} ) / 2\n\n where `I` is the identity matrix, `P` is the transition matrix of the\n graph, and `\\Phi` a matrix with the Perron vector of `P` in the diagonal and\n zeros elsewhere.\n\n Depending on the value of walk_type, `P` can be the transition matrix\n induced by a random walk, a lazy random walk, or a random walk with\n teleportation (PageRank).\n\n Parameters\n ----------\n G : DiGraph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n walk_type : string or None, optional (default=None)\n If None, `P` is selected depending on the properties of the\n graph. Otherwise is one of 'random', 'lazy', or 'pagerank'\n\n alpha : real\n (1 - alpha) is the teleportation probability used with pagerank\n\n Returns\n -------\n L : NumPy array\n Normalized Laplacian of G.\n\n Raises\n ------\n NetworkXError\n If NumPy cannot be imported\n\n NetworkXNotImplemnted\n If G is not a DiGraph\n\n Notes\n -----\n Only implemented for DiGraphs\n\n See Also\n --------\n laplacian_matrix\n\n References\n ----------\n .. 
[1] Fan Chung (2005).\n Laplacians and the Cheeger inequality for directed graphs.\n Annals of Combinatorics, 9(1), 2005\n \"\"\"\n import scipy as sp\n from scipy.sparse import identity, spdiags, linalg\n if walk_type is None:\n if nx.is_strongly_connected(G):\n if nx.is_aperiodic(G):\n walk_type = \"random\"\n else:\n walk_type = \"lazy\"\n else:\n walk_type = \"pagerank\"\n\n M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,\n dtype=float)\n n, m = M.shape\n if walk_type in [\"random\", \"lazy\"]:\n DI = spdiags(1.0/sp.array(M.sum(axis=1).flat), [0], n, n)\n if walk_type == \"random\":\n P = DI * M\n else:\n I = identity(n)\n P = (I + DI * M) / 2.0\n\n elif walk_type == \"pagerank\":\n if not (0 < alpha < 1):\n raise nx.NetworkXError('alpha must be between 0 and 1')\n # this is using a dense representation\n M = M.todense()\n # add constant to dangling nodes' row\n dangling = sp.where(M.sum(axis=1) == 0)\n for d in dangling[0]:\n M[d] = 1.0 / n\n # normalize\n M = M / M.sum(axis=1)\n P = alpha * M + (1 - alpha) / n\n else:\n raise nx.NetworkXError(\"walk_type must be random, lazy, or pagerank\")\n\n evals, evecs = linalg.eigs(P.T, k=1)\n v = evecs.flatten().real\n p = v / v.sum()\n sqrtp = sp.sqrt(p)\n Q = spdiags(sqrtp, [0], n, n) * P * spdiags(1.0/sqrtp, [0], n, n)\n I = sp.identity(len(G))\n\n return I - (Q + Q.T) /2.0\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import numpy\n except:\n raise SkipTest(\"NumPy not available\")\n" ]
[ [ "scipy.sqrt", "scipy.errstate", "scipy.sparse.spdiags", "scipy.isinf", "scipy.sparse.linalg.eigs", "scipy.sparse.identity" ] ]
simo-tuomisto/portfolio
[ "7f70bdfe027fcab75970e5f8a81036ca905c893b" ]
[ "Computational Nanoscience 2013 - Final project/Code/plot_data.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as mpl\nfrom matplotlib.ticker import NullFormatter\nimport sys\nfrom glob import glob\nimport re\nimport os\n\nif __name__==\"__main__\":\n\n\t# Plot these ranges\n\tplotranges = {\n\t5.0\t:\t[0.009\t,\t0.011],\n\t10.0:\t[0.007\t,\t0.011],\n\t15.0:\t[0.005\t,\t0.010],\n\t20.0:\t[0.005\t,\t0.010],\n\t25.0:\t[0.005\t,\t0.010],\n\t30.0:\t[0.005\t,\t0.010]\n\t}\n\t\n\tcolormap_f={\n\t0.005 \t: 'b',\n\t0.006\t: 'c',\n\t0.007\t: 'g',\n\t0.008\t: 'y',\n\t0.009\t: 'r',\n\t0.010\t: 'm'\n\t}\n\t\n\tcolormap_r={\n\t5 \t: 'b',\n\t10\t: 'c',\n\t15\t: 'g',\n\t20\t: 'y',\n\t25\t: 'r',\n\t30\t: 'm'\n\t}\n\n\tstrainreg\t= re.compile('.*-r_(?P<r>.+)-f_(?P<f>.+)-t_(?P<t>.+)_strain.npy')\n\t\n\tstrainfiles = glob('*_strain.npy')\n\t\n\tavglen = 3\n\t\n\trdict = dict()\n\t\n\tfor strainfile in strainfiles:\n\t\tmatch\t= strainreg.match(strainfile)\n\t\tif match != None:\n\t\t\tstressfile = strainfile[:-10] + 'stress.npy'\n\t\t\tif os.path.exists(stressfile):\n\t\t\t\tgroups \t\t= match.groupdict()\n\t\t\t\tr\t\t\t= float(groups['r'])\n\t\t\t\tif r not in rdict:\n\t\t\t\t\trdict[r] = []\n\t\t\t\tf\t\t\t= float(groups['f'])\n\t\t\t\tt\t\t\t= int(groups['t'])\n\t\t\t\tstraindata\t= np.load(strainfile)\n\t\t\t\tstressdata\t= np.load(stressfile)\n\t\t\t\trdict[r].append([f,t,straindata,stressdata])\n\t\t\t\t\n\tmeasured_data\t= []\n\t\t\t\t\n\tfor r,dataarrays in rdict.items():\n\t\t\n\t\tif r not in plotranges:\n\t\t\tcontinue\n\t\t\n\t\tlowlimit \t= plotranges[r][0]\n\t\thighlimit \t= plotranges[r][1]\n\t\t\n\t\tfig1 = mpl.figure(facecolor='white',figsize=(12,9))\n\t\tfig2 = mpl.figure(facecolor='white',figsize=(12,9))\n\t\tfig3 = mpl.figure(facecolor='white',figsize=(12,9))\n\t\tfor dataarray in dataarrays:\n\t\t\tf,t,straindata,stressdata = dataarray\n\t\t\tstressdata = stressdata/(np.pi*np.power(r,2))\n\t\t\tif ((f<lowlimit) or (f>highlimit)):\n\t\t\t\tcontinue\n\t\t\tavgstress\t= np.zeros_like(stressdata)\n\t\t\tfor i in np.arange(avglen,len(stressdata)-avglen-1):\n\t\t\t\tavgstress[i] = np.average(stressdata[i-avglen:i+avglen+1])\n\t\t\t#mpl.loglog(straindata, stressdata)\n\t\t\tstressmax\t= np.amax(stressdata)\n\t\t\tstrain = (straindata - straindata[0])/straindata[0]\n\t\t\tstrainmax\t= np.amax(strain)\n\t\t\tstrainrate\t= strainmax/len(strain)\n\t\t\tmeasured_data.append([r,f,stressmax,strainmax,strainrate])\n\t\t\tmpl.figure(fig1.number)\n\t\t\tmpl.plot(strain[avglen:-avglen-1], avgstress[avglen:-avglen-1], label='f=%f' % f, color=colormap_f.get(f,'k'))\n\n\t\t\tmpl.figure(fig2.number)\n\t\t\tmpl.plot(0.5*np.arange(0, len(strain)),strain, label='f=%f' % f, color=colormap_f.get(f,'k'))\n\t\t\t\n\t\t\tif (f == 0.008):\n\t\t\t\tmpl.figure(fig3.number)\n\t\t\t\tt = 0.5*np.arange(avglen, len(avgstress)+avglen)\n\t\t\t\tmpl.plot(t[avgstress>0],avgstress[avgstress>0],label='f=%f' % f, color=colormap_f.get(f,'k'))\n\t\tmpl.figure(fig1.number)\n\t\tmpl.title('r=%d' % int(r))\n\t\tmpl.xlabel('strain')\n\t\tmpl.ylabel('stress')\n\t\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\t\tmpl.legend(loc=1)\n\t\tmpl.savefig('strain_vs_stress-r_%d.pdf' % int(r))\n\t\t\n\t\tmpl.figure(fig2.number)\n\t\tmpl.title('r=%d' % int(r))\n\t\tmpl.xlabel('time')\n\t\tmpl.ylabel('strain')\n\t\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\t\tmpl.legend(loc=3)\n\t\tmpl.savefig('time_vs_strain-r_%d.pdf' % int(r))\n\t\t\n\t\tmpl.figure(fig3.number)\n\t\tmpl.title('r=%d' % 
int(r))\n\t\tmpl.xlabel('time')\n\t\tmpl.ylabel('strain')\n\t\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\t\t#mpl.legend(loc=3)\n\t\tmpl.savefig('time_vs_stress-r_%d.pdf' % int(r))\n\t\t#break\n\t\t\n\tmeasured_data = np.asfarray(measured_data)\n\t\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstressmax = measured_data[measured_data[:,1] == f,2]\n\t\tif (f==0.009):\n\t\t\tfit \t= np.polyfit(np.log(r), np.log(stressmax), deg=1)\n\t\t\tfitr \t= r\n\t\tmpl.plot(r,stressmax,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.plot(r,stressmax,linestyle='--', color=colormap_f.get(f,'k'))\n\tmpl.plot(fitr,np.exp(np.polyval(fit,np.log(fitr))), label='Fit with exponent %f' % fit[0])\n\tmpl.xlabel('r')\n\tmpl.ylabel('Maximum stress')\n\tmpl.legend(loc=1)\n\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_stressmax.pdf')\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\t\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstressmax = measured_data[measured_data[:,1] == f,2]\n\t\tmpl.loglog(r,stressmax,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.loglog(r,stressmax,linestyle='--', color=colormap_f.get(f,'k'))\n\tmpl.xlabel('r')\n\tmpl.ylabel('Maximum stress')\n\tmpl.legend(loc=4)\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_stressmax_loglog.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor r in np.unique(measured_data[:,0]):\n\t\tf = measured_data[measured_data[:,0] == r,1]\n\t\tstressmax = measured_data[measured_data[:,0] == r,2]\n\t\tmpl.plot(f,stressmax,'^', color=colormap_r.get(r,'k'),label='r=%d' % r)\n\t\tmpl.plot(f,stressmax,linestyle='--', color=colormap_r.get(r,'k'))\n\t\tmpl.xlabel('f')\n\t\tmpl.ylabel('Maximum stress')\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.legend(loc=4)\n\tmpl.savefig('f_vs_stressmax.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstrainmax = measured_data[measured_data[:,1] == f,3]\n\t\tmpl.plot(r,strainmax,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.plot(r,strainmax,linestyle='--', color=colormap_f.get(f,'k'))\n\tmpl.xlabel('r')\n\tmpl.ylabel('Strain at the time of failure')\n\tmpl.legend(loc=0)\n\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_strainmax.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor r in np.unique(measured_data[:,0]):\n\t\tf = measured_data[measured_data[:,0] == r,1]\n\t\tstrainmax = measured_data[measured_data[:,0] == r,3]\n\t\tmpl.plot(f,strainmax,'^', color=colormap_r.get(r,'k'),label='r=%d' % r)\n\t\tmpl.plot(f,strainmax,linestyle='--', color=colormap_r.get(r,'k'))\n\t\tmpl.xlabel('f')\n\t\tmpl.ylabel('Strain at the time of failure')\n\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.legend(loc=0)\n\tmpl.savefig('f_vs_strainmax.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstrainrate = measured_data[measured_data[:,1] == f,4]\n\t\tif (f==0.010):\n\t\t\tfit \t= np.polyfit(np.log(r), np.log(strainrate), deg=1)\n\t\t\tfitr \t= r\n\t\tmpl.plot(r,strainrate,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.plot(r,strainrate,linestyle='--', 
color=colormap_f.get(f,'k'))\n\tmpl.plot(fitr,np.exp(np.polyval(fit,np.log(fitr))), label='Fit with exponent %f' % fit[0])\n\tmpl.xlabel('r')\n\tmpl.ylabel('Strain rate')\n\tmpl.legend(loc=0)\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_strainrate.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor r in np.unique(measured_data[:,0]):\n\t\tf = measured_data[measured_data[:,0] == r,1]\n\t\tstrainrate = measured_data[measured_data[:,0] == r,4]\n\t\tmpl.plot(f,strainrate,'^', color=colormap_r.get(r,'k'),label='r=%d' % r)\n\t\tmpl.plot(f,strainrate,linestyle='--', color=colormap_r.get(r,'k'))\n\t\tmpl.xlabel('f')\n\t\tmpl.ylabel('Strain rate')\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.legend(loc=3)\n\tmpl.savefig('f_vs_strainrate.pdf')\n\t" ]
[ [ "numpy.zeros_like", "numpy.load", "numpy.unique", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gca", "matplotlib.ticker.NullFormatter", "numpy.log", "matplotlib.pyplot.ylabel", "numpy.power", "numpy.amax", "numpy.average", "matplotlib.pyplot.xlabel", "numpy.asfarray" ] ]
Guangxuan-Xiao/deepsnap
[ "b0b222c8093b6273c648c51585c9ebbb2f4112fa" ]
[ "deepsnap/batch.py" ]
[ "import torch\nfrom deepsnap.graph import Graph\nfrom deepsnap.hetero_graph import HeteroGraph\nfrom typing import (\n Callable,\n Dict,\n List\n)\n\n\nclass Batch(Graph):\n r\"\"\"\n A plain old python object modeling a batch of\n :class:`deepsnap.graph.Graph` objects as one big (disconnected) graph,\n with :class:`torch_geometric.data.Data` being the\n base class, all its methods can also be used here.\n In addition, single graphs can be reconstructed via the assignment vector\n :obj:`batch`, which maps each node to its respective graph identifier.\n \"\"\"\n def __init__(self, batch=None, **kwargs):\n super(Batch, self).__init__(**kwargs)\n\n self.batch = batch\n self.__data_class__ = Graph\n self.__slices__ = None\n\n @staticmethod\n def collate(follow_batch=[], transform=None, **kwargs):\n return lambda batch: Batch.from_data_list(\n batch, follow_batch, transform, **kwargs\n )\n\n @staticmethod\n def from_data_list(\n data_list: List[Graph],\n follow_batch: List = None,\n transform: Callable = None,\n **kwargs\n ):\n r\"\"\"\n Constructs A :class:`deepsnap.batch.Batch` object from a python list\n holding :class:`torch_geometric.data.Data` objects.\n The assignment vector :obj:`batch` is created on the fly.\n Additionally, creates assignment batch vectors for each key in\n :obj:`follow_batch`.\n\n Args:\n data_list (list): A list of `deepsnap.graph.Graph` objects.\n follow_batch (list, optional): Creates assignment batch vectors\n for each key.\n transform: If apply transform when batching.\n **kwargs: Other parameters.\n \"\"\"\n if follow_batch is None:\n follow_batch = []\n if transform is not None:\n data_list = [\n data.apply_transform(\n transform,\n deep_copy=True,\n **kwargs,\n )\n for data in data_list\n ]\n # is_train is in data.keys, but it shouldn't be.\n keys = [set(data.keys) for data in data_list]\n keys = list(set.union(*keys))\n assert \"batch\" not in keys\n\n batch, cumsum = Batch._init_batch_fields(keys, follow_batch)\n batch.__data_class__ = data_list[0].__class__\n batch.batch = []\n for i, data in enumerate(data_list):\n # Note: in heterogeneous graph, __inc__ logic is different\n Batch._collate_dict(\n data, cumsum,\n batch.__slices__, batch,\n data, follow_batch, i=i\n )\n if isinstance(data, Graph):\n if isinstance(data, HeteroGraph):\n num_nodes = sum(data.num_nodes().values())\n else:\n num_nodes = data.num_nodes\n else:\n raise TypeError(\n \"element in self.graphs of unexpected type\"\n )\n if num_nodes is not None:\n item = torch.full((num_nodes, ), i, dtype=torch.long)\n batch.batch.append(item)\n\n if num_nodes is None:\n batch.batch = None\n\n Batch._dict_list_to_tensor(batch, data_list[0])\n\n return batch.contiguous()\n\n @staticmethod\n def _init_batch_fields(keys, follow_batch):\n batch = Batch()\n batch.__slices__ = {key: [0] for key in keys}\n\n for key in keys:\n batch[key] = []\n\n for key in follow_batch:\n batch[f\"{key}_batch\"] = []\n\n cumsum = {key: 0 for key in keys}\n return batch, cumsum\n\n @staticmethod\n def _collate_dict(\n curr_dict,\n cumsum: Dict[str, int],\n slices,\n batched_dict,\n graph,\n follow_batch,\n i=None\n ):\n r\"\"\" Called in from_data_list to collate a dictionary.\n This can also be applied to Graph object, since it has support for\n keys and __getitem__().\n\n Args:\n curr_dict: current dictionary to be added to the\n collated dictionary.\n cumsum: cumulative sum to be used for indexing.\n slices: a dictionary of the same structure as batched_dict,\n slices[key] indicates the indices to slice batch[key] 
into\n tensors for all graphs in the batch.\n batched_dict: the batched dictionary of the same structure\n as curr_dict. But all graph data are batched together.\n \"\"\"\n if isinstance(curr_dict, dict):\n keys = curr_dict.keys()\n else:\n keys = curr_dict.keys\n for key in keys:\n item = curr_dict[key]\n if isinstance(item, dict):\n # recursively collate every key in the dictionary\n if isinstance(batched_dict[key], list):\n # nested dictionary not initialized yet\n assert len(batched_dict[key]) == 0\n # initialize the nested dictionary for batch\n cumsum[key] = {inner_key: 0 for inner_key in item.keys()}\n slices[key] = {inner_key: [0] for inner_key in item.keys()}\n batched_dict[key] = {}\n for inner_key in item.keys():\n batched_dict[key][inner_key] = []\n for inner_key in follow_batch:\n batched_dict[key][f\"{key}_batch\"] = []\n Batch._collate_dict(\n item, cumsum[key],\n slices[key], batched_dict[key],\n graph, follow_batch, i=i\n )\n continue\n if torch.is_tensor(item) and item.dtype != torch.bool:\n item = item + cumsum[key]\n if torch.is_tensor(item):\n size = item.size(graph.__cat_dim__(key, curr_dict[key]))\n else:\n size = 1\n slices[key].append(size + slices[key][-1])\n cumsum[key] = cumsum[key] + graph.__inc__(key, item)\n batched_dict[key].append(item)\n\n if key in follow_batch:\n item = torch.full((size, ), i, dtype=torch.long)\n batched_dict[f\"{key}_batch\"].append(item)\n\n @staticmethod\n def _dict_list_to_tensor(dict_of_list, graph):\n r\"\"\"Convert a dict/Graph with list as values to a dict/Graph with\n concatenated/stacked tensor as values.\n \"\"\"\n if isinstance(dict_of_list, dict):\n keys = dict_of_list.keys()\n else:\n keys = dict_of_list.keys\n for key in keys:\n if isinstance(dict_of_list[key], dict):\n # recursively convert the dictionary of list to dict of tensor\n Batch._dict_list_to_tensor(dict_of_list[key], graph)\n continue\n item = dict_of_list[key][0]\n if torch.is_tensor(item):\n if (\n Graph._is_graph_attribute(key)\n and item.ndim == 1\n and (not item.dtype == torch.long)\n and \"feature\" in key\n ):\n # special consideration: 1D tensor for graph\n # attribute (classification)\n # named as: \"graph_xx_feature\"\n # batch by stacking the first dim\n dict_of_list[key] = torch.stack(\n dict_of_list[key],\n dim=0\n )\n else:\n # concat at the __cat_dim__\n dict_of_list[key] = torch.cat(\n dict_of_list[key],\n dim=graph.__cat_dim__(key, item)\n )\n elif isinstance(item, (float, int)):\n dict_of_list[key] = torch.tensor(dict_of_list[key])\n\n def to_data_list(self):\n r\"\"\"\n Reconstructs the list of :class:`torch_geometric.data.Data` objects\n from the batch object.\n The batch object must have been created via :meth:`from_data_list` in\n order to be able reconstruct the initial objects.\n \"\"\"\n if self.__slices__ is None:\n raise RuntimeError(\n \"Cannot reconstruct data list from batch because the \"\n \"batch object was not created using Batch.from_data_list()\"\n )\n\n keys = [key for key in self.keys if key[-5:] != \"batch\"]\n cumsum = {key: 0 for key in keys}\n data_list = []\n for i in range(len(self.__slices__[keys[0]]) - 1):\n # i: from 0 up to num graphs in the batch\n data = self.__data_class__()\n self._reconstruct_dict(\n i, keys, data, cumsum, self.__slices__, self, data\n )\n data_list.append(data)\n\n return data_list\n\n def _reconstruct_dict(\n self, graph_idx: int, keys, data_dict,\n cumsum: Dict[str, int], slices, batched_dict, graph):\n\n for key in keys:\n if isinstance(batched_dict[key], dict):\n # recursively unbatch 
the dict\n data_dict[key] = {}\n inner_keys = [\n inner_key\n for inner_key in batched_dict[key].keys()\n if inner_key[-5:] != \"batch\"\n ]\n inner_cumsum = {inner_key: 0 for inner_key in inner_keys}\n inner_slices = slices[key]\n self._reconstruct_dict(\n graph_idx, inner_keys,\n data_dict[key], inner_cumsum,\n inner_slices, batched_dict[key], graph\n )\n continue\n\n if torch.is_tensor(batched_dict[key]):\n data_dict[key] = batched_dict[key].narrow(\n graph.__cat_dim__(key, batched_dict[key]),\n slices[key][graph_idx],\n slices[key][graph_idx + 1] - slices[key][graph_idx]\n )\n if batched_dict[key].dtype != torch.bool:\n data_dict[key] = data_dict[key] - cumsum[key]\n else:\n data_dict[key] = (\n batched_dict[key][\n slices[key][graph_idx]:slices[key][graph_idx + 1]\n ]\n )\n cumsum[key] = cumsum[key] + graph.__inc__(key, data_dict[key])\n\n @property\n def num_graphs(self) -> int:\n r\"\"\"\n Returns the number of graphs in the batch.\n\n Returns:\n int: The number of graphs in the batch.\n \"\"\"\n return self.batch[-1].item() + 1\n\n def apply_transform(\n self,\n transform,\n update_tensor: bool = True,\n update_graph: bool = False,\n deep_copy: bool = False,\n **kwargs\n ):\n r\"\"\"\n Applies a transformation to each graph object in parallel by first\n calling `to_data_list`, applying the transform, and then perform\n re-batching again to a `Batch`.\n A transform should edit the graph object,\n including changing the graph structure, and adding\n node/edge/graph attributes.\n The rest are automatically handled by the\n :class:`deepsnap.graph.Graph` object, including everything\n ended with index.\n\n Args:\n transform: Transformation function applied to each graph object.\n update_tensor: Whether use nx graph to update tensor attributes.\n update_graph: Whether use tensor attributes to update nx graphs.\n deep_copy: :obj:`True` if a new deep copy of batch is returned.\n This option allows modifying the batch of graphs without\n changing the graphs in the original dataset.\n kwargs: Parameters used in transform function in\n :class:`deepsnap.graph.Graph` objects.\n\n Returns:\n a batch object containing all transformed graph objects.\n\n \"\"\"\n # TODO: transductive setting, assert update_tensor == True\n return self.from_data_list(\n [\n Graph(graph).apply_transform(\n transform, update_tensor, update_graph, deep_copy, **kwargs\n )\n for graph in self.G\n ]\n )\n\n def apply_transform_multi(\n self,\n transform,\n update_tensors: bool = True,\n update_graphs: bool = False,\n deep_copy: bool = False,\n **kwargs\n ):\n r\"\"\"\n Comparison to apply_transform, this allows multiple graph objects\n to be returned by the supplied transform function.\n\n Args:\n transform: (Multiple return value) tranformation function\n applied to each graph object. It needs to return a tuple of\n Graph objects or internal .G (NetworkX) objects.\n\n Returns:\n a tuple of batch objects. The i-th batch object contains the i-th\n return value of the transform function applied to all graphs\n in the batch.\n \"\"\"\n g_lists = (\n zip(\n *[\n Graph(graph).apply_transform_multi(\n transform, update_tensors, update_graphs,\n deep_copy, **kwargs,\n )\n for graph in self.G\n ]\n )\n )\n return (self.from_data_list(g_list) for g_list in g_lists)\n" ]
[ [ "torch.tensor", "torch.stack", "torch.is_tensor", "torch.full" ] ]
furkanc/Yolov3-Face-Recognition
[ "d3074490a6a7bf83925319ed521b557919d0af7e" ]
[ "face_module/mtcnn_pytorch/src/get_nets.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\nimport numpy as np\n\n\nclass Flatten(nn.Module):\n\n def __init__(self):\n super(Flatten, self).__init__()\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, c, h, w].\n Returns:\n a float tensor with shape [batch_size, c*h*w].\n \"\"\"\n\n # without this pretrained model isn't working\n x = x.transpose(3, 2).contiguous()\n\n return x.view(x.size(0), -1)\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n\n super(PNet, self).__init__()\n\n # suppose we have input with size HxW, then\n # after first layer: H - 2,\n # after pool: ceil((H - 2)/2),\n # after second conv: ceil((H - 2)/2) - 2,\n # after last conv: ceil((H - 2)/2) - 4,\n # and the same for W\n\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 10, 3, 1)),\n ('prelu1', nn.PReLU(10)),\n ('pool1', nn.MaxPool2d(2, 2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(10, 16, 3, 1)),\n ('prelu2', nn.PReLU(16)),\n\n ('conv3', nn.Conv2d(16, 32, 3, 1)),\n ('prelu3', nn.PReLU(32))\n ]))\n\n self.conv4_1 = nn.Conv2d(32, 2, 1, 1)\n self.conv4_2 = nn.Conv2d(32, 4, 1, 1)\n\n weights = np.load('face_module/mtcnn_pytorch/src/weights/pnet.npy')[()]\n for n, p in self.named_parameters():\n p.data = torch.FloatTensor(weights[n])\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, 3, h, w].\n Returns:\n b: a float tensor with shape [batch_size, 4, h', w'].\n a: a float tensor with shape [batch_size, 2, h', w'].\n \"\"\"\n x = self.features(x)\n a = self.conv4_1(x)\n b = self.conv4_2(x)\n a = F.softmax(a, dim=-1)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n\n super(RNet, self).__init__()\n\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 28, 3, 1)),\n ('prelu1', nn.PReLU(28)),\n ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(28, 48, 3, 1)),\n ('prelu2', nn.PReLU(48)),\n ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv3', nn.Conv2d(48, 64, 2, 1)),\n ('prelu3', nn.PReLU(64)),\n\n ('flatten', Flatten()),\n ('conv4', nn.Linear(576, 128)),\n ('prelu4', nn.PReLU(128))\n ]))\n\n self.conv5_1 = nn.Linear(128, 2)\n self.conv5_2 = nn.Linear(128, 4)\n\n weights = np.load('face_module/mtcnn_pytorch/src/weights/rnet.npy')[()]\n for n, p in self.named_parameters():\n p.data = torch.FloatTensor(weights[n])\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, 3, h, w].\n Returns:\n b: a float tensor with shape [batch_size, 4].\n a: a float tensor with shape [batch_size, 2].\n \"\"\"\n x = self.features(x)\n a = self.conv5_1(x)\n b = self.conv5_2(x)\n a = F.softmax(a, dim=-1)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n\n super(ONet, self).__init__()\n\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 32, 3, 1)),\n ('prelu1', nn.PReLU(32)),\n ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(32, 64, 3, 1)),\n ('prelu2', nn.PReLU(64)),\n ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv3', nn.Conv2d(64, 64, 3, 1)),\n ('prelu3', nn.PReLU(64)),\n ('pool3', nn.MaxPool2d(2, 2, ceil_mode=True)),\n\n ('conv4', nn.Conv2d(64, 128, 2, 1)),\n ('prelu4', nn.PReLU(128)),\n\n ('flatten', Flatten()),\n ('conv5', nn.Linear(1152, 256)),\n ('drop5', nn.Dropout(0.25)),\n ('prelu5', nn.PReLU(256)),\n ]))\n\n self.conv6_1 = nn.Linear(256, 2)\n self.conv6_2 = nn.Linear(256, 4)\n self.conv6_3 = 
nn.Linear(256, 10)\n\n weights = np.load('face_module/mtcnn_pytorch/src/weights/onet.npy')[()]\n for n, p in self.named_parameters():\n p.data = torch.FloatTensor(weights[n])\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, 3, h, w].\n Returns:\n c: a float tensor with shape [batch_size, 10].\n b: a float tensor with shape [batch_size, 4].\n a: a float tensor with shape [batch_size, 2].\n \"\"\"\n x = self.features(x)\n a = self.conv6_1(x)\n b = self.conv6_2(x)\n c = self.conv6_3(x)\n a = F.softmax(a, dim = -1)\n return c, b, a\n" ]
[ [ "numpy.load", "torch.FloatTensor", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.PReLU", "torch.nn.functional.softmax", "torch.nn.Conv2d", "torch.nn.Dropout" ] ]
Xarthisius/yt
[ "aad3cfa3b4ebab7838352ab467275a27c26ff363" ]
[ "yt/frontends/halo_catalog/io.py" ]
[ "from collections import defaultdict\n\nimport numpy as np\n\nfrom yt.frontends.gadget_fof.io import IOHandlerGadgetFOFHaloHDF5\nfrom yt.funcs import parse_h5_attr\nfrom yt.units.yt_array import uvstack\nfrom yt.utilities.io_handler import BaseIOHandler\nfrom yt.utilities.on_demand_imports import _h5py as h5py\n\n\nclass IOHandlerYTHaloCatalog(BaseIOHandler):\n _dataset_type = \"ythalocatalog\"\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n raise NotImplementedError\n\n def _read_particle_coords(self, chunks, ptf):\n # This will read chunks and yield the results.\n chunks = list(chunks)\n data_files = set()\n # Only support halo reading for now.\n assert len(ptf) == 1\n assert list(ptf.keys())[0] == \"halos\"\n ptype = \"halos\"\n for chunk in chunks:\n for obj in chunk.objs:\n data_files.update(obj.data_files)\n pn = \"particle_position_%s\"\n for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):\n with h5py.File(data_file.filename, mode=\"r\") as f:\n units = parse_h5_attr(f[pn % \"x\"], \"units\")\n pos = data_file._get_particle_positions(ptype, f=f)\n x, y, z = (self.ds.arr(pos[:, i], units) for i in range(3))\n yield \"halos\", (x, y, z)\n\n def _yield_coordinates(self, data_file):\n pn = \"particle_position_%s\"\n with h5py.File(data_file.filename, mode=\"r\") as f:\n units = parse_h5_attr(f[pn % \"x\"], \"units\")\n x, y, z = (\n self.ds.arr(f[pn % ax][()].astype(\"float64\"), units) for ax in \"xyz\"\n )\n pos = uvstack([x, y, z]).T\n pos.convert_to_units(\"code_length\")\n yield \"halos\", pos\n\n def _read_particle_fields(self, chunks, ptf, selector):\n # Now we have all the sizes, and we can allocate\n chunks = list(chunks)\n data_files = set()\n # Only support halo reading for now.\n assert len(ptf) == 1\n assert list(ptf.keys())[0] == \"halos\"\n for chunk in chunks:\n for obj in chunk.objs:\n data_files.update(obj.data_files)\n pn = \"particle_position_%s\"\n for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):\n si, ei = data_file.start, data_file.end\n with h5py.File(data_file.filename, mode=\"r\") as f:\n for ptype, field_list in sorted(ptf.items()):\n units = parse_h5_attr(f[pn % \"x\"], \"units\")\n pos = data_file._get_particle_positions(ptype, f=f)\n x, y, z = (self.ds.arr(pos[:, i], units) for i in range(3))\n mask = selector.select_points(x, y, z, 0.0)\n del x, y, z\n if mask is None:\n continue\n for field in field_list:\n data = f[field][si:ei][mask].astype(\"float64\")\n yield (ptype, field), data\n\n def _count_particles(self, data_file):\n si, ei = data_file.start, data_file.end\n nhalos = data_file.header[\"num_halos\"]\n if None not in (si, ei):\n nhalos = np.clip(nhalos - si, 0, ei - si)\n return {\"halos\": nhalos}\n\n def _identify_fields(self, data_file):\n with h5py.File(data_file.filename, mode=\"r\") as f:\n fields = [\n (\"halos\", field) for field in f if not isinstance(f[field], h5py.Group)\n ]\n units = {(\"halos\", field): parse_h5_attr(f[field], \"units\") for field in f}\n return fields, units\n\n\nclass HaloDatasetIOHandler:\n \"\"\"\n Base class for io handlers to load halo member particles.\n \"\"\"\n\n def _read_particle_coords(self, chunks, ptf):\n pass\n\n def _read_particle_fields(self, dobj, ptf):\n # separate member particle fields from scalar fields\n scalar_fields = defaultdict(list)\n member_fields = defaultdict(list)\n for ptype, field_list in sorted(ptf.items()):\n for field in field_list:\n if (ptype, field) in self.ds.scalar_field_list:\n 
scalar_fields[ptype].append(field)\n else:\n member_fields[ptype].append(field)\n\n all_data = self._read_scalar_fields(dobj, scalar_fields)\n all_data.update(self._read_member_fields(dobj, member_fields))\n\n for field, field_data in all_data.items():\n yield field, field_data\n\n # This will be refactored.\n _read_particle_selection = IOHandlerGadgetFOFHaloHDF5._read_particle_selection\n\n\nclass IOHandlerYTHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog):\n _dataset_type = \"ythalo\"\n\n def _identify_fields(self, data_file):\n with h5py.File(data_file.filename, mode=\"r\") as f:\n scalar_fields = [\n (\"halos\", field) for field in f if not isinstance(f[field], h5py.Group)\n ]\n units = {(\"halos\", field): parse_h5_attr(f[field], \"units\") for field in f}\n if \"particles\" in f:\n id_fields = [(\"halos\", field) for field in f[\"particles\"]]\n else:\n id_fields = []\n\n return scalar_fields + id_fields, scalar_fields, id_fields, units\n\n def _read_member_fields(self, dobj, member_fields):\n all_data = defaultdict(lambda: np.empty(dobj.particle_number, dtype=np.float64))\n if not member_fields:\n return all_data\n field_start = 0\n for i, data_file in enumerate(dobj.field_data_files):\n start_index = dobj.field_data_start[i]\n end_index = dobj.field_data_end[i]\n pcount = end_index - start_index\n if pcount == 0:\n continue\n field_end = field_start + end_index - start_index\n with h5py.File(data_file.filename, mode=\"r\") as f:\n for ptype, field_list in sorted(member_fields.items()):\n for field in field_list:\n field_data = all_data[(ptype, field)]\n my_data = f[\"particles\"][field][start_index:end_index].astype(\n \"float64\"\n )\n field_data[field_start:field_end] = my_data\n field_start = field_end\n return all_data\n\n def _read_scalar_fields(self, dobj, scalar_fields):\n all_data = {}\n if not scalar_fields:\n return all_data\n with h5py.File(dobj.scalar_data_file.filename, mode=\"r\") as f:\n for ptype, field_list in sorted(scalar_fields.items()):\n for field in field_list:\n data = np.array([f[field][dobj.scalar_index]]).astype(\"float64\")\n all_data[(ptype, field)] = data\n return all_data\n" ]
[ [ "numpy.array", "numpy.clip", "numpy.empty" ] ]
d813s909q/tensortflow
[ "ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5" ]
[ "tensorflow/python/ops/while_v2.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"while_v2 and gradient.\n\nThis is a version of while_loop that emits a single While op, as well as the\ngradient function for While ops produced by while_loop. This will eventually\nreplace the current tf.while_loop implementation once it reaches feature and\nperformance parity.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import func_graph as func_graph_module\nfrom tensorflow.python.framework import function_def_to_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import control_flow_util_v2 as util\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import gen_functional_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import list_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.util import nest\n\n# pylint: disable=protected-access\n\n# TODO(b/79881896): Handle external control dependencies. tf.while_loop allows\n# control dependencies on external nodes with at least 1 output.\n# Another idea is to create const nodes outside the loop and add control edges\n# to them and then pass those in as data inputs. This should probably be\n# handled in the CapturingGraph itself.\n\n\ndef while_loop(cond,\n body,\n loop_vars,\n shape_invariants=None,\n maximum_iterations=None,\n name=None,\n return_same_structure=True):\n \"\"\"Like tf.while_loop, except emits a single While op.\"\"\"\n maximum_iterations = _validate_and_convert_to_tensor(maximum_iterations)\n # Keep the original loop_vars around to know which args were TensorArrays.\n orig_loop_vars = loop_vars\n # Cache its length since we use it at multiple places below.\n len_orig_loop_vars = len(orig_loop_vars)\n\n # Convert TensorArrays to their flow variables. These get converted back to\n # TensorArrays before calling `cond` and `body`. 
See `wrapped_cond` and\n # `wrapped_body` below.\n loop_vars = list(_tensor_array_to_flow(orig_loop_vars))\n loop_vars = nest.map_structure(\n ops.internal_convert_to_tensor_or_indexed_slices, loop_vars)\n if shape_invariants is not None:\n nest.assert_same_structure(orig_loop_vars, shape_invariants)\n else:\n shape_invariants = nest.map_structure(lambda t: t.shape, loop_vars)\n\n if not name:\n name = \"while\"\n\n with ops.name_scope(name) as scope:\n with ops.name_scope(None):\n cond_name = util.unique_fn_name(scope, \"cond\")\n body_name = util.unique_fn_name(scope, \"body\")\n\n loop_counter = constant_op.constant(\n 0,\n dtype=maximum_iterations.dtype\n if maximum_iterations is not None else None,\n name=\"loop_counter\")\n # Add loop counter needed for computing gradients.\n loop_vars = [loop_counter] + loop_vars\n\n shape_invariants = type(shape_invariants)([tensor_shape.scalar()\n ]) + shape_invariants\n\n # Automatic control dependencies are added in defuns, but not in v1\n # graphs. Propagate that behavior here.\n add_control_dependencies = util.in_defun()\n\n # Build a `cond` wrapper that can handle the extra counter loop_var.\n def wrapped_cond(loop_counter, *args):\n # Convert the flow variables in `args` to TensorArrays. `args` should\n # already have the same structure as `orig_loop_vars` but currently there\n # is no nest.zip so we call `_pack_sequence_as` which flattens both\n # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays\n # and packs it into the structure of `orig_loop_vars`.\n if maximum_iterations is None:\n return cond(*_pack_sequence_as(orig_loop_vars, args))\n else:\n return math_ops.logical_and(\n loop_counter < maximum_iterations,\n cond(*_pack_sequence_as(orig_loop_vars, args)))\n\n cond_graph = func_graph_module.func_graph_from_py_func(\n cond_name,\n wrapped_cond,\n loop_vars, {},\n signature=_build_signature(loop_vars, shape_invariants),\n func_graph=util.WhileCondFuncGraph(cond_name),\n add_control_dependencies=add_control_dependencies)\n\n # Add external_captures of cond to the list of loop vars.\n # Note that external tensors will be treated as loop invariants, i.e.,\n # the value of that tensor in each iteration is the same as it was at the\n # beginning of the loop execution.\n loop_vars = loop_vars + cond_graph.external_captures\n shape_invariants = shape_invariants + type(shape_invariants)(\n [t.shape for t in cond_graph.external_captures])\n\n def wrapped_body(loop_counter, *args):\n \"\"\"Loop body augmented with counter update.\n\n Args:\n loop_counter: Loop counter which needs to be incremented in the body.\n *args: List of args\n args[:len_orig_loop_vars] - Args for the original loop body.\n args[len_orig_loop_vars:] - External captures of cond. These get\n passed through as is.\n\n Returns:\n A list of tensors the same length as args.\n \"\"\"\n # Convert the flow variables in `args` to TensorArrays. 
`args` should\n # already have the same structure as `orig_loop_vars` but currently there\n # is no nest.zip so we call `_pack_sequence_as` which flattens both\n # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays\n # and packs it into the structure of `orig_loop_vars`.\n outputs = body(\n *_pack_sequence_as(orig_loop_vars, args[:len_orig_loop_vars]))\n if not nest.is_sequence(outputs):\n outputs = [outputs]\n # Compare the structure of input and output of body converting the\n # top-level tuples to list to be compatible with legacy while_loop.\n nest.assert_same_structure(list(outputs), list(orig_loop_vars))\n\n outputs = _tensor_array_to_flow(outputs)\n\n # Return the external_captures of cond_graph as is, i.e., treat them as\n # loop invariants.\n # TODO(srbs): Update lowering code to create _Enter nodes with\n # is_constant=True for inputs that are directly passed to outputs.\n return [loop_counter + 1] + list(outputs) + list(\n args[len_orig_loop_vars:])\n\n body_graph = func_graph_module.func_graph_from_py_func(\n body_name,\n wrapped_body,\n loop_vars, {},\n signature=_build_signature(loop_vars, shape_invariants),\n func_graph=util.WhileBodyFuncGraph(body_name),\n add_control_dependencies=add_control_dependencies)\n # Add external captures of body to the list of loop vars.\n # Note that external tensors will be treated as loop invariants, i.e.,\n # the value of that tensor in each iteration is the same as it was at the\n # beginning of the loop execution.\n loop_vars = loop_vars + body_graph.external_captures\n # TODO(srbs): Update lowering code to create _Enter nodes with\n # is_constant=True for inputs that are directly passed to outputs.\n body_graph.outputs.extend(body_graph.internal_captures)\n\n # Capture `external_captures` of `body_graph` in `cond_graph` so that it\n # expects to receive those as arguments.\n # TODO(b/118457764): Dedup tensors that are captured in both the cond and\n # body. This logic already exists in cond_v2.\n with cond_graph.as_default():\n for external_capture in body_graph.external_captures:\n assert external_capture not in cond_graph.captures, (\n \"Looks like both cond and body are capturing the same tensor %s. \"\n \"This is not supported yet. For now consider passing,\"\n \" this as a loop variable.\" % str(external_capture))\n cond_graph.capture(external_capture)\n\n # Make sure that the shapes of the loop outputs are compatible with the\n # shape invariants, or the shapes of the loop vars if the invariants are not\n # specified.\n num_flattened_outputs = len(nest.flatten(orig_loop_vars))\n _check_shapes_compat(\n body_graph.outputs[1:1 + num_flattened_outputs],\n nest.flatten(shape_invariants[1:1 + len_orig_loop_vars]),\n nest.flatten(loop_vars[1:1 + len_orig_loop_vars]))\n flattened_loop_vars = nest.flatten(loop_vars)\n _check_num_inputs_outputs(cond_graph, body_graph,\n len(flattened_loop_vars))\n\n outputs = gen_functional_ops._while(\n flattened_loop_vars,\n util.create_new_tf_function(cond_graph),\n util.create_new_tf_function(body_graph),\n output_shapes=[t.shape for t in body_graph.outputs],\n name=scope)\n\n _copy_handle_data(body_graph.outputs, outputs)\n util.maybe_set_lowering_attr(outputs[0].op)\n _maybe_set_maximum_iterations_attr(outputs[0].op, maximum_iterations)\n\n # Return identities for each output of the While op, rather than the output\n # of the While op directly. 
This makes pruning work if the output of\n # while_loop() is fetched: the lowering pass converts the While outputs into\n # IdentityN outputs, which if fetched will cause all ops in the body to be\n # run (since it takes all exit ops as input). After lowering, each output\n # identity op will end up with only the appropriate exit op as input.\n outputs = tuple(array_ops.identity(t) for t in outputs)\n\n # First var is loop counter.\n outputs = _pack_sequence_as(orig_loop_vars,\n outputs[1:1 + num_flattened_outputs])\n\n if return_same_structure:\n return outputs\n\n flattened_outputs = nest.flatten(outputs)\n if len(flattened_outputs) == 1:\n return flattened_outputs[0]\n else:\n return outputs\n\n\[email protected](\"While\")\ndef _WhileGrad(op, *grads): # pylint: disable=invalid-name\n \"\"\"The gradient of a While op produced by while_loop.\"\"\"\n cond_graph = _get_graph(op, \"cond\")\n body_graph = _get_graph(op, \"body\")\n orig_num_params = len(body_graph.outputs)\n\n maximum_iterations = op.get_attr(\n \"_maximum_iterations\") if _is_in_xla_context() else None\n assert not _is_in_xla_context() or maximum_iterations is not None\n\n # Set the incoming gradient of non-trainable inputs to None. It is possible\n # that we receive non-None gradients for non-trainable types in nested while\n # loops because we accumulate outputs of the inner while as variant tensors\n # which are trainable and hence receive zeros_like tensors in the gradient\n # pass. The non-trainable tensors then receive the popped zeros tensor from\n # this zeros variant. The gradient for the loop vars corresponding to these\n # tensors is None or zeros (this happens only if the loop var is accumulated\n # as well) in _grad_fn so we reset these.\n # TODO(b/118712257): Remove the IsTrainable filter once we can handle None\n # output grads in _grad_fn.\n grads = [\n None if not _is_trainable(output) else grad\n for grad, output in zip(grads, body_graph.outputs)\n ]\n\n # Ensure that all non-resource trainable outputs have incoming gradients.\n assert all(g is not None or o.dtype == dtypes.resource or not _is_trainable(o)\n for o, g in zip(body_graph.outputs, grads)\n ), \"All trainable loop vars must receive incoming gradients.\"\n # We compute the gradient for the sub-graph between trainable ys and xs\n # with non-None incoming gradients. 
We later pad the None's to the list of\n # outputs.\n ys, xs, non_none_grads = zip(*[(y, x, grad) for (y, x, grad) in zip(\n body_graph.outputs, body_graph.inputs, grads) if grad is not None])\n\n body_grad_graph, args = _create_grad_func(\n ys, xs, non_none_grads, cond_graph, body_graph,\n util.unique_grad_fn_name(body_graph.name), op, maximum_iterations)\n\n if body_grad_graph.while_op_needs_rewrite:\n # Modify 'op' to output the intermediate accumulators needed by the grad\n # function.\n # NOTE(skyewm): if there are any active sessions, this modification to `op`\n # may make them unrunnable!\n\n cond_graph.name += \"_rewritten\"\n body_graph.name += \"_rewritten\"\n\n new_inputs = body_grad_graph.empty_tensor_lists\n new_outputs = body_graph.outputs[orig_num_params:]\n\n op._set_func_attr(\"cond\", util.create_new_tf_function(cond_graph))\n op._set_func_attr(\"body\", util.create_new_tf_function(body_graph))\n op._set_type_list_attr(\"T\", body_graph.output_types)\n op._set_shape_list_attr(\"output_shapes\", body_graph.output_shapes)\n op._add_while_inputs(new_inputs)\n op._add_outputs([t.dtype for t in new_outputs],\n [t.shape for t in new_outputs])\n _copy_handle_data(new_outputs, op.outputs[orig_num_params:])\n\n captured_inputs = _resolve_grad_captures(body_graph, body_grad_graph, op)\n loop_vars = args + captured_inputs\n\n def grad_cond(counter, max_iters, *unused_args):\n return counter < max_iters\n\n grad_cond_name = util.unique_grad_fn_name(op.get_attr(\"cond\").name)\n cond_grad_graph = func_graph_module.func_graph_from_py_func(\n grad_cond_name, grad_cond, loop_vars, {},\n func_graph=util.WhileCondFuncGraph(grad_cond_name))\n\n _check_num_inputs_outputs(cond_grad_graph, body_grad_graph, len(loop_vars))\n\n outputs = gen_functional_ops._while(\n loop_vars,\n util.create_new_tf_function(cond_grad_graph),\n util.create_new_tf_function(body_grad_graph),\n output_shapes=[t.shape for t in body_grad_graph.outputs],\n name=\"%s_grad\" % op.name)\n\n _copy_handle_data(body_grad_graph.outputs, outputs)\n util.maybe_set_lowering_attr(outputs[0].op)\n _maybe_set_maximum_iterations_attr(outputs[0].op, maximum_iterations)\n\n # See comment in while_loop.\n outputs = [array_ops.identity(t) for t in outputs]\n\n # Set None as the output gradient for tensors with None input gradient.\n # outputs[0] is the loop counter.\n # outputs[1] is the total number of loop iterations.\n index = 2\n none_padded_outputs = []\n for g in grads:\n if g is None:\n none_padded_outputs.append(None)\n else:\n none_padded_outputs.append(outputs[index])\n index += 1\n return none_padded_outputs\n\n\ndef _is_trainable(tensor):\n \"\"\"Returns whether the given tensor is trainable.\"\"\"\n if not gradients_impl.IsTrainable(tensor):\n return False\n\n # Special case: untrainable accumulator output. The gradients algorithm\n # doesn't know about tensor lists of untrainable elements. 
In theory the\n # tensor list gradient functions should return None as appropriate, but\n # because we can't return None from the gradient function we filter out\n # untrainable accumulator output here to avoid computing the gradient at all.\n if tensor.op.type == \"TensorListPopBack\" and tensor.value_index == 0:\n assert tensor.dtype == dtypes.variant\n element_type = tensor.op.get_attr(\"element_dtype\")\n return gradients_impl.IsTrainable(element_type)\n\n return True\n\n\ndef _validate_and_convert_to_tensor(maximum_iterations):\n \"\"\"Checks that `maximum_iterations` is valid.\n\n In XLA context, `maximum_iterations` is required and must be statically\n inferable, e.g. output tensor of a Const node.\n\n Args:\n maximum_iterations: The maximum_iterations passed to while_loop.\n\n Returns:\n A scalar valued tensor of type int32 or None.\n\n Raises:\n ValueError: If `maximum_iterations` is invalid.\n \"\"\"\n if _is_in_xla_context():\n if maximum_iterations is None:\n raise ValueError(\"maximum_iterations is None. It is required and must \"\n \"be statically known (e.g. a constant value or known \"\n \"shape dimension) when building while_loop in XLA \"\n \"context.\")\n if isinstance(maximum_iterations, ops.Tensor):\n # Get the constant value from the `maximum_iterations` tensor to avoid\n # capturing a Const tensor from outside this graph.\n maximum_iterations = tensor_util.constant_value(maximum_iterations)\n if maximum_iterations is None:\n raise ValueError(\"maximum_iterations must be statically known (e.g. a \"\n \"constant value or known shape dimension) when \"\n \"building while_loop in XLA context.\")\n\n if maximum_iterations is not None:\n # EmptyTensorList expects `max_num_elements` to be of type int32.\n maximum_iterations = ops.convert_to_tensor(\n maximum_iterations, dtype=dtypes.int32, name=\"maximum_iterations\")\n if maximum_iterations.shape.ndims != 0:\n raise ValueError(\"maximum_iterations must be a scalar, saw shape: %s\" %\n maximum_iterations.shape)\n return maximum_iterations\n\n\n# TODO(srbs): Pull this into common utils for cond_v2 and while_v2.\ndef _get_graph(while_op, func_attr_name):\n \"\"\"Returns `FuncGraph` for the given function attribute.\n\n Args:\n while_op: The While Operation.\n func_attr_name: string\n\n Returns:\n `FuncGraph`\n \"\"\"\n # TODO(srbs): Handle TensorShapeProto in function_def_to_graph.input_shapes.\n input_shapes = [\n tensor_shape.TensorShape(s) for s in while_op.get_attr(\"output_shapes\")\n ]\n func_name = while_op.get_attr(func_attr_name).name\n fdef = while_op.graph._get_function(func_name).definition\n # `while_op.graph` may not be the same as `ops.get_default_graph()` e.g.\n # if the `while_op` is in the body of another if/while/defun. We build the\n # `func_graph` with `while_op.graph` as its `outer_graph`. This resembles how\n # the `FuncGraph` was built in the forward pass. 
We need this so that we can\n # appropriately capture references to outer tensors in the nested grad graphs.\n with while_op.graph.as_default():\n func_graph = function_def_to_graph.function_def_to_graph(fdef, input_shapes)\n func_graph._while = while_op\n return func_graph\n\n\ndef _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op,\n max_iters):\n \"\"\"Builds and returns the gradient FuncGraph of `func_graph` and its args.\n\n The returned grad_func_graph must be called with the returned\n args + grad_func_graph.captures.\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n grads: The incoming grads for `ys`.\n cond_graph: FuncGraph for the forward cond function.\n body_graph: FuncGraph for the forward body function.\n name: Name of the returned gradient function.\n while_op: The forward While op.\n max_iters: the maximum number of iterations, or None if no limit.\n\n Returns:\n 2-tuple of (grad_func_graph, args).\n \"\"\"\n assert len(ys) == len(grads)\n\n total_iters = while_op.outputs[0]\n counter = constant_op.constant(\n 0, dtype=total_iters.dtype, name=\"grad_counter\")\n\n args = [counter, total_iters] + list(grads)\n # Note: The returned function does not have `args` in the list of\n # `external_captures`.\n grad_func_graph = func_graph_module.func_graph_from_py_func(\n name,\n lambda *args: _grad_fn(ys, xs, args, body_graph),\n args, {},\n func_graph=_WhileBodyGradFuncGraph(name, cond_graph, body_graph,\n max_iters))\n\n # Add the popped accumulators to the list of outputs.\n for internal_capture in grad_func_graph.internal_captures:\n if internal_capture in grad_func_graph.popped_tensor_lists:\n grad_func_graph.outputs.append(\n grad_func_graph.popped_tensor_lists[internal_capture])\n elif internal_capture.dtype == dtypes.resource:\n grad_func_graph.outputs.append(internal_capture)\n else:\n raise ValueError(\"Tensor %s is in list of internal_captures but is\"\n \" neither a resource nor is in popped_tensor_lists.\" %\n str(internal_capture))\n\n return grad_func_graph, args\n\n\ndef _grad_fn(ys, xs, args, func_graph):\n \"\"\"Computes the gradient of `func_graph` in the current graph.\n\n This function builds the gradient graph of the corresponding forward-pass\n `func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs.\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n args: The input arguments.\n args[0] - Loop counter\n args[1] - Total number of iterations.\n args[2:] - Incoming gradients for `ys`.\n func_graph: function.FuncGraph. The corresponding forward-pass function.\n\n Returns:\n The output gradient Tensors.\n \"\"\"\n grad_ys = args[2:]\n\n # Build the gradient graph. Note that this builds the gradient computation of\n # func_graph in the current graph, which requires capturing tensors from\n # func_graph. The captured func_graph tensors are resolved to external tensors\n # after the forward While op has been rewritten in _resolve_grad_captures.\n # TODO(srbs): Mark GradientsHelper as public?\n grad_outs = gradients_impl._GradientsHelper(\n ys, xs, grad_ys=grad_ys, src_graph=func_graph,\n unconnected_gradients=\"zero\")\n\n # TODO(b/118712257): Handle the case when grad_outs has None's e.g. 
when there\n # is a tf.StopGradient in the loop body.\n assert all(g is not None for g in grad_outs)\n counter = args[0]\n total_iters = args[1]\n return [counter + 1, total_iters] + grad_outs\n\n\ndef _resolve_grad_captures(body_graph, body_grad_graph, while_op):\n \"\"\"Returns the tensors to pass as captured inputs to `body_grad_graph`.\n\n `body_grad_graph` may have external references to:\n 1. Its outer graph containing the input gradients. These are left as-is.\n 2. Accumulators captured from the forward-pass graph. These should have been\n added as `while_op` outputs after the gradient graph was built. We replace\n these with the corresponding output of `while_op`, i.e. a tensor in\n `body_graph.outer_graph`. In the case of nested control flow or functions,\n the gradient logic handling `body_grad_graph.outer_graph` will make sure\n the tensor from `body_graph.outer_graph` is also correctly captured.\n\n Args:\n body_graph: FuncGraph. The forward-pass body function.\n body_grad_graph: FuncGraph. The body gradients function.\n while_op: The forward-pass While Operation calling `body_graph`.\n\n Returns:\n A list of input tensors to be passed as the captured inputs to\n `body_grad_graph`.\n \"\"\"\n new_capture_inputs = []\n for t in body_grad_graph.external_captures:\n # All values captured by gradient computation should be from the forward\n # graph or a captured resource variable (note that input gradients are\n # regular non-captured inputs).\n if t.graph == body_graph:\n # Captured accumulator\n t = while_op.outputs[t.graph.outputs.index(t)]\n # Note: We rely on the capturing logic of the gradient While op graph to\n # correctly capture the tensors in `body_graph.outer_graph`. Both cond_v2\n # and while_v2 handle this while building their gradient functions.\n assert t.graph == body_graph.outer_graph\n else:\n # Captured resource variable\n assert t.dtype == dtypes.resource\n\n new_capture_inputs.append(t)\n return new_capture_inputs\n\n\ndef _get_accumulator(tensor):\n r\"\"\"Returns TensorList if any containing accumulated values of tensor.\n\n We try to find a pattern of the form:\n\n input_tl tensor\n \\ /\n (TensorListPushBack)\n |\n output_tl\n\n which satisfies the following conditions:\n\n 1. input_tl must be in tensor.graph.inputs.\n 2. output_tl or Identity(output_tl) must be in tensor.graph.outputs.\n 3. 
tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_t).\n\n output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is\n returned if such a pattern is found else None is returned.\n\n Args:\n tensor: The Tensor to be accumulated.\n\n Returns:\n A variant tensor in the same graph as `tensor` or None if no accumulator is\n found.\n \"\"\"\n assert isinstance(tensor.graph, func_graph_module.FuncGraph)\n\n def get_func_graph_output(t):\n \"\"\"Returns t or Identity(t) whichever exists in graph outputs else None.\"\"\"\n if t in tensor.graph.outputs:\n return t\n # tf.defun adds an Identity for each output, check whether that is the case.\n identity_op = t.consumers()[0]\n if (identity_op.type == \"Identity\" and\n identity_op.outputs[0] in tensor.graph.outputs):\n return identity_op.outputs[0]\n return None\n\n for consumer in tensor.consumers():\n # Find the consumer that is a TensorListPushBack node whose TensorList input\n # is in the list of function inputs.\n if (consumer.type != \"TensorListPushBack\" or\n consumer.inputs[0] not in tensor.graph.inputs):\n continue\n\n output = get_func_graph_output(consumer.outputs[0])\n if output is None:\n # The TensorList output of `consumer` is not in the list of function\n # outputs.\n continue\n\n accum_input_idx = tensor.graph.inputs.index(consumer.inputs[0])\n accum_output_idx = tensor.graph.outputs.index(output)\n if accum_input_idx == accum_output_idx:\n return output\n return None\n\n\nclass _WhileBodyGradFuncGraph(util.WhileBodyFuncGraph):\n \"\"\"FuncGraph for the gradient function of the body of a While op.\n\n Contains the logic for capturing the tensors from the body of the forward\n While op which is as follows:\n 1. If the tensor is of resource type (these are not accumulated):\n a. Ensure that the tensor is a loop invariant, i.e., it exists in both loop\n inputs and outputs at the same index.\n b. Lookup the corresponding resource tensor in the forward outer graph and\n try to capture that.\n 2. If the tensor is not of resource type:\n a. Create an accumulator for that tensor and output it from the forward\n pass. Note this also requires adding it as an input to the forward pass.\n b. Capture the accumulator from the forward pass in this FuncGraph. This\n will later be resolved to the correct output of the forward While op.\n c. Pop a value from the captured placeholder and use it as the captured\n value for the forward pass tensor.\n\n This only allows capturing tensors in the forward graph. A ValueError is\n raised if an attempt is made to capture a tensor not in the forward graph.\n To manually capture capture a tensor that is not in the forward graph, call\n `capture` with `whitelisted=True`.\n\n Note: The `captures` dict does not contain the forward tensor since it is not\n directly captured. It contains the accumulator corresponding to this forward\n tensor.\n\n Attributes:\n while_op_needs_rewrite: True if any non-resource intermediates were\n captured, meaning the forward While op needs to be rewritten to output the\n corresponding accumulators.\n empty_tensor_lists: list of EmptyTensorList tensors to be used as initial\n input to the new accumulators in the forward graph.\n popped_tensor_lists: dict from the captured accumulator placeholder to the\n TensorList obtained after popping the intermediate tensor from it. 
The\n values of this dict need to be added to the list of outputs.\n \"\"\"\n\n def __init__(self, name, forward_cond_graph, forward_body_graph, max_iters):\n super(_WhileBodyGradFuncGraph, self).__init__(name)\n self.empty_tensor_lists = []\n self.popped_tensor_lists = {}\n # FuncGraph for the body of the forward While op.\n self._forward_graph = forward_body_graph\n # FuncGraph for the cond of the forward While op.\n self._forward_cond_graph = forward_cond_graph\n self._maximum_iterations = max_iters\n # Dict from forward intermediate tensor to its indirectly captured tensor\n # in this graph. Indirect capturing happens in two ways:\n # 1. For non-resource tensors we capture their accumulators from the forward\n # outer graph and pop values from that accumulator inside this graph\n # using TensorListPopBack.\n # 2. For resource tensors we directly capture their corresponding tensor\n # in the forward outer graph.\n self._indirect_captures = {}\n\n @property\n def while_op_needs_rewrite(self):\n return self.empty_tensor_lists\n\n def capture(self, tensor, name=None, whitelisted=False):\n \"\"\"Selectively captures external tensors.\n\n If `whitelisted` is False only allows capturing tensors in the\n `_forward_graph`.\n\n Args:\n tensor: Tensor. May be from this FuncGraph or a different graph.\n name: Optional name if a placeholder is created.\n whitelisted: If False (default), only allows capturing tensors from the\n forward graph.\n\n Returns:\n The placeholder in this graph for the tensor.\n\n Raises:\n ValueError: If attempting to capture an external tensor not in the forward\n graph with `whitelisted` set to False.\n \"\"\"\n if (not whitelisted and tensor.graph is not self and\n tensor.graph != self._forward_graph):\n raise ValueError(\"Attempting to capture tensor\", str(tensor),\n \" which is not in the forward graph but in \",\n _graph_name(tensor.graph), \".\")\n return super(_WhileBodyGradFuncGraph, self).capture(tensor, name)\n\n def _capture_helper(self, tensor, name):\n if tensor.graph is not self._forward_graph:\n return super(_WhileBodyGradFuncGraph, self)._capture_helper(tensor, name)\n\n while tensor.op.type == \"Identity\":\n # We do not accumulate the output of identity nodes so we try to capture\n # the input of the Identity node instead.\n tensor = tensor.op.inputs[0]\n\n captured_tensor = self._indirect_captures.get(tensor)\n if captured_tensor is not None:\n return captured_tensor\n\n if tensor.dtype == dtypes.resource:\n # Resource-type tensors are not accumulated.\n # If a resource tensor exists in the loop body it must either be a loop\n # input or an output of a nested While op inside the loop body which\n # had captured the external resource.\n if tensor in self._forward_graph.inputs:\n index = self._forward_graph.inputs.index(tensor)\n elif tensor.op.type == \"While\":\n # Captured resources occur at the same index in the lists of inputs and\n # outputs of a while op. 
So we lookup the input of `tensor.op` at the\n # same index as the index of `tensor` in the `tensor.op.outputs`.\n index = self._forward_graph.inputs.index(\n tensor.op.inputs[tensor.value_index])\n else:\n raise ValueError(\n \"Taking gradient of a while loop which creates\"\n \" a resource in its body is not supported: %s\" % str(tensor))\n # This must be a loop invariant.\n assert self._forward_graph.inputs[index] == self._forward_graph.outputs[\n index], \"Resource tensors must be loop invariants %s.\" % str(\n self._forward_graph._while.inputs[index])\n tensor_in_outer_graph = self._forward_graph._while.inputs[index]\n self._indirect_captures[tensor] = self.capture(\n tensor_in_outer_graph, whitelisted=True)\n return self._indirect_captures[tensor]\n\n # Create or find an existing accumulator output for `tensor` in the forward\n # graph, and fetch from this accumulator in the gradient graph to get the\n # raw intermediate value.\n accumulator = _get_accumulator(tensor)\n if accumulator is None:\n # Create the initial empty tensor list.\n with self._forward_graph.outer_graph.as_default():\n tensor_list = list_ops.empty_tensor_list(\n element_dtype=tensor.dtype, element_shape=tensor.shape,\n max_num_elements=self._maximum_iterations)\n self.empty_tensor_lists.append(tensor_list)\n\n # Push the intermediate tensor to the tensor list. This captures\n # `tensor_list`.\n with self._forward_graph.as_default():\n accumulator = list_ops.tensor_list_push_back(tensor_list, tensor)\n # Add the modified tensor list to the list of outputs. This output will be\n # all the accumulated values.\n self._forward_graph.outputs.append(accumulator)\n\n # Capture in the cond graph as well so the forward cond and body inputs\n # match.\n with self._forward_cond_graph.as_default():\n self._forward_cond_graph.capture(tensor_list)\n\n # Capture the accumulator tensor list in the gradient graph directly from\n # the forward graph -- we'll later modify this to capture the final list\n # output by the forward While op instead.\n captured_accumulator = super(_WhileBodyGradFuncGraph, self)._capture_helper(\n accumulator, name)\n\n # Pop the intermediate value from the tensor list in the gradient graph.\n new_tensor_list, captured_tensor = list_ops.tensor_list_pop_back(\n captured_accumulator, element_dtype=tensor.dtype)\n\n self._indirect_captures[tensor] = captured_tensor\n self.popped_tensor_lists[captured_accumulator] = new_tensor_list\n return captured_tensor\n\n\ndef _check_shapes_compat(output_tensors, shape_invariants, input_tensors):\n for (t, shape, input_t) in zip(output_tensors, shape_invariants,\n input_tensors):\n if not control_flow_ops._ShapeLessThanOrEqual(t.shape, shape):\n raise ValueError(\n \"Input tensor '%s' enters the loop with shape %s, but has \"\n \"shape %s after one iteration. 
To allow the shape to vary across \"\n \"iterations, use the `shape_invariants` argument of tf.while_loop to \"\n \"specify a less-specific shape.\" % (input_t.name, shape, t.shape))\n\n\ndef _check_num_inputs_outputs(cond_graph, body_graph, num_flattened_loop_vars):\n \"\"\"Checks the number of inputs/outputs of `cond_graph` and `body_graph`.\"\"\"\n assert len(cond_graph.inputs) == num_flattened_loop_vars, (\n \"cond_graph takes %d inputs; Expected: %d\" % (len(cond_graph.inputs),\n num_flattened_loop_vars))\n assert len(cond_graph.outputs) == 1, (\n \"cond_graph has %d outputs; Expected: 1\" % len(cond_graph.outputs))\n assert len(body_graph.inputs) == num_flattened_loop_vars, (\n \"body_graph takes %d inputs; Expected: %d\" % (len(cond_graph.inputs),\n num_flattened_loop_vars))\n assert len(body_graph.outputs) == num_flattened_loop_vars, (\n \"body_graph has %d outputs; Expected: %d\" % (len(body_graph.outputs),\n num_flattened_loop_vars))\n\n\ndef _copy_handle_data(src_tensors, tgt_tensors):\n for src_t, tgt_t in zip(src_tensors, tgt_tensors):\n custom_gradient.copy_handle_data(src_t, tgt_t)\n\n\ndef _maybe_set_maximum_iterations_attr(op, maximum_iterations):\n if control_flow_util.IsInXLAContext(op):\n # Store the maximum_iterations to use in the gradient pass.\n op._set_attr( # pylint: disable=protected-access\n \"_maximum_iterations\",\n attr_value_pb2.AttrValue(\n i=tensor_util.constant_value(maximum_iterations)))\n\n\n# TODO(srbs): This method should be in control_flow_util but that introduces\n# a circular dependency ops -> control_flow_util -> ops.\ndef _is_in_xla_context():\n \"\"\"Returns whether the current context is inside an XLA context.\"\"\"\n outer_graph = ops.get_default_graph()\n # The `_control_flow_context` is not copied when building a FuncGraph so\n # we look it up from the base graph.\n while isinstance(outer_graph, func_graph_module.FuncGraph):\n outer_graph = outer_graph.outer_graph\n cur_ctxt = outer_graph._get_control_flow_context() # pylint: disable=protected-access\n return control_flow_util.GetContainingXLAContext(cur_ctxt) is not None\n\n\ndef _graph_name(graph):\n if isinstance(graph, func_graph_module.FuncGraph):\n return graph.name\n return \"Base\"\n\n\ndef _pack_sequence_as(structure_with_tas, loop_vars):\n \"\"\"Like `nest.pack_sequence_as` but also replaces flows with TensorArrays.\"\"\"\n\n def flow_to_tensor_array(flow, ta): # pylint: disable=missing-docstring\n if isinstance(ta, tensor_array_ops.TensorArray):\n # pylint: disable=protected-access\n new_ta = tensor_array_ops.TensorArray(\n dtype=ta.dtype,\n handle=ta.handle,\n flow=flow,\n infer_shape=ta._infer_shape,\n colocate_with_first_write_call=ta._colocate_with_first_write_call)\n new_ta._colocate_with = ta._colocate_with\n new_ta._element_shape = ta._element_shape\n # pylint: enable=protected-access\n return new_ta\n return flow\n\n flattened_loop_vars = [\n flow_to_tensor_array(*z)\n for z in zip(nest.flatten(loop_vars), nest.flatten(structure_with_tas))\n ]\n return nest.pack_sequence_as(structure_with_tas, flattened_loop_vars)\n\n\ndef _tensor_array_to_flow(loop_vars):\n\n def f(maybe_ta):\n if isinstance(maybe_ta, tensor_array_ops.TensorArray):\n return maybe_ta.flow\n return maybe_ta\n\n return nest.map_structure(f, loop_vars)\n\n\ndef _build_signature(loop_vars, shape_invariants):\n return nest.pack_sequence_as(loop_vars, [\n tensor_spec.TensorSpec(s, t.dtype, name=t.op.name)\n for s, t in zip(nest.flatten(shape_invariants), nest.flatten(loop_vars))\n ])\n\n\n# pylint: 
enable=protected-access\n" ]
[ [ "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.ops.control_flow_util.IsInXLAContext", "tensorflow.python.ops.control_flow_ops._ShapeLessThanOrEqual", "tensorflow.python.util.nest.flatten", "tensorflow.python.util.nest.is_sequence", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.control_flow_util_v2.create_new_tf_function", "tensorflow.python.ops.custom_gradient.copy_handle_data", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.ops.list_ops.empty_tensor_list", "tensorflow.python.ops.gradients_impl.IsTrainable", "tensorflow.python.ops.list_ops.tensor_list_push_back", "tensorflow.python.ops.list_ops.tensor_list_pop_back", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.gradients_impl._GradientsHelper", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.control_flow_util_v2.in_defun", "tensorflow.python.ops.control_flow_util_v2.WhileCondFuncGraph", "tensorflow.python.ops.control_flow_util_v2.maybe_set_lowering_attr", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.control_flow_util_v2.WhileBodyFuncGraph", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.control_flow_util_v2.unique_grad_fn_name", "tensorflow.python.ops.tensor_array_ops.TensorArray", "tensorflow.python.ops.control_flow_util_v2.unique_fn_name", "tensorflow.python.framework.function_def_to_graph.function_def_to_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.control_flow_util.GetContainingXLAContext", "tensorflow.python.util.nest.map_structure", "tensorflow.python.util.nest.pack_sequence_as" ] ]
larksq/Lane-Lines-Detection-and-Localization
[ "f07d7feaca7c5e1132bf22c27745328968441ff0" ]
[ "Scripts/lines.py" ]
[ "import numpy as np\n\nclass lines:\n\n maxNum = 50\n threshold = 1\n insist = True\n\n def __init__(self):\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_xfitted = []\n #average x values of the fitted line over the last n iterations\n self.bestx = None\n #polynomial coefficients averaged over the last n iterations\n self.best_fit = None\n\n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n #radius of curvature of the line in some units\n self.radius_of_curvature = None\n #distance in meters of vehicle center from the line\n self.line_base_pos = None\n\n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float')\n\n #x values for detected line pixels\n self.allx = None\n #y values for detected line pixels\n self.ally = None\n\n\n\n\n def add_rst(self, detected, fit, radius, bias, linepix, frame):\n\n resonableCurve = self.isReasonable(fit)\n\n if resonableCurve == False:\n self.insist = False\n\n else:\n\n # for starting 50 is to init\n self.recent_xfitted.append(linepix)\n multiplier = min(frame, self.maxNum)\n\n if frame < 2:\n self.bestx =linepix\n self.best_fit = fit\n self.radius_of_curvature = radius\n\n else:\n\n self.insist = True\n\n for index in range(0,2):\n diff = self.best_fit[0][index] - fit[0][index]\n if abs(diff)>self.threshold:\n self.insist = False\n print(\"\\n [Huge Jump] left not inconsist! Redetecting!\", index)\n\n for index in range(0,2):\n diff = self.best_fit[1][index] - fit[1][index]\n if abs(diff)>self.threshold:\n self.insist = False\n print(\"\\n [Huge Jump] right not insist! Redetecting!\", index)\n\n self.bestx = (self.bestx*multiplier+linepix)/(multiplier+1)\n self.best_fit = ((self.best_fit[0]*multiplier+fit[0])/(multiplier+1), (self.best_fit[1]*multiplier+fit[1])/(multiplier+1))\n self.radius_of_curvature = (self.radius_of_curvature*multiplier+radius)/(multiplier+1)\n\n if frame > self.maxNum:\n self.recent_xfitted.pop(0)\n\n self.line_base_pos = bias\n self.current_fit = fit\n\n return self.insist # return False to redetect\n\n def isReasonable(self, fit):\n\n # check left and right parrell\n diff = abs(fit[0][0]-fit[1][0])\n if diff > 0.01:\n print(\"\\n [OUTLIERS] NOT PARRELL! Discarding\")\n return False\n\n # check if curl too much\n if max(abs(fit[0][0]), abs(fit[1][0])) > 0.01:\n print(\"\\n [OUTLIERS] CRUL TOO MUCH! Discarding\")\n return False\n\n return True\n\n def smooth(self):\n pass\n\n\n\n" ]
[ [ "numpy.array" ] ]
LancerWang001/v8
[ "a0f0ebd7a876e8cb2210115adbfcffe900e99540" ]
[ "tools/callstats.py" ]
[ "#!/usr/bin/env python\n# Copyright 2016 the V8 project authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n'''\nUsage: callstats.py [-h] <command> ...\n\nOptional arguments:\n -h, --help show this help message and exit\n\nCommands:\n run run chrome with --runtime-call-stats and generate logs\n stats process logs and print statistics\n json process logs from several versions and generate JSON\n help help information\n\nFor each command, you can try ./runtime-call-stats.py help command.\n'''\n\n# for py2/py3 compatibility\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport operator\nfrom callstats_groups import RUNTIME_CALL_STATS_GROUPS\n\nimport numpy\nfrom math import sqrt\n\n\nMAX_NOF_RETRIES = 5\n\n\n# Run benchmarks.\n\ndef print_command(cmd_args):\n def fix_for_printing(arg):\n m = re.match(r'^--([^=]+)=(.*)$', arg)\n if m and (' ' in m.group(2) or m.group(2).startswith('-')):\n arg = \"--{}='{}'\".format(m.group(1), m.group(2))\n elif ' ' in arg:\n arg = \"'{}'\".format(arg)\n return arg\n print(\" \".join(map(fix_for_printing, cmd_args)))\n\n\ndef start_replay_server(args, sites, discard_output=True):\n with tempfile.NamedTemporaryFile(prefix='callstats-inject-', suffix='.js',\n mode='wt', delete=False) as f:\n injection = f.name\n generate_injection(f, sites, args.refresh)\n http_port = 4080 + args.port_offset\n https_port = 4443 + args.port_offset\n cmd_args = [\n args.replay_bin,\n \"--port=%s\" % http_port,\n \"--ssl_port=%s\" % https_port,\n \"--no-dns_forwarding\",\n \"--use_closest_match\",\n \"--no-diff_unknown_requests\",\n \"--inject_scripts=deterministic.js,{}\".format(injection),\n args.replay_wpr,\n ]\n print(\"=\" * 80)\n print_command(cmd_args)\n if discard_output:\n with open(os.devnull, 'w') as null:\n server = subprocess.Popen(cmd_args, stdout=null, stderr=null)\n else:\n server = subprocess.Popen(cmd_args)\n print(\"RUNNING REPLAY SERVER: %s with PID=%s\" % (args.replay_bin, server.pid))\n print(\"=\" * 80)\n return {'process': server, 'injection': injection}\n\n\ndef stop_replay_server(server):\n print(\"SHUTTING DOWN REPLAY SERVER %s\" % server['process'].pid)\n server['process'].terminate()\n os.remove(server['injection'])\n\n\ndef generate_injection(f, sites, refreshes=0):\n print(\"\"\"\\\n(function() {\n var s = window.sessionStorage.getItem(\"refreshCounter\");\n var refreshTotal = \"\"\", refreshes, \"\"\";\n var refreshCounter = s ? parseInt(s) : refreshTotal;\n var refreshId = refreshTotal - refreshCounter;\n if (refreshCounter > 0) {\n window.sessionStorage.setItem(\"refreshCounter\", refreshCounter-1);\n }\n function match(url, item) {\n if ('regexp' in item) { return url.match(item.regexp) !== null };\n var url_wanted = item.url;\n /* Allow automatic redirections from http to https. */\n if (url_wanted.startsWith(\"http://\") && url.startsWith(\"https://\")) {\n url_wanted = \"https://\" + url_wanted.substr(7);\n }\n return url.startsWith(url_wanted);\n };\n function onLoad(url) {\n for (var item of sites) {\n if (!match(url, item)) continue;\n var timeout = 'timeline' in item ? 2000 * item.timeline\n : 'timeout' in item ? 
1000 * (item.timeout - 3)\n : 10000;\n console.log(\"Setting time out of \" + timeout + \" for: \" + url);\n window.setTimeout(function() {\n console.log(\"Time is out for: \" + url);\n var msg = \"STATS: (\" + refreshId + \") \" + url;\n %GetAndResetRuntimeCallStats(1, msg);\n if (refreshCounter > 0) {\n console.log(\n \"Refresh counter is \" + refreshCounter + \", refreshing: \" + url);\n window.location.reload();\n }\n }, timeout);\n return;\n }\n console.log(\"Ignoring: \" + url);\n };\n var sites =\n \"\"\", json.dumps(sites), \"\"\";\n onLoad(window.location.href);\n})();\"\"\", file=f)\n\ndef get_chrome_flags(js_flags, user_data_dir, arg_delimiter=\"\"):\n return [\n \"--no-default-browser-check\",\n \"--no-sandbox\",\n \"--disable-translate\",\n \"--enable-benchmarking\",\n \"--enable-stats-table\",\n \"--js-flags={}{}{}\".format(arg_delimiter, js_flags, arg_delimiter),\n \"--no-first-run\",\n \"--user-data-dir={}{}{}\".format(arg_delimiter, user_data_dir,\n arg_delimiter),\n \"--data-path={}{}{}\".format(arg_delimiter,\n os.path.join(user_data_dir, 'content-shell-data'), arg_delimiter),\n ]\n\ndef get_chrome_replay_flags(args, arg_delimiter=\"\"):\n http_port = 4080 + args.port_offset\n https_port = 4443 + args.port_offset\n return [\n \"--host-resolver-rules=%sMAP *:80 localhost:%s, \" \\\n \"MAP *:443 localhost:%s, \" \\\n \"EXCLUDE localhost%s\" % (\n arg_delimiter, http_port, https_port,\n arg_delimiter),\n \"--ignore-certificate-errors\",\n \"--disable-seccomp-sandbox\",\n \"--disable-web-security\",\n \"--reduce-security-for-testing\",\n \"--allow-insecure-localhost\",\n ]\n\ndef run_site(site, domain, args, timeout=None):\n print(\"=\"*80)\n print(\"RUNNING DOMAIN %s\" % domain)\n print(\"=\"*80)\n result_template = \"{domain}#{count}.txt\" if args.repeat else \"{domain}.txt\"\n count = 0\n if timeout is None: timeout = args.timeout\n if args.replay_wpr:\n timeout *= 1 + args.refresh\n timeout += 1\n retries_since_good_run = 0\n while count == 0 or args.repeat is not None and count < args.repeat:\n count += 1\n result = result_template.format(domain=domain, count=count)\n retries = 0\n while args.retries is None or retries < args.retries:\n retries += 1\n try:\n if args.user_data_dir:\n user_data_dir = args.user_data_dir\n else:\n user_data_dir = tempfile.mkdtemp(prefix=\"chr_\")\n js_flags = \"--runtime-call-stats\"\n if args.replay_wpr: js_flags += \" --allow-natives-syntax\"\n if args.js_flags: js_flags += \" \" + args.js_flags\n chrome_flags = get_chrome_flags(js_flags, user_data_dir)\n if args.replay_wpr:\n chrome_flags += get_chrome_replay_flags(args)\n else:\n chrome_flags += [ \"--single-process\", ]\n if args.chrome_flags:\n chrome_flags += args.chrome_flags.split()\n cmd_args = [\n \"timeout\", str(timeout),\n args.with_chrome\n ] + chrome_flags + [ site ]\n print(\"- \" * 40)\n print_command(cmd_args)\n print(\"- \" * 40)\n with open(result, \"wt\") as f:\n with open(args.log_stderr or os.devnull, 'at') as err:\n status = subprocess.call(cmd_args, stdout=f, stderr=err)\n # 124 means timeout killed chrome, 0 means the user was bored first!\n # If none of these two happened, then chrome apparently crashed, so\n # it must be called again.\n if status != 124 and status != 0:\n print(\"CHROME CRASHED, REPEATING RUN\");\n continue\n # If the stats file is empty, chrome must be called again.\n if os.path.isfile(result) and os.path.getsize(result) > 0:\n if args.print_url:\n with open(result, \"at\") as f:\n print(file=f)\n print(\"URL: {}\".format(site), file=f)\n 
retries_since_good_run = 0\n break\n if retries_since_good_run > MAX_NOF_RETRIES:\n # Abort after too many retries, no point in ever increasing the\n # timeout.\n print(\"TOO MANY EMPTY RESULTS ABORTING RUN\")\n return\n timeout += 2 ** retries_since_good_run\n retries_since_good_run += 1\n print(\"EMPTY RESULT, REPEATING RUN ({})\".format(\n retries_since_good_run));\n finally:\n if not args.user_data_dir:\n shutil.rmtree(user_data_dir)\n\n\ndef read_sites_file(args):\n try:\n sites = []\n try:\n with open(args.sites_file, \"rt\") as f:\n for item in json.load(f):\n if 'timeout' not in item:\n # This is more-or-less arbitrary.\n item['timeout'] = int(1.5 * item['timeline'] + 7)\n if item['timeout'] > args.timeout: item['timeout'] = args.timeout\n sites.append(item)\n except ValueError:\n args.error(\"Warning: Could not read sites file as JSON, falling back to \"\n \"primitive file\")\n with open(args.sites_file, \"rt\") as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'): continue\n sites.append({'url': line, 'timeout': args.timeout})\n return sites\n except IOError as e:\n args.error(\"Cannot read from {}. {}.\".format(args.sites_file, e.strerror))\n sys.exit(1)\n\n\ndef read_sites(args):\n # Determine the websites to benchmark.\n if args.sites_file:\n return read_sites_file(args)\n return [{'url': site, 'timeout': args.timeout} for site in args.sites]\n\ndef do_run(args):\n sites = read_sites(args)\n replay_server = start_replay_server(args, sites) if args.replay_wpr else None\n # Disambiguate domains, if needed.\n L = []\n domains = {}\n for item in sites:\n site = item['url']\n domain = None\n if args.domain:\n domain = args.domain\n elif 'domain' in item:\n domain = item['domain']\n else:\n m = re.match(r'^(https?://)?([^/]+)(/.*)?$', site)\n if not m:\n args.error(\"Invalid URL {}.\".format(site))\n continue\n domain = m.group(2)\n entry = [site, domain, None, item['timeout']]\n if domain not in domains:\n domains[domain] = entry\n else:\n if not isinstance(domains[domain], int):\n domains[domain][2] = 1\n domains[domain] = 1\n domains[domain] += 1\n entry[2] = domains[domain]\n L.append(entry)\n try:\n # Run them.\n for site, domain, count, timeout in L:\n if count is not None: domain = \"{}%{}\".format(domain, count)\n print((site, domain, timeout))\n run_site(site, domain, args, timeout)\n finally:\n if replay_server:\n stop_replay_server(replay_server)\n\n\ndef do_run_replay_server(args):\n sites = read_sites(args)\n print(\"- \" * 40)\n print(\"Available URLs:\")\n for site in sites:\n print(\" \"+site['url'])\n print(\"- \" * 40)\n print(\"Launch chromium with the following commands for debugging:\")\n flags = get_chrome_flags(\"--runtime-call-stats --allow-natives-syntax\",\n \"/var/tmp/`date +%s`\", '\"')\n flags += get_chrome_replay_flags(args, \"'\")\n print(\" $CHROMIUM_DIR/out/Release/chrome \" + (\" \".join(flags)) + \" <URL>\")\n print(\"- \" * 40)\n replay_server = start_replay_server(args, sites, discard_output=False)\n try:\n replay_server['process'].wait()\n finally:\n stop_replay_server(replay_server)\n\n\n# Calculate statistics.\n\ndef statistics(data):\n # NOTE(V8:10269): imports moved here to mitigate the outage.\n import scipy\n import scipy.stats\n\n N = len(data)\n average = numpy.average(data)\n median = numpy.median(data)\n low = numpy.min(data)\n high= numpy.max(data)\n if N > 1:\n # evaluate sample variance by setting delta degrees of freedom (ddof) to\n # 1. 
The degree used in calculations is N - ddof\n stddev = numpy.std(data, ddof=1)\n # Get the endpoints of the range that contains 95% of the distribution\n t_bounds = scipy.stats.t.interval(0.95, N-1)\n #assert abs(t_bounds[0] + t_bounds[1]) < 1e-6\n # sum mean to the confidence interval\n ci = {\n 'abs': t_bounds[1] * stddev / sqrt(N),\n 'low': average + t_bounds[0] * stddev / sqrt(N),\n 'high': average + t_bounds[1] * stddev / sqrt(N)\n }\n else:\n stddev = 0\n ci = { 'abs': 0, 'low': average, 'high': average }\n if abs(stddev) > 0.0001 and abs(average) > 0.0001:\n ci['perc'] = t_bounds[1] * stddev / sqrt(N) / average * 100\n else:\n ci['perc'] = 0\n return { 'samples': N, 'average': average, 'median': median,\n 'stddev': stddev, 'min': low, 'max': high, 'ci': ci }\n\n\ndef add_category_total(entries, groups, category_prefix):\n group_data = { 'time': 0, 'count': 0 }\n for group_name, regexp in groups:\n if not group_name.startswith('Group-' + category_prefix): continue\n group_data['time'] += entries[group_name]['time']\n group_data['count'] += entries[group_name]['count']\n entries['Group-' + category_prefix + '-Total'] = group_data\n\n\ndef read_stats(path, domain, args):\n groups = [];\n if args.aggregate:\n groups = [\n ('Group-IC', re.compile(\".*IC_.*\")),\n ('Group-OptimizeBackground', re.compile(\".*OptimizeBackground.*\")),\n ('Group-Optimize',\n re.compile(\"StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*\")),\n ('Group-CompileBackground', re.compile(\"(.*CompileBackground.*)\")),\n ('Group-Compile', re.compile(\"(^Compile.*)|(.*_Compile.*)\")),\n ('Group-ParseBackground', re.compile(\".*ParseBackground.*\")),\n ('Group-Parse', re.compile(\".*Parse.*\")),\n ('Group-Callback', re.compile(\".*Callback.*\")),\n ('Group-API', re.compile(\".*API.*\")),\n ('Group-GC-Custom', re.compile(\"GC_Custom_.*\")),\n ('Group-GC-Background', re.compile(\".*GC.*BACKGROUND.*\")),\n ('Group-GC', re.compile(\"GC_.*|AllocateInTargetSpace\")),\n ('Group-JavaScript', re.compile(\"JS_Execution\")),\n ('Group-Runtime', re.compile(\".*\"))]\n with open(path, \"rt\") as f:\n # Process the whole file and sum repeating entries.\n entries = { 'Sum': {'time': 0, 'count': 0} }\n for group_name, regexp in groups:\n entries[group_name] = { 'time': 0, 'count': 0 }\n for line in f:\n line = line.strip()\n # Discard headers and footers.\n if not line: continue\n if line.startswith(\"Runtime Function\"): continue\n if line.startswith(\"====\"): continue\n if line.startswith(\"----\"): continue\n if line.startswith(\"URL:\"): continue\n if line.startswith(\"STATS:\"): continue\n # We have a regular line.\n fields = line.split()\n key = fields[0]\n time = float(fields[1].replace(\"ms\", \"\"))\n count = int(fields[3])\n if key not in entries: entries[key] = { 'time': 0, 'count': 0 }\n entries[key]['time'] += time\n entries[key]['count'] += count\n # We calculate the sum, if it's not the \"total\" line.\n if key != \"Total\":\n entries['Sum']['time'] += time\n entries['Sum']['count'] += count\n for group_name, regexp in groups:\n if not regexp.match(key): continue\n entries[group_name]['time'] += time\n entries[group_name]['count'] += count\n break\n # Calculate the V8-Total (all groups except Callback)\n group_data = { 'time': 0, 'count': 0 }\n for group_name, regexp in groups:\n if group_name == 'Group-Callback': continue\n group_data['time'] += entries[group_name]['time']\n group_data['count'] += entries[group_name]['count']\n entries['Group-Total-V8'] = group_data\n # Calculate the Parse-Total, Compile-Total and 
Optimize-Total groups\n add_category_total(entries, groups, 'Parse')\n add_category_total(entries, groups, 'Compile')\n add_category_total(entries, groups, 'Optimize')\n # Append the sums as single entries to domain.\n for key in entries:\n if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }\n domain[key]['time_list'].append(entries[key]['time'])\n domain[key]['count_list'].append(entries[key]['count'])\n\n\ndef print_stats(S, args):\n # Sort by ascending/descending time average, then by ascending/descending\n # count average, then by ascending name.\n def sort_asc_func(item):\n return (item[1]['time_stat']['average'],\n item[1]['count_stat']['average'],\n item[0])\n def sort_desc_func(item):\n return (-item[1]['time_stat']['average'],\n -item[1]['count_stat']['average'],\n item[0])\n # Sorting order is in the commend-line arguments.\n sort_func = sort_asc_func if args.sort == \"asc\" else sort_desc_func\n # Possibly limit how many elements to print.\n L = [item for item in sorted(S.items(), key=sort_func)\n if item[0] not in [\"Total\", \"Sum\"]]\n N = len(L)\n if args.limit == 0:\n low, high = 0, N\n elif args.sort == \"desc\":\n low, high = 0, args.limit\n else:\n low, high = N-args.limit, N\n # How to print entries.\n def print_entry(key, value):\n def stats(s, units=\"\"):\n conf = \"{:0.1f}({:0.2f}%)\".format(s['ci']['abs'], s['ci']['perc'])\n return \"{:8.1f}{} +/- {:15s}\".format(s['average'], units, conf)\n print(\"{:>50s} {} {}\".format(\n key,\n stats(value['time_stat'], units=\"ms\"),\n stats(value['count_stat'])\n ))\n # Print and calculate partial sums, if necessary.\n for i in range(low, high):\n print_entry(*L[i])\n if args.totals and args.limit != 0 and not args.aggregate:\n if i == low:\n partial = { 'time_list': [0] * len(L[i][1]['time_list']),\n 'count_list': [0] * len(L[i][1]['count_list']) }\n assert len(partial['time_list']) == len(L[i][1]['time_list'])\n assert len(partial['count_list']) == len(L[i][1]['count_list'])\n for j, v in enumerate(L[i][1]['time_list']):\n partial['time_list'][j] += v\n for j, v in enumerate(L[i][1]['count_list']):\n partial['count_list'][j] += v\n # Print totals, if necessary.\n if args.totals:\n print('-' * 80)\n if args.limit != 0 and not args.aggregate:\n partial['time_stat'] = statistics(partial['time_list'])\n partial['count_stat'] = statistics(partial['count_list'])\n print_entry(\"Partial\", partial)\n print_entry(\"Sum\", S[\"Sum\"])\n print_entry(\"Total\", S[\"Total\"])\n\n\ndef do_stats(args):\n domains = {}\n for path in args.logfiles:\n filename = os.path.basename(path)\n m = re.match(r'^([^#]+)(#.*)?$', filename)\n domain = m.group(1)\n if domain not in domains: domains[domain] = {}\n read_stats(path, domains[domain], args)\n if args.aggregate:\n create_total_page_stats(domains, args)\n for i, domain in enumerate(sorted(domains)):\n if len(domains) > 1:\n if i > 0: print()\n print(\"{}:\".format(domain))\n print('=' * 80)\n domain_stats = domains[domain]\n for key in domain_stats:\n domain_stats[key]['time_stat'] = \\\n statistics(domain_stats[key]['time_list'])\n domain_stats[key]['count_stat'] = \\\n statistics(domain_stats[key]['count_list'])\n print_stats(domain_stats, args)\n\n\n# Create a Total page with all entries summed up.\ndef create_total_page_stats(domains, args):\n total = {}\n def sum_up(parent, key, other):\n sums = parent[key]\n for i, item in enumerate(other[key]):\n if i >= len(sums):\n sums.extend([0] * (i - len(sums) + 1))\n if item is not None:\n sums[i] += item\n # Exclude adwords 
and speedometer pages from aggrigate total, since adwords\n # dominates execution time and speedometer is measured elsewhere.\n excluded_domains = ['adwords.google.com', 'speedometer-angular',\n 'speedometer-jquery', 'speedometer-backbone',\n 'speedometer-ember', 'speedometer-vanilla'];\n # Sum up all the entries/metrics from all non-excluded domains\n for domain, entries in domains.items():\n if domain in excluded_domains:\n continue;\n for key, domain_stats in entries.items():\n if key not in total:\n total[key] = {}\n total[key]['time_list'] = list(domain_stats['time_list'])\n total[key]['count_list'] = list(domain_stats['count_list'])\n else:\n sum_up(total[key], 'time_list', domain_stats)\n sum_up(total[key], 'count_list', domain_stats)\n # Add a new \"Total\" page containing the summed up metrics.\n domains['Total'] = total\n\n# Generate Raw JSON file.\n\ndef _read_logs(args):\n versions = {}\n for path in args.logdirs:\n if os.path.isdir(path):\n for root, dirs, files in os.walk(path):\n version = os.path.basename(root)\n if version not in versions: versions[version] = {}\n for filename in files:\n if filename.endswith(\".txt\"):\n m = re.match(r'^([^#]+)(#.*)?\\.txt$', filename)\n domain = m.group(1)\n if domain not in versions[version]: versions[version][domain] = {}\n read_stats(os.path.join(root, filename),\n versions[version][domain], args)\n\n return versions\n\ndef do_raw_json(args):\n versions = _read_logs(args)\n\n for version, domains in versions.items():\n if args.aggregate:\n create_total_page_stats(domains, args)\n for domain, entries in domains.items():\n raw_entries = []\n for name, value in entries.items():\n # We don't want the calculated sum in the JSON file.\n if name == \"Sum\": continue\n raw_entries.append({\n 'name': name,\n 'duration': value['time_list'],\n 'count': value['count_list'],\n })\n\n domains[domain] = raw_entries\n\n print(json.dumps(versions, separators=(',', ':')))\n\n\n# Generate JSON file.\n\ndef do_json(args):\n versions = _read_logs(args)\n\n for version, domains in versions.items():\n if args.aggregate:\n create_total_page_stats(domains, args)\n for domain, entries in domains.items():\n stats = []\n for name, value in entries.items():\n # We don't want the calculated sum in the JSON file.\n if name == \"Sum\": continue\n entry = [name]\n for x in ['time_list', 'count_list']:\n s = statistics(entries[name][x])\n entry.append(round(s['average'], 1))\n entry.append(round(s['ci']['abs'], 1))\n entry.append(round(s['ci']['perc'], 2))\n stats.append(entry)\n domains[domain] = stats\n print(json.dumps(versions, separators=(',', ':')))\n\n\n# Help.\n\ndef do_help(parser, subparsers, args):\n if args.help_cmd:\n if args.help_cmd in subparsers:\n subparsers[args.help_cmd].print_help()\n else:\n args.error(\"Unknown command '{}'\".format(args.help_cmd))\n else:\n parser.print_help()\n\n\n# Main program, parse command line and execute.\n\ndef coexist(*l):\n given = sum(1 for x in l if x)\n return given == 0 or given == len(l)\n\ndef main():\n parser = argparse.ArgumentParser()\n subparser_adder = parser.add_subparsers(title=\"commands\", dest=\"command\",\n metavar=\"<command>\")\n subparsers = {}\n # Command: run.\n subparsers[\"run\"] = subparser_adder.add_parser(\n \"run\", help=\"Replay websites and collect runtime stats data.\")\n subparsers[\"run\"].set_defaults(\n func=do_run, error=subparsers[\"run\"].error)\n subparsers[\"run\"].add_argument(\n \"--chrome-flags\", type=str, default=\"\",\n help=\"specify additional chrome flags\")\n 
subparsers[\"run\"].add_argument(\n \"--js-flags\", type=str, default=\"\",\n help=\"specify additional V8 flags\")\n subparsers[\"run\"].add_argument(\n \"-u\", \"--user-data-dir\", type=str, metavar=\"<path>\",\n help=\"specify user data dir (default is temporary)\")\n subparsers[\"run\"].add_argument(\n \"-c\", \"--with-chrome\", type=str, metavar=\"<path>\",\n default=\"/usr/bin/google-chrome\",\n help=\"specify chrome executable to use\")\n subparsers[\"run\"].add_argument(\n \"-r\", \"--retries\", type=int, metavar=\"<num>\",\n help=\"specify retries if website is down (default: forever)\")\n subparsers[\"run\"].add_argument(\n \"--no-url\", dest=\"print_url\", action=\"store_false\", default=True,\n help=\"do not include url in statistics file\")\n subparsers[\"run\"].add_argument(\n \"--domain\", type=str, default=\"\",\n help=\"specify the output file domain name\")\n subparsers[\"run\"].add_argument(\n \"-n\", \"--repeat\", type=int, metavar=\"<num>\",\n help=\"specify iterations for each website (default: once)\")\n\n def add_replay_args(subparser):\n subparser.add_argument(\n \"-k\", \"--refresh\", type=int, metavar=\"<num>\", default=0,\n help=\"specify refreshes for each iteration (default: 0)\")\n subparser.add_argument(\n \"--replay-wpr\", type=str, metavar=\"<path>\",\n help=\"use the specified web page replay (.wpr) archive\")\n subparser.add_argument(\n \"--replay-bin\", type=str, metavar=\"<path>\",\n help=\"specify the replay.py script typically located in \" \\\n \"$CHROMIUM/src/third_party/webpagereplay/replay.py\")\n subparser.add_argument(\n \"-f\", \"--sites-file\", type=str, metavar=\"<path>\",\n help=\"specify file containing benchmark websites\")\n subparser.add_argument(\n \"-t\", \"--timeout\", type=int, metavar=\"<seconds>\", default=60,\n help=\"specify seconds before chrome is killed\")\n subparser.add_argument(\n \"-p\", \"--port-offset\", type=int, metavar=\"<offset>\", default=0,\n help=\"specify the offset for the replay server's default ports\")\n subparser.add_argument(\n \"-l\", \"--log-stderr\", type=str, metavar=\"<path>\",\n help=\"specify where chrome's stderr should go (default: /dev/null)\")\n subparser.add_argument(\n \"--sites\", type=str, metavar=\"<URL>\", nargs=\"*\",\n help=\"specify benchmark website\")\n add_replay_args(subparsers[\"run\"])\n\n # Command: replay-server\n subparsers[\"replay\"] = subparser_adder.add_parser(\n \"replay\", help=\"Run the replay server for debugging purposes\")\n subparsers[\"replay\"].set_defaults(\n func=do_run_replay_server, error=subparsers[\"replay\"].error)\n add_replay_args(subparsers[\"replay\"])\n\n # Command: stats.\n subparsers[\"stats\"] = subparser_adder.add_parser(\n \"stats\", help=\"Analize the results file create by the 'run' command.\")\n subparsers[\"stats\"].set_defaults(\n func=do_stats, error=subparsers[\"stats\"].error)\n subparsers[\"stats\"].add_argument(\n \"-l\", \"--limit\", type=int, metavar=\"<num>\", default=0,\n help=\"limit how many items to print (default: none)\")\n subparsers[\"stats\"].add_argument(\n \"-s\", \"--sort\", choices=[\"asc\", \"desc\"], default=\"asc\",\n help=\"specify sorting order (default: ascending)\")\n subparsers[\"stats\"].add_argument(\n \"-n\", \"--no-total\", dest=\"totals\", action=\"store_false\", default=True,\n help=\"do not print totals\")\n subparsers[\"stats\"].add_argument(\n \"logfiles\", type=str, metavar=\"<logfile>\", nargs=\"*\",\n help=\"specify log files to parse\")\n subparsers[\"stats\"].add_argument(\n \"--aggregate\", 
dest=\"aggregate\", action=\"store_true\", default=False,\n help=\"Create aggregated entries. Adds Group-* entries at the toplevel. \" \\\n \"Additionally creates a Total page with all entries.\")\n\n # Command: json.\n subparsers[\"json\"] = subparser_adder.add_parser(\n \"json\", help=\"Collect results file created by the 'run' command into\" \\\n \"a single json file.\")\n subparsers[\"json\"].set_defaults(\n func=do_json, error=subparsers[\"json\"].error)\n subparsers[\"json\"].add_argument(\n \"logdirs\", type=str, metavar=\"<logdir>\", nargs=\"*\",\n help=\"specify directories with log files to parse\")\n subparsers[\"json\"].add_argument(\n \"--aggregate\", dest=\"aggregate\", action=\"store_true\", default=False,\n help=\"Create aggregated entries. Adds Group-* entries at the toplevel. \" \\\n \"Additionally creates a Total page with all entries.\")\n\n # Command: raw-json.\n subparsers[\"raw-json\"] = subparser_adder.add_parser(\n \"raw-json\", help=\"Collect raw results from 'run' command into\" \\\n \"a single json file.\")\n subparsers[\"raw-json\"].set_defaults(\n func=do_raw_json, error=subparsers[\"json\"].error)\n subparsers[\"raw-json\"].add_argument(\n \"logdirs\", type=str, metavar=\"<logdir>\", nargs=\"*\",\n help=\"specify directories with log files to parse\")\n subparsers[\"raw-json\"].add_argument(\n \"--aggregate\", dest=\"aggregate\", action=\"store_true\", default=False,\n help=\"Create aggregated entries. Adds Group-* entries at the toplevel. \" \\\n \"Additionally creates a Total page with all entries.\")\n\n # Command: help.\n subparsers[\"help\"] = subparser_adder.add_parser(\n \"help\", help=\"help information\")\n subparsers[\"help\"].set_defaults(\n func=lambda args: do_help(parser, subparsers, args),\n error=subparsers[\"help\"].error)\n subparsers[\"help\"].add_argument(\n \"help_cmd\", type=str, metavar=\"<command>\", nargs=\"?\",\n help=\"command for which to display help\")\n\n # Execute the command.\n args = parser.parse_args()\n setattr(args, 'script_path', os.path.dirname(sys.argv[0]))\n if args.command == \"run\" and coexist(args.sites_file, args.sites):\n args.error(\"use either option --sites-file or site URLs\")\n sys.exit(1)\n elif args.command == \"run\" and not coexist(args.replay_wpr, args.replay_bin):\n args.error(\"options --replay-wpr and --replay-bin must be used together\")\n sys.exit(1)\n else:\n args.func(args)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" ]
[ [ "scipy.stats.t.interval", "numpy.median", "numpy.max", "numpy.min", "numpy.std", "numpy.average" ] ]
jboilard1994/disentanglement_lib
[ "a64b8b9994a28fafd47ccd866b0318fa30a3c76c" ]
[ "disentanglement_lib/evaluation/metrics/beta_vae_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The DisentanglementLib Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for beta_vae.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import absltest\nfrom disentanglement_lib.data.ground_truth import dummy_data\nfrom disentanglement_lib.evaluation.metrics import beta_vae\nimport numpy as np\n\n\nclass BetaVaeTest(absltest.TestCase):\n\n def test_metric(self):\n ground_truth_data = dummy_data.IdentityObservationsData()\n representation_function = lambda x: x\n random_state = np.random.RandomState(0)\n scores = beta_vae.compute_beta_vae_sklearn(\n ground_truth_data, representation_function, random_state, None, 5,\n 2000, 2000)\n self.assertBetween(scores[\"train_accuracy\"], 0.9, 1.0)\n self.assertBetween(scores[\"eval_accuracy\"], 0.9, 1.0)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "numpy.random.RandomState" ] ]
HuiminHe/BugBot
[ "ac121a37ac0b4858e5ed3849062c9bfaa47cb0fa" ]
[ "test_box.py" ]
[ "from simulator import Simulator, Map, Agent\nfrom devices import Device\nimport numpy as np\nimport simulator_config\n\nenv = Simulator(simulator_config)\nmap = Map()\nmap.get_map_from_geom2d(env, kp=np.array([[-100, 100], [-100, -100], [100, -100], [100, 100]]))\n\nrobot = Agent(env, kp=np.array([[-2, 0], [2, 0]]), color=(1, 0, 0, 0.5), v_max=5)\nrobot.reset(init_state=np.array([0, 40, 0]))\ndevice = Device(env, parent=robot, kp=np.array([[-10, 0], [10, 0]]), color=[0, 1, 0, 1], filled=False)\nwhile True:\n robot.update(v=np.array([5, 0]))\n env._render()\n" ]
[ [ "numpy.array" ] ]
OnsenTamagoYoshi/DeepLearningFromScratch
[ "006f80b63130829b142c04a88632287bcf5a61b0" ]
[ "ch04/gradient_2d.py" ]
[ "# -*- coding: utf-8 -*-\n# cf.http://d.hatena.ne.jp/white_wheels/20100327/p3\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef _numerical_gradient_no_batch(f, x):\n h = 1e-4 #0.0001\n grad = np.zeros_like(x) #xと同じ形状の配列を作成\n \n for idx in range(x.size):\n tmp_val = x[idx]\n #f(x + h)の計算\n x[idx] = tmp_val + h\n fxh1 = f(x)\n \n #f(x - h)の計算\n x[idx] = tmp_val - h\n fxh2 = f(x)\n \n grad[idx] = (fxh1 - fxh2) / (2 * h)\n x[idx] = tmp_val #値を元に戻す\n \n return grad\n\ndef numerical_gradient(f, X):\n if X.ndim == 1:\n return _numerical_gradient_no_batch(f, X)\n else:\n grad = np.zeros_like(X)\n \n for idx, x in enumerate(X):\n grad[idx] = _numerical_gradient_no_batch(f, x)\n \n return grad \n\ndef function_2(x):\n if x.ndim == 1:\n return x[0] ** 2 + x[1] ** 2 #または return np.sum(x**2)\n else:\n return np.sum(x ** 2, axis=1)\n \ndef tangent_line(f, x):\n d = numerical_gradient(f, x)\n print(d)\n y = f(x) - d * x\n return lambda t: d * t + y\n\nif __name__ == '__main__':\n x0 = np.arange(-2, 2.5, 0.25)\n x1 = np.arange(-2, 2.5, 0.25)\n X, Y = np.meshgrid(x0, x1)\n \n X = X.flatten()\n Y = Y.flatten()\n \n grad = numerical_gradient(function_2, np.array([X, Y]))\n \n plt.figure()\n plt.quiver(X, Y, -grad[0], -grad[1], angles=\"xy\", color=\"#666666\") #,headwidth=10,scale=40,color=\"#444444\")\n plt.xlim([-2, 2])\n plt.ylim([-2, 2])\n plt.xlabel('x0')\n plt.xlabel('x1')\n plt.grid()\n plt.legend()\n plt.draw()\n plt.show()\n" ]
[ [ "matplotlib.pylab.xlim", "numpy.zeros_like", "matplotlib.pylab.grid", "numpy.sum", "matplotlib.pylab.quiver", "matplotlib.pylab.legend", "matplotlib.pylab.draw", "matplotlib.pylab.figure", "matplotlib.pylab.show", "numpy.arange", "matplotlib.pylab.xlabel", "matplotlib.pylab.ylim", "numpy.array", "numpy.meshgrid" ] ]
JianhengHou/Medical-Sieve
[ "cafb69054ef98cf2f42229ff73c93b6796f9fa91" ]
[ "Medical_Sieve_Pipeline/Medical_Sieve_Model_Pipeline/estimator.py" ]
[ "from sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import hamming_loss\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import multilabel_confusion_matrix\nfrom sklearn.metrics import roc_auc_score\nimport numpy as np\nimport copy\n\ndef combinations(nums):\n ans = [[]]\n for row in nums:\n curr = []\n for combination in ans:\n for element in row:\n new_combination = copy.deepcopy(combination)\n new_combination.append(element)\n curr.append(new_combination)\n ans = curr\n return ans\n\ndef f1(matrix):\n precision = matrix[1][1]*1.0 / (matrix[0][1] + matrix[1][1])\n recall = matrix[1][1]*1.0 / (matrix[1][0] + matrix[1][1])\n return 2*((precision*recall)/(precision+recall))\n\ndef model_evaluation(val_preds, aspect_vectors, thresholds_set):\n mlb_aspect = MultiLabelBinarizer()\n mlb_aspect.fit([aspect_vectors.columns.values.tolist()]) \n\n max_avg_f1 = 0\n max_hamming_score = 0\n max_exact_accuracy = 0\n max_fuzzy_accuracy = 0\n max_fuzzy_accuracy_pos = 0\n max_exact_accuracy_pos = 0\n max_avg_rocauc = 0\n max_confusion_matrix = None\n max_threshold_set = []\n\n for threshold_set in thresholds_set:\n predict_softmax = np.zeros(aspect_vectors.shape, dtype=int)\n for row_index, row in enumerate(val_preds):\n for index, each in enumerate(row):\n if each >= threshold_set[index]:\n predict_softmax[row_index][index] = 1\n\n hamming_score = 1 - hamming_loss(predict_softmax, aspect_vectors) \n num_fuzzy_match = 0\n num_fuzzy_match_pos = 0\n num_exact_match_pos = 0\n num_pos = 0\n for true, pre in zip(mlb_aspect.inverse_transform(aspect_vectors.values), mlb_aspect.inverse_transform(predict_softmax)):\n if len(true) != 0: \n num_pos += 1\n intersect = set(pre).intersection(set(true))\n if (len(true)>0 and len(pre)>0 and len(intersect) > 0) or (len(true) == 0 and len(pre) == 0):\n num_fuzzy_match += 1\n if len(true)>0 and len(pre)>0 and len(intersect) > 0:\n num_fuzzy_match_pos += 1\n if len(true)>0 and len(pre)>0 and pre == true: \n num_exact_match_pos += 1\n fuzzy_accuracy = num_fuzzy_match*1.0/len(predict_softmax)\n exact_accuracy = accuracy_score(predict_softmax, aspect_vectors)\n fuzzy_accuracy_pos = num_fuzzy_match_pos*1.0/num_pos\n exact_accuracy_pos = num_exact_match_pos*1.0/num_pos\n\n class_f1 = []\n for aspect, confusion_matrix in zip(mlb_aspect.classes_, multilabel_confusion_matrix(aspect_vectors, predict_softmax)):\n # print(aspect, ':',f1(confusion_matrix),'\\n', confusion_matrix, '\\n')\n class_f1.append(f1(confusion_matrix))\n \n rocauc_score = roc_auc_score(aspect_vectors, val_preds, 'weighted')\n if np.mean(class_f1) > max_avg_f1:\n max_threshold_set = threshold_set\n max_avg_f1 = max(max_avg_f1, np.mean(class_f1))\n max_hamming_score = hamming_score\n max_exact_accuracy = exact_accuracy\n max_fuzzy_accuracy = fuzzy_accuracy \n max_exact_accuracy_pos = exact_accuracy_pos\n max_fuzzy_accuracy_pos = fuzzy_accuracy_pos\n max_avg_rocauc = rocauc_score\n max_confusion_matrix = multilabel_confusion_matrix(aspect_vectors, predict_softmax)\n \n print(\"threshold set:\", max_threshold_set)\n print(\"Confusion Matrix for Each Aspect:\\n\" + \"=\"*60)\n print(max_confusion_matrix)\n print(\"Result of Metrics for Evaluation:\\n\" + \"=\"*60)\n print(\"Hamming score:\", max_hamming_score)\n print(\"Exact accuracy:\", max_exact_accuracy)\n print(\"Fuzzy accuracy:\", max_fuzzy_accuracy)\n print(\"Exact accuracy (exclude negative):\", max_exact_accuracy_pos )\n print(\"Fuzzy accuracy (exclude negative):\", max_fuzzy_accuracy_pos)\n print(\"Average F1 
Score: \", max_avg_f1)\n print(\"ROC AUC Score: \", max_avg_rocauc)\n" ]
[ [ "numpy.zeros", "sklearn.metrics.roc_auc_score", "sklearn.metrics.accuracy_score", "sklearn.preprocessing.MultiLabelBinarizer", "sklearn.metrics.multilabel_confusion_matrix", "sklearn.metrics.hamming_loss", "numpy.mean" ] ]
mike0sv/catalyst
[ "54597a3b3d78e5b6c3084dfc3c28185600c79c90" ]
[ "catalyst/rl/scripts/run_samplers.py" ]
[ "#!/usr/bin/env python\n# isort:skip_file\n\nimport os\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\n\nimport argparse # noqa E402\nimport atexit # noqa E402\nimport copy # noqa E402\nimport multiprocessing as mp # noqa E402\nimport time # noqa E402\n\nimport torch # noqa E402\ntorch.set_num_threads(1)\n\nfrom catalyst.rl.core import ( # noqa E402\n ExplorationHandler, Sampler, ValidSampler\n)\nfrom catalyst.rl.registry import ( # noqa E402\n DATABASES, ENVIRONMENTS, OFFPOLICY_ALGORITHMS, ONPOLICY_ALGORITHMS\n)\nfrom catalyst.rl.scripts.misc import ( # noqa E402\n OFFPOLICY_ALGORITHMS_NAMES, ONPOLICY_ALGORITHMS_NAMES\n)\nfrom catalyst.utils import ( # noqa E402\n boolean_flag, prepare_cudnn, set_global_seed\n)\nfrom catalyst.utils.config import parse_args_uargs # noqa E402\nfrom catalyst.utils.scripts import import_module # noqa E402\n\n\ndef build_args(parser):\n parser.add_argument(\n \"--config\",\n \"--configs\",\n \"-C\",\n nargs=\"+\",\n help=\"path to config/configs\",\n metavar=\"CONFIG_PATH\",\n dest=\"configs\",\n required=True\n )\n parser.add_argument(\"--expdir\", type=str, default=None)\n parser.add_argument(\"--logdir\", type=str, default=None)\n parser.add_argument(\"--resume\", type=str, default=None)\n parser.add_argument(\"--seed\", type=int, default=42)\n\n parser.add_argument(\"--train\", type=int, default=None)\n parser.add_argument(\"--valid\", type=int, default=None)\n parser.add_argument(\"--infer\", type=int, default=None)\n parser.add_argument(\"--vis\", type=int, default=None)\n\n boolean_flag(parser, \"check\", default=False)\n boolean_flag(parser, \"db\", default=True)\n\n parser.add_argument(\"--run-delay\", type=int, default=1)\n boolean_flag(parser, \"daemon\", default=True)\n parser.add_argument(\"--sampler-id\", type=int, default=0)\n\n boolean_flag(\n parser, \"deterministic\",\n default=None,\n help=\"Deterministic mode if running in CuDNN backend\"\n )\n boolean_flag(\n parser, \"benchmark\",\n default=None,\n help=\"Use CuDNN benchmark\"\n )\n\n return parser\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n build_args(parser)\n args, unknown_args = parser.parse_known_args()\n return args, unknown_args\n\n\ndef run_sampler(\n *,\n config,\n logdir,\n algorithm_fn,\n environment_fn,\n visualize,\n mode,\n seed=42,\n id=None,\n resume=None,\n db=True,\n exploration_power=1.0,\n sync_epoch=False\n):\n config_ = copy.deepcopy(config)\n id = 0 if id is None else id\n seed = seed + id\n set_global_seed(seed)\n\n db_server = DATABASES.get_from_params(\n **config.get(\"db\", {}), sync_epoch=sync_epoch\n ) if db else None\n\n env = environment_fn(\n **config_[\"environment\"],\n visualize=visualize,\n mode=mode,\n sampler_id=id,\n )\n agent = algorithm_fn.prepare_for_sampler(env_spec=env, config=config_)\n\n exploration_params = config_[\"sampler\"].pop(\"exploration_params\", None)\n exploration_handler = ExplorationHandler(env=env, *exploration_params) \\\n if exploration_params is not None \\\n else None\n if exploration_handler is not None:\n exploration_handler.set_power(exploration_power)\n\n seeds = dict(\n (k, config_[\"sampler\"].pop(f\"{k}_seeds\", None))\n for k in [\"train\", \"valid\", \"infer\"]\n )\n seeds = seeds[mode]\n\n if algorithm_fn in OFFPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"critic\" if env.discrete_actions else \"actor\"\n elif algorithm_fn in ONPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"actor\"\n else:\n # @TODO: add registry for algorithms, trainers, samplers\n 
raise NotImplementedError()\n\n if mode in [\"valid\"]:\n sampler_fn = ValidSampler\n else:\n sampler_fn = Sampler\n\n monitoring_params = config.get(\"monitoring_params\", None)\n\n sampler = sampler_fn(\n agent=agent,\n env=env,\n db_server=db_server,\n exploration_handler=exploration_handler,\n logdir=logdir,\n id=id,\n mode=mode,\n weights_sync_mode=weights_sync_mode,\n sampler_seed=seed,\n trajectory_seeds=seeds,\n monitoring_params=monitoring_params,\n **config_[\"sampler\"],\n )\n\n if resume is not None:\n sampler.load_checkpoint(filepath=resume)\n\n sampler.run()\n\n\ndef main(args, unknown_args):\n args, config = parse_args_uargs(args, unknown_args)\n set_global_seed(args.seed)\n prepare_cudnn(args.deterministic, args.benchmark)\n\n args.vis = args.vis or 0\n args.infer = args.infer or 0\n args.valid = args.valid or 0\n args.train = args.train or 0\n\n if args.expdir is not None:\n module = import_module(expdir=args.expdir) # noqa: F841\n\n environment_name = config[\"environment\"].pop(\"environment\")\n environment_fn = ENVIRONMENTS.get(environment_name)\n\n algorithm_name = config[\"algorithm\"].pop(\"algorithm\")\n\n if algorithm_name in OFFPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = OFFPOLICY_ALGORITHMS\n sync_epoch = False\n elif algorithm_name in ONPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = ONPOLICY_ALGORITHMS\n sync_epoch = True\n else:\n raise NotImplementedError()\n\n algorithm_fn = ALGORITHMS.get(algorithm_name)\n\n processes = []\n sampler_id = args.sampler_id\n\n def on_exit():\n for p in processes:\n p.terminate()\n\n atexit.register(on_exit)\n\n params = dict(\n seed=args.seed,\n logdir=args.logdir,\n algorithm_fn=algorithm_fn,\n environment_fn=environment_fn,\n config=config,\n resume=args.resume,\n db=args.db,\n sync_epoch=sync_epoch\n )\n\n if args.check:\n mode = \"train\"\n mode = \"valid\" if (args.valid is not None and args.valid > 0) else mode\n mode = \"infer\" if (args.infer is not None and args.infer > 0) else mode\n params_ = dict(\n visualize=(args.vis is not None and args.vis > 0),\n mode=mode,\n id=sampler_id\n )\n run_sampler(**params, **params_)\n return\n\n for i in range(args.vis):\n params_ = dict(\n visualize=True, mode=\"infer\", id=sampler_id, exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.infer):\n params_ = dict(\n visualize=False,\n mode=\"infer\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.valid):\n params_ = dict(\n visualize=False,\n mode=\"valid\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(1, args.train + 1):\n exploration_power = i / args.train\n params_ = dict(\n visualize=False,\n mode=\"train\",\n id=sampler_id,\n exploration_power=exploration_power\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for p in processes:\n p.join()\n\n\nif __name__ == \"__main__\":\n args, unknown_args = parse_args()\n main(args, 
unknown_args)\n" ]
[ [ "torch.set_num_threads" ] ]
todd-deshane/aihwkit
[ "07269e29731f9a6482d25326400437f6bef2fc94" ]
[ "src/aihwkit/nn/modules/linear_mapped.py" ]
[ "# -*- coding: utf-8 -*-\n\n# (C) Copyright 2020, 2021 IBM. All Rights Reserved.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Analog mapped layers.\"\"\"\n\nfrom typing import Optional, Tuple, List\n\nfrom torch import Tensor, cat, split, no_grad\nfrom torch.nn import Linear\n\nfrom aihwkit.nn.functions import AnalogFunction\nfrom aihwkit.nn.modules.base import AnalogModuleBase, RPUConfigAlias\nfrom aihwkit.simulator.configs import SingleRPUConfig\nfrom aihwkit.exceptions import ModuleError\n\n\nclass AnalogLinearMapped(AnalogModuleBase, Linear):\n \"\"\"Linear layer that uses an analog tile.\n\n Linear layer that uses an analog tile during its forward, backward\n and update passes. In contrast to\n :class:`~aihwkit.bb.modules.linear.Linear` the maximal in and/or\n out dimension can be restricted, in which case the linear layer is\n split into multiple parts and computed on multiple tiles of given\n max sizes.\n\n In contrast to :class:`~aihwkit.bb.modules.linear.Linear`, the\n bias vector (if requested) is always handled in digital (floating\n point).\n\n Note:\n Mapping is controlled by the :class:`aihwkit.simulator.configs.utils.MappingParameter`.\n\n Note:\n The tensor parameters of this layer (``.weight`` and ``.bias``) are not\n guaranteed to contain the same values as the internal weights and biases\n stored in the analog tile. Please use ``set_weights`` and\n ``get_weights`` when attempting to read or modify the weight/bias. This\n read/write process can simulate the (noisy and inexact) analog writing\n and reading of the resistive elements.\n\n Args:\n in_features: input vector size (number of columns).\n out_features: output vector size (number of rows).\n rpu_config: resistive processing unit configuration.\n bias: whether to use a bias row on the analog tile or not\n realistic_read_write: whether to enable realistic read/write\n for setting initial weights and read out of weights\n weight_scaling_omega: the weight value where the max\n weight will be scaled to. 
If zero, no weight scaling will\n be performed\n \"\"\"\n # pylint: disable=abstract-method, too-many-locals, too-many-instance-attributes\n\n __constants__ = ['in_features', 'out_features', 'realistic_read_write', 'weight_scaling_omega',\n 'digital_bias', 'analog_bias', 'use_bias']\n in_features: int\n out_features: int\n realistic_read_write: bool\n weight_scaling_omega: float\n digital_bias: bool\n analog_bias: bool\n use_bias: bool\n in_sizes: List[int]\n out_sizes: List[int]\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n rpu_config: Optional[RPUConfigAlias] = None,\n realistic_read_write: bool = False,\n weight_scaling_omega: float = 0.0,\n ):\n\n # Call super() after tile creation, including ``reset_parameters``.\n Linear.__init__(self, in_features, out_features, bias=bias)\n\n # Create tiles\n if rpu_config is None:\n rpu_config = SingleRPUConfig()\n\n AnalogModuleBase.__init__(\n self,\n in_features,\n out_features,\n bias,\n realistic_read_write,\n weight_scaling_omega,\n rpu_config.mapping\n )\n if self.analog_bias:\n raise ModuleError(\"AnalogLinearMapped only supports digital bias.\")\n\n # More than one tile may need to be created. If so, divide\n # weight matrix into equal pieces along input dimension with\n # as many tiles as needed\n max_input_size = rpu_config.mapping.max_input_size\n max_output_size = rpu_config.mapping.max_output_size\n\n self.in_sizes = self.get_split_sizes(in_features, max_input_size)\n self.out_sizes = self.get_split_sizes(out_features, max_output_size)\n\n self.analog_tile_array = []\n for i, in_tile_size in enumerate(self.in_sizes):\n in_tiles = []\n for j, out_tile_size in enumerate(self.out_sizes):\n tile = rpu_config.tile_class(out_tile_size,\n in_tile_size,\n rpu_config,\n bias=self.analog_bias)\n self.register_analog_tile(tile, name=f\"{i}_{j}\")\n in_tiles.append(tile)\n self.analog_tile_array.append(in_tiles)\n\n # Set weights from the reset_parameters\n self.set_weights(self.weight, self.bias)\n\n # Unregister weight/bias as a parameter but keep for sync\n self.unregister_parameter('weight')\n\n if self.analog_bias:\n self.unregister_parameter('bias')\n\n def get_split_sizes(self, size: int, split_max_size: int) -> List[int]:\n \"\"\" Computed the split sizes.\n\n Args:\n size: number of elements of the layer in one dimension\n split_max_size: max size of the split\n\n Returns:\n List of split sizes\n \"\"\"\n if split_max_size <= 0:\n return [size]\n\n n_splits = (size + split_max_size - 1) // split_max_size\n base, extra = divmod(size, n_splits)\n return [base + (i < extra) for i in range(n_splits)]\n\n def set_weights(\n self,\n weight: Tensor,\n bias: Optional[Tensor] = None,\n force_exact: bool = False\n ) -> None:\n \"\"\"Set the weight (and bias) with given Tensors.\n\n This uses an realistic write if the property ``realistic_read_write``\n of the layer is set, unless it is overwritten by ``force_exact``. It\n uses a scaled write if ``weight_scaling_omega`` is positive (see\n :meth:`~aihwkit.simulator.tiles.base.BaseTile.set_weights_scaled`).\n\n Note:\n This is the recommended way for setting the weight/bias matrix of\n the analog tile, as it will correctly store the weights into the\n internal memory. 
Directly writing to ``self.weight`` and\n ``self.bias`` might yield wrong results as they are not always in\n sync with the analog tile Parameters, for performance reasons.\n\n Args:\n weight: weight matrix\n bias: bias vector\n force_exact: forces an exact write to the analog tiles\n\n \"\"\"\n shape = [self.out_features, self.in_features]\n weight = weight.clone().reshape(shape)\n\n realistic = self.realistic_read_write and not force_exact\n\n in_start = in_end = 0\n for in_size, in_tiles in zip(self.in_sizes, self.analog_tile_array):\n in_end += in_size\n out_start = out_end = 0\n for out_size, analog_tile in zip(self.out_sizes, in_tiles):\n out_end += out_size\n\n tile_weight = weight[out_start:out_end, in_start:in_end]\n\n if self.weight_scaling_omega > 0.0:\n analog_tile.set_weights_scaled(tile_weight, None,\n realistic=realistic,\n omega=self.weight_scaling_omega)\n else:\n analog_tile.set_weights(tile_weight, None, realistic=realistic)\n\n out_start = out_end\n in_start = in_end\n\n if self.digital_bias and bias is not None:\n with no_grad():\n self.bias.data[:] = bias[:]\n\n self._sync_weights_from_tile()\n\n def get_weights(self, force_exact: bool = False) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Get the weight (and bias) tensors.\n\n This uses an realistic read if the property ``realistic_read_write`` of\n the layer is set, unless it is overwritten by ``force_exact``. It\n scales the analog weights by the digital alpha scale if\n ``weight_scaling_omega`` is positive (see\n :meth:`~aihwkit.simulator.tiles.base.BaseTile.get_weights_scaled`).\n\n Note:\n This is the recommended way for setting the weight/bias matrix from\n the analog tile, as it will correctly fetch the weights from the\n internal memory. Accessing ``self.weight`` and ``self.bias`` might\n yield wrong results as they are not always in sync with the\n analog tile library, for performance reasons.\n\n Args:\n force_exact: forces an exact read to the analog tiles\n\n Returns:\n tuple: weight matrix, bias vector\n\n \"\"\"\n\n realistic = self.realistic_read_write and not force_exact\n\n weight_lst = []\n for in_tiles in self.analog_tile_array:\n in_tile_weight = []\n for analog_tile in in_tiles:\n if self.weight_scaling_omega > 0.0:\n tile_weight, _ = analog_tile.get_weights_scaled(realistic=realistic)\n else:\n tile_weight, _ = analog_tile.get_weights(realistic=realistic)\n in_tile_weight.append(tile_weight)\n weight_lst.append(cat(in_tile_weight, 0))\n\n weight = cat(weight_lst, 1)\n\n if self.digital_bias:\n with no_grad():\n return weight, self.bias.data.detach().cpu()\n return weight, None\n\n def reset_parameters(self) -> None:\n \"\"\"Reset the parameters (weight and bias).\"\"\"\n super().reset_parameters()\n if self.analog_tile_count():\n self.set_weights(self.weight, self.bias)\n\n def forward(self, x_input: Tensor) -> Tensor:\n \"\"\"Compute the forward pass.\"\"\"\n # pylint: disable=arguments-differ,arguments-renamed\n\n if self.analog_tile_count() == 1:\n out = AnalogFunction.apply(\n self.analog_tile_array[0][0].get_analog_ctx(), x_input,\n self.analog_tile_array[0][0].shared_weights, not self.training)\n\n if self.digital_bias:\n return out + self.bias\n return out\n\n # mapped version\n last_dim = x_input.ndim - 1\n splits = split(x_input, self.in_sizes, dim=last_dim)\n result = None # type: Tensor\n for idx, (x, in_tiles) in enumerate(zip(splits, self.analog_tile_array)):\n out_result = []\n\n for analog_tile in in_tiles:\n output = AnalogFunction.apply(\n analog_tile.get_analog_ctx(), x,\n 
analog_tile.shared_weights, not self.training)\n out_result.append(output)\n\n if idx == 0:\n result = cat(out_result, last_dim)\n else:\n result.add_(cat(out_result, last_dim))\n\n # add bias to final result\n if self.digital_bias:\n return result.add_(self.bias)\n return result\n\n def extra_repr(self) -> str:\n \"\"\"Set the extra representation of the module.\n\n Returns:\n A string with the extra representation.\n \"\"\"\n output = AnalogModuleBase.extra_repr(self)\n output += ', mapping={}'.format((len(self.in_sizes), len(self.out_sizes)))\n\n return output\n\n @classmethod\n def from_digital(\n cls,\n module: Linear,\n rpu_config: Optional[RPUConfigAlias] = None,\n realistic_read_write: bool = False,\n weight_scaling_omega: float = 0.0,\n ) -> 'AnalogLinearMapped':\n \"\"\"Return an AnalogLinearMapped layer from a torch Linear layer.\n\n Args:\n module: The torch module to convert. All layers that are\n defined in the ``conversion_map``.\n rpu_config: RPU config to apply to all converted tiles.\n Applied to all converted tiles.\n realistic_read_write: Whether to use closed-loop programming\n when setting the weights. Applied to all converted tiles.\n weight_scaling_omega: If non-zero, the analog weights will be\n scaled by ``weight_scaling_omega`` divided by the absolute\n maximum value of the original weight matrix.\n\n Note:\n Make sure that the weight max and min settings of the\n device support the desired analog weight range.\n\n Returns:\n an AnalogLinearMapped layer based on the digital Linear ``module``.\n \"\"\"\n analog_module = cls(module.in_features,\n module.out_features,\n module.bias is not None,\n rpu_config,\n realistic_read_write,\n weight_scaling_omega,\n )\n\n analog_module.set_weights(module.weight, module.bias)\n return analog_module\n" ]
[ [ "torch.nn.Linear.__init__", "torch.no_grad", "torch.cat", "torch.split" ] ]
DanielJMaher/compliance-checker
[ "944220a4a7bd0e945d7b4e468ffb524af5eca5b2" ]
[ "compliance_checker/tests/test_suite.py" ]
[ "from pkg_resources import resource_filename\nfrom compliance_checker.suite import CheckSuite\nfrom compliance_checker.base import Result, BaseCheck\nimport numpy as np\nimport unittest\nimport os\n\nstatic_files = {\n '2dim' : resource_filename('compliance_checker', 'tests/data/2dim-grid.nc'),\n 'bad_region' : resource_filename('compliance_checker', 'tests/data/bad_region.nc'),\n 'bad_data_type' : resource_filename('compliance_checker', 'tests/data/bad_data_type.nc'),\n 'test_cdl' : resource_filename('compliance_checker', 'tests/data/test_cdl.cdl'),\n 'test_cdl_nc' : resource_filename('compliance_checker', 'tests/data/test_cdl_nc_file.nc'),\n}\n\n\nclass TestSuite(unittest.TestCase):\n # @see\n # http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/\n\n def shortDescription(self):\n return None\n\n # override __str__ and __repr__ behavior to show a copy-pastable nosetest name for ion tests\n # ion.module:TestClassName.test_function_name\n def __repr__(self):\n name = self.id()\n name = name.split('.')\n if name[0] not in [\"ion\", \"pyon\"]:\n return \"%s (%s)\" % (name[-1], '.'.join(name[:-1]))\n else:\n return \"%s ( %s )\" % (name[-1], '.'.join(name[:-2]) + \":\" + '.'.join(name[-2:]))\n __str__ = __repr__\n\n def test_suite(self):\n # BWA: what's the purpose of this test? Just to see if the suite\n # runs without errors?\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['2dim'])\n cs.run(ds, 'acdd')\n\n def test_unicode_formatting(self):\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['bad_region'])\n score_groups = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in score_groups.items():\n groups, errors = rpair\n score_list, points, out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, points, out_of)\n\n def test_skip_checks(self):\n \"\"\"Tests that checks are properly skipped when specified\"\"\"\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['2dim'])\n # exclude title from the check attributes\n score_groups = cs.run(ds, ['check_high'], 'acdd')\n assert all(sg.name not in {'Conventions', 'title', 'keywords',\n 'summary'} for sg in score_groups['acdd'][0])\n\n def test_group_func(self):\n # This is checking for issue #183, where group_func results in\n # IndexError: list index out of range\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['bad_data_type'])\n score_groups = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in score_groups.items():\n groups, errors = rpair\n score_list, points, out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, points, out_of)\n\n def test_score_grouping(self):\n # Testing the grouping of results for output, which can fail\n # if some assumptions are not met, e.g. 
if a Result object has\n # a value attribute of unexpected type\n cs = CheckSuite()\n res = [\n Result(BaseCheck.MEDIUM, True, 'one'),\n Result(BaseCheck.MEDIUM, (1, 3), 'one'),\n Result(BaseCheck.MEDIUM, None, 'one'),\n Result(BaseCheck.MEDIUM, True, 'two'),\n Result(BaseCheck.MEDIUM, np.isnan(1), 'two') # value is type numpy.bool_\n ]\n score = cs.scores(res)\n self.assertEqual(score[0].name, 'one')\n self.assertEqual(score[0].value, (2, 4))\n self.assertEqual(score[1].name, 'two')\n self.assertEqual(score[1].value, (1, 2))\n\n def test_cdl_file(self):\n # Testing whether you can run compliance checker on a .cdl file\n cs = CheckSuite()\n cs.load_all_available_checkers()\n\n # Load the cdl file\n ds = cs.load_dataset(static_files['test_cdl'])\n vals = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in vals.items():\n groups, errors = rpair\n score_list, cdl_points, cdl_out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, cdl_points, cdl_out_of)\n ds.close()\n\n # Ok now load the nc file that it came from\n ds = cs.load_dataset(static_files['test_cdl_nc'])\n vals = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in vals.items():\n groups, errors = rpair\n score_list, nc_points, nc_out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, nc_points, nc_out_of)\n ds.close()\n\n nc_file_path = static_files['test_cdl'].replace('.cdl', '.nc')\n self.addCleanup(os.remove, nc_file_path)\n\n # Ok the scores should be equal!\n self.assertEqual(nc_points, cdl_points)\n self.assertEqual(nc_out_of, cdl_out_of)\n" ]
[ [ "numpy.isnan" ] ]
gyungchan2110/ImageUtils
[ "618d032122d6eadeec4afc9fc6c6906fa71f0ff6" ]
[ "LungBoundaryCrop.py" ]
[ "# In[]\nimport cv2 \nimport numpy as np \nimport os \nfrom operator import eq\nimport random\nimport matplotlib.pyplot as plt \nfrom skimage import io\nimport shutil\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimgBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original\"\nsrcbase = \"D:/[Data]/[Lung_Segmentation]/WholeDataSetMask\"\n\n#classMaskBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180125_103950_Expand_40pixel/Masks/Mask_Rt Upper CB\"\n#lungMaskBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180324_LungMaskData/Imgs\"\nmaskdstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_LungMask\"\ncropmaskdstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_LungMask_Cropped\"\nmaskcropmaskdstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_LungMask_Cropped_Mask\"\ndstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_Img2Mask_3Channel\"\n\n\n# Img_20180130_175720\n\n# Img_20180130_162001\n# Img_20180130_163512\n# Img_20180130_164744\n\n\nlowerFolders = [\"Normal\", \"Abnormal\"]\n#lowerFolders = [\"1_AS\", \"2_AR\", \"3_MS\", \"4_MR\", \"5_AS+AR\", \"6_MS_MR\"]\nsrcPaths = []\nimgPaths = []\nmaskdstPaths = []\ncropImgsdstPaths = []\nmaskcropImgsdstPaths = []\ndstPath = []\n\n\n\nfor folder in folders: \n\n if(not os.path.isdir(maskdstBase + \"/\" + folder)):\n os.mkdir(maskdstBase + \"/\" + folder)\n\n if(not os.path.isdir(cropmaskdstBase + \"/\" + folder)):\n os.mkdir(cropmaskdstBase + \"/\" + folder)\n\n if(not os.path.isdir(maskcropmaskdstBase + \"/\" + folder)):\n os.mkdir(maskcropmaskdstBase + \"/\" + folder)\n if(not os.path.isdir(dstBase + \"/\" + folder)):\n os.mkdir(dstBase + \"/\" + folder)\n\n for lowerFolder in lowerFolders:\n if(not os.path.isdir(maskdstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(maskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n if(not os.path.isdir(cropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(cropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n if(not os.path.isdir(maskcropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(maskcropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n if(not os.path.isdir(dstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(dstBase + \"/\" + folder + \"/\" + lowerFolder)\n\n\n maskdstPaths.append(maskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n\n cropImgsdstPaths.append(cropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n maskcropImgsdstPaths.append(maskcropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n dstPath.append(dstBase + \"/\" + folder + \"/\" + lowerFolder)\n\n srcPaths.append(srcbase + \"/\" + lowerFolder)\n imgPaths.append(imgBase + \"/\" + folder + \"/\" + lowerFolder)\n\n\ndef run_Modyfying():\n \n for i, imgPath in enumerate(imgPaths) : \n for file in os.listdir(imgPath):\n LungBoundaryCrop(imgPath,srcPaths[i], maskdstPaths[i],cropImgsdstPaths[i],maskcropImgsdstPaths[i], file)\n break\n break\n\ndef LungBoundaryEnhancement(imgPath, maskPath, dstPath, filename):\n\n Img = 
cv2.imread(imgPath + \"/\" + filename, 0)\n Mask = cv2.imread(maskPath + \"/\" + filename, 0)\n Img = cv2.resize(Img, (1024,1024))\n Img = np.asarray(Img)\n Mask = np.asarray(Mask)\n\n Image = np.stack((Img, Img, Mask), -1)\n\n cv2.imwrite(dstPath + \"/\" + filename, Image)\n\n\ndef LungBoundaryCrop(imgPath, srcPath, maskdstPath,cropmaskdstPath, maskcropmaskdstPath, filename): \n \n \n #shutil.copyfile(srcPath + \"/\" + filename, maskdstPath + \"/\" + filename)\n \n maskImg = cv2.imread(maskdstPath + \"/\" + filename, 0)\n maskImg = np.asarray(maskImg, dtype = np.uint8)\n\n _, maskImg = cv2.threshold(maskImg, 127, 255, cv2.THRESH_BINARY)\n _, contours, _ = cv2.findContours(maskImg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n rects = []\n for cnt in contours:\n rects.append(cv2.boundingRect(cnt))\n\n tcomx = 10\n tcomy = 10 \n bcomx = 10 \n bcomy = 10\n\n top_x, top_y, bottom_x, bottom_y = 0, 0 ,0, 0\n\n rects.sort()\n\n top_x = min([x for (x, y, w, h) in rects]) - tcomx #26\n top_y = min([y for (x, y, w, h) in rects]) - tcomy #26\n bottom_x = max([x+w for (x, y, w, h) in rects]) + bcomx #234\n bottom_y = max([y+h for (x, y, w, h) in rects]) + bcomy #227\n \n #print(top_x, top_y, bottom_x, bottom_y)\n\n if(top_x <=0 ) : top_x = tcomx\n if(top_y <=0 ) : top_y = tcomy\n \n if(bottom_x >= 1024 ) : bottom_x = 1024 - tcomx\n if(bottom_y >= 1024 ) : bottom_y = 1024 - tcomy\n\n print((top_x + bottom_x)/2, (top_y + bottom_y)/2)\n center_shift_x = 512 - (int)((top_x + bottom_x)/2)\n center_shift_y = 512 - (int)((top_y + bottom_y)/2)\n\n\n # maskCrop = maskImg[top_y:bottom_y, top_x:bottom_x]\n # maskCrop = cv2.resize(maskCrop, (1024,1024))\n # cv2.imwrite(maskcropmaskdstPath + \"/\" + filename, maskCrop)\n\n Img = cv2.imread(imgPath + \"/\" + filename)\n Img = np.asarray(Img)\n Img = cv2.resize(Img, (1024,1024))\n # ImgCrop = Img[top_y*2:bottom_y*2, top_x*2:bottom_x*2, :]\n # ImgCrop = cv2.resize(ImgCrop, (1024,1024))\n # cv2.imwrite(cropmaskdstPath + \"/\" + filename, ImgCrop)\n # print(imgPath + \"/\" + filename)\n Img_Shifted = np.zeros(Img.shape)\n #Img_Shifted = Img_Shifted * 255\n Img_Shifted[:1024+center_shift_y, center_shift_x:] = Img[-center_shift_y:, :1024-center_shift_x]\n cv2.imwrite(\"D:/Temp/Shifted.png\", Img_Shifted)\n cv2.imwrite(\"D:/Temp/Original.png\", Img)\nrun_Modyfying()" ]
[ [ "numpy.stack", "numpy.asarray", "numpy.zeros" ] ]
jerbaroo/bridge-sim
[ "c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af" ]
[ "bridge_sim/internal/plot/geometry/node.py" ]
[ "from typing import List\n\nimport numpy as np\n\nfrom bridge_sim.internal.plot import plt\nfrom bridge_sim.internal.plot.geometry.angles import ax_3d\nfrom bridge_sim.sim.model import Node\n\n\ndef node_scatter_3d(nodes: List[Node], new_fig: bool = True):\n # Split into separate arrays of x, y and z position, and colors.\n xs = np.array([n.x for n in nodes])\n ys = np.array([n.y for n in nodes])\n zs = np.array([n.z for n in nodes])\n\n # Setup a new 3D landscape figure.\n if new_fig:\n fig, ax = ax_3d(xs=xs, ys=zs, zs=ys)\n else:\n ax = plt.gca()\n\n ax.scatter(xs, zs, ys, marker=\"o\", s=1)\n" ]
[ [ "numpy.array" ] ]
dravog7/nboost
[ "e0c086db2eaa8601c20244c81d8f5483b7491902" ]
[ "tests/unit/test_onnx_bert_rerank.py" ]
[ "from nboost.plugins.models import resolve_model\nfrom nboost import defaults\nimport unittest\nimport numpy as np\n\n\nclass TestPtBertRerankModelPlugin(unittest.TestCase):\n def setUp(self):\n self.model = resolve_model(\n model_dir='onnx-bert-base-msmarco',\n data_dir=defaults.data_dir,\n model_cls=''\n )\n self.pt_model = resolve_model(\n model_dir='pt-bert-base-uncased-msmarco',\n data_dir=defaults.data_dir,\n model_cls=''\n )\n\n def test_rank(self):\n QUERY = 'O wherefore art thou'\n ranks, scores = self.model.rank(QUERY, CHOICES)\n self.assertEqual(self.model.__class__.__name__, 'ONNXBertRerankModelPlugin')\n self.assertIsInstance(ranks, list)\n self.assertEqual(6, len(ranks))\n pt_ranks, pt_scores = self.pt_model.rank(QUERY, CHOICES)\n assert np.allclose(pt_scores, scores, rtol=1e-04, atol=1e-05)\n\n def tearDown(self) -> None:\n self.model.close()\n\n\nCHOICES = [\n 'From fairest creatures we desire increase' * 4,\n 'That thereby beautys rose might never die' * 4,\n 'But as the riper should by time decease' * 4,\n 'His tender heir might bear his memory:' * 4,\n 'But thou contracted to thine own bright eyes' * 4,\n 'Feedst thy lights flame with self-substantial fuel' * 4,\n]" ]
[ [ "numpy.allclose" ] ]
mpewsey/civpy
[ "bbf74b1c04ca9f7604831f5280cc80d796240e67" ]
[ "civpy/survey/alignment.py" ]
[ "\"\"\"\nCopyright (c) 2019, Matt Pewsey\n\"\"\"\n\nimport attr\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .spatial_hash import SpatialHash\n\n__all__ = ['Alignment']\n\n\[email protected](hash=False)\nclass Alignment(object):\n \"\"\"\n A class representing a survey alignment.\n\n Parameters\n ----------\n name : str\n Name of alignment.\n pis : list\n A list of :class:`.PI`.\n stakes : list\n A list of :class:`.SurveyStake`.\n grid : float\n The grid size used for spatial hash generation.\n view_offset : float\n The offset beyond which points will be ignored when generating station\n coordinates from global coordinates.\n view_margin : float\n The station margin at the beginning and end of the alignment. Beyond\n this threshold, generated station coordinates from global coordinates\n will be ignored.\n\n Examples\n --------\n .. plot:: ../examples/survey/alignment_ex1.py\n :include-source:\n \"\"\"\n # Global class variables\n BISC_TOL = 1e-4 # Bisector station tolerance\n\n # Properties\n name = attr.ib()\n pis = attr.ib(default=[])\n stakes = attr.ib(default=[])\n grid = attr.ib(default=10)\n view_offset = attr.ib(default=15)\n view_margin = attr.ib(default=15)\n\n def set_stake_xy(self):\n \"\"\"\n Sets the xy coordinates for all station stakes assigned to the\n alignment.\n \"\"\"\n obj = []\n p = []\n\n for x in self.stakes:\n if x._type == 'station':\n obj.append(x)\n p.append((x.station, x.offset, x.rotation))\n\n p = np.array(p)\n c, s = np.cos(p[:,2]), np.sin(p[:,2])\n c, s = np.column_stack([c, -s]), np.column_stack([s, c])\n\n b = self.coordinates(p[:,0])\n p = self.coordinates(p[:,:2])\n p -= b\n\n c = np.einsum('ij,ij->i', p, c)\n s = np.einsum('ij,ij->i', p, s)\n p = np.column_stack([c, s])\n p += b\n\n for a, b in zip(obj, p):\n a[:2] = b\n\n def pi_coordinates(self):\n \"\"\"\n Returns an array of PI coordinates of shape (N, 3).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n return np.array(self.pis, dtype='float')\n\n def pi_radii(self):\n \"\"\"\n Returns an array of PI horizontal curve radii of shape (N,).\n \"\"\"\n return np.array([x.radius for x in self.pis], dtype='float')\n\n def azimuths(self):\n \"\"\"\n Returns an array of alignment azimuths in the shape (N,). Each element\n of the array corresponds to a PI index and represents the azimuth of\n the alignment ahead of that PI.\n \"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n elif len(self.pis) == 1:\n return np.zeros(1, dtype='float')\n\n x = self.pi_coordinates()\n dx = x[1:,:2] - x[:-1,:2]\n az = np.arctan2(dx[:,0], dx[:,1])\n az = np.append(az, az[-1])\n\n return np.asarray(az, dtype='float')\n\n def deflection_angles(self):\n \"\"\"\n Returns an array of PI deflection angles in the shape (N,). The angle\n is negative for turns to the left and positive for turns to the right.\n \"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n elif len(self.pis) == 1:\n return np.zeros(1, dtype='float')\n\n az = self.azimuths()\n da = az[1:] - az[:-1]\n i = (np.abs(da) > np.pi)\n da[i] -= 2 * np.pi * np.sign(da[i])\n da = np.insert(da, 0, 0)\n\n return np.asarray(da, dtype='float')\n\n def tangent_ordinates(self):\n \"\"\"\n Returns an array of tangent ordinates corresponding to each PI\n in the shape (N,). 
This value is the horizontal distance between\n the PI and PC and PI and PT.\n \"\"\"\n r = self.pi_radii()\n da = self.deflection_angles()\n return r * np.abs(np.tan(da/2))\n\n def curve_lengths(self):\n \"\"\"\n Returns an array of horizontal curve lengths corresponding to each PI\n in teh shape (N,). This value is the station distance between the\n PC and PT.\n \"\"\"\n r = self.pi_radii()\n da = self.deflection_angles()\n return r * np.abs(da)\n\n def middle_ordinates(self):\n \"\"\"\n Returns an array of middle ordinate distances corresponding to each PI\n in the shape (N,). This value is the horizontal distance between the\n MPC and midpoint of the chord line between the PC and PT.\n \"\"\"\n r = self.pi_radii()\n da = np.abs(self.deflection_angles())\n return r * (1 - np.cos(da/2))\n\n def external_ordinates(self):\n \"\"\"\n Returns an array of external ordinates corresponding to each PI\n in the shape (N,). This is the horizontal distance between the\n MPC and PI.\n \"\"\"\n r = self.pi_radii()\n da = self.deflection_angles()\n return r * np.abs(np.tan(da/2) * np.tan(da/4))\n\n def chord_distances(self):\n \"\"\"\n Returns an array of chord distances corresponding to each PI\n in teh shape (N,). This is the straight line horizontal distance\n between the PC and PT.\n \"\"\"\n r = self.pi_radii()\n da = np.abs(self.deflection_angles())\n return 2 * r * np.sin(da/2)\n\n def pt_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Point of Tangents (PT)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n t = self.tangent_ordinates()\n t = np.expand_dims(t, 1)\n uv = np.column_stack([np.sin(az), np.cos(az)])\n pt = pi[:,:2] + t * uv\n\n return np.asarray(pt, dtype='float')\n\n def pc_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Point of Curves (PC)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n da = self.deflection_angles()\n t = self.tangent_ordinates()\n t = np.expand_dims(t, 1)\n az -= da\n uv = np.column_stack([np.sin(az), np.cos(az)])\n pc = pi[:,:2] - t * uv\n\n return np.asarray(pc, dtype='float')\n\n def mpc_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Midpoint of Curves (MPC)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n da = self.deflection_angles()\n e = self.external_ordinates()\n az += (np.pi - da) / 2\n da = np.expand_dims(da, 1)\n e = np.expand_dims(e, 1)\n uv = np.column_stack([np.sin(az), np.cos(az)])\n mpc = pi[:,:2] + np.sign(da) * e * uv\n\n return np.asarray(mpc, dtype='float')\n\n def rp_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Radius Points (RP)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n da = self.deflection_angles()\n e = self.external_ordinates()\n e = np.expand_dims(e, 1)\n r = self.pi_radii()\n r = np.expand_dims(r, 1)\n az += (np.pi - da) / 2\n uv = np.column_stack([np.sin(az), np.cos(az)])\n da = np.expand_dims(da, 1)\n rp = pi[:,:2] + np.sign(da) * (e + r) * uv\n\n return np.asarray(rp, dtype='float')\n\n def pt_stations(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Point of Tangents (PT)\n in the shape (N, 2).\n 
\"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n x = self.pi_coordinates()\n tan = self.tangent_ordinates()\n dist = np.linalg.norm(x[:-1,:2] - x[1:,:2], axis=1)\n dist = np.insert(dist, 0, 0)\n dist += self.curve_lengths() - tan\n sta = np.cumsum(dist)\n sta[1:] -= np.cumsum(tan[:-1])\n\n return np.asarray(sta, dtype='float')\n\n def pc_stations(self):\n \"\"\"\n Returns an array of stations for the Point of Curves (PC) in the\n shape (N,).\n \"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n sta = self.pt_stations() - self.curve_lengths()\n return np.asarray(sta, dtype='float')\n\n def mpc_stations(self):\n \"\"\"\n Returns an array of stations for the Midpoint of Curves (MPC)\n in the shape (N,).\n \"\"\"\n return 0.5 * (self.pt_stations() + self.pc_stations())\n\n def poc_transforms(self):\n \"\"\"\n Returns the POC transforms in the shape (N, 2, 2). These transforms\n project (x, y) global coordinates to (offset, station) station\n coordinates relative to the PI angle bisector.\n \"\"\"\n az = self.azimuths()\n da = self.deflection_angles()\n l = az - da / 2\n t = l + np.pi / 2\n t = np.column_stack([np.sin(t), np.cos(t), np.sin(l), np.cos(l)])\n\n return t.reshape(t.shape[0], 2, 2)\n\n def pot_transforms(self):\n \"\"\"\n Returns the POT transforms in the shape (N, 2, 2). These transforms\n project (x, y) global coordinates to (offset, station) station\n coordinates relative to the tangent line between PI's.\n \"\"\"\n l = self.azimuths()\n t = l + np.pi / 2\n t = np.column_stack([np.sin(t), np.cos(t), np.sin(l), np.cos(l)])\n return t.reshape(t.shape[0], 2, 2)\n\n def segment_indices(self, stations):\n \"\"\"\n Determines the segment type and PI indices corresponding to the\n specified stations. Returns an array of shape (N, 2). The first column\n of the array contains 1 if the station is located along an alignment\n tangent or 2 if the station is located on a horizontal curve or\n alignment bisector. 
The second column contains the index corresponding\n to the PI where the point is located.\n\n Parameters\n ----------\n stations : array\n An array of stations of shape (N,).\n \"\"\"\n sta = np.asarray(stations)\n pc_sta = self.pc_stations()\n pt_sta = self.pt_stations()\n s = SpatialHash(np.expand_dims(sta, 1), self.grid)\n\n # Set values beyond alignment limits\n r = np.zeros((sta.shape[0], 2), dtype='int')\n r[sta < 0] = 1, 0\n r[sta > pt_sta[-1]] = 1, pt_sta.shape[0]-1\n\n # POT segments\n ah = np.expand_dims(pc_sta[1:], 1)\n bk = np.expand_dims(pt_sta[:-1], 1)\n\n for i, (a, b) in enumerate(zip(ah, bk)):\n f = s.query_range(b, a, 0)\n r[f] = 1, i\n\n # POC segments\n f = (self.curve_lengths() == 0)\n pc_sta[f] -= Alignment.BISC_TOL\n pt_sta[f] += Alignment.BISC_TOL\n\n ah = np.expand_dims(pt_sta[1:-1], 1)\n bk = np.expand_dims(pc_sta[1:-1], 1)\n\n for i, (a, b) in enumerate(zip(ah, bk)):\n f = s.query_range(b, a, 0)\n r[f] = 2, i+1\n\n return r\n\n def _pot_coordinates(self, result, seg, sta_coords):\n \"\"\"\n Assigns the POT coordinates for :meth:`.coordinates`.\n\n Parameters\n ----------\n result : array\n The array to which the results will be added.\n seg : array\n The segment indices array.\n sta_coords : array\n An array of station coordinates of shape (N, 2).\n \"\"\"\n f = (seg[:,0] == 1)\n\n if not f.any():\n return\n\n sta = np.expand_dims(sta_coords[f,0], 1)\n off = np.expand_dims(sta_coords[f,1], 1)\n\n i = seg[f,1]\n t = self.pot_transforms()[i]\n tx, ty = t[:,0], t[:,1]\n pt_coord = self.pt_coordinates()[i]\n pt_sta = np.expand_dims(self.pt_stations()[i], 1)\n\n result[f] = tx * off + ty * (sta - pt_sta) + pt_coord\n\n def _poc_bisc_coordinates(self, result, seg, sta_coords):\n \"\"\"\n Assigns the POC bisector coordinates for :meth:`.coordinates`.\n\n Parameters\n ----------\n result : array\n The array to which the results will be added.\n seg : array\n The segment indices array.\n sta_coords : array\n An array of station coordinates of shape (N, 2).\n \"\"\"\n f = (seg[:,0] == 2) & (self.curve_lengths() == 0)[seg[:,1]]\n\n if not f.any():\n return\n\n off = np.expand_dims(sta_coords[f,1], 1)\n\n i = seg[f,1]\n tx = self.poc_transforms()[i,0]\n rp_coord = self.rp_coordinates()[i]\n\n result[f] = tx * off + rp_coord\n\n def _poc_curve_coordinates(self, result, seg, sta_coords):\n \"\"\"\n Assigns the POC curve coordinates for :meth:`.coordinates`.\n\n Parameters\n ----------\n result : array\n The array to which the results will be added.\n seg : array\n The segment indices array.\n sta_coords : array\n An array of station coordinates of shape (N, 2).\n \"\"\"\n l = self.curve_lengths()\n f = (seg[:,0] == 2) & (l != 0)[seg[:,1]]\n\n if not f.any():\n return\n\n sta = sta_coords[f,0]\n off = sta_coords[f,1]\n\n i = seg[f,1]\n tx = self.poc_transforms()[i,0]\n mpc_sta = self.mpc_stations()[i]\n rp_coord = self.rp_coordinates()[i]\n da = self.deflection_angles()[i]\n r = np.expand_dims(self.pi_radii()[i], 1)\n\n beta = da * (mpc_sta - sta) / l[i]\n c, s = np.cos(beta), np.sin(beta)\n c, s = np.column_stack([c, -s]), np.column_stack([s, c])\n\n c = np.einsum('ij,ij->i', tx, c)\n s = np.einsum('ij,ij->i', tx, s)\n\n tx = np.column_stack([c, s])\n da = np.sign(np.expand_dims(da, 1))\n off = np.expand_dims(off, 1)\n\n result[f] = tx * (off - da * r) + rp_coord\n\n def coordinates(self, sta_coords):\n \"\"\"\n Returns the (x, y) or (x, y, z) global coordinates corresponding\n to the input station coordinates. 
Result is in the shape of (N, 2)\n of (N, 3).\n\n Parameters\n ----------\n sta_coords : array\n An array of (station), (station, offset), or (station, offset, z)\n coordinates of the shape (N,), (N, 2) or (N, 3).\n \"\"\"\n sta_coords = np.asarray(sta_coords)\n\n # If shape is (N,), add zero offsets\n if len(sta_coords.shape) == 1:\n sta_coords = np.column_stack([sta_coords, np.zeros(sta_coords.shape[0])])\n\n result = np.zeros((sta_coords.shape[0], 2), dtype='float')\n seg = self.segment_indices(sta_coords[:,0])\n\n self._pot_coordinates(result, seg, sta_coords)\n self._poc_bisc_coordinates(result, seg, sta_coords)\n self._poc_curve_coordinates(result, seg, sta_coords)\n\n # Add z coordinate to result if available\n if sta_coords.shape[1] == 3:\n result = np.column_stack([result, sta_coords[:,2]])\n\n return np.asarray(result, dtype='float')\n\n def _pot_station_coordinates(self, result, spatial_hash, coords):\n \"\"\"\n Adds the POT station coordinates within the view.\n\n Parameters\n ----------\n result : dict\n The dictionary to which the results will be added.\n spatial_hash : array\n The spatial hash.\n coords : array\n An array of coordinates of shape (N, 2) or (N, 3).\n \"\"\"\n t = self.pot_transforms()\n pt_sta = self.pt_stations()\n pt_coord = self.pt_coordinates()\n\n bk = self.pt_coordinates()[:-1]\n ah = self.pc_coordinates()[1:]\n\n if t.shape[0] > 0:\n bk[0] -= self.view_margin * t[0, 1]\n ah[-1] += self.view_margin * t[-1, 1]\n\n for i, (a, b) in enumerate(zip(ah, bk)):\n f = spatial_hash.query_range(b, a, self.view_offset)\n\n if f.shape[0] == 0:\n continue\n\n delta = coords[f,:2] - pt_coord[i]\n sta = np.dot(delta, t[i,1]) + pt_sta[i]\n off = np.dot(delta, t[i,0])\n\n if coords.shape[1] == 3:\n p = np.column_stack([sta, off, coords[f,2]])\n else:\n p = np.column_stack([sta, off])\n\n for n, m in enumerate(f):\n if m not in result:\n result[m] = []\n result[m].append(p[n])\n\n def _poc_station_coordinates(self, result, spatial_hash, coords):\n \"\"\"\n Adds the POC station coordinates within the view.\n\n Parameters\n ----------\n result : dict\n The dictionary to which the results will be added.\n spatial_hash : array\n The spatial hash.\n coords : array\n An array of coordinates of shape (N, 2) or (N, 3).\n \"\"\"\n l = self.curve_lengths()\n t = self.poc_transforms()\n da = self.deflection_angles()\n pc_sta = self.pc_stations()\n pt_sta = self.pt_stations()\n rp_coord = self.rp_coordinates()\n pt_coord = self.pt_coordinates()\n\n for i in range(1, len(self.pis)-1):\n r = self.pis[i].radius\n ro = r + self.view_offset\n ri = max(r - self.view_offset, 0)\n f = spatial_hash.query_point(rp_coord[i], ro, ri)\n\n if f.shape[0] == 0:\n continue\n\n if l[i] == 0:\n # Angle bisector\n delta = coords[f,:2] - pt_coord[i]\n sta = np.dot(delta, t[i,1]) + pt_sta[i]\n off = np.dot(delta, t[i,0])\n\n g = ((np.abs(off) <= self.view_offset)\n & (sta >= pt_sta[i] - Alignment.BISC_TOL)\n & (sta <= pt_sta[i] + Alignment.BISC_TOL))\n else:\n # Horizontal curve\n delta = pt_coord[i] - rp_coord[i]\n delta = np.arctan2(delta[0], delta[1])\n p = coords[f,:2] - rp_coord[i]\n delta -= np.arctan2(p[:,0], p[:,1])\n\n sta = pt_sta[i] - (l[i] / da[i]) * delta\n off = np.sign(da[i]) * (r - np.linalg.norm(p, axis=1))\n\n g = (sta >= pc_sta[i]) & (sta <= pt_sta[i])\n\n if coords.shape[1] == 3:\n p = np.column_stack([sta, off, coords[f,2]])[g]\n else:\n p = np.column_stack([sta, off])[g]\n\n for n, m in enumerate(f[g]):\n if m not in result:\n result[m] = []\n result[m].append(p[n])\n\n def 
station_coordinates(self, coordinates):\n \"\"\"\n Finds the (station, offset) or (station, offset, z) coordinates\n for the input global coordinates. Returns a dictionary of point\n indices with arrays of shape (N, 2) or (N, 3). If a point index\n is not in the dictionary, then no points are located along\n the alignment within the view threshold.\n\n Parameters\n ----------\n coordinates : array\n An array of (x, y) or (x, y, z) global coordinates in the shape\n (N, 2) or (N, 3).\n \"\"\"\n coordinates = np.asarray(coordinates)\n s = SpatialHash(coordinates[:,:2], self.grid)\n result = {}\n\n self._pot_station_coordinates(result, s, coordinates)\n self._poc_station_coordinates(result, s, coordinates)\n\n for k, x in result.items():\n result[k] = np.array(x, dtype='float')\n\n return result\n\n def plot_plan(self, ax=None, step=1, symbols={}):\n \"\"\"\n Plots a the plan view for the alignment.\n\n Parameters\n ----------\n ax : :class:`matplotlib.axes.Axes`\n The axex to which to add the plot. If None, a new figure and axes\n will be created.\n step : float\n The step interval to use for plotting points along horizontal\n curves.\n symbols : dict\n A dictionary of symbols to use for the plot. The following keys\n are used:\n\n * `pi`: PI point symbol, default is 'r.'\n * `rp`: RP point symbol, default is 'c.'\n * `pc`: PC point symbol, default is 'b.'\n * `pt`: PT point symbol, default is 'b.'\n * `alignment`: Alignment lines, default is 'b-'\n * `stakes`: Stake symbols, default is 'rx'\n\n Examples\n --------\n .. plot:: ../examples/survey/alignment_ex1.py\n :include-source:\n \"\"\"\n if ax is None:\n x = self.pi_coordinates()[:,:2]\n mx = x.max(axis=0)\n c = 0.5 * (mx + x.min(axis=0))\n r = 1.1 * (np.max(mx - c) + self.view_offset + self.view_margin)\n xlim, ylim = np.column_stack([c - r, c + r])\n\n fig = plt.figure()\n ax = fig.add_subplot(111,\n title=self.name,\n xlim=xlim,\n ylim=ylim,\n xlabel='X',\n ylabel='Y',\n aspect='equal'\n )\n ax.grid('major', alpha=0.2)\n\n sym = dict(\n pi='r.',\n rp='c.',\n pc='b.',\n pt='b.',\n alignment='b-',\n stakes='rx'\n )\n sym.update(symbols)\n\n pt = self.pt_coordinates()\n pc = self.pc_coordinates()\n\n if sym['alignment'] is not None:\n for a, b in zip(pt[:-1], pc[1:]):\n x = np.array([a, b])\n ax.plot(x[:,0], x[:,1], sym['alignment'])\n\n for a, b in zip(self.pt_stations(), self.pc_stations()):\n if a != b:\n n = int(np.ceil((a - b) / step))\n sta = np.linspace(b, a, n)\n x = self.coordinates(sta)\n ax.plot(x[:,0], x[:,1], sym['alignment'])\n\n if sym['pi'] is not None:\n x = self.pi_coordinates()\n ax.plot(x[:,0], x[:,1], sym['pi'])\n\n if sym['rp'] is not None:\n x = self.rp_coordinates()\n ax.plot(x[:,0], x[:,1], sym['rp'])\n\n if sym['pt'] is not None:\n ax.plot(pt[:,0], pt[:,1], sym['pt'])\n\n if sym['pc'] is not None:\n ax.plot(pc[:,0], pc[:,1], sym['pc'])\n\n if sym['stakes'] is not None and len(self.stakes) > 0:\n self.set_stake_xy()\n x = np.array(self.stakes)\n ax.plot(x[:,0], x[:,1], sym['stakes'])\n\n return ax\n" ]
[ [ "numpy.asarray", "numpy.insert", "numpy.append", "matplotlib.pyplot.figure", "numpy.abs", "numpy.cos", "numpy.expand_dims", "numpy.linspace", "numpy.ceil", "numpy.zeros", "numpy.column_stack", "numpy.tan", "numpy.max", "numpy.einsum", "numpy.linalg.norm", "numpy.arctan2", "numpy.cumsum", "numpy.sign", "numpy.array", "numpy.sin", "numpy.dot" ] ]
lfchener/dgl
[ "77f4287a4118db64c46f4f413a426e1419a09d53" ]
[ "examples/pytorch/rgcn-hetero-ogbn-mag/model.py" ]
[ "from typing import Callable, Dict, List, Union\n\nimport dgl\nimport dgl.nn.pytorch as dglnn\nimport torch\nimport torch.nn as nn\n\n\nclass RelGraphEmbedding(nn.Module):\n def __init__(\n self,\n hg: dgl.DGLHeteroGraph,\n embedding_size: int,\n num_nodes: Dict[str, int],\n node_feats: Dict[str, torch.Tensor],\n node_feats_projection: bool = False,\n ):\n super().__init__()\n self._hg = hg\n self._node_feats = node_feats\n self._node_feats_projection = node_feats_projection\n self.node_embeddings = nn.ModuleDict()\n\n if node_feats_projection:\n self.embeddings = nn.ParameterDict()\n\n for ntype in hg.ntypes:\n if node_feats[ntype] is None:\n node_embedding = nn.Embedding(\n num_nodes[ntype], embedding_size, sparse=True)\n nn.init.uniform_(node_embedding.weight, -1, 1)\n\n self.node_embeddings[ntype] = node_embedding\n elif node_feats[ntype] is not None and node_feats_projection:\n input_embedding_size = node_feats[ntype].shape[-1]\n embedding = nn.Parameter(torch.Tensor(\n input_embedding_size, embedding_size))\n nn.init.xavier_uniform_(embedding)\n\n self.embeddings[ntype] = embedding\n\n def forward(\n self,\n in_nodes: Dict[str, torch.Tensor] = None,\n device: torch.device = None,\n ) -> Dict[str, torch.Tensor]:\n if in_nodes is not None:\n ntypes = [ntype for ntype in in_nodes.keys()]\n nids = [nid for nid in in_nodes.values()]\n else:\n ntypes = self._hg.ntypes\n nids = [self._hg.nodes(ntype) for ntype in ntypes]\n\n x = {}\n\n for ntype, nid in zip(ntypes, nids):\n if self._node_feats[ntype] is None:\n x[ntype] = self.node_embeddings[ntype](nid)\n else:\n if device is not None:\n self._node_feats[ntype] = self._node_feats[ntype].to(\n device)\n\n if self._node_feats_projection:\n x[ntype] = self._node_feats[ntype][nid] @ self.embeddings[ntype]\n else:\n x[ntype] = self._node_feats[ntype][nid]\n\n return x\n\n\nclass RelGraphConvLayer(nn.Module):\n def __init__(\n self,\n in_feats: int,\n out_feats: int,\n rel_names: List[str],\n num_bases: int,\n norm: str = 'right',\n weight: bool = True,\n bias: bool = True,\n activation: Callable[[torch.Tensor], torch.Tensor] = None,\n dropout: float = None,\n self_loop: bool = False,\n ):\n super().__init__()\n self._rel_names = rel_names\n self._num_rels = len(rel_names)\n self._conv = dglnn.HeteroGraphConv({rel: dglnn.GraphConv(\n in_feats, out_feats, norm=norm, weight=False, bias=False) for rel in rel_names})\n self._use_weight = weight\n self._use_basis = num_bases < self._num_rels and weight\n self._use_bias = bias\n self._activation = activation\n self._dropout = nn.Dropout(dropout) if dropout is not None else None\n self._use_self_loop = self_loop\n\n if weight:\n if self._use_basis:\n self.basis = dglnn.WeightBasis(\n (in_feats, out_feats), num_bases, self._num_rels)\n else:\n self.weight = nn.Parameter(torch.Tensor(\n self._num_rels, in_feats, out_feats))\n nn.init.xavier_uniform_(\n self.weight, gain=nn.init.calculate_gain('relu'))\n\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_feats))\n nn.init.zeros_(self.bias)\n\n if self_loop:\n self.self_loop_weight = nn.Parameter(\n torch.Tensor(in_feats, out_feats))\n nn.init.xavier_uniform_(\n self.self_loop_weight, gain=nn.init.calculate_gain('relu'))\n\n def _apply_layers(\n self,\n ntype: str,\n inputs: torch.Tensor,\n inputs_dst: torch.Tensor = None,\n ) -> torch.Tensor:\n x = inputs\n\n if inputs_dst is not None:\n x += torch.matmul(inputs_dst[ntype], self.self_loop_weight)\n\n if self._use_bias:\n x += self.bias\n\n if self._activation is not None:\n x = 
self._activation(x)\n\n if self._dropout is not None:\n x = self._dropout(x)\n\n return x\n\n def forward(\n self,\n hg: dgl.DGLHeteroGraph,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n hg = hg.local_var()\n\n if self._use_weight:\n weight = self.basis() if self._use_basis else self.weight\n weight_dict = {self._rel_names[i]: {'weight': w.squeeze(\n dim=0)} for i, w in enumerate(torch.split(weight, 1, dim=0))}\n else:\n weight_dict = {}\n\n if self._use_self_loop:\n if hg.is_block:\n inputs_dst = {ntype: h[:hg.num_dst_nodes(\n ntype)] for ntype, h in inputs.items()}\n else:\n inputs_dst = inputs\n else:\n inputs_dst = None\n\n x = self._conv(hg, inputs, mod_kwargs=weight_dict)\n x = {ntype: self._apply_layers(ntype, h, inputs_dst)\n for ntype, h in x.items()}\n\n return x\n\n\nclass EntityClassify(nn.Module):\n def __init__(\n self,\n hg: dgl.DGLHeteroGraph,\n in_feats: int,\n hidden_feats: int,\n out_feats: int,\n num_bases: int,\n num_layers: int,\n norm: str = 'right',\n layer_norm: bool = False,\n input_dropout: float = 0,\n dropout: float = 0,\n activation: Callable[[torch.Tensor], torch.Tensor] = None,\n self_loop: bool = False,\n ):\n super().__init__()\n self._hidden_feats = hidden_feats\n self._out_feats = out_feats\n self._num_layers = num_layers\n self._input_dropout = nn.Dropout(input_dropout)\n self._dropout = nn.Dropout(dropout)\n self._activation = activation\n self._rel_names = sorted(list(set(hg.etypes)))\n self._num_rels = len(self._rel_names)\n\n if num_bases < 0 or num_bases > self._num_rels:\n self._num_bases = self._num_rels\n else:\n self._num_bases = num_bases\n\n self._layers = nn.ModuleList()\n\n self._layers.append(RelGraphConvLayer(\n in_feats,\n hidden_feats,\n self._rel_names,\n self._num_bases,\n norm=norm,\n self_loop=self_loop,\n ))\n\n for _ in range(1, num_layers - 1):\n self._layers.append(RelGraphConvLayer(\n hidden_feats,\n hidden_feats,\n self._rel_names,\n self._num_bases,\n norm=norm,\n self_loop=self_loop,\n ))\n\n self._layers.append(RelGraphConvLayer(\n hidden_feats,\n out_feats,\n self._rel_names,\n self._num_bases,\n norm=norm,\n self_loop=self_loop,\n ))\n\n if layer_norm:\n self._layer_norms = nn.ModuleList()\n\n for _ in range(num_layers - 1):\n self._layer_norms.append(nn.LayerNorm(hidden_feats))\n else:\n self._layer_norms = None\n\n def _apply_layers(\n self,\n layer_idx: int,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n x = inputs\n\n for ntype, h in x.items():\n if self._layer_norms is not None:\n h = self._layer_norms[layer_idx](h)\n\n if self._activation is not None:\n h = self._activation(h)\n\n x[ntype] = self._dropout(h)\n\n return x\n\n def forward(\n self,\n hg: Union[dgl.DGLHeteroGraph, List[dgl.DGLHeteroGraph]],\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n x = {ntype: self._input_dropout(h) for ntype, h in inputs.items()}\n\n if isinstance(hg, list):\n for i, (layer, block) in enumerate(zip(self._layers, hg)):\n x = layer(block, x)\n\n if i < self._num_layers - 1:\n x = self._apply_layers(i, x)\n else:\n for i, layer in enumerate(self._layers):\n x = layer(hg, x)\n\n if i < self._num_layers - 1:\n x = self._apply_layers(i, x)\n\n return x\n\n def inference(\n self,\n hg: dgl.DGLHeteroGraph,\n batch_size: int,\n num_workers: int,\n embedding_layer: nn.Module,\n device: torch.device,\n ) -> Dict[str, torch.Tensor]:\n for i, layer in enumerate(self._layers):\n sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n dataloader = 
dgl.dataloading.NodeDataLoader(\n hg,\n {ntype: hg.nodes(ntype) for ntype in hg.ntypes},\n sampler,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=num_workers,\n )\n\n if i < self._num_layers - 1:\n y = {ntype: torch.zeros(hg.num_nodes(\n ntype), self._hidden_feats, device=device) for ntype in hg.ntypes}\n else:\n y = {ntype: torch.zeros(hg.num_nodes(\n ntype), self._out_feats, device=device) for ntype in hg.ntypes}\n\n for in_nodes, out_nodes, blocks in dataloader:\n in_nodes = {rel: nid.to(device)\n for rel, nid in in_nodes.items()}\n out_nodes = {rel: nid.to(device)\n for rel, nid in out_nodes.items()}\n block = blocks[0].to(device)\n\n if i == 0:\n h = embedding_layer(in_nodes=in_nodes, device=device)\n else:\n h = {ntype: x[ntype][in_nodes[ntype]]\n for ntype in hg.ntypes}\n\n h = layer(block, h)\n\n if i < self._num_layers - 1:\n h = self._apply_layers(i, h)\n\n for ntype in h:\n y[ntype][out_nodes[ntype]] = h[ntype]\n\n x = y\n\n return x\n" ]
[ [ "torch.nn.init.calculate_gain", "torch.nn.init.xavier_uniform_", "torch.Tensor", "torch.split", "torch.nn.ParameterDict", "torch.nn.init.uniform_", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.ModuleList", "torch.nn.init.zeros_", "torch.nn.ModuleDict", "torch.nn.Dropout", "torch.matmul" ] ]
vishalbelsare/GNN_tf_2.x
[ "4b6429ed58f2c0922257600a9287d5cc5a10395b" ]
[ "graph_class.py" ]
[ "# coding=utf-8\n\nimport os\nimport shutil\n\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.sparse import coo_matrix\n\n\n#######################################################################################################################\n## GRAPH OBJECT CLASS #################################################################################################\n#######################################################################################################################\nclass GraphObject:\n ## CONSTRUCTORS METHODS ###########################################################################################\n def __init__(self, arcs, nodes, targets,\n problem_based: str = 'n',\n set_mask=None,\n output_mask=None,\n sample_weights=1,\n NodeGraph=None,\n ArcNode=None,\n aggregation_mode: str = 'average'):\n \"\"\" CONSTRUCTOR METHOD\n\n :param arcs: Ordered Arcs Matrix where arcs[i] = [ID Node From | ID NodeTo | Arc Label].\n :param nodes: Ordered Nodes Matrix where nodes[i] = [Node Label].\n :param targets: Targets Array with shape (Num of targeted example [nodes or arcs], dim_target example).\n :param problem_based: (str) define the problem on which graph is used: 'a' arcs-based, 'g' graph-based, 'n' node-based.\n :param set_mask: Array of {0,1} to define arcs/nodes belonging to a set, when dataset == single GraphObject.\n :param output_mask: Array of {0,1} to define the sub-set of arcs/nodes whose target is known.\n :param sample_weights: target sample weight for loss computation. It can be int, float or numpy.array of ints or floats\n > If int, all targets are weighted as sample_weights * ones.\n > If numpy.array, len(sample_weights) and targets.shape[0] must agree.\n :param NodeGraph: Matrix (nodes.shape[0],{Num graphs or 1}) used only when problem_based=='g'.\n :param ArcNode: Matrix of shape (num_of_arcs, num_of_nodes) s.t. A[i,j]=value if arc[i,2]==node[j].\n :param aggregation_mode: (str) It defines the aggregation mode for the incoming message of a node using ArcNode and Adjacency:\n > 'average': elem(matrix)={0-1} -> matmul(m,A) gives the average of incoming messages, s.t. sum(A[:,i])=1;\n > 'normalized': elem(matrix)={0-1} -> matmul(m,A) gives the normalized message wrt the total number of g.nodes;\n > 'sum': elem(matrix)={0,1} -> matmul(m,A) gives the total sum of incoming messages. 
In this case Adjacency\n \"\"\"\n self.dtype = tf.keras.backend.floatx()\n\n # store arcs, nodes, targets\n self.arcs = arcs.astype(self.dtype)\n self.nodes = nodes.astype(self.dtype)\n self.targets = targets.astype(self.dtype)\n self.sample_weights = sample_weights * np.ones(self.targets.shape[0])\n\n # store dimensions\n self.DIM_NODE_LABEL = nodes.shape[1]\n self.DIM_ARC_LABEL = (arcs.shape[1] - 2) # first two columns contain nodes indices\n self.DIM_TARGET = targets.shape[1]\n\n # setting the problem type: node, arcs or graph based + check existence of passed parameters in keys\n lenMask = {'n': nodes.shape[0], 'a': arcs.shape[0], 'g': nodes.shape[0]}\n\n # build set_mask, for a dataset composed of only a single graph: its nodes have to be divided in Tr, Va and Te\n self.set_mask = np.ones(lenMask[problem_based], dtype=bool) if set_mask is None else set_mask.astype(bool)\n # build output_mask\n self.output_mask = np.ones(len(self.set_mask), dtype=bool) if output_mask is None else output_mask.astype(bool)\n\n # check lengths: output_mask must be as long as set_mask\n if len(self.set_mask) != len(self.output_mask): raise ValueError('Error - len(<set_mask>) != len(<output_mask>)')\n\n # nodes and arcs aggregation\n if aggregation_mode not in ['average', 'normalized', 'sum']: raise ValueError(\"ERROR: Unknown aggregation mode\")\n self.aggregation_mode = aggregation_mode\n\n # build ArcNode matrix or acquire it from input\n self.ArcNode = self.buildArcNode() if ArcNode is None else ArcNode.astype(self.dtype)\n\n # build Adjancency Matrix. Note that it can be an Aggregated Version of the 'normal' Adjacency Matrix (with only 0 and 1)\n self.Adjacency = self.buildAdiacency()\n\n # build node_graph conversion matrix\n self.NodeGraph = self.buildNodeGraph(problem_based) if NodeGraph is None else NodeGraph.astype(self.dtype)\n\n # -----------------------------------------------------------------------------------------------------------------\n def copy(self):\n \"\"\" COPY METHOD\n\n :return: a Deep Copy of the GraphObject instance.\n \"\"\"\n return GraphObject(arcs=self.getArcs(), nodes=self.getNodes(), targets=self.getTargets(), set_mask=self.getSetMask(),\n output_mask=self.getOutputMask(), sample_weights=self.getSampleWeights(), NodeGraph=self.getNodeGraph(),\n aggregation_mode=self.aggregation_mode)\n\n # -----------------------------------------------------------------------------------------------------------------\n def buildAdiacency(self):\n \"\"\" Build 'Aggregated' Adjacency Matrix ADJ, s.t. 
ADJ[i,j]=value if edge (i,j) exists in graph edges set.\n value is set by self.aggregation_mode: 'sum':1, 'normalized':1/self.nodes.shape[0], 'average':1/number_of_neighbors \"\"\"\n values = self.getArcNode().data\n indices = self.arcs[:, :2].astype(int)\n return coo_matrix((values, (indices[:, 0], indices[:, 1])), shape=(self.nodes.shape[0], self.nodes.shape[0]), dtype=self.dtype)\n\n # -----------------------------------------------------------------------------------------------------------------\n def buildArcNode(self):\n \"\"\" Build ArcNode Matrix A of shape (number_of_arcs, number_of_nodes) where A[i,j]=value if arc[i,2]==node[j].\n Compute the matmul(m:=message,A) to get the incoming message on each node.\n :return: sparse ArcNode Matrix, for memory efficiency.\n :raise: Error if <aggregation_mode> is not in ['average','sum','normalized'].\n \"\"\"\n\n col = self.arcs[:, 1] # column indices of A are located in the second column of the arcs tensor\n row = np.arange(0, len(col)) # arc id (from 0 to number of arcs)\n\n # sum node aggregation - incoming message as sum of neighbors states and labels\n values_vector = np.ones(len(col))\n\n # normalized node aggregation - incoming message as sum of neighbors states and labels divided by the number of nodes in the graph\n if self.aggregation_mode == 'normalized':\n values_vector = values_vector * float(1 / len(col))\n\n # average node aggregation - incoming message as average of neighbors states and labels\n elif self.aggregation_mode == 'average':\n val, col_index, destination_node_counts = np.unique(col, return_inverse=True, return_counts=True)\n values_vector = values_vector / destination_node_counts[col_index]\n\n # isolated nodes correction: if nodes[i] is isolated, then ArcNode[:,i]=0, to maintain nodes ordering\n return coo_matrix((values_vector, (row, col)), shape=(self.arcs.shape[0], self.nodes.shape[0]), dtype=self.dtype)\n\n # -----------------------------------------------------------------------------------------------------------------\n def setAggregation(self, aggregation_mode: str):\n \"\"\" Set ArcNode values for the specified :param aggregation_mode: \"\"\"\n if aggregation_mode not in ['average', 'normalized', 'sum']: raise ValueError(\"ERROR: Unknown aggregation mode\")\n self.aggregation_mode = aggregation_mode\n self.ArcNode = self.buildArcNode()\n self.Adjacency = self.buildAdiacency()\n\n # -----------------------------------------------------------------------------------------------------------------\n def buildNodeGraph(self, problem_based: str):\n \"\"\" Build Node-Graph Aggregation Matrix, to transform a node-based problem in a graph-based one.\n nodegraph != None only if problem_based == 'g': It has dimensions (nodes.shape[0], 1) for a single graph, \n or (nodes.shape[0], Num graphs) for a graph containing 2+ graphs, built by merging the single graphs into a bigger one,\n such that after the node-graph aggregation process gnn can compute (Num graphs, targets.shape[1]) as output.\n It's normalized wrt the number of nodes whose output is computed, i.e. 
the number of ones in output_mask.\n :return: nodegraph matrix if :param problem_based: is 'g' else None, as nodegraph is used in graph-based problems.\n \"\"\"\n nodegraph = None\n if problem_based == 'g':\n nodes_output_coefficient = self.nodes.shape[0]\n nodegraph = np.ones((nodes_output_coefficient, 1), dtype=np.float32) * 1 / nodes_output_coefficient\n return nodegraph\n\n # -----------------------------------------------------------------------------------------------------------------\n def save(self, graph_folder_path: str) -> None:\n \"\"\" save graph in folder. All attributes are saved in numpy .npy files.\n\n :param graph_folder_path: (str) folder path in which graph is saved.\n \"\"\"\n GraphObject.save_graph(graph_folder_path, self)\n\n # -----------------------------------------------------------------------------------------------------------------\n def savetxt(self, graph_folder_path: str, format: str = '%.10g') -> None:\n \"\"\" save graph in folder. All attributes are saved in textual .txt files.\n\n :param graph_folder_path: (str) folder path in which graph is saved.\n \"\"\"\n GraphObject.save_txt(graph_folder_path, self, format)\n\n ## GETTERS ########################################################################################################\n def getArcs(self):\n return self.arcs.copy()\n\n def getNodes(self):\n return self.nodes.copy()\n\n def getTargets(self):\n return self.targets.copy()\n\n def getSetMask(self):\n return self.set_mask.copy()\n\n def getOutputMask(self):\n return self.output_mask.copy()\n\n def getAdjacency(self):\n return self.Adjacency.copy()\n\n def getArcNode(self):\n return self.ArcNode.copy()\n\n def getNodeGraph(self):\n return None if self.NodeGraph is None else self.NodeGraph.copy()\n\n def getSampleWeights(self):\n return self.sample_weights.copy()\n\n ## CLASS METHODs ##################################################################################################\n @classmethod\n def save_graph(self, graph_folder_path: str, g):\n \"\"\" Save a graph to a directory, creating txt files referring to all attributes of graph g\n Note that graph_folder_path will contain ONLY a single graph g. If folder is not empty, it is removed and re-made\n Remind that dataset folder contains one folder for each graph.\n\n :param graph_folder_path: new directory for saving the graph. \n :param g: graph of type GraphObject to be saved.\n \"\"\"\n # check folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n if os.path.exists(graph_folder_path): shutil.rmtree(graph_folder_path)\n os.makedirs(graph_folder_path)\n\n # save everything\n np.save(graph_folder_path + 'arcs.npy', g.arcs)\n np.save(graph_folder_path + 'nodes.npy', g.nodes)\n np.save(graph_folder_path + 'targets.npy', g.targets)\n if not all(g.set_mask): np.save(graph_folder_path + 'set_mask.npy', g.set_mask)\n if not all(g.output_mask): np.save(graph_folder_path + 'output_mask.npy', g.output_mask)\n if np.any(g.sample_weights != 1): np.save(graph_folder_path + 'sample_weights.npy', g.sample_weights)\n if g.NodeGraph is not None and g.targets.shape[0] > 1: np.save(graph_folder_path + 'NodeGraph.npy', g.NodeGraph)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def save_txt(self, graph_folder_path: str, g, format: str = '%.10g'):\n \"\"\" Save a graph to a directory, creating txt files referring to all attributes of graph g\n Note that graph_folder_path will contain ONLY a single graph g. 
If folder is not empty, it is removed and re-made.\n Remind that dataset folder contains one folder for each graph.\n\n :param graph_folder_path: new directory for saving the graph.\n :param g: graph of type GraphObject to be saved.\n :param format: param passed to np.savetxt().\n \"\"\"\n # check folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n if os.path.exists(graph_folder_path): shutil.rmtree(graph_folder_path)\n os.makedirs(graph_folder_path)\n\n # save everything\n np.savetxt(graph_folder_path + 'arcs.txt', g.arcs, fmt=format)\n np.savetxt(graph_folder_path + 'nodes.txt', g.nodes, fmt=format)\n np.savetxt(graph_folder_path + 'targets.txt', g.targets, fmt=format)\n if not all(g.set_mask): np.savetxt(graph_folder_path + 'set_mask.txt', g.set_mask, fmt=format)\n if not all(g.output_mask): np.savetxt(graph_folder_path + 'output_mask.txt', g.output_mask, fmt=format)\n if np.any(g.sample_weights != 1): np.savetxt(graph_folder_path + 'sample_weights.txt', g.sample_weights, fmt=format)\n if g.NodeGraph is not None and g.targets.shape[0] > 1: np.savetxt(graph_folder_path + 'NodeGraph.txt', g.NodeGraph, fmt=format)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def load(self, graph_folder_path: str, problem_based: str, aggregation_mode: str):\n \"\"\" Load a graph from a directory which contains at least 3 numpy files referring to nodes, arcs and targets\n\n :param graph_folder_path: directory containing at least 3 files: 'nodes.npy', 'arcs.npy' and 'targets.npy'\n > other possible files: 'NodeGraph.npy','output_mask.npy' and 'set_mask.npy'. No other files required!\n :param aggregation_mode: node aggregation mode: 'average','sum','normalized'. Go to BuildArcNode for details\n :param problem_based: (str) : 'n'-nodeBased; 'a'-arcBased; 'g'-graphBased\n > NOTE For graph_based problems, file 'NodeGraph.npy' must be present in folder\n :return: GraphObject described by files in <graph_folder_path> folder\n \"\"\"\n # load all the files inside <graph_folder_path> folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n files = os.listdir(graph_folder_path)\n keys = [i.rsplit('.')[0] for i in files] + ['problem_based', 'aggregation_mode']\n vals = [np.load(graph_folder_path + i) for i in files] + [problem_based, aggregation_mode]\n\n # create a dictionary with parameters and values to be passed to constructor and return GraphObject\n params = dict(zip(keys, vals))\n return self(**params)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def load_txt(self, graph_folder_path: str, problem_based: str, aggregation_mode: str):\n \"\"\" Load a graph from a directory which contains at least 3 txt files referring to nodes, arcs and targets\n\n :param graph_folder_path: directory containing at least 3 files: 'nodes.txt', 'arcs.txt' and 'targets.txt'\n > other possible files: 'NodeGraph.txt','output_mask.txt' and 'set_mask.txt'. No other files required!\n :param problem_based: (str) : 'n'-nodeBased; 'a'-arcBased; 'g'-graphBased\n > NOTE For graph_based problems, file 'NodeGraph.txt' must to be present in folder\n :param aggregation_mode: node aggregation mode: 'average','sum','normalized'. 
Go to BuildArcNode for details\n :return: GraphObject described by files in <graph_folder_path> folder\n \"\"\"\n # load all the files inside <graph_folder_path> folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n files = os.listdir(graph_folder_path)\n keys = [i.rsplit('.')[0] for i in files] + ['problem_based', 'aggregation_mode']\n vals = [np.loadtxt(graph_folder_path + i, ndmin=2) for i in files] + [problem_based, aggregation_mode]\n\n # create a dictionary with parameters and values to be passed to constructor and return GraphObject\n params = dict(zip(keys, vals))\n return self(**params)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def merge(self, glist, problem_based: str, aggregation_mode: str):\n \"\"\" Method to merge graphs: it takes in input a list of graphs and returns them as a single graph\n\n :param glist: list of GraphObjects\n > NOTE if problem_based=='g', new NodeGraph will have dimension (Num nodes, Num graphs) else None\n :param aggregation_mode: str, node aggregation mode for new GraphObject, go to buildArcNode for details\n :return: a new GraphObject containing all the information (nodes, arcs, targets, etc) in glist\n \"\"\"\n # check glist parameter: others parameter are in constructor\n if not (type(glist) == list and all(isinstance(x, (GraphObject, str)) for x in glist)):\n raise TypeError('type of param <glist> must be list of str \\'path-like\\' or GraphObjects')\n\n nodes, nodes_lens, arcs, targets, set_mask, output_mask, sample_weights, nodegraph_list = zip(*[(i.getNodes(), i.nodes.shape[0],\n i.getArcs(), i.getTargets(),\n i.getSetMask(), i.getOutputMask(),\n i.getSampleWeights(), i.getNodeGraph())\n for i in glist])\n\n # get single matrices for new graph\n for i, elem in enumerate(arcs): elem[:, :2] += sum(nodes_lens[:i])\n arcs = np.concatenate(arcs, axis=0)\n nodes = np.concatenate(nodes, axis=0)\n targets = np.concatenate(targets, axis=0)\n set_mask = np.concatenate(set_mask, axis=0)\n output_mask = np.concatenate(output_mask, axis=0)\n sample_weights = np.concatenate(sample_weights, axis=0)\n\n nodegraph = None\n if problem_based == 'g':\n from scipy.linalg import block_diag\n nodegraph = block_diag(*nodegraph_list)\n\n # resulting GraphObject\n return self(arcs=arcs, nodes=nodes, targets=targets, problem_based=problem_based, set_mask=set_mask, output_mask=output_mask,\n sample_weights=sample_weights, NodeGraph=nodegraph, aggregation_mode=aggregation_mode)\n\n @classmethod\n def fromGraphTensor(self, g, problem_based: str):\n nodegraph = None\n if problem_based == 'g': nodegraph = g.NodeGraph.numpy()\n return self(arcs=g.arcs.numpy(), nodes=g.nodes.numpy(), targets=g.targets.numpy(),\n set_mask=g.set_mask.numpy(), output_mask=g.output_mask.numpy(), sample_weights=g.sample_weights.numpy(),\n NodeGraph=nodegraph, aggregation_mode=g.aggregation_mode, problem_based=problem_based)\n\n\nclass GraphTensor:\n def __init__(self, nodes, arcs, targets, set_mask, output_mask, sample_weights, Adjacency, ArcNode, NodeGraph, aggregation_mode):\n dtype = tf.keras.backend.floatx()\n\n self.nodes = tf.constant(nodes, dtype=dtype)\n self.arcs = tf.constant(arcs, dtype=dtype)\n self.targets = tf.constant(targets, dtype=dtype)\n self.sample_weights = tf.constant(sample_weights, dtype=dtype)\n self.set_mask = tf.constant(set_mask, dtype=bool)\n self.output_mask = tf.constant(output_mask, dtype=bool)\n self.aggregation_mode = aggregation_mode\n self.NodeGraph = None\n 
if NodeGraph is not None: self.NodeGraph = tf.constant(NodeGraph, dtype=dtype)\n # Adjacency and ArcNode in GraphTensor MUST BE already transposed!\n self.Adjacency = tf.sparse.SparseTensor.from_value(Adjacency)\n self.ArcNode = tf.sparse.SparseTensor.from_value(ArcNode)\n\n # -----------------------------------------------------------------------------------------------------------------\n def copy(self):\n return GraphTensor(nodes=self.nodes, arcs=self.arcs, targets=self.targets, set_mask=self.set_mask, output_mask=self.output_mask,\n sample_weights=self.sample_weights, Adjacency=self.Adjacency, ArcNode=self.ArcNode, NodeGraph=self.NodeGraph,\n aggregation_mode=self.aggregation_mode)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def fromGraphObject(self, g: GraphObject):\n \"\"\" Create GraphTensor from GraphObject. Note that Adjacency and ArcNode are transposed so that GraphTensor.ArcNode and\n GraphTensor.Adjacency are ready for sparse_dense_matmul in Loop operations.\n \"\"\"\n return self(nodes=g.nodes, arcs=g.arcs, targets=g.targets, set_mask=g.set_mask, output_mask=g.output_mask,\n sample_weights=g.sample_weights, NodeGraph=g.NodeGraph, Adjacency=self.COO2SparseTransposedTensor(g.Adjacency),\n ArcNode=self.COO2SparseTransposedTensor(g.ArcNode), aggregation_mode=g.aggregation_mode)\n\n # -----------------------------------------------------------------------------------------------------------------\n @staticmethod\n def COO2SparseTransposedTensor(coo_matrix) -> tf.Tensor:\n \"\"\" Get the transposed sparse tensor from a sparse coo_matrix matrix \"\"\"\n # SparseTensor is created and then reordered to be correctly computable. NOTE: reorder() recommended by TF2.0+\n indices = list(zip(coo_matrix.col, coo_matrix.row))\n sparse_tensor = tf.SparseTensor(indices, values=coo_matrix.data, dense_shape=[coo_matrix.shape[1], coo_matrix.shape[0]])\n sparse_tensor = tf.sparse.reorder(sparse_tensor)\n sparse_tensor = tf.cast(sparse_tensor, dtype=tf.keras.backend.floatx())\n return sparse_tensor\n" ]
[ [ "numpy.save", "numpy.ones", "numpy.load", "numpy.savetxt", "numpy.any", "scipy.sparse.coo_matrix", "tensorflow.sparse.SparseTensor.from_value", "tensorflow.keras.backend.floatx", "tensorflow.SparseTensor", "tensorflow.sparse.reorder", "scipy.linalg.block_diag", "tensorflow.constant", "numpy.concatenate", "numpy.unique", "numpy.loadtxt" ] ]
sarkarpr/azure-python-labs
[ "10ad5d69175cec7fc8ff465368e9867440d034f3" ]
[ "2019/6-azureml-movie-recommendation/reco_utils/dataset/spark_splitters.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport numpy as np\n\nfrom pyspark.sql import Window\nfrom pyspark.sql.functions import col, row_number, broadcast, rand\n\nfrom reco_utils.common.constants import (\n DEFAULT_ITEM_COL,\n DEFAULT_USER_COL,\n DEFAULT_TIMESTAMP_COL,\n DEFAULT_RATING_COL,\n)\nfrom reco_utils.dataset.split_utils import process_split_ratio, min_rating_filter_spark\n\n\ndef spark_random_split(data, ratio=0.75, seed=42):\n \"\"\"Spark random splitter\n Randomly split the data into several splits.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two halfs and the ratio argument indicates the ratio of \n training data set; if it is a list of float numbers, the splitter splits \n data into several portions corresponding to the split ratios. If a list \n is provided and the ratios are not summed to 1, they will be normalized.\n seed (int): Seed.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n multi_split, ratio = process_split_ratio(ratio)\n\n if multi_split:\n return data.randomSplit(ratio, seed=seed)\n else:\n return data.randomSplit([ratio, 1 - ratio], seed=seed)\n\n\ndef spark_chrono_split(\n data,\n ratio=0.75,\n min_rating=1,\n filter_by=\"user\",\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n col_timestamp=DEFAULT_TIMESTAMP_COL,\n):\n \"\"\"Spark chronological splitter\n This function splits data in a chronological manner. That is, for each user / item, the\n split function takes proportions of ratings which is specified by the split ratio(s).\n The split is stratified.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two sets and the ratio argument indicates the ratio of\n training data set; if it is a list of float numbers, the splitter splits \n data into several portions corresponding to the split ratios. 
If a list is \n provided and the ratios are not summed to 1, they will be normalized.\n seed (int): Seed.\n min_rating (int): minimum number of ratings for user or item.\n filter_by (str): either \"user\" or \"item\", depending on which of the two is to filter\n with min_rating.\n col_user (str): column name of user IDs.\n col_item (str): column name of item IDs.\n col_timestamp (str): column name of timestamps.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n if not (filter_by == \"user\" or filter_by == \"item\"):\n raise ValueError(\"filter_by should be either 'user' or 'item'.\")\n\n if min_rating < 1:\n raise ValueError(\"min_rating should be integer and larger than or equal to 1.\")\n\n multi_split, ratio = process_split_ratio(ratio)\n\n split_by_column = col_user if filter_by == \"user\" else col_item\n\n if min_rating > 1:\n data = min_rating_filter_spark(\n data,\n min_rating=min_rating,\n filter_by=filter_by,\n col_user=col_user,\n col_item=col_item,\n )\n\n ratio = ratio if multi_split else [ratio, 1 - ratio]\n ratio_index = np.cumsum(ratio)\n\n window_spec = Window.partitionBy(split_by_column).orderBy(col(col_timestamp))\n\n rating_grouped = (\n data.groupBy(split_by_column)\n .agg({col_timestamp: \"count\"})\n .withColumnRenamed(\"count(\" + col_timestamp + \")\", \"count\")\n )\n rating_all = data.join(broadcast(rating_grouped), on=split_by_column)\n\n rating_rank = rating_all.withColumn(\n \"rank\", row_number().over(window_spec) / col(\"count\")\n )\n\n splits = []\n for i, _ in enumerate(ratio_index):\n if i == 0:\n rating_split = rating_rank.filter(col(\"rank\") <= ratio_index[i])\n else:\n rating_split = rating_rank.filter(\n (col(\"rank\") <= ratio_index[i]) & (col(\"rank\") > ratio_index[i - 1])\n )\n\n splits.append(rating_split)\n\n return splits\n\n\ndef spark_stratified_split(\n data,\n ratio=0.75,\n min_rating=1,\n filter_by=\"user\",\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n col_rating=DEFAULT_RATING_COL,\n seed=42,\n):\n \"\"\"Spark stratified splitter\n For each user / item, the split function takes proportions of ratings which is\n specified by the split ratio(s). The split is stratified.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two halfs and the ratio argument indicates the ratio of\n training data set; if it is a list of float numbers, the splitter splits\n data into several portions corresponding to the split ratios. 
If a list is\n provided and the ratios are not summed to 1, they will be normalized.\n Earlier indexed splits will have earlier times\n (e.g the latest time per user or item in split[0] <= the earliest time per user or item in split[1])\n seed (int): Seed.\n min_rating (int): minimum number of ratings for user or item.\n filter_by (str): either \"user\" or \"item\", depending on which of the two is to filter\n with min_rating.\n col_user (str): column name of user IDs.\n col_item (str): column name of item IDs.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n if not (filter_by == \"user\" or filter_by == \"item\"):\n raise ValueError(\"filter_by should be either 'user' or 'item'.\")\n\n if min_rating < 1:\n raise ValueError(\"min_rating should be integer and larger than or equal to 1.\")\n\n multi_split, ratio = process_split_ratio(ratio)\n\n split_by_column = col_user if filter_by == \"user\" else col_item\n\n if min_rating > 1:\n data = min_rating_filter_spark(\n data,\n min_rating=min_rating,\n filter_by=filter_by,\n col_user=col_user,\n col_item=col_item,\n )\n\n ratio = ratio if multi_split else [ratio, 1 - ratio]\n ratio_index = np.cumsum(ratio)\n\n window_spec = Window.partitionBy(split_by_column).orderBy(rand(seed=seed))\n\n rating_grouped = (\n data.groupBy(split_by_column)\n .agg({col_rating: \"count\"})\n .withColumnRenamed(\"count(\" + col_rating + \")\", \"count\")\n )\n rating_all = data.join(broadcast(rating_grouped), on=split_by_column)\n\n rating_rank = rating_all.withColumn(\n \"rank\", row_number().over(window_spec) / col(\"count\")\n )\n\n splits = []\n for i, _ in enumerate(ratio_index):\n if i == 0:\n rating_split = rating_rank.filter(col(\"rank\") <= ratio_index[i])\n else:\n rating_split = rating_rank.filter(\n (col(\"rank\") <= ratio_index[i]) & (col(\"rank\") > ratio_index[i - 1])\n )\n\n splits.append(rating_split)\n\n return splits\n\n\ndef spark_timestamp_split(\n data,\n ratio=0.75,\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n col_timestamp=DEFAULT_TIMESTAMP_COL,\n):\n \"\"\"Spark timestamp based splitter\n The splitter splits the data into sets by timestamps without stratification on either\n user or item.\n The ratios are applied on the timestamp column which is divided accordingly into\n several partitions.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two sets and the ratio argument indicates the ratio of\n training data set; if it is a list of float numbers, the splitter splits\n data into several portions corresponding to the split ratios. If a list is\n provided and the ratios are not summed to 1, they will be normalized.\n Earlier indexed splits will have earlier times\n (e.g the latest time in split[0] <= the earliest time in split[1])\n col_user (str): column name of user IDs.\n col_item (str): column name of item IDs.\n col_timestamp (str): column name of timestamps. 
Float number represented in\n seconds since Epoch.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n multi_split, ratio = process_split_ratio(ratio)\n\n ratio = ratio if multi_split else [ratio, 1 - ratio]\n ratio_index = np.cumsum(ratio)\n\n window_spec = Window.orderBy(col(col_timestamp))\n rating = data.withColumn(\"rank\", row_number().over(window_spec))\n\n data_count = rating.count()\n rating_rank = rating.withColumn(\"rank\", row_number().over(window_spec) / data_count)\n\n splits = []\n for i, _ in enumerate(ratio_index):\n if i == 0:\n rating_split = rating_rank.filter(col(\"rank\") <= ratio_index[i]).drop(\n \"rank\"\n )\n else:\n rating_split = rating_rank.filter(\n (col(\"rank\") <= ratio_index[i]) & (col(\"rank\") > ratio_index[i - 1])\n ).drop(\"rank\")\n\n splits.append(rating_split)\n\n return splits\n" ]
[ [ "numpy.cumsum" ] ]
tkuri/noise2noise
[ "a293f4952d3d1c997f4eb298a20fd5fac50a2dd3" ]
[ "test_model.py" ]
[ "import argparse\nimport numpy as np\nfrom pathlib import Path\nimport cv2\nfrom model import get_model\n# from noise_model import get_noise_model\n\nMAX_8BIT = 255.\nMAX_16BIT = 65535.\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"Test trained model\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--image_dir\", type=str, required=True,\n help=\"test image dir\")\n parser.add_argument(\"--model\", type=str, default=\"srresnet\",\n help=\"model architecture ('srresnet' or 'unet')\")\n parser.add_argument(\"--weight_file\", type=str, required=True,\n help=\"trained weight file\")\n # parser.add_argument(\"--test_noise_model\", type=str, default=\"gaussian,25,25\",\n # help=\"noise model for test images\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"if set, save resulting images otherwise show result using imshow\")\n parser.add_argument('--uint16', action='store_true', help='16bit process.')\n args = parser.parse_args()\n return args\n\n\ndef get_image(image, args):\n if args.uint16:\n image = np.clip(image, 0, MAX_8BIT)\n image = image.astype(dtype=np.uint8)\n else:\n image = np.clip(image*MAX_8BIT, 0, MAX_16BIT)\n image = image.astype(dtype=np.uint16)\n return image\n\n\ndef main():\n args = get_args()\n image_dir = args.image_dir\n weight_file = args.weight_file\n # val_noise_model = get_noise_model(args.test_noise_model)\n model = get_model(args.model)\n model.load_weights(weight_file)\n\n if args.output_dir:\n output_dir = Path(args.output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n\n print(image_dir)\n# image_paths = list(Path(image_dir).glob(\"*.*\"))\n image_paths = list(Path(image_dir).glob(\"*.png\"))\n# print(image_paths)\n\n for image_path in image_paths:\n image = cv2.imread(str(image_path),-1)\n\n if args.uint16:\n image = image / MAX_8BIT # Normalize 0~255 if input is 16bit\n\n h, w, _ = image.shape\n image = image[:(h // 16) * 16, :(w // 16) * 16] # for stride (maximum 16)\n h, w, _ = image.shape\n \n pred = model.predict(np.expand_dims(image, 0))\n out_image = get_image(pred[0], args)\n\n# out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)\n# noise_image = val_noise_model(image)\n# pred = model.predict(np.expand_dims(noise_image, 0))\n# denoised_image = get_image(pred[0])\n# out_image[:, :w] = image\n# out_image[:, w:w * 2] = noise_image\n# out_image[:, w * 2:] = denoised_image\n\n\n if args.output_dir:\n cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + \".png\", out_image)\n else:\n cv2.imshow(\"result\", out_image)\n key = cv2.waitKey(-1)\n # \"q\": quit\n if key == 113:\n return 0\n return 0\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.clip", "numpy.expand_dims" ] ]
quickgrid/paper-implementations
[ "90de1e93cc664e8f5e1e49c57c030f3d9d14fdf9" ]
[ "pytorch/gaugan/gaugan.py" ]
[ "\"\"\"Pytorch GauGAN implementation.\n\nEither segmentation one hot mask or rgb mask can be passed to discriminator with little modification.\n\nTodo\n - Modify to try to generate and match mask also as loss.\n - Try discriminator with either segmentation image or label.\n - Use multiscale feature from discriminator to calculate loss.\n - Test conv bias, norm affine and other parameter effect on result.\n\nReferences\n - https://arxiv.org/abs/1903.07291\n - https://keras.io/examples/generative/gaugan/\n - https://github.com/quickgrid/AI-Resources/blob/master/resources/ai-notes/gaugan-series.md\n - https://github.com/NVlabs/SPADE\n\"\"\"\n\nimport os\nimport pathlib\nfrom datetime import datetime\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nfrom torchvision.models.feature_extraction import get_graph_node_names, create_feature_extractor\nfrom torchvision.transforms import transforms\nfrom torchvision import models\nfrom torch.nn.utils.spectral_norm import spectral_norm\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.nn import functional\nfrom torch.backends import cudnn\nfrom PIL import Image\nfrom tqdm import tqdm\n\n\nclass LayerDebugger(nn.Module):\n def __init__(self) -> None:\n super(LayerDebugger, self).__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n print(x.shape)\n return x\n\n\nclass ImageEncoder(nn.Module):\n def __init__(\n self,\n img_size: int,\n latent_dim: int,\n enable_dropout: bool = False,\n dropout_rate: float = 0.5,\n apply_spectral_norm: bool = False,\n ) -> None:\n super(ImageEncoder, self).__init__()\n\n dropout_layer = list()\n if enable_dropout:\n dropout_layer = [nn.Dropout(p=dropout_rate)]\n\n def _get_conv_layer(_in_channels: int, _out_channels: int, apply_bias: bool = False) -> list:\n conv_layer = nn.Conv2d(\n in_channels=_in_channels, out_channels=_out_channels,\n kernel_size=(3, 3), padding=(1, 1), stride=(2, 2), bias=apply_bias,\n )\n if apply_spectral_norm:\n conv_layer = spectral_norm(conv_layer)\n return [conv_layer]\n return [conv_layer]\n\n def _get_block(\n _in_channels: int,\n _out_channels: int,\n apply_norm: bool = True,\n ) -> list:\n norm_layer = list()\n if apply_norm:\n norm_layer = [nn.InstanceNorm2d(num_features=_out_channels, affine=False)]\n\n return [\n *_get_conv_layer(_in_channels=_in_channels, _out_channels=_out_channels),\n *norm_layer,\n nn.LeakyReLU(negative_slope=0.2),\n *dropout_layer,\n ]\n\n channel_in = [3, 64, 128, 256, 512, 512]\n channel_out = [64, 128, 256, 512, 512, 512]\n linear_features = 8192\n\n conv_layers = list()\n for idx, (in_channels, out_channels) in enumerate(zip(channel_in, channel_out)):\n if idx != 0:\n conv_layers.extend(_get_block(_in_channels=in_channels, _out_channels=out_channels))\n else:\n conv_layers.extend(_get_block(_in_channels=in_channels, _out_channels=out_channels, apply_norm=False))\n\n self.encoder_layers = nn.Sequential(\n *conv_layers,\n nn.Flatten(),\n nn.Linear(\n in_features=((img_size // (2 ** len(channel_out))) ** 2) * channel_out[-1],\n out_features=linear_features\n ),\n )\n\n self.mean_out = nn.Linear(in_features=linear_features, out_features=latent_dim)\n self.variance_out = nn.Linear(in_features=linear_features, out_features=latent_dim)\n\n def forward(self, img: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n x = self.encoder_layers(img)\n mean = self.mean_out(x)\n var = self.variance_out(x)\n return mean, var\n\n\nclass 
Discriminator(nn.Module):\n \"\"\"Conv parameters are manually calculated using formula in pytorch Conv2D docs to keep output shape same.\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n device: torch.device,\n enable_dropout: bool = False,\n dropout_rate: float = 0.5,\n apply_spectral_norm: bool = False,\n ) -> None:\n super(Discriminator, self).__init__()\n\n dropout_layer = list()\n if enable_dropout:\n dropout_layer = [nn.Dropout(p=dropout_rate)]\n\n def _get_conv_layer(\n _in_channels: int,\n _out_channels: int,\n _stride: int,\n _padding: int,\n _dilation: int,\n apply_bias: bool = False\n ) -> list:\n conv_layer = nn.Conv2d(\n in_channels=_in_channels, out_channels=_out_channels,\n kernel_size=(4, 4),\n padding=(_padding, _padding),\n stride=(_stride, _stride),\n dilation=(_dilation, _dilation),\n device=device,\n bias=apply_bias,\n )\n if apply_spectral_norm:\n conv_layer = spectral_norm(conv_layer)\n return [conv_layer]\n return [conv_layer]\n\n def _get_block(\n _in_channels: int,\n _out_channels: int,\n _stride: int,\n _padding: int,\n _dilation: int,\n apply_norm: bool = True,\n ) -> nn.Sequential:\n norm_layer = list()\n if apply_norm:\n # norm_layer = [nn.BatchNorm2d(num_features=_out_channels, device=device)]\n norm_layer = [nn.InstanceNorm2d(num_features=_out_channels, affine=False, device=device)]\n\n return nn.Sequential(\n *_get_conv_layer(\n _in_channels=_in_channels, _out_channels=_out_channels,\n _stride=_stride, _padding=_padding, _dilation=_dilation,\n ),\n *norm_layer,\n nn.LeakyReLU(negative_slope=0.2),\n *dropout_layer,\n )\n\n channel_in = [3 * 2, 64, 128, 256]\n # channel_in = [3 + num_classes, 64, 128, 256]\n channel_out = [64, 128, 256, 512]\n stride = [2, 2, 2, 1]\n padding = [3, 3, 3, 3]\n dilation = [2, 2, 2, 2]\n\n self.disc_multiscale_features = list()\n for idx, (in_channels, out_channels, stride, padding, dilation) in enumerate(zip(\n channel_in, channel_out, stride, padding, dilation\n )):\n if idx != 0:\n self.disc_multiscale_features.append(\n _get_block(\n _in_channels=in_channels,\n _out_channels=out_channels,\n _stride=stride,\n _padding=padding,\n _dilation=dilation,\n )\n )\n else:\n self.disc_multiscale_features.append(\n _get_block(\n _in_channels=in_channels,\n _out_channels=out_channels,\n _stride=stride,\n _padding=padding,\n _dilation=dilation,\n apply_norm=False,\n )\n )\n\n self.disc_out_layer = nn.Conv2d(\n in_channels=512, out_channels=1, kernel_size=(4, 4), padding=(3, 3), stride=(2, 2), dilation=(2, 2)\n )\n\n def forward(self, img1: torch.Tensor, img2: torch.Tensor) -> Tuple[torch.Tensor, list]:\n x = torch.cat([img1, img2], dim=1)\n multiscale_features = list()\n for layer in self.disc_multiscale_features:\n x = layer(x)\n multiscale_features.append(x)\n x = self.disc_out_layer(x)\n return x, multiscale_features\n\n\nclass SPADE(nn.Module):\n def __init__(\n self,\n out_channels: int,\n num_classes: int,\n ) -> None:\n super(SPADE, self).__init__()\n\n embed_dim = 128\n self.normalizer = nn.InstanceNorm2d(num_features=embed_dim, affine=False)\n\n self.embedding_conv = nn.Sequential(\n nn.Conv2d(in_channels=num_classes, out_channels=embed_dim, kernel_size=(3, 3), padding=(1, 1)),\n nn.ReLU(),\n )\n self.gamma_conv = nn.Conv2d(\n in_channels=embed_dim, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1)\n )\n\n self.beta_conv = nn.Conv2d(\n in_channels=embed_dim, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1)\n )\n\n def forward(self, packed_tensor: torch.Tensor) -> torch.Tensor:\n prev_input, 
onehot_mask = packed_tensor\n normalized = self.normalizer(prev_input)\n mask = functional.interpolate(onehot_mask.float(), size=prev_input.shape[2:], mode='nearest')\n x = self.embedding_conv(mask)\n gamma = self.gamma_conv(x)\n beta = self.beta_conv(x)\n output = gamma * normalized + beta\n return output\n\n\nclass SPADEResBlock(nn.Module):\n def __init__(\n self,\n in_filters: int,\n out_filters: int,\n num_classes: int,\n apply_spectral_norm: bool = False,\n ) -> None:\n super(SPADEResBlock, self).__init__()\n self.learned_skip = (in_filters != out_filters)\n min_filters = min(in_filters, out_filters)\n\n def _get_conv_layer(in_channels: int, out_channels: int, apply_bias: bool = True) -> list:\n conv_layer = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=(3, 3),\n padding=(1, 1),\n bias=apply_bias,\n )\n if apply_spectral_norm:\n conv_layer = spectral_norm(conv_layer)\n return [conv_layer]\n return [conv_layer]\n\n self.spade_res_block_1 = nn.Sequential(\n SPADE(num_classes=num_classes, out_channels=in_filters),\n nn.LeakyReLU(negative_slope=0.2),\n *_get_conv_layer(in_channels=in_filters, out_channels=min_filters),\n )\n\n self.spade_res_block_2 = nn.Sequential(\n SPADE(out_channels=min_filters, num_classes=num_classes),\n nn.LeakyReLU(negative_slope=0.2),\n *_get_conv_layer(in_channels=min_filters, out_channels=out_filters),\n )\n\n self.learned_skip_path = nn.Sequential(\n SPADE(out_channels=in_filters, num_classes=num_classes),\n nn.LeakyReLU(negative_slope=0.2),\n *_get_conv_layer(in_channels=in_filters, out_channels=out_filters, apply_bias=True),\n )\n\n def forward(self, packed_tensor: torch.Tensor) -> torch.Tensor:\n x, onehot_mask = packed_tensor\n x_skip = x\n x = self.spade_res_block_1((x, onehot_mask))\n x = self.spade_res_block_2((x, onehot_mask))\n if self.learned_skip:\n x_skip = self.learned_skip_path((x_skip, onehot_mask))\n x = x + x_skip\n return x\n\n\nclass GaussianSampler(nn.Module):\n def __init__(\n self,\n batch_size: int,\n latent_dim: int,\n device: torch.device,\n ) -> None:\n super(GaussianSampler, self).__init__()\n self.batch_size = batch_size\n self.latent_dim = latent_dim\n self.device = device\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n mean, variance = x\n epsilon = torch.normal(mean=0.0, std=1.0, size=(self.batch_size, self.latent_dim), device=self.device)\n noise_input = mean + torch.exp(0.5 * variance) * epsilon\n return noise_input\n\n\nclass Generator(nn.Module):\n def __init__(\n self,\n latent_dim: int,\n num_classes: int,\n device: torch.device,\n ) -> None:\n super(Generator, self).__init__()\n\n def _get_res_block(_in_filters: int, _out_filters: int) -> nn.Sequential:\n return nn.Sequential(\n SPADEResBlock(\n in_filters=_in_filters, out_filters=_out_filters, num_classes=num_classes\n ).to(device=device),\n nn.Upsample(scale_factor=(2, 2)),\n )\n\n self.initial_shape = 1024\n\n filter_list = [self.initial_shape, 1024, 512, 256, 128, 128, 64]\n self.filter_list_len = len(filter_list)\n\n self.generator_middle_layers = list()\n for i in range(self.filter_list_len - 1):\n self.generator_middle_layers.append(\n _get_res_block(_in_filters=filter_list[i], _out_filters=filter_list[i+1])\n )\n\n self.generator_input_layers = nn.Sequential(\n nn.Linear(in_features=latent_dim, out_features=128 * 128),\n )\n\n # Change conv layer stride for custom image size.\n self.generator_output_layers = nn.Sequential(\n nn.LeakyReLU(negative_slope=0.2),\n nn.Conv2d(in_channels=filter_list[-1], out_channels=3, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n nn.Tanh(),\n )\n\n def forward(self, latent_vector: torch.Tensor, onehot_mask: torch.Tensor) -> torch.Tensor:\n x = self.generator_input_layers(latent_vector)\n x = x.view(-1, self.initial_shape, 4, 4)\n for mid_layer in self.generator_middle_layers:\n x = mid_layer((x, onehot_mask))\n x = self.generator_output_layers(x)\n return x\n\n\nclass VggLoss(nn.Module):\n \"\"\"Use vgg intermediate layers to calculate perceptual loss.\n \"\"\"\n\n def __init__(\n self,\n device: torch.device,\n debug: bool = False,\n ) -> None:\n super(VggLoss, self).__init__()\n model = models.vgg19(pretrained=True).to(device=device)\n model.eval()\n for param in model.parameters():\n param.requires_grad = False\n\n if debug:\n print(model.features)\n train_nodes, eval_nodes = get_graph_node_names(model)\n print('train_nodes')\n print(train_nodes)\n print('eval_nodes')\n print(eval_nodes)\n\n return_nodes = {\n 'features.1': 'out_0',\n 'features.6': 'out_1',\n 'features.11': 'out_2',\n 'features.20': 'out_3',\n 'features.29': 'out_4',\n }\n self.feature_count = len(return_nodes)\n\n self.feature_extractor = create_feature_extractor(\n model,\n return_nodes=return_nodes\n )\n\n self.layer_weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> float:\n x_out = self.feature_extractor(x)\n y_out = self.feature_extractor(y)\n loss = 0.0\n for i in range(self.feature_count):\n loss += self.layer_weights[i] * functional.l1_loss(x_out[f'out_{i}'], y_out[f'out_{i}'])\n return loss\n\n\nclass GauganDataset(Dataset):\n \"\"\"Real images should be jpg and Segmentation image should be in png format.\n \"\"\"\n\n def __init__(\n self,\n root_dir: str,\n image_size: int,\n image_channels: int,\n num_classes: int,\n ) -> None:\n super(GauganDataset, self).__init__()\n\n self.num_classes = num_classes + 1\n\n self.root_dir = root_dir\n self.image_labels_files_list = list()\n for root, dirs, files in os.walk(root_dir):\n for names in files:\n if names.endswith('.jpg'):\n base_name = names.split('.')[0]\n self.image_labels_files_list.append(\n (\n os.path.join(root, f'{base_name}.jpg'),\n os.path.join(root, f'{base_name}.png'),\n )\n )\n\n self.image_files_list_len = len(self.image_labels_files_list)\n\n self.img_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.5 for _ in range(image_channels)],\n std=[0.5 for _ in range(image_channels)],\n )\n ])\n\n self.segmentation_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.5 for _ in range(image_channels)],\n std=[0.5 for _ in range(image_channels)],\n )\n ])\n\n self.segmentation_label_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.PILToTensor(),\n ])\n\n def __len__(self) -> int:\n return self.image_files_list_len\n\n def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n image_path, segmentation_path = self.image_labels_files_list[idx]\n\n image = Image.open(image_path)\n image = image.convert('RGB')\n image = self.img_transform(image)\n\n segmentation_image_original = Image.open(segmentation_path)\n\n segmentation_image = segmentation_image_original.convert('RGB')\n segmentation_image = self.segmentation_transform(segmentation_image)\n\n segmentation_label = segmentation_image_original.convert('P')\n segmentation_label = 
self.segmentation_label_transform(segmentation_label)\n segmentation_label = functional.one_hot(segmentation_label.long(), num_classes=self.num_classes)\n segmentation_label = torch.permute(segmentation_label.squeeze(), (2, 0, 1))\n\n return image, segmentation_image, segmentation_label\n\n\ndef feature_matching_loss(real_preds: torch.Tensor, fake_preds: torch.Tensor) -> float:\n pred_count_weight = 1 / len(real_preds)\n _feature_matching_loss = 0.0\n for real_features, fake_features in zip(real_preds, fake_preds):\n _feature_matching_loss += functional.l1_loss(real_features, fake_features) * pred_count_weight\n return _feature_matching_loss\n\n\nclass Trainer:\n def __init__(\n self,\n num_classes: int,\n root_dir='',\n device: str = None,\n checkpoint_path: str = None,\n save_checkpoint_every: int = 20,\n num_workers: int = 0,\n batch_size: int = 3,\n image_size: int = 256,\n image_channels: int = 3,\n num_epochs: int = 10000,\n latent_dim: int = 256,\n gen_learning_rate: float = 0.0001,\n disc_learning_rate: float = 0.0004,\n disc_iterations: int = 1,\n debug: bool = False,\n ) -> None:\n\n torch.autograd.set_detect_anomaly(False)\n torch.autograd.profiler.emit_nvtx(enabled=False)\n torch.autograd.profiler.profile(enabled=False)\n cudnn.benchmark = True\n\n if debug:\n torch.autograd.set_detect_anomaly(True)\n torch.autograd.profiler.emit_nvtx(enabled=True)\n torch.autograd.profiler.profile(enabled=True)\n cudnn.benchmark = False\n\n self.device = torch.device(device) if device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.num_epochs = num_epochs\n self.batch_size = batch_size\n self.save_every = save_checkpoint_every\n self.disc_iterations = disc_iterations\n\n gan_dataset = GauganDataset(\n root_dir=root_dir,\n image_size=image_size,\n image_channels=image_channels,\n num_classes=num_classes,\n )\n self.train_loader = DataLoader(\n gan_dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n drop_last=True,\n )\n\n self.vgg_model = VggLoss(device=self.device)\n\n self.image_encoder = ImageEncoder(img_size=image_size, latent_dim=latent_dim)\n self.noise_sampler = GaussianSampler(batch_size=batch_size, latent_dim=latent_dim, device=self.device)\n self.generator = Generator(latent_dim=latent_dim, num_classes=num_classes + 1, device=self.device)\n self.discriminator = Discriminator(num_classes=num_classes + 1, device=self.device)\n\n self.image_encoder.to(device=self.device)\n self.noise_sampler.to(device=self.device)\n self.generator.to(device=self.device)\n self.discriminator.to(device=self.device)\n\n def _initialize_weights(model, mean=0.0, std=0.02):\n for m in model.modules():\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n # nn.init.normal_(m.weight.data, mean=mean, std=std)\n nn.init.xavier_normal_(m.weight.data)\n # nn.init.kaiming_normal_(m.weight.data)\n\n _initialize_weights(self.image_encoder)\n _initialize_weights(self.generator)\n _initialize_weights(self.discriminator)\n\n encoder_generator_parameters = list(self.generator.parameters()) + list(self.image_encoder.parameters())\n self.gen_optimizer = optim.Adam(\n params=encoder_generator_parameters, lr=gen_learning_rate, betas=(0.0, 0.999)\n )\n\n self.disc_optimizer = optim.Adam(\n params=self.discriminator.parameters(), lr=disc_learning_rate, betas=(0.0, 0.999)\n )\n\n self.fixed_noise = torch.randn((batch_size, latent_dim), device=self.device)\n\n current_datetime = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n self.writer_real = 
SummaryWriter(f'logs/real/{current_datetime}/')\n self.writer_fake = SummaryWriter(f'logs/fake/{current_datetime}/')\n self.step = 0\n\n self.start_epoch = 0\n pathlib.Path('checkpoints').mkdir(parents=True, exist_ok=True)\n if checkpoint_path is not None:\n self.load_checkpoint(checkpoint_path=checkpoint_path)\n\n def load_checkpoint(self, checkpoint_path: str) -> None:\n checkpoint = torch.load(checkpoint_path)\n self.generator.load_state_dict(checkpoint['generator_state_dict'])\n self.discriminator.load_state_dict(checkpoint['discriminator_state_dict'])\n self.gen_optimizer.load_state_dict(checkpoint['generator_optimizer_state_dict'])\n self.disc_optimizer.load_state_dict(checkpoint['discriminator_optimizer_state_dict'])\n self.start_epoch = checkpoint['epoch']\n\n def train(self) -> None:\n for epoch in range(self.start_epoch, self.num_epochs):\n with tqdm(self.train_loader) as tqdm_train_loader:\n for batch_idx, (real_image, segmentation_image, segmentation_label) in enumerate(tqdm_train_loader):\n real_image = real_image.to(self.device)\n segmentation_image = segmentation_image.to(self.device)\n segmentation_label = segmentation_label.to(self.device)\n\n # Train discriminator.\n for i in range(self.disc_iterations):\n mean, var = self.image_encoder(real_image)\n latent_vector = self.noise_sampler((mean, var))\n generated_image = self.generator(latent_vector, segmentation_label)\n\n fake_pred, fake_pred_multiscale_features = self.discriminator(\n segmentation_image, generated_image\n )\n real_pred, real_pred_multiscale_features = self.discriminator(\n segmentation_image, real_image\n )\n\n fake_pred = fake_pred.reshape(-1)\n real_pred = real_pred.reshape(-1)\n\n loss_real = -torch.mean(\n torch.min(real_pred - 1, torch.zeros_like(real_pred, requires_grad=False))\n )\n loss_fake = -torch.mean(\n torch.min(-fake_pred.detach() - 1, torch.zeros_like(fake_pred, requires_grad=False))\n )\n discriminator_loss = (loss_fake + loss_real) * 0.5\n\n self.disc_optimizer.zero_grad(set_to_none=True)\n discriminator_loss.backward()\n self.disc_optimizer.step()\n\n # Train generator\n fake_pred, fake_pred_multiscale_features = self.discriminator(segmentation_image, generated_image)\n real_pred, real_pred_multiscale_features = self.discriminator(segmentation_image, real_image)\n fake_pred = fake_pred.reshape(-1)\n\n loss_gen = -torch.mean(fake_pred)\n loss_kldiv = -0.5 * torch.sum(1 + var - mean.pow(2) - var.exp())\n loss_vgg = self.vgg_model(real_image, generated_image)\n loss_features = feature_matching_loss(\n real_pred_multiscale_features,\n fake_pred_multiscale_features,\n )\n\n # generator_loss = loss_gen + 0.1 * loss_kldiv + 0.1 * loss_vgg + 10 * loss_features\n generator_loss = loss_gen + 0.1 * loss_kldiv + 10 * loss_vgg + 10 * loss_features\n\n self.gen_optimizer.zero_grad(set_to_none=True)\n generator_loss.backward()\n self.gen_optimizer.step()\n\n tqdm_train_loader.set_description(\n f'LOSS, disc: {discriminator_loss:.2f}, '\n f'generator: {generator_loss:.2f}, '\n f'gan: {loss_gen:.2f}, '\n f'kl: {loss_kldiv:.2f}, '\n f'vgg: {loss_vgg:.2f}, '\n f'features: {loss_features:.2f}'\n )\n\n if batch_idx % self.save_every == self.save_every - 1:\n self.generator.eval()\n self.discriminator.eval()\n\n with torch.no_grad():\n fake = self.generator(self.fixed_noise, segmentation_label)\n img_grid_real = torchvision.utils.make_grid(real_image[:self.batch_size], normalize=True)\n img_grid_fake = torchvision.utils.make_grid(fake[:self.batch_size], normalize=True)\n self.writer_real.add_image(\"Real\", 
img_grid_real, global_step=self.step)\n self.writer_fake.add_image(\"Fake\", img_grid_fake, global_step=self.step)\n self.step += 1\n\n torch.save({\n 'epoch': epoch,\n 'generator_state_dict': self.generator.state_dict(),\n 'discriminator_state_dict': self.discriminator.state_dict(),\n 'generator_optimizer_state_dict': self.gen_optimizer.state_dict(),\n 'discriminator_optimizer_state_dict': self.disc_optimizer.state_dict(),\n }, f'checkpoints/checkpoint_{epoch}.pt')\n\n self.discriminator.train()\n self.generator.train()\n\n\nif __name__ == '__main__':\n trainer = Trainer(\n root_dir=r'C:\\staging\\gaugan_data\\base',\n num_classes=12,\n # checkpoint_path='checkpoints/checkpoint_19.pt'\n )\n trainer.train()\n" ]
[ [ "torch.nn.utils.spectral_norm.spectral_norm", "torch.utils.data.DataLoader", "torch.no_grad", "torch.nn.Upsample", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.utils.tensorboard.SummaryWriter", "torch.autograd.profiler.profile", "torch.cat", "torch.nn.Dropout", "torch.nn.init.xavier_normal_", "torch.randn", "torch.optim.Adam", "torch.autograd.profiler.emit_nvtx", "torch.device", "torch.mean", "torch.load", "torch.normal", "torch.nn.Linear", "torch.nn.Flatten", "torch.nn.functional.l1_loss", "torch.zeros_like", "torch.nn.Tanh", "torch.exp", "torch.autograd.set_detect_anomaly", "torch.nn.ReLU", "torch.nn.LeakyReLU" ] ]
vimalromeo/pandas
[ "9444dce96954c546333d5aecc92a06c3bfd19aa5" ]
[ "pandas/tests/io/parser/dialect.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nTests that dialects are properly handled during parsing\nfor all of the parsers defined in parsers.py\n\"\"\"\n\nimport csv\n\nfrom pandas import DataFrame\nfrom pandas.compat import StringIO\nfrom pandas.errors import ParserWarning\n\nimport pandas.util.testing as tm\n\n\nclass DialectTests(object):\n\n def test_dialect(self):\n data = \"\"\"\\\nlabel1,label2,label3\nindex1,\"a,c,e\nindex2,b,d,f\n\"\"\"\n\n dia = csv.excel()\n dia.quoting = csv.QUOTE_NONE\n with tm.assert_produces_warning(ParserWarning):\n df = self.read_csv(StringIO(data), dialect=dia)\n\n data = '''\\\nlabel1,label2,label3\nindex1,a,c,e\nindex2,b,d,f\n'''\n exp = self.read_csv(StringIO(data))\n exp.replace('a', '\"a', inplace=True)\n tm.assert_frame_equal(df, exp)\n\n def test_dialect_str(self):\n data = \"\"\"\\\nfruit:vegetable\napple:brocolli\npear:tomato\n\"\"\"\n exp = DataFrame({\n 'fruit': ['apple', 'pear'],\n 'vegetable': ['brocolli', 'tomato']\n })\n csv.register_dialect('mydialect', delimiter=':')\n with tm.assert_produces_warning(ParserWarning):\n df = self.read_csv(StringIO(data), dialect='mydialect')\n\n tm.assert_frame_equal(df, exp)\n csv.unregister_dialect('mydialect')\n\n def test_invalid_dialect(self):\n class InvalidDialect(object):\n pass\n\n data = 'a\\n1'\n msg = 'Invalid dialect'\n\n with tm.assert_raises_regex(ValueError, msg):\n self.read_csv(StringIO(data), dialect=InvalidDialect)\n\n def test_dialect_conflict(self):\n data = 'a,b\\n1,2'\n dialect = 'excel'\n exp = DataFrame({'a': [1], 'b': [2]})\n\n with tm.assert_produces_warning(None):\n df = self.read_csv(StringIO(data), delimiter=',', dialect=dialect)\n tm.assert_frame_equal(df, exp)\n\n with tm.assert_produces_warning(ParserWarning):\n df = self.read_csv(StringIO(data), delimiter='.', dialect=dialect)\n tm.assert_frame_equal(df, exp)\n" ]
[ [ "pandas.util.testing.assert_raises_regex", "pandas.DataFrame", "pandas.compat.StringIO", "pandas.util.testing.assert_produces_warning", "pandas.util.testing.assert_frame_equal" ] ]
luijkr/pandas
[ "bc29dfb5bf4c82f8d616857c2316fc8f17d8f2a5" ]
[ "pandas/tests/window/test_rolling.py" ]
[ "from datetime import datetime, timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import UnsupportedFunctionCall\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n MultiIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n period_range,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nfrom pandas.api.indexers import BaseIndexer\nfrom pandas.core.window import Rolling\n\n\ndef test_doc_string():\n\n df = DataFrame({\"B\": [0, 1, 2, np.nan, 4]})\n df\n df.rolling(2).sum()\n df.rolling(2, min_periods=1).sum()\n\n\ndef test_constructor(frame_or_series):\n # GH 12669\n\n c = frame_or_series(range(5)).rolling\n\n # valid\n c(0)\n c(window=2)\n c(window=2, min_periods=1)\n c(window=2, min_periods=1, center=True)\n c(window=2, min_periods=1, center=False)\n\n # GH 13383\n\n msg = \"window must be an integer 0 or greater\"\n\n with pytest.raises(ValueError, match=msg):\n c(-1)\n\n\[email protected](\"w\", [2.0, \"foo\", np.array([2])])\ndef test_invalid_constructor(frame_or_series, w):\n # not valid\n\n c = frame_or_series(range(5)).rolling\n\n msg = (\n \"window must be an integer|\"\n \"passed window foo is not compatible with a datetimelike index\"\n )\n with pytest.raises(ValueError, match=msg):\n c(window=w)\n\n msg = \"min_periods must be an integer\"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=w)\n\n msg = \"center must be a boolean\"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=1, center=w)\n\n\[email protected](\"window\", [timedelta(days=3), Timedelta(days=3)])\ndef test_constructor_with_timedelta_window(window):\n # GH 15440\n n = 10\n df = DataFrame(\n {\"value\": np.arange(n)}, index=date_range(\"2015-12-24\", periods=n, freq=\"D\")\n )\n expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))\n\n result = df.rolling(window=window).sum()\n expected = DataFrame(\n {\"value\": expected_data},\n index=date_range(\"2015-12-24\", periods=n, freq=\"D\"),\n )\n tm.assert_frame_equal(result, expected)\n expected = df.rolling(\"3D\").sum()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"window\", [timedelta(days=3), Timedelta(days=3), \"3D\"])\ndef test_constructor_timedelta_window_and_minperiods(window, raw):\n # GH 15305\n n = 10\n df = DataFrame(\n {\"value\": np.arange(n)}, index=date_range(\"2017-08-08\", periods=n, freq=\"D\")\n )\n expected = DataFrame(\n {\"value\": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))},\n index=date_range(\"2017-08-08\", periods=n, freq=\"D\"),\n )\n result_roll_sum = df.rolling(window=window, min_periods=2).sum()\n result_roll_generic = df.rolling(window=window, min_periods=2).apply(sum, raw=raw)\n tm.assert_frame_equal(result_roll_sum, expected)\n tm.assert_frame_equal(result_roll_generic, expected)\n\n\[email protected](\"method\", [\"std\", \"mean\", \"sum\", \"max\", \"min\", \"var\"])\ndef test_numpy_compat(method):\n # see gh-12811\n r = Rolling(Series([2, 4, 6]), window=2)\n\n msg = \"numpy operations are not valid with window objects\"\n\n with pytest.raises(UnsupportedFunctionCall, match=msg):\n getattr(r, method)(1, 2, 3)\n with pytest.raises(UnsupportedFunctionCall, match=msg):\n getattr(r, method)(dtype=np.float64)\n\n\ndef test_closed_fixed(closed, arithmetic_win_operators):\n # GH 34315\n func_name = arithmetic_win_operators\n df_fixed = DataFrame({\"A\": [0, 1, 2, 3, 4]})\n df_time = DataFrame({\"A\": [0, 1, 2, 3, 4]}, index=date_range(\"2020\", periods=5))\n\n result = getattr(df_fixed.rolling(2, closed=closed, 
min_periods=1), func_name)()\n expected = getattr(df_time.rolling(\"2D\", closed=closed), func_name)().reset_index(\n drop=True\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_closed_fixed_binary_col():\n # GH 34315\n data = [0, 1, 1, 0, 0, 1, 0, 1]\n df = DataFrame(\n {\"binary_col\": data},\n index=date_range(start=\"2020-01-01\", freq=\"min\", periods=len(data)),\n )\n\n rolling = df.rolling(window=len(df), closed=\"left\", min_periods=1)\n result = rolling.mean()\n expected = DataFrame(\n [np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571],\n columns=[\"binary_col\"],\n index=date_range(start=\"2020-01-01\", freq=\"min\", periods=len(data)),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"closed\", [\"neither\", \"left\"])\ndef test_closed_empty(closed, arithmetic_win_operators):\n # GH 26005\n func_name = arithmetic_win_operators\n ser = Series(data=np.arange(5), index=date_range(\"2000\", periods=5, freq=\"2D\"))\n roll = ser.rolling(\"1D\", closed=closed)\n\n result = getattr(roll, func_name)()\n expected = Series([np.nan] * 5, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_closed_one_entry(func):\n # GH24718\n ser = Series(data=[2], index=date_range(\"2000\", periods=1))\n result = getattr(ser.rolling(\"10D\", closed=\"left\"), func)()\n tm.assert_series_equal(result, Series([np.nan], index=ser.index))\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_closed_one_entry_groupby(func):\n # GH24718\n ser = DataFrame(\n data={\"A\": [1, 1, 2], \"B\": [3, 2, 1]}, index=date_range(\"2000\", periods=3)\n )\n result = getattr(\n ser.groupby(\"A\", sort=False)[\"B\"].rolling(\"10D\", closed=\"left\"), func\n )()\n exp_idx = MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], names=(\"A\", None))\n expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"input_dtype\", [\"int\", \"float\"])\[email protected](\n \"func,closed,expected\",\n [\n (\"min\", \"right\", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"min\", \"both\", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"min\", \"neither\", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"min\", \"left\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"max\", \"right\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n (\"max\", \"both\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n (\"max\", \"neither\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n (\"max\", \"left\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n ],\n)\ndef test_closed_min_max_datetime(input_dtype, func, closed, expected):\n # see gh-21704\n ser = Series(\n data=np.arange(10).astype(input_dtype), index=date_range(\"2000\", periods=10)\n )\n\n result = getattr(ser.rolling(\"3D\", closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_closed_uneven():\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n\n # uneven\n ser = ser.drop(index=ser.index[[1, 5]])\n result = ser.rolling(\"3D\", closed=\"left\").min()\n expected = Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"func,closed,expected\",\n [\n (\"min\", \"right\", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n (\"min\", \"both\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),\n (\"min\", \"neither\", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n (\"min\", \"left\", [np.nan, np.nan, 
0, 0, 1, 2, 3, 4, 5, np.nan]),\n (\"max\", \"right\", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),\n (\"max\", \"both\", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),\n (\"max\", \"neither\", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),\n (\"max\", \"left\", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]),\n ],\n)\ndef test_closed_min_max_minp(func, closed, expected):\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n ser[ser.index[-3:]] = np.nan\n result = getattr(ser.rolling(\"3D\", min_periods=2, closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"closed,expected\",\n [\n (\"right\", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]),\n (\"both\", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n (\"neither\", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n (\"left\", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]),\n ],\n)\ndef test_closed_median_quantile(closed, expected):\n # GH 26005\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n roll = ser.rolling(\"3D\", closed=closed)\n expected = Series(expected, index=ser.index)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.5)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"roller\", [\"1s\", 1])\ndef tests_empty_df_rolling(roller):\n # GH 15819 Verifies that datetime and integer rolling windows can be\n # applied to empty DataFrames\n expected = DataFrame()\n result = DataFrame().rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n # Verifies that datetime and integer rolling windows can be applied to\n # empty DataFrames with datetime index\n expected = DataFrame(index=DatetimeIndex([]))\n result = DataFrame(index=DatetimeIndex([])).rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_window_median_quantile():\n # GH 26005\n expected = Series([np.nan, np.nan, np.nan])\n roll = Series(np.arange(3)).rolling(0)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero():\n # https://github.com/pandas-dev/pandas/pull/18921\n # minp=0\n x = Series([np.nan])\n result = x.rolling(1, min_periods=0).sum()\n expected = Series([0.0])\n tm.assert_series_equal(result, expected)\n\n # minp=1\n result = x.rolling(1, min_periods=1).sum()\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero_variable():\n # https://github.com/pandas-dev/pandas/pull/18921\n x = Series(\n [np.nan] * 4,\n index=DatetimeIndex([\"2017-01-01\", \"2017-01-04\", \"2017-01-06\", \"2017-01-07\"]),\n )\n result = x.rolling(Timedelta(\"2d\"), min_periods=0).sum()\n expected = Series(0.0, index=x.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_multi_index_names():\n\n # GH 16789, 16825\n cols = MultiIndex.from_product([[\"A\", \"B\"], [\"C\", \"D\", \"E\"]], names=[\"1\", \"2\"])\n df = DataFrame(np.ones((10, 6)), columns=cols)\n result = df.rolling(3).cov()\n\n tm.assert_index_equal(result.columns, df.columns)\n assert result.index.names == [None, \"1\", \"2\"]\n\n\ndef test_rolling_axis_sum(axis_frame):\n # see gh-23372.\n df = DataFrame(np.ones((10, 20)))\n axis = df._get_axis_number(axis_frame)\n\n if axis == 0:\n expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)})\n else:\n # axis == 1\n expected = DataFrame([[np.nan] 
* 2 + [3.0] * 18] * 10)\n\n result = df.rolling(3, axis=axis_frame).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_count(axis_frame):\n # see gh-26055\n df = DataFrame({\"x\": range(3), \"y\": range(3)})\n\n axis = df._get_axis_number(axis_frame)\n\n if axis in [0, \"index\"]:\n expected = DataFrame({\"x\": [1.0, 2.0, 2.0], \"y\": [1.0, 2.0, 2.0]})\n else:\n expected = DataFrame({\"x\": [1.0, 1.0, 1.0], \"y\": [2.0, 2.0, 2.0]})\n\n result = df.rolling(2, axis=axis_frame, min_periods=0).count()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_readonly_array():\n # GH-27766\n arr = np.array([1, 3, np.nan, 3, 5])\n arr.setflags(write=False)\n result = Series(arr).rolling(2).mean()\n expected = Series([np.nan, 2, np.nan, np.nan, 4])\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_datetime(axis_frame, tz_naive_fixture):\n # GH-28192\n tz = tz_naive_fixture\n df = DataFrame(\n {i: [1] * 2 for i in date_range(\"2019-8-01\", \"2019-08-03\", freq=\"D\", tz=tz)}\n )\n if axis_frame in [0, \"index\"]:\n result = df.T.rolling(\"2D\", axis=axis_frame).sum().T\n else:\n result = df.rolling(\"2D\", axis=axis_frame).sum()\n expected = DataFrame(\n {\n **{\n i: [1.0] * 2\n for i in date_range(\"2019-8-01\", periods=1, freq=\"D\", tz=tz)\n },\n **{\n i: [2.0] * 2\n for i in date_range(\"2019-8-02\", \"2019-8-03\", freq=\"D\", tz=tz)\n },\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_window_as_string():\n # see gh-22590\n date_today = datetime.now()\n days = date_range(date_today, date_today + timedelta(365), freq=\"D\")\n\n npr = np.random.RandomState(seed=421)\n\n data = npr.randint(1, high=100, size=len(days))\n df = DataFrame({\"DateCol\": days, \"metric\": data})\n\n df.set_index(\"DateCol\", inplace=True)\n result = df.rolling(window=\"21D\", min_periods=2, closed=\"left\")[\"metric\"].agg(\"max\")\n\n expData = (\n [np.nan] * 2\n + [88.0] * 16\n + [97.0] * 9\n + [98.0]\n + [99.0] * 21\n + [95.0] * 16\n + [93.0] * 5\n + [89.0] * 5\n + [96.0] * 21\n + [94.0] * 14\n + [90.0] * 13\n + [88.0] * 2\n + [90.0] * 9\n + [96.0] * 21\n + [95.0] * 6\n + [91.0]\n + [87.0] * 6\n + [92.0] * 21\n + [83.0] * 2\n + [86.0] * 10\n + [87.0] * 5\n + [98.0] * 21\n + [97.0] * 14\n + [93.0] * 7\n + [87.0] * 4\n + [86.0] * 4\n + [95.0] * 21\n + [85.0] * 14\n + [83.0] * 2\n + [76.0] * 5\n + [81.0] * 2\n + [98.0] * 21\n + [95.0] * 14\n + [91.0] * 7\n + [86.0]\n + [93.0] * 3\n + [95.0] * 20\n )\n\n expected = Series(\n expData, index=days.rename(\"DateCol\")._with_freq(None), name=\"metric\"\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_min_periods1():\n # GH#6795\n df = DataFrame([0, 1, 2, 1, 0], columns=[\"a\"])\n result = df[\"a\"].rolling(3, center=True, min_periods=1).max()\n expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name=\"a\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_count_with_min_periods(frame_or_series):\n # GH 26996\n result = frame_or_series(range(5)).rolling(3, min_periods=3).count()\n expected = frame_or_series([np.nan, np.nan, 3.0, 3.0, 3.0])\n tm.assert_equal(result, expected)\n\n\ndef test_rolling_count_default_min_periods_with_null_values(frame_or_series):\n # GH 26996\n values = [1, 2, 3, np.nan, 4, 5, 6]\n expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]\n\n # GH 31302\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = frame_or_series(values).rolling(3).count()\n expected = frame_or_series(expected_counts)\n tm.assert_equal(result, expected)\n\n\[email 
protected](\n \"df,expected,window,min_periods\",\n [\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n None,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n 2,\n 1,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n 2,\n 2,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n 1,\n 1,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n 1,\n 0,\n ),\n (DataFrame({\"A\": [1], \"B\": [4]}), [], 2, None),\n (DataFrame({\"A\": [1], \"B\": [4]}), [], 2, 1),\n (DataFrame(), [({}, [])], 2, None),\n (\n DataFrame({\"A\": [1, np.nan, 3], \"B\": [np.nan, 5, 6]}),\n [\n ({\"A\": [1.0], \"B\": [np.nan]}, [0]),\n ({\"A\": [1, np.nan], \"B\": [np.nan, 5]}, [0, 1]),\n ({\"A\": [1, np.nan, 3], \"B\": [np.nan, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n 2,\n ),\n ],\n)\ndef test_iter_rolling_dataframe(df, expected, window, min_periods):\n # GH 11704\n expected = [DataFrame(values, index=index) for (values, index) in expected]\n\n for (expected, actual) in zip(\n expected, df.rolling(window, min_periods=min_periods)\n ):\n tm.assert_frame_equal(actual, expected)\n\n\[email protected](\n \"expected,window\",\n [\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n \"2D\",\n ),\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}, [0, 1, 2]),\n ],\n \"3D\",\n ),\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n \"1D\",\n ),\n ],\n)\ndef test_iter_rolling_on_dataframe(expected, window):\n # GH 11704\n df = DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5],\n \"B\": [4, 5, 6, 7, 8],\n \"C\": date_range(start=\"2016-01-01\", periods=5, freq=\"D\"),\n }\n )\n\n expected = [DataFrame(values, index=index) for (values, index) in expected]\n for (expected, actual) in zip(expected, df.rolling(window, on=\"C\")):\n tm.assert_frame_equal(actual, expected)\n\n\[email protected](\n \"ser,expected,window, min_periods\",\n [\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n None,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n 1,\n ),\n (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])], 2, 1),\n (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])], 2, 2),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 0),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 1),\n (Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2, 0),\n (Series([], dtype=\"int64\"), [], 2, 1),\n ],\n)\ndef test_iter_rolling_series(ser, expected, window, min_periods):\n # GH 11704\n expected = [Series(values, index=index) for (values, index) in expected]\n\n for (expected, actual) in zip(\n expected, ser.rolling(window, 
min_periods=min_periods)\n ):\n tm.assert_series_equal(actual, expected)\n\n\[email protected](\n \"expected,expected_index,window\",\n [\n (\n [[0], [1], [2], [3], [4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-02\", periods=1, freq=\"D\"),\n date_range(\"2020-01-03\", periods=1, freq=\"D\"),\n date_range(\"2020-01-04\", periods=1, freq=\"D\"),\n date_range(\"2020-01-05\", periods=1, freq=\"D\"),\n ],\n \"1D\",\n ),\n (\n [[0], [0, 1], [1, 2], [2, 3], [3, 4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-01\", periods=2, freq=\"D\"),\n date_range(\"2020-01-02\", periods=2, freq=\"D\"),\n date_range(\"2020-01-03\", periods=2, freq=\"D\"),\n date_range(\"2020-01-04\", periods=2, freq=\"D\"),\n ],\n \"2D\",\n ),\n (\n [[0], [0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-01\", periods=2, freq=\"D\"),\n date_range(\"2020-01-01\", periods=3, freq=\"D\"),\n date_range(\"2020-01-02\", periods=3, freq=\"D\"),\n date_range(\"2020-01-03\", periods=3, freq=\"D\"),\n ],\n \"3D\",\n ),\n ],\n)\ndef test_iter_rolling_datetime(expected, expected_index, window):\n # GH 11704\n ser = Series(range(5), index=date_range(start=\"2020-01-01\", periods=5, freq=\"D\"))\n\n expected = [\n Series(values, index=idx) for (values, idx) in zip(expected, expected_index)\n ]\n\n for (expected, actual) in zip(expected, ser.rolling(window)):\n tm.assert_series_equal(actual, expected)\n\n\[email protected](\n \"grouping,_index\",\n [\n (\n {\"level\": 0},\n MultiIndex.from_tuples(\n [(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None]\n ),\n ),\n (\n {\"by\": \"X\"},\n MultiIndex.from_tuples(\n [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=[\"X\", None]\n ),\n ),\n ],\n)\ndef test_rolling_positional_argument(grouping, _index, raw):\n # GH 34605\n\n def scaled_sum(*args):\n if len(args) < 2:\n raise ValueError(\"The function needs two arguments\")\n array, scale = args\n return array.sum() / scale\n\n df = DataFrame(data={\"X\": range(5)}, index=[0, 0, 1, 1, 1])\n\n expected = DataFrame(data={\"X\": [0.0, 0.5, 1.0, 1.5, 2.0]}, index=_index)\n result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"add\", [0.0, 2.0])\ndef test_rolling_numerical_accuracy_kahan_mean(add):\n # GH: 36031 implementing kahan summation\n df = DataFrame(\n {\"A\": [3002399751580331.0 + add, -0.0, -0.0]},\n index=[\n Timestamp(\"19700101 09:00:00\"),\n Timestamp(\"19700101 09:00:03\"),\n Timestamp(\"19700101 09:00:06\"),\n ],\n )\n result = (\n df.resample(\"1s\").ffill().rolling(\"3s\", closed=\"left\", min_periods=3).mean()\n )\n dates = date_range(\"19700101 09:00:00\", periods=7, freq=\"S\")\n expected = DataFrame(\n {\n \"A\": [\n np.nan,\n np.nan,\n np.nan,\n 3002399751580330.5,\n 2001599834386887.25,\n 1000799917193443.625,\n 0.0,\n ]\n },\n index=dates,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_kahan_sum():\n # GH: 13254\n df = DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=[\"x\"])\n result = df[\"x\"].rolling(3).sum()\n expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name=\"x\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_jump():\n # GH: 32761\n index = date_range(start=\"2020-01-01\", end=\"2020-01-02\", freq=\"60s\").append(\n DatetimeIndex([\"2020-01-03\"])\n )\n data = 
np.random.rand(len(index))\n\n df = DataFrame({\"data\": data}, index=index)\n result = df.rolling(\"60s\").mean()\n tm.assert_frame_equal(result, df[[\"data\"]])\n\n\ndef test_rolling_numerical_accuracy_small_values():\n # GH: 10319\n s = Series(\n data=[0.00012456, 0.0003, -0.0, -0.0],\n index=date_range(\"1999-02-03\", \"1999-02-06\"),\n )\n result = s.rolling(1).mean()\n tm.assert_series_equal(result, s)\n\n\ndef test_rolling_numerical_too_large_numbers():\n # GH: 11645\n dates = date_range(\"2015-01-01\", periods=10, freq=\"D\")\n ds = Series(data=range(10), index=dates, dtype=np.float64)\n ds[2] = -9e33\n result = ds.rolling(5).mean()\n expected = Series(\n [np.nan, np.nan, np.nan, np.nan, -1.8e33, -1.8e33, -1.8e33, 5.0, 6.0, 7.0],\n index=dates,\n )\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"func\", \"value\"),\n [(\"sum\", 2.0), (\"max\", 1.0), (\"min\", 1.0), (\"mean\", 1.0), (\"median\", 1.0)],\n)\ndef test_rolling_mixed_dtypes_axis_1(func, value):\n # GH: 20649\n df = DataFrame(1, index=[1, 2], columns=[\"a\", \"b\", \"c\"])\n df[\"c\"] = 1.0\n result = getattr(df.rolling(window=2, min_periods=1, axis=1), func)()\n expected = DataFrame(\n {\"a\": [1.0, 1.0], \"b\": [value, value], \"c\": [value, value]}, index=[1, 2]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_one_with_nan():\n # GH: 35596\n df = DataFrame(\n [\n [0, 1, 2, 4, np.nan, np.nan, np.nan],\n [0, 1, 2, np.nan, np.nan, np.nan, np.nan],\n [0, 2, 2, np.nan, 2, np.nan, 1],\n ]\n )\n result = df.rolling(window=7, min_periods=1, axis=\"columns\").sum()\n expected = DataFrame(\n [\n [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0],\n [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0],\n [0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0],\n ]\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"value\",\n [\"test\", to_datetime(\"2019-12-31\"), to_timedelta(\"1 days 06:05:01.00003\")],\n)\ndef test_rolling_axis_1_non_numeric_dtypes(value):\n # GH: 20649\n df = DataFrame({\"a\": [1, 2]})\n df[\"b\"] = value\n result = df.rolling(window=2, min_periods=1, axis=1).sum()\n expected = DataFrame({\"a\": [1.0, 2.0]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_on_df_transposed():\n # GH: 32724\n df = DataFrame({\"A\": [1, None], \"B\": [4, 5], \"C\": [7, 8]})\n expected = DataFrame({\"A\": [1.0, np.nan], \"B\": [5.0, 5.0], \"C\": [11.0, 13.0]})\n result = df.rolling(min_periods=1, window=2, axis=1).sum()\n tm.assert_frame_equal(result, expected)\n\n result = df.T.rolling(min_periods=1, window=2).sum().T\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n (\"index\", \"window\"),\n [\n (\n period_range(start=\"2020-01-01 08:00\", end=\"2020-01-01 08:08\", freq=\"T\"),\n \"2T\",\n ),\n (\n period_range(start=\"2020-01-01 08:00\", end=\"2020-01-01 12:00\", freq=\"30T\"),\n \"1h\",\n ),\n ],\n)\[email protected](\n (\"func\", \"values\"),\n [\n (\"min\", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"max\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"sum\", [np.nan, 0, 1, 3, 5, 7, 9, 11, 13]),\n ],\n)\ndef test_rolling_period_index(index, window, func, values):\n # GH: 34225\n ds = Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)\n result = getattr(ds.rolling(window, closed=\"left\"), func)()\n expected = Series(values, index=index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_sem(frame_or_series):\n # GH: 26476\n obj = frame_or_series([0, 1, 2])\n result = obj.rolling(2, min_periods=1).sem()\n if isinstance(result, DataFrame):\n result = 
Series(result[0].values)\n expected = Series([np.nan] + [0.707107] * 2)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"func\", \"third_value\", \"values\"),\n [\n (\"var\", 1, [5e33, 0, 0.5, 0.5, 2, 0]),\n (\"std\", 1, [7.071068e16, 0, 0.7071068, 0.7071068, 1.414214, 0]),\n (\"var\", 2, [5e33, 0.5, 0, 0.5, 2, 0]),\n (\"std\", 2, [7.071068e16, 0.7071068, 0, 0.7071068, 1.414214, 0]),\n ],\n)\ndef test_rolling_var_numerical_issues(func, third_value, values):\n # GH: 37051\n ds = Series([99999999999999999, 1, third_value, 2, 3, 1, 1])\n result = getattr(ds.rolling(2), func)()\n expected = Series([np.nan] + values)\n tm.assert_series_equal(result, expected)\n\n\ndef test_timeoffset_as_window_parameter_for_corr():\n # GH: 28266\n exp = DataFrame(\n {\n \"B\": [\n np.nan,\n np.nan,\n 0.9999999999999998,\n -1.0,\n 1.0,\n -0.3273268353539892,\n 0.9999999999999998,\n 1.0,\n 0.9999999999999998,\n 1.0,\n ],\n \"A\": [\n np.nan,\n np.nan,\n -1.0,\n 1.0000000000000002,\n -0.3273268353539892,\n 0.9999999999999966,\n 1.0,\n 1.0000000000000002,\n 1.0,\n 1.0000000000000002,\n ],\n },\n index=MultiIndex.from_tuples(\n [\n (Timestamp(\"20130101 09:00:00\"), \"B\"),\n (Timestamp(\"20130101 09:00:00\"), \"A\"),\n (Timestamp(\"20130102 09:00:02\"), \"B\"),\n (Timestamp(\"20130102 09:00:02\"), \"A\"),\n (Timestamp(\"20130103 09:00:03\"), \"B\"),\n (Timestamp(\"20130103 09:00:03\"), \"A\"),\n (Timestamp(\"20130105 09:00:05\"), \"B\"),\n (Timestamp(\"20130105 09:00:05\"), \"A\"),\n (Timestamp(\"20130106 09:00:06\"), \"B\"),\n (Timestamp(\"20130106 09:00:06\"), \"A\"),\n ]\n ),\n )\n\n df = DataFrame(\n {\"B\": [0, 1, 2, 4, 3], \"A\": [7, 4, 6, 9, 3]},\n index=[\n Timestamp(\"20130101 09:00:00\"),\n Timestamp(\"20130102 09:00:02\"),\n Timestamp(\"20130103 09:00:03\"),\n Timestamp(\"20130105 09:00:05\"),\n Timestamp(\"20130106 09:00:06\"),\n ],\n )\n\n res = df.rolling(window=\"3d\").corr()\n\n tm.assert_frame_equal(exp, res)\n\n\[email protected](\"method\", [\"var\", \"sum\", \"mean\", \"skew\", \"kurt\", \"min\", \"max\"])\ndef test_rolling_decreasing_indices(method):\n \"\"\"\n Make sure that decreasing indices give the same results as increasing indices.\n\n GH 36933\n \"\"\"\n df = DataFrame({\"values\": np.arange(-15, 10) ** 2})\n df_reverse = DataFrame({\"values\": df[\"values\"][::-1]}, index=df.index[::-1])\n\n increasing = getattr(df.rolling(window=5), method)()\n decreasing = getattr(df_reverse.rolling(window=5), method)()\n\n assert np.abs(decreasing.values[::-1][:-4] - increasing.values[4:]).max() < 1e-12\n\n\[email protected](\n \"method,expected\",\n [\n (\n \"var\",\n [\n float(\"nan\"),\n 43.0,\n float(\"nan\"),\n 136.333333,\n 43.5,\n 94.966667,\n 182.0,\n 318.0,\n ],\n ),\n (\"mean\", [float(\"nan\"), 7.5, float(\"nan\"), 21.5, 6.0, 9.166667, 13.0, 17.5]),\n (\"sum\", [float(\"nan\"), 30.0, float(\"nan\"), 86.0, 30.0, 55.0, 91.0, 140.0]),\n (\n \"skew\",\n [\n float(\"nan\"),\n 0.709296,\n float(\"nan\"),\n 0.407073,\n 0.984656,\n 0.919184,\n 0.874674,\n 0.842418,\n ],\n ),\n (\n \"kurt\",\n [\n float(\"nan\"),\n -0.5916711736073559,\n float(\"nan\"),\n -1.0028993131317954,\n -0.06103844629409494,\n -0.254143227116194,\n -0.37362637362637585,\n -0.45439658241367054,\n ],\n ),\n ],\n)\ndef test_rolling_non_monotonic(method, expected):\n \"\"\"\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. 
Output of sum/mean has\n manually been verified.\n\n GH 36933.\n \"\"\"\n # Based on an example found in computation.rst\n use_expanding = [True, False, True, False, True, True, True, True]\n df = DataFrame({\"values\": np.arange(len(use_expanding)) ** 2})\n\n class CustomIndexer(BaseIndexer):\n def get_window_bounds(self, num_values, min_periods, center, closed):\n start = np.empty(num_values, dtype=np.int64)\n end = np.empty(num_values, dtype=np.int64)\n for i in range(num_values):\n if self.use_expanding[i]:\n start[i] = 0\n end[i] = i + 1\n else:\n start[i] = i\n end[i] = i + self.window_size\n return start, end\n\n indexer = CustomIndexer(window_size=4, use_expanding=use_expanding)\n\n result = getattr(df.rolling(indexer), method)()\n expected = DataFrame({\"values\": expected})\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n (\"index\", \"window\"),\n [([0, 1, 2, 3, 4], 2), (date_range(\"2001-01-01\", freq=\"D\", periods=5), \"2D\")],\n)\ndef test_rolling_corr_timedelta_index(index, window):\n # GH: 31286\n x = Series([1, 2, 3, 4, 5], index=index)\n y = x.copy()\n x[0:2] = 0.0\n result = x.rolling(window).corr(y)\n expected = Series([np.nan, np.nan, 1, 1, 1], index=index)\n tm.assert_almost_equal(result, expected)\n\n\ndef test_groupby_rolling_nan_included():\n # GH 35542\n data = {\"group\": [\"g1\", np.nan, \"g1\", \"g2\", np.nan], \"B\": [0, 1, 2, 3, 4]}\n df = DataFrame(data)\n result = df.groupby(\"group\", dropna=False).rolling(1, min_periods=1).mean()\n expected = DataFrame(\n {\"B\": [0.0, 2.0, 3.0, 1.0, 4.0]},\n # GH-38057 from_tuples puts the NaNs in the codes, result expects them\n # to be in the levels, at the moment\n # index=MultiIndex.from_tuples(\n # [(\"g1\", 0), (\"g1\", 2), (\"g2\", 3), (np.nan, 1), (np.nan, 4)],\n # names=[\"group\", None],\n # ),\n index=MultiIndex(\n [[\"g1\", \"g2\", np.nan], [0, 1, 2, 3, 4]],\n [[0, 0, 1, 2, 2], [0, 2, 3, 1, 4]],\n names=[\"group\", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"method\", [\"skew\", \"kurt\"])\ndef test_rolling_skew_kurt_numerical_stability(method):\n # GH: 6929\n s = Series(np.random.rand(10))\n expected = getattr(s.rolling(3), method)()\n s = s + 50000\n result = getattr(s.rolling(3), method)()\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"method\", \"values\"),\n [\n (\"skew\", [2.0, 0.854563, 0.0, 1.999984]),\n (\"kurt\", [4.0, -1.289256, -1.2, 3.999946]),\n ],\n)\ndef test_rolling_skew_kurt_large_value_range(method, values):\n # GH: 37557\n s = Series([3000000, 1, 1, 2, 3, 4, 999])\n result = getattr(s.rolling(4), method)()\n expected = Series([np.nan] * 3 + values)\n tm.assert_series_equal(result, expected)\n" ]
[ [ "pandas.MultiIndex", "numpy.ones", "pandas.Series", "pandas._testing.assert_frame_equal", "numpy.random.RandomState", "pandas._testing.assert_series_equal", "pandas.period_range", "pandas._testing.assert_produces_warning", "numpy.abs", "pandas._testing.assert_equal", "pandas._testing.assert_almost_equal", "pandas.to_datetime", "numpy.random.rand", "pandas.to_timedelta", "pandas.Timestamp", "pandas.date_range", "pandas.MultiIndex.from_product", "pandas.Timedelta", "numpy.arange", "pandas.MultiIndex.from_tuples", "pandas.DatetimeIndex", "numpy.empty", "pandas.MultiIndex.from_arrays", "pandas.DataFrame", "pandas._testing.assert_index_equal", "numpy.array" ] ]
tyler-e-marshall/prymetime
[ "c1daa783c8091adbc5900a51d98522b1269d0107" ]
[ "PRYMETIME/nucmer4.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nTitle: Sending Contigs to Nucmer\nCreated on Tue Aug 13 2019\n\n@author: Eric\n@email: [email protected]\n\"\"\"\nimport glob, os\nimport pandas as pd\nfrom Bio import SeqIO\nfrom pymummer import nucmer\nfrom pathlib import Path\n\npath_to_file = \"pilon.fasta\"\npath = Path(path_to_file)\n\nshort_contigs = []\ncontigs = []\n\nif path.is_file():\n\n for x in SeqIO.parse(open(\"pilon.fasta\"),'fasta'):\n\n if len(x.seq) < 50000:\n short_contigs.append(x)\n SeqIO.write(x, \"%(x)s.fasta\" % {'x':x.id}, 'fasta')\n\n else:\n contigs.append(x)\n #print(\"long\", x.id)\n\n for pathname in glob.glob(\"*.fasta\"):\n basename = os.path.basename(pathname)\n\n for x in short_contigs:\n\n if x.id in basename :\n runner = nucmer.Runner(basename, basename, \"%(x)s_out.coords\" % {'x':x.id},\n maxmatch=True, simplify=False, mincluster=2000, min_id=99, min_length=2000, coords_header=True)\n\n runner.run()\n\n# The below lines are for saving fasta files of the contigs if desired\n#SeqIO.write(short_contigs , \"short_contigs.fasta\", \"fasta\")\n#SeqIO.write(lin_contigs , \"lin_contigs.fasta\", \"fasta\")\n\n# The below lines are for visually checking which files are repetitive or not\n'''\nfor pathname in glob.glob(\"*.coords\"):\n\n basename = os.path.basename(pathname)\n name = basename.split(\".\")\n\n df = pd.read_csv(basename)\n\n print(df)\n\n if len(df.index) > 1 :\n\n print(name[0], \"morethan 1\")\n'''\n\ncir_path = \"cir_contigs.fasta\"\npath_cir = Path(cir_path)\n\nif path_cir.is_file():\n\n cir_rep_contigs = [x for x in SeqIO.parse(open(\"cir_contigs.fasta\"), 'fasta')]\n\n for x in short_contigs:\n if len(pd.read_csv(\"%(x)s_out.coords\" % {'x': x.id}).index) > 4 :\n cir_rep_contigs.append(x)\n else:\n #print(x.id)\n contigs.append(x)\n\n for x in cir_rep_contigs :\n SeqIO.write(cir_rep_contigs, \"cir_rep_contigs.fasta\", \"fasta\")\n\nSeqIO.write(contigs, \"polished_contigs.fasta\", \"fasta\")\n" ]
[ [ "pandas.read_csv" ] ]
pfistfl/openml-defaults
[ "0678167f807512bd0c957f82a83ff8181461090c" ]
[ "examples/legacy/evaluate_defaults_live.py" ]
[ "import argparse\nimport copy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport openmldefaults\nimport os\nimport pandas as pd\n\n\n# sshfs [email protected]:/rigel/home/jv2657/experiments ~/habanero_experiments\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_path', type=str,\n default=os.path.expanduser('~') + '/data/openml-defaults/surrogate__adaboost__predictive_accuracy__c8.arff')\n parser.add_argument('--resized_grid_size', type=int, default=8)\n parser.add_argument('--input_file', type=str, default=os.path.expanduser('~') + '/habanero_experiments/openml-defaults/20180826/surrogate__adaboost__predictive_accuracy__c8.arff/live_random_search/results.csv')\n return parser.parse_args()\n\n\ndef plot(df, y_label, output_file):\n sns_plot = sns.boxplot(x='n_defaults', y='evaluation', hue='strategy_type', data=df, palette=\"Set3\")\n fig = sns_plot.get_figure()\n fig.savefig(output_file)\n plt.clf()\n print(openmldefaults.utils.get_time(), 'saved to', output_file)\n\n\ndef count_results(df):\n print(df.groupby([\"strategy_type\", \"n_defaults\"]).agg(\"count\"))\n\n\ndef normalize_scores(df, task_minscore, task_maxscore):\n def normalize(row):\n eval = row['evaluation']\n min_score = task_minscore[row['task_id']]\n max_score = task_maxscore[row['task_id']]\n if min_score != max_score:\n return (eval - min_score) / (max_score - min_score)\n else:\n return min_score\n\n df = copy.deepcopy(df)\n df['evaluation'] = df.apply(lambda row: normalize(row), axis=1)\n return df\n\n\ndef run():\n args = parse_args()\n if not os.path.isfile(args.input_file):\n raise ValueError('Could not locate input file: %s' % args.input_file)\n\n dataset_name = os.path.basename(args.dataset_path)\n output_dir = os.path.dirname(args.input_file)\n df = pd.read_csv(filepath_or_buffer=args.input_file, sep=',')\n meta_data = openmldefaults.utils.get_dataset_metadata(args.dataset_path)\n\n df['strategy_type'] = df['strategy_name'].apply(lambda x: x.split('__')[0])\n df['n_defaults'] = df['strategy_name'].apply(lambda x: int(x.split('__')[1]))\n df = df.groupby(['strategy_name', 'task_id', 'strategy_type', 'n_defaults']).mean().reset_index()\n # removed unnamed columns\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\n\n df = df.loc[df['configuration_specification'] == args.resized_grid_size]\n\n # print statistics\n count_results(df)\n\n # normalize\n task_minscores = dict()\n task_maxscores = dict()\n for task_id in getattr(df, 'task_id').unique():\n df_task = df.loc[df['task_id'] == task_id]\n task_min = df_task.evaluation.min()\n task_max = df_task.evaluation.max()\n\n task_minscores[task_id] = task_min\n task_maxscores[task_id] = task_max\n\n outputfile_vanilla = os.path.join(output_dir, \"%s_live.png\" % dataset_name)\n plot(df, meta_data['scoring'], outputfile_vanilla)\n\n df_normalized = normalize_scores(df, task_minscores, task_maxscores)\n outputfile_normalized = os.path.join(output_dir, \"%s_live__normalized.png\" % dataset_name)\n plot(df_normalized, meta_data['scoring'], outputfile_normalized)\n\n\nif __name__ == '__main__':\n run()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.clf" ] ]
TileDB-Inc/TileDB-CLI
[ "e18e148fe5c6044b87d28595f5370eecac0b3c8f" ]
[ "tiledb_cli/tests/test_convert_from.py" ]
[ "import tiledb\nfrom tiledb_cli.root import root\nfrom tiledb_cli.convert_from import parse_kwargs\n\nfrom click.testing import CliRunner\nimport os\nimport numpy as np\nimport pandas as pd\nimport pytest\n\n\[email protected](autouse=True, scope=\"session\")\ndef create_test_simple_csv(temp_rootdir):\n \"\"\"\n Create a simple dense test array.\n \"\"\"\n path = os.path.abspath(os.path.join(temp_rootdir, \"simple.csv\"))\n\n with open(path, mode=\"w\") as csv_input:\n csv_input.write(\n (\n \"a,b,c,date\\n\"\n '1,\"text\",3.4,Mar/02/2021\\n'\n '2,\"hello\",1.234,Apr/07/2021\\n'\n '3,\"goodbye\",111.232,Dec/17/2021\\n'\n '4,\"world\",123123.12,Jul/21/2021\\n'\n '10,\"raisins\",14.232,Nov/09/2021\\n'\n )\n )\n\n expected_output = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 10],\n \"b\": [\"text\", \"hello\", \"goodbye\", \"world\", \"raisins\"],\n \"c\": [3.400, 1.234, 111.232, 123123.120, 14.232],\n \"date\": [\n \"Mar/02/2021\",\n \"Apr/07/2021\",\n \"Dec/17/2021\",\n \"Jul/21/2021\",\n \"Nov/09/2021\",\n ],\n }\n )\n\n return (\"simple\", expected_output)\n\n\nclass TestCSV:\n def test_parse_kwargs(self):\n kwargs = parse_kwargs(\n [\n \"--bool\",\n \"True\",\n \"--str\",\n \"helloworld\",\n \"--num\",\n \"2\",\n \"--numisstr\",\n '\"2\"',\n \"--boolisstr\",\n '\"False\"',\n \"--dictints\",\n \"hello:1;world:2\",\n \"--dictstrs\",\n 'hello:world;\"1\":\"2\"',\n \"--dictbools\",\n \"good:True;bye:False\",\n \"--dictmix\",\n 'bool:False;str:\"1\";int:2;3:\"three\";list:hey,\"hi\",True,1',\n \"--listabc\",\n \"a,b,c\",\n \"--list123\",\n \"1,2,3\",\n \"--listbool\",\n \"True,True,False\",\n \"--listmix\",\n 'False,\"1\",2',\n ]\n )\n\n assert kwargs[\"bool\"] == True\n assert kwargs[\"str\"] == \"helloworld\"\n assert kwargs[\"num\"] == 2\n assert kwargs[\"numisstr\"] == \"2\"\n assert kwargs[\"boolisstr\"] == \"False\"\n assert kwargs[\"dictints\"] == {\"hello\": 1, \"world\": 2}\n assert kwargs[\"dictstrs\"] == {\"hello\": \"world\", \"1\": \"2\"}\n assert kwargs[\"dictbools\"] == {\"good\": True, \"bye\": False}\n assert kwargs[\"dictmix\"] == {\n \"bool\": False,\n \"str\": \"1\",\n \"int\": 2,\n 3: \"three\",\n \"list\": [\"hey\", \"hi\", True, 1],\n }\n assert kwargs[\"listabc\"] == [\"a\", \"b\", \"c\"]\n assert kwargs[\"list123\"] == [1, 2, 3]\n assert kwargs[\"listbool\"] == [True, True, False]\n assert kwargs[\"listmix\"] == [False, \"1\", 2]\n\n def test_no_options(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri]\n \"\"\"\n test_name, expected_output = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert pd.DataFrame.equals(array.df[:], expected_output)\n\n def test_sparse(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --sparse True\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_sparse.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--sparse\", \"True\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.sparse == True\n\n def test_dense(self, runner, temp_rootdir, 
create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --sparse False\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_dense.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--sparse\", \"False\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.sparse == False\n\n def test_duplicates(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --allows-duplicates (False|True)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n\n uri = os.path.join(temp_rootdir, \"test_no_duplicates.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--allows-duplicates\",\n \"False\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.allows_duplicates == False\n\n uri = os.path.join(temp_rootdir, \"test_allows_duplicates.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--allows-duplicates\",\n \"True\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.allows_duplicates == True\n\n def test_capacity(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --capacity <int>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_capacity.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--capacity\", \"123456\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.capacity == 123456\n\n def test_cell_order(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --cell-order (row-major|col-major|global)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_cell_order.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--cell-order\", \"global\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.cell_order == \"global\"\n\n def test_full_domain(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --full-domain (True|False)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_full_domain.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--full-domain\", \"True\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n dim = array.schema.domain.dim(\"__tiledb_rows\")\n assert dim.domain[0] == np.iinfo(np.uint64).min\n assert dim.domain[1] == np.iinfo(np.uint64).max - dim.tile\n\n def test_date_spec(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --date-spec <column>:<datetime format spec>,...\n \"\"\"\n 
test_name, expected_output = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_date_spec.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--date-spec\", \"date:%b/%d/%Y\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert pd.DataFrame.equals(\n array.query([\"date\"]).df[:],\n pd.DataFrame(pd.to_datetime(expected_output[\"date\"])),\n )\n\n def test_mode_schema_only(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --mode (ingest|schema_only|append)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_mode_schema_only.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--mode\",\n \"schema_only\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.query(use_arrow=False).df[0].empty\n\n def test_row_start_idx(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --row-start-idx <int>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_row_start_idx.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"False\",\n \"--row-start-idx\",\n \"5\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.df[:].index.to_numpy()[0] == 5\n assert array.df[:].index.to_numpy()[-1] == 9\n\n def test_cell_order(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --cell-order (row-major|col-major|global)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_cell_order.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--cell-order\", \"col-major\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.cell_order == \"col-major\"\n\n def test_tile_int(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --tile <int>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_tile_int.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--tile\", \"2\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.domain.dim(\"__tiledb_rows\").tile == 2\n\n def test_tile_with_attr(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --tile <attr>:<int>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_tile_with_attr.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--tile\", \"__tiledb_rows:2\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert 
array.schema.domain.dim(\"__tiledb_rows\").tile == 2\n\n def test_timestamp(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --timestamp <int>\n \"\"\"\n test_name, expected_output = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_timestamp.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--mode\",\n \"ingest\",\n \"--timestamp\",\n \"1\",\n ],\n )\n\n assert result.exit_code == 0\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--mode\",\n \"append\",\n \"--timestamp\",\n \"2\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri, timestamp=1) as array:\n assert pd.DataFrame.equals(\n array.df[:].loc[:, array.df[:].columns != \"__tiledb_rows\"],\n expected_output,\n )\n\n with tiledb.open(uri, timestamp=2) as array:\n assert pd.DataFrame.equals(\n array.df[:].loc[:, array.df[:].columns != \"__tiledb_rows\"],\n expected_output.append(expected_output, ignore_index=True),\n )\n\n def test_attr_filters(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --attr-filters <filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_attr_filters.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--attr-filters\", \"GzipFilter=9\"],\n )\n\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.attr(\"a\").filters.nfilters == 1\n assert array.schema.attr(\"a\").filters[0] == tiledb.GzipFilter(9)\n\n assert array.schema.attr(\"b\").filters.nfilters == 1\n assert array.schema.attr(\"b\").filters[0] == tiledb.GzipFilter(9)\n\n assert array.schema.attr(\"c\").filters.nfilters == 1\n assert array.schema.attr(\"c\").filters[0] == tiledb.GzipFilter(9)\n\n assert array.schema.attr(\"date\").filters.nfilters == 1\n assert array.schema.attr(\"date\").filters[0] == tiledb.GzipFilter(9)\n\n def test_attr_filters_multi(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --attr-filters <attr name>:<filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_attr_filters_multi.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--attr-filters\",\n (\n \"a:LZ4Filter=10,BitShuffleFilter;\"\n \"b:DoubleDeltaFilter,PositiveDeltaFilter=3\"\n ),\n ],\n )\n\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.attr(\"a\").filters.nfilters == 2\n assert array.schema.attr(\"a\").filters[0] == tiledb.LZ4Filter(10)\n assert array.schema.attr(\"a\").filters[1] == tiledb.BitShuffleFilter()\n\n assert array.schema.attr(\"b\").filters.nfilters == 2\n assert array.schema.attr(\"b\").filters[0] == tiledb.DoubleDeltaFilter()\n assert array.schema.attr(\"b\").filters[1] == tiledb.PositiveDeltaFilter(3)\n\n assert array.schema.attr(\"c\").filters.nfilters == 0\n\n assert array.schema.attr(\"date\").filters.nfilters == 0\n\n def 
test_coords_filters(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --coords-filters <filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_coords_filters.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--coords-filters\",\n \"GzipFilter=9\",\n ],\n )\n\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.coords_filters.nfilters == 1\n assert array.schema.coords_filters[0] == tiledb.GzipFilter(9)\n\n def test_dim_filters(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --dim-filters <filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_dim_filters.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--dim-filters\", \"GzipFilter=9\"],\n )\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.domain.dim(0).filters.nfilters == 1\n assert array.schema.domain.dim(0).filters[0] == tiledb.GzipFilter(9)\n\n def test_sep(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --sep <str>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_sep.tdb\")\n\n result = runner.invoke(\n root, [\"convert-from\", \"csv\", input_path, uri, \"--sep\", \" \"]\n )\n\n assert result.exit_code == 0\n with tiledb.open(uri) as array:\n assert len(array.df[:].columns) == 1\n\n def test_header_and_names(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --header 0 --names <column name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_names.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--header\",\n \"0\",\n \"--names\",\n \"d,c,b,a\",\n ],\n )\n\n assert result.exit_code == 0\n with tiledb.open(uri) as array:\n assert array.df[:].columns[0] == \"d\"\n assert array.df[:].columns[1] == \"c\"\n assert array.df[:].columns[2] == \"b\"\n assert array.df[:].columns[3] == \"a\"\n\n @pytest.mark.skip(\"does not work on windows?\")\n def test_skiprows(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --skiprows <int>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_skiprows.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--skiprows\", \"0,1\"],\n )\n\n assert result.exit_code == 0\n with tiledb.open(uri) as array:\n assert len(array.df[:]) == 3\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame.equals", "pandas.DataFrame", "numpy.iinfo" ] ]
hankyul2/EfficientNetV2-pytorch
[ "bce59dae3ce69e3e7e8aa99e4f32214b015dd1f8" ]
[ "efficientnetv2/efficientnet_v2.py" ]
[ "import copy\nfrom functools import partial\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn\n\nfrom efficientnetv2 import get_efficientnet_v2_structure\nfrom efficientnetv2 import load_from_zoo\n\n\nclass ConvBNAct(nn.Sequential):\n \"\"\"Convolution-Normalization-Activation Module\"\"\"\n def __init__(self, in_channel, out_channel, kernel_size, stride, groups, norm_layer, act, conv_layer=nn.Conv2d):\n super(ConvBNAct, self).__init__(\n conv_layer(in_channel, out_channel, kernel_size, stride=stride, padding=(kernel_size-1)//2, groups=groups, bias=False),\n norm_layer(out_channel),\n act()\n )\n\n\nclass SEUnit(nn.Module):\n \"\"\"Squeeze-Excitation Unit\n\n paper: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper\n\n \"\"\"\n def __init__(self, in_channel, reduction_ratio=4, act1=partial(nn.SiLU, inplace=True), act2=nn.Sigmoid):\n super(SEUnit, self).__init__()\n hidden_dim = in_channel // reduction_ratio\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc1 = nn.Conv2d(in_channel, hidden_dim, (1, 1), bias=True)\n self.fc2 = nn.Conv2d(hidden_dim, in_channel, (1, 1), bias=True)\n self.act1 = act1()\n self.act2 = act2()\n\n def forward(self, x):\n return x * self.act2(self.fc2(self.act1(self.fc1(self.avg_pool(x)))))\n\n\nclass StochasticDepth(nn.Module):\n \"\"\"StochasticDepth\n\n paper: https://link.springer.com/chapter/10.1007/978-3-319-46493-0_39\n\n :arg\n - prob: Probability of dying\n - mode: \"row\" or \"all\". \"row\" means that each row survives with different probability\n \"\"\"\n def __init__(self, prob, mode):\n super(StochasticDepth, self).__init__()\n self.prob = prob\n self.survival = 1.0 - prob\n self.mode = mode\n\n def forward(self, x):\n if self.prob == 0.0 or not self.training:\n return x\n else:\n shape = [x.size(0)] + [1] * (x.ndim - 1) if self.mode == 'row' else [1]\n return x * torch.empty(shape).bernoulli_(self.survival).div_(self.survival).to(x.device)\n\n\nclass MBConvConfig:\n \"\"\"EfficientNet Building block configuration\"\"\"\n def __init__(self, expand_ratio: float, kernel: int, stride: int, in_ch: int, out_ch: int, layers: int,\n use_se: bool, fused: bool, act=nn.SiLU, norm_layer=nn.BatchNorm2d):\n self.expand_ratio = expand_ratio\n self.kernel = kernel\n self.stride = stride\n self.in_ch = in_ch\n self.out_ch = out_ch\n self.num_layers = layers\n self.act = act\n self.norm_layer = norm_layer\n self.use_se = use_se\n self.fused = fused\n\n @staticmethod\n def adjust_channels(channel, factor, divisible=8):\n new_channel = channel * factor\n divisible_channel = max(divisible, (int(new_channel + divisible / 2) // divisible) * divisible)\n divisible_channel += divisible if divisible_channel < 0.9 * new_channel else 0\n return divisible_channel\n\n\nclass MBConv(nn.Module):\n \"\"\"EfficientNet main building blocks\n\n :arg\n - c: MBConvConfig instance\n - sd_prob: stochastic path probability\n \"\"\"\n def __init__(self, c, sd_prob=0.0):\n super(MBConv, self).__init__()\n inter_channel = c.adjust_channels(c.in_ch, c.expand_ratio)\n block = []\n\n if c.expand_ratio == 1:\n block.append(('fused', ConvBNAct(c.in_ch, inter_channel, c.kernel, c.stride, 1, c.norm_layer, c.act)))\n elif c.fused:\n block.append(('fused', ConvBNAct(c.in_ch, inter_channel, c.kernel, c.stride, 1, c.norm_layer, c.act)))\n block.append(('fused_point_wise', ConvBNAct(inter_channel, c.out_ch, 1, 1, 1, c.norm_layer, nn.Identity)))\n else:\n block.append(('linear_bottleneck', ConvBNAct(c.in_ch, 
inter_channel, 1, 1, 1, c.norm_layer, c.act)))\n block.append(('depth_wise', ConvBNAct(inter_channel, inter_channel, c.kernel, c.stride, inter_channel, c.norm_layer, c.act)))\n block.append(('se', SEUnit(inter_channel, 4 * c.expand_ratio)))\n block.append(('point_wise', ConvBNAct(inter_channel, c.out_ch, 1, 1, 1, c.norm_layer, nn.Identity)))\n\n self.block = nn.Sequential(OrderedDict(block))\n self.use_skip_connection = c.stride == 1 and c.in_ch == c.out_ch\n self.stochastic_path = StochasticDepth(sd_prob, \"row\")\n\n def forward(self, x):\n out = self.block(x)\n if self.use_skip_connection:\n out = x + self.stochastic_path(out)\n return out\n\n\nclass EfficientNetV2(nn.Module):\n \"\"\"Pytorch Implementation of EfficientNetV2\n\n paper: https://arxiv.org/abs/2104.00298\n\n - reference 1 (pytorch): https://github.com/d-li14/efficientnetv2.pytorch/blob/main/effnetv2.py\n - reference 2 (official): https://github.com/google/automl/blob/master/efficientnetv2/effnetv2_configs.py\n\n :arg\n - layer_infos: list of MBConvConfig\n - out_channels: bottleneck channel\n - nlcass: number of class\n - dropout: dropout probability before classifier layer\n - stochastic depth: stochastic depth probability\n \"\"\"\n def __init__(self, layer_infos, out_channels=1280, nclass=0, dropout=0.2, stochastic_depth=0.0,\n block=MBConv, act_layer=nn.SiLU, norm_layer=nn.BatchNorm2d):\n super(EfficientNetV2, self).__init__()\n self.layer_infos = layer_infos\n self.norm_layer = norm_layer\n self.act = act_layer\n\n self.in_channel = layer_infos[0].in_ch\n self.final_stage_channel = layer_infos[-1].out_ch\n self.out_channels = out_channels\n\n self.cur_block = 0\n self.num_block = sum(stage.num_layers for stage in layer_infos)\n self.stochastic_depth = stochastic_depth\n\n self.stem = ConvBNAct(3, self.in_channel, 3, 2, 1, self.norm_layer, self.act)\n self.blocks = nn.Sequential(*self.make_stages(layer_infos, block))\n self.head = nn.Sequential(OrderedDict([\n ('bottleneck', ConvBNAct(self.final_stage_channel, out_channels, 1, 1, 1, self.norm_layer, self.act)),\n ('avgpool', nn.AdaptiveAvgPool2d((1, 1))),\n ('flatten', nn.Flatten()),\n ('dropout', nn.Dropout(p=dropout, inplace=True)),\n ('classifier', nn.Linear(out_channels, nclass) if nclass else nn.Identity())\n ]))\n\n def make_stages(self, layer_infos, block):\n return [layer for layer_info in layer_infos for layer in self.make_layers(copy.copy(layer_info), block)]\n\n def make_layers(self, layer_info, block):\n layers = []\n for i in range(layer_info.num_layers):\n layers.append(block(layer_info, sd_prob=self.get_sd_prob()))\n layer_info.in_ch = layer_info.out_ch\n layer_info.stride = 1\n return layers\n\n def get_sd_prob(self):\n sd_prob = self.stochastic_depth * (self.cur_block / self.num_block)\n self.cur_block += 1\n return sd_prob\n\n def forward(self, x):\n return self.head(self.blocks(self.stem(x)))\n\n def change_dropout_rate(self, p):\n self.head[-2] = nn.Dropout(p=p, inplace=True)\n\n\ndef efficientnet_v2_init(model):\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0.0, std=0.01)\n nn.init.zeros_(m.bias)\n\n\ndef get_efficientnet_v2(model_name, pretrained, nclass=0, dropout=0.1, stochastic_depth=0.2, **kwargs):\n residual_config = [MBConvConfig(*layer_config) for layer_config 
in get_efficientnet_v2_structure(model_name)]\n model = EfficientNetV2(residual_config, 1280, nclass, dropout=dropout, stochastic_depth=stochastic_depth, block=MBConv, act_layer=nn.SiLU)\n efficientnet_v2_init(model)\n\n if pretrained:\n load_from_zoo(model, model_name)\n\n return model" ]
[ [ "torch.empty", "torch.nn.init.kaiming_normal_", "torch.nn.Linear", "torch.nn.Flatten", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.normal_", "torch.nn.init.ones_", "torch.nn.Conv2d", "torch.nn.init.zeros_", "torch.nn.Identity", "torch.nn.Dropout" ] ]
nlp-tlp/quickgraph
[ "34d888b055a78939095005f9cef363c0430664be" ]
[ "server_cluster/app.py" ]
[ "'''\n API for rank order clustering documents.\n'''\n\nimport itertools\nimport pathlib\nfrom collections import Counter, defaultdict\nfrom enum import Enum\nfrom typing import List, Optional\n\nimport numpy as np\nimport uvicorn\nfrom fastapi import Body, FastAPI, HTTPException\nfrom loguru import logger\nfrom nltk import FreqDist\nfrom pydantic import BaseModel, Field\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.cluster import AgglomerativeClustering, KMeans\nfrom sklearn.decomposition import LatentDirichletAllocation\n\nlog_path = pathlib.Path(__file__).parent.resolve()\n\n\nlogger.add(\n f\"{log_path}/api.log\", rotation=\"10 MB\")\n\napp = FastAPI()\n\n# Load SBERT model\nlogger.info(f'Loading model')\nmodel_checkpoint = 'all-distilroberta-v1'\nmodel = SentenceTransformer(model_checkpoint)\nlogger.info(f'{model_checkpoint} loaded')\n\n\[email protected](\"/ping\")\ndef ping_pong():\n ''' Checks API service '''\n return {\"message\": \"pong\"}\n\n\nclass Data(BaseModel):\n corpus: List[str]\n\n\[email protected](\"/rank_cluster\")\ndef rank_cluster(data: Data):\n '''\n\n '''\n\n logger.info(\n \"Performing rank order clustering with SentBERT and Agglomerative clustering\")\n logger.info(f'Corpus size: {len(data.corpus)}')\n\n # Embed sentences\n logger.info(f'Corpus embedding started')\n corpus_embeddings = model.encode(\n data.corpus, batch_size=64) # show_progress_bar=False, convert_to_tensor=True\n logger.info(f'Corpus embedding finished')\n\n logger.info(f'Clustering started')\n logger.info('Transforming embedding for agglomerative clustering')\n # Normalize the embeddings to unit length\n corpus_embeddings = corpus_embeddings / \\\n np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)\n\n # , affinity='cosine', linkage='average', distance_threshold=0.4)\n clustering_model = AgglomerativeClustering(\n n_clusters=None, distance_threshold=1.5)\n\n clustering_model.fit(corpus_embeddings)\n logger.info('fitted cluster model')\n\n cluster_assignment = clustering_model.labels_\n # logger.debug(cluster_assignment)\n logger.info(f'Clustering finished')\n\n clustered_corpus = []\n for sentence_id, cluster_id in enumerate(cluster_assignment):\n # print(sentence_id, cluster_id)\n clustered_corpus.append({\"id\": int(sentence_id), \"cluster\": int(\n cluster_id), \"sentence\": data.corpus[sentence_id]})\n\n # Get human-interpretable label for cluster\n groups = defaultdict(list)\n\n # Group clusters into arrays\n for obj in clustered_corpus:\n groups[obj[\"cluster\"]].append(obj)\n\n # Find topn terms in clusters\n cluster_terms = {}\n for cluster in groups.values():\n cluster_number = cluster[0]['cluster']\n\n cluster_tokens = list(itertools.chain(\n *[text['sentence'].split() for text in cluster]))\n\n token_freq_dist = FreqDist(cluster_tokens)\n top_n_terms = token_freq_dist.most_common(5)\n top_n_term_string = \"|\".join([term for term, _ in top_n_terms])\n cluster_terms[cluster_number] = top_n_term_string\n\n # Get cluster counts / distribution\n cluster_distribution = Counter(\n sentence['cluster'] for sentence in clustered_corpus)\n # print(cluster_distribution)\n\n cluster_details = [{\"cluster_number\": cluster_no, 'count': cluster_distribution[cluster_no],\n 'top_n_terms': cluster_terms[cluster_no]} for cluster_no in cluster_distribution.keys()]\n\n cluster_details_sorted = sorted(\n cluster_details, key=lambda d: d['cluster_number'])\n\n return {'clustered_corpus': clustered_corpus, 'cluster_details': cluster_details_sorted}\n\n\nif __name__ == 
'__main__':\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n" ]
[ [ "numpy.linalg.norm", "sklearn.cluster.AgglomerativeClustering" ] ]
ritchie46/flopy
[ "8e7284dcb3aaf5c12293d442248c2c2d9959f835" ]
[ "flopy/modflow/mfdrn.py" ]
[ "\"\"\"\r\nmfdrn module. Contains the ModflowDrn class. Note that the user can access\r\nthe ModflowDrn class as `flopy.modflow.ModflowDrn`.\r\n\r\nAdditional information for this MODFLOW package can be found at the `Online\r\nMODFLOW Guide\r\n<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?drn.htm>`_.\r\n\r\n\"\"\"\r\nimport sys\r\nimport numpy as np\r\nfrom ..pakbase import Package\r\nfrom ..utils.util_list import MfList\r\nfrom ..utils.recarray_utils import create_empty_recarray\r\n\r\n\r\nclass ModflowDrn(Package):\r\n \"\"\"\r\n MODFLOW Drain Package Class.\r\n\r\n Parameters\r\n ----------\r\n model : model object\r\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to which\r\n this package will be added.\r\n ipakcb : int\r\n A flag that is used to determine if cell-by-cell budget data should be\r\n saved. If ipakcb is non-zero cell-by-cell budget data will be saved.\r\n (default is None).\r\n stress_period_data : list of boundaries, recarrays, or dictionary of\r\n boundaries.\r\n Each drain cell is defined through definition of\r\n layer(int), row(int), column(int), elevation(float),\r\n conductance(float).\r\n The simplest form is a dictionary with a lists of boundaries for each\r\n stress period, where each list of boundaries itself is a list of\r\n boundaries. Indices of the dictionary are the numbers of the stress\r\n period. This gives the form of::\r\n\r\n stress_period_data =\r\n {0: [\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n ],\r\n 1: [\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n ], ...\r\n kper:\r\n [\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n ]\r\n }\r\n\r\n Note that if no values are specified for a certain stress period, then\r\n the list of boundaries for the previous stress period for which values\r\n were defined is used. Full details of all options to specify\r\n stress_period_data can be found in the flopy3boundaries Notebook in\r\n the basic subdirectory of the examples directory.\r\n dtype : dtype definition\r\n if data type is different from default\r\n options : list of strings\r\n Package options. (default is None).\r\n extension : string\r\n Filename extension (default is 'drn')\r\n unitnumber : int\r\n File unit number (default is None).\r\n filenames : str or list of str\r\n Filenames to use for the package and the output files. If\r\n filenames=None the package name will be created using the model name\r\n and package extension and the cbc output name will be created using\r\n the model name and .cbc extension (for example, modflowtest.cbc),\r\n if ipakcbc is a number greater than zero. If a single string is passed\r\n the package will be set to the string and cbc output names will be\r\n created using the model name and .cbc extension, if ipakcbc is a\r\n number greater than zero. 
To define the names for all package files\r\n (input and output) the length of the list of strings should be 2.\r\n Default is None.\r\n\r\n Attributes\r\n ----------\r\n\r\n Methods\r\n -------\r\n\r\n See Also\r\n --------\r\n\r\n Notes\r\n -----\r\n Parameters are not supported in FloPy.\r\n If \"RETURNFLOW\" in passed in options, the drain return package (DRT) activated, which expects\r\n a different (longer) dtype for stress_period_data\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> ml = flopy.modflow.Modflow()\r\n >>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all\r\n >>> #stress periods\r\n >>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec)\r\n\r\n \"\"\"\r\n\r\n def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None,\r\n extension='drn', unitnumber=None, options=None,\r\n filenames=None, **kwargs):\r\n\r\n # set default unit number of one is not specified\r\n if unitnumber is None:\r\n unitnumber = ModflowDrn.defaultunit()\r\n\r\n # set filenames\r\n if filenames is None:\r\n filenames = [None, None]\r\n elif isinstance(filenames, str):\r\n filenames = [filenames, None]\r\n elif isinstance(filenames, list):\r\n if len(filenames) < 2:\r\n filenames.append(None)\r\n\r\n # update external file information with cbc output, if necessary\r\n if ipakcb is not None:\r\n fname = filenames[1]\r\n model.add_output_file(ipakcb, fname=fname,\r\n package=ModflowDrn.ftype())\r\n else:\r\n ipakcb = 0\r\n\r\n if options is None:\r\n options = []\r\n self.is_drt = False\r\n for opt in options:\r\n if opt.upper() == \"RETURNFLOW\":\r\n self.is_drt = True\r\n break\r\n if self.is_drt:\r\n name = [\"DRT\"]\r\n else:\r\n name = [ModflowDrn.ftype()]\r\n units = [unitnumber]\r\n extra = ['']\r\n\r\n # set package name\r\n fname = [filenames[0]]\r\n\r\n # Call ancestor's init to set self.parent, extension, name and unit number\r\n Package.__init__(self, model, extension=extension, name=name,\r\n unit_number=units, extra=extra, filenames=fname)\r\n\r\n self.heading = '# {} package for '.format(self.name[0]) + \\\r\n ' {}, '.format(model.version_types[model.version]) + \\\r\n 'generated by Flopy.'\r\n self.url = 'drn.htm'\r\n\r\n self.ipakcb = ipakcb\r\n\r\n self.np = 0\r\n\r\n self.options = options\r\n if dtype is not None:\r\n self.dtype = dtype\r\n else:\r\n self.dtype = self.get_default_dtype(\r\n structured=self.parent.structured, is_drt=self.is_drt)\r\n self.stress_period_data = MfList(self, stress_period_data)\r\n self.parent.add_package(self)\r\n\r\n @staticmethod\r\n def get_default_dtype(structured=True, is_drt=False):\r\n if structured:\r\n if not is_drt:\r\n dtype = np.dtype([(\"k\", np.int), (\"i\", np.int),\r\n (\"j\", np.int), (\"elev\", np.float32),\r\n (\"cond\", np.float32)])\r\n else:\r\n dtype = np.dtype([(\"k\", np.int), (\"i\", np.int),\r\n (\"j\", np.int), (\"elev\", np.float32),\r\n (\"cond\", np.float32), (\"layr\", np.int),\r\n (\"rowr\", np.int), (\"colr\", np.int),\r\n (\"rfprop\", np.float32)])\r\n else:\r\n dtype = np.dtype([(\"node\", np.int), (\"elev\", np.float32),\r\n (\"cond\", np.float32)])\r\n return dtype\r\n\r\n def ncells(self):\r\n # Returns the maximum number of cells that have drains (developed for MT3DMS SSM package)\r\n # print 'Function must be implemented properly for drn package'\r\n return self.stress_period_data.mxact\r\n\r\n def write_file(self, check=True):\r\n \"\"\"\r\n Write the package file.\r\n\r\n Parameters\r\n ----------\r\n check : boolean\r\n Check package data for common 
errors. (default True)\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n \"\"\"\r\n if check: # allows turning off package checks when writing files at model level\r\n self.check(f='{}.chk'.format(self.name[0]),\r\n verbose=self.parent.verbose, level=1)\r\n f_drn = open(self.fn_path, 'w')\r\n f_drn.write('{0}\\n'.format(self.heading))\r\n # f_drn.write('%10i%10i\\n' % (self.mxactd, self.idrncb))\r\n line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact,\r\n self.ipakcb)\r\n\r\n if self.is_drt:\r\n line += \"{0:10d}{0:10d}\".format(0)\r\n for opt in self.options:\r\n line += ' ' + str(opt)\r\n line += '\\n'\r\n f_drn.write(line)\r\n self.stress_period_data.write_transient(f_drn)\r\n f_drn.close()\r\n\r\n def add_record(self, kper, index, values):\r\n try:\r\n self.stress_period_data.add_record(kper, index, values)\r\n except Exception as e:\r\n raise Exception(\"mfdrn error adding record to list: \" + str(e))\r\n\r\n @staticmethod\r\n def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False):\r\n # get an empty recarray that corresponds to dtype\r\n dtype = ModflowDrn.get_default_dtype(structured=structured,\r\n is_drt=is_drt)\r\n if aux_names is not None:\r\n dtype = Package.add_to_dtype(dtype, aux_names, np.float32)\r\n return create_empty_recarray(ncells, dtype, default_value=-1.0E+10)\r\n\r\n @staticmethod\r\n def get_sfac_columns():\r\n return ['cond']\r\n\r\n @staticmethod\r\n def load(f, model, nper=None, ext_unit_dict=None, check=True):\r\n \"\"\"\r\n Load an existing package.\r\n\r\n Parameters\r\n ----------\r\n f : filename or file handle\r\n File to load.\r\n model : model object\r\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to\r\n which this package will be added.\r\n ext_unit_dict : dictionary, optional\r\n If the arrays in the file are specified using EXTERNAL,\r\n or older style array control records, then `f` should be a file\r\n handle. In this case ext_unit_dict is required, which can be\r\n constructed using the function\r\n :class:`flopy.utils.mfreadnam.parsenamefile`.\r\n check : boolean\r\n Check package data for common errors. (default True)\r\n\r\n Returns\r\n -------\r\n drn : ModflowDrn object\r\n ModflowDrn object.\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> m = flopy.modflow.Modflow()\r\n >>> drn = flopy.modflow.ModflowDrn.load('test.drn', m)\r\n\r\n \"\"\"\r\n\r\n if model.verbose:\r\n sys.stdout.write('loading drn package file...\\n')\r\n\r\n return Package.load(f, model, ModflowDrn, nper=nper, check=check,\r\n ext_unit_dict=ext_unit_dict)\r\n\r\n @staticmethod\r\n def ftype():\r\n return 'DRN'\r\n\r\n @staticmethod\r\n def defaultunit():\r\n return 21\r\n" ]
[ [ "numpy.dtype" ] ]
molkjar/bachelor
[ "a0591691b820c6c8a45d16f8d55f3a7e80ea384b" ]
[ "NYS-covasim/second_wave_scenarios.py" ]
[ "import covasim as cv\nimport covasim.utils as cvu\nimport optuna as op\nimport sciris as sc\nimport pandas as pd\nimport numpy as np\nimport os\nfrom collections import defaultdict\nimport population\n\n## Interesting part starts around line 200\n## First part is setup, optimization workers and alike - Important to run before analysing but not that interesting.\n\n############# Dates #######################\n\nstart_day = '2020-03-01'\nend_day = '2022-03-01'\n\n''' NYSonPause:\n General closure, incorporate school closures (happened few days before)\n Shelter-in-place order etc.\n Everything non-essential closed\n Lifting: Chosen quite arbitrary although somewhat at the right time'''\n\nNYSonPause = '2020-03-22'\nschoolsClosure = '2020-03-16'\n\nlifting = '2020-07-20'\nliftingSW = '2021-08-01'\n\n############# Model Setup #################\n# Population file to load - Generate with 'make_ny_pop.py'\npopfile = 'nyppl.pop'\n\n# Layer specification file used in popgeneration\nlayers = pd.read_csv('layers.csv', index_col='layer')\n\n# Data files to fit with\ncumDeathsfile = 'EpiData/deathsNY20200106.csv' #Deaths 2021-01-06\n\n# Model parameters\npars = sc.objdict(\n pop_size = 200e3,\n pop_scale = 100,\n rescale = True,\n \n pop_infected = 10000, # 0.05% of population infected at start of simulation - Should have a reference, but is stated in a NYT article somewhere.\n \n contacts = layers['contacts'].to_dict(),\n \n beta = 0.07576320418933516,\n beta_layer = layers['beta_layer'].to_dict(),\n \n start_day = start_day,\n end_day = end_day,\n \n rand_seed = 271220,\n \n verbose = .1,\n )\n\n# Intervention level fitted to first wave \nintv = {'H': 1.2765967578928226, \n 'W': 0.07393991037226055,\n 'C': 0.07393991037226055}\n\n############ Interventions ###############\n''' Make interventions, as scaling of beta.\n-- Level specific intervention effects\n-- i.e. 
Households see increase in transmission with school/work closures\n\n** intv = 0 - No transmission\n** intv = 1 - Regular transmission (no intervention)\n** intv > 1 - increase in transmission\n\nAs of now keep schools closed, maybe open them in fall, and close again at thanksgiving/december??\n'''\n \ndef make_ints(lintv, intv=intv):\n \n interventions = [\n # School layer\n cv.change_beta(days = [schoolsClosure, lifting, liftingSW],\n changes = [0, lintv['S'], 1],\n layers = ['S'],\n do_plot = True,\n ),\n \n # Workplace layer\n cv.change_beta(days = [NYSonPause, lifting, liftingSW],\n changes = [intv['W'], lintv['W'], 1],\n layers = ['W'],\n do_plot = False,\n ),\n \n # Householsd layer\n cv.change_beta(days = [NYSonPause, lifting, liftingSW],\n changes = [intv['H'], lintv['H'], 1],\n layers = ['H'],\n do_plot = True,\n ),\n \n # Community layer\n cv.change_beta(days = [NYSonPause, lifting, liftingSW],\n changes = [intv['C'], lintv['C'], 1],\n layers = ['C1'],\n do_plot = False,\n ),\n cv.dynamic_pars(n_imports=dict(days=[0, 141, 142], vals=[0, 10, 0])),\n ]\n\n \n # Regenerate dynamic layers\n interventions.insert(0, population.UpdateNetworks())\n \n return interventions\n\n############## Simulation/calibration setup ############\n## Initialize simulation with intervention\ndef make_sim(pars, lintv={'S':1,'W':1,'H':1,'C':1}, load_pop=True, popfile=popfile, datafile=cumDeathsfile):\n sim = cv.Sim(pars=pars,\n popfile=popfile,\n load_pop=load_pop,\n datafile=datafile)\n \n sim.pars['interventions'] = make_ints(lintv=lintv)\n \n sim.initialize()\n \n return sim\n\n## Running simulation\ndef run_sim(pars, lintv={'S':1,'W':1,'H':1,'C':1}, popfile=popfile, return_stat=False, verbose=0.1):\n sim = make_sim(pars=pars, lintv=lintv, popfile=popfile)\n sim.run(verbose=verbose)\n \n if return_stat:\n stat = sim.results['cum_infections'][-1]\n return stat\n else:\n return sim\n\n\n \n\n############## Calibration settings ###############\nname = 'lintv-SW-herd'\n\nW_low = 0.07 #0\nW_high = 1 #1\n\nn_workers = 2 # Define how many workers to run in parallel\nn_trials = 50 # Define the number of trials, i.e. sim runs, per worker\n\ndb_name = f'{name}.db'\nstorage = f'sqlite:///{db_name}'\n\n\n\n############### Calibration workings ##############\ndef run_trial(trial):\n ''' Define the objective for Optuna ''' \n lintv_W = trial.suggest_uniform('lintv_W', W_low, W_high)\n lintv_H = -0.3*lintv_W+1.3\n lintv = {'S':lintv_W, 'W':lintv_W, 'H':lintv_H, 'C':lintv_W}\n \n cum_d = run_sim(pars, lintv=lintv, return_stat=True, verbose=0)\n return cum_d\n\ndef worker():\n ''' Run a single worker '''\n study = op.load_study(storage=storage, study_name=name)\n output = study.optimize(run_trial, n_trials=n_trials)\n return output\n\ndef run_workers():\n ''' Run multiple workers in parallel '''\n output = sc.parallelize(worker, n_workers)\n return output\n\n\ndef make_study():\n ''' Make a study, deleting one if it already exists '''\n if os.path.exists(db_name):\n os.remove(db_name)\n print(f'Removed existing calibration {db_name}')\n output = op.create_study(storage=storage, study_name=name)\n return output\n\n\n\n\n########### Run the optimization ############\nt0 = sc.tic()\nmake_study()\nrun_workers()\nstudy = op.load_study(storage=storage, study_name=name)\nbest_pars = study.best_params\nT = sc.toc(t0, output=True)\nprint(f'\\n\\nOutput: {best_pars}, time: {T:0.1f} s')\n\n'''\nOptimal intervention level estimate: lintv_W=0.355\n!! 
lintv_H=-0.3*lintv_W+1.3 = 1.195\n'''\n\n\n\n########### Scenarios #############\n## Code which is commented out (single #) are used to run the simulation which is loaded underneath\n\n#basesim = make_sim(pars=pars, lintv={'W':0.355, 'C':0.355, 'S':0.355, 'H':-0.3*0.35+1.3})\n#msim = cv.MultiSim(basesim)\n#msim.run(n_runs=50, n_cpus=10)\n#msim.median(quantiles=[0.025, 0.975])\n#msim.plot()\n#msim.save(\"second_wave_hd50.msim\")\n\nmsim = cv.load(\"alreadyRun/second_wave_hd50.msim\")\n\n## Check that there's still infectious individuals left\nfor sim in msim.sims:\n print(sim.label)\n print(sim.results['new_infectious'][409])\n \n \n## Final size --> Herd immunity threshold over different seeds - Quantiles\nfin_size = [0]*50\nind = 0\nfor sim in msim.sims:\n fin_size[ind] = sim.results['cum_deaths'][-1]\n ind += 1\n\nnp.quantile(fin_size, [0.025, 0.5, 0.975])\nnp.quantile(fin_size, [0.025, 0.5, 0.975])/200e3\n## [74.52449518, 75.2632428 , 75.86432382]\n\n \n \n###### Running without interventions \n#basesimf = make_sim(pars=pars, lintv={'W':1, 'C':1, 'S':1, 'H':1})\n#msimf = cv.MultiSim(basesimf)\n#msimf.run(n_runs=50, n_cpus=10)\n#msimf.median(quantiles=[0.025, 0.975])\n#msimf.save(\"second_wave_free50.msim\")\n\nmismf = cv.load(\"alreadyRun/second_wave_free50.msim\")\n\n\ncum_inf = [0]*50\nind = 0\nfor sim in msimf.sims:\n cum_inf[ind] = sim.results['cum_infectious'][-1]\n ind += 1\n\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])/200e3\n\n\n####### With current estimated interventions\n#I_W = 0.23084114685289414\n#basesimCI = make_sim(pars=pars, lintv={'W':I_W, 'C':I_W, 'S':I_W, 'H':-0.3*I_W+1.3})\n#msimCI = cv.MultiSim(basesimCI)\n#msimCI.run(n_runs=50, n_cpus=10)\n#msimCI.median(quantiles=[0.025, 0.975])\n#msimCI.save(\"second_wave_fit_reopen50.msim\")\n\nmsimCI = cv.load(\"alreadyRun/second_wave_fit_reopen50.msim\")\n\ncum_inf = [0]*50\nind = 0\nfor sim in msimCI.sims:\n cum_inf[ind] = sim.results['cum_deaths'][-1]\n ind += 1\n\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])/200e3\n\n\n\n\n\n\n\n\n\n\n###### Running with closed schools\nbasesimCS = make_sim(pars=pars, lintv={'W':0.44, 'C':0.44, 'S':0, 'H':-0.3*0.44+1.3})\nmsimCS = cv.MultiSim(basesimCS)\nmsimCS.run(n_runs=25, n_cpus=10)\nmsimCS.median(quantiles=[0.025, 0.975])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.quantile" ] ]
Arielce/dio
[ "eb8035664f605783f86b41d34006aeb9ef861f13" ]
[ "tutorials/bios-boot-tutorial/bios-boot-tutorial.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport numpy\nimport os\nimport random\nimport re\nimport subprocess\nimport sys\nimport time\n\nargs = None\nlogFile = None\n\nunlockTimeout = 999999999\nfastUnstakeSystem = './fast.refund/dccio.system/dccio.system.wasm'\n\nsystemAccounts = [\n 'dccio.bpay',\n 'dccio.msig',\n 'dccio.names',\n 'dccio.ram',\n 'dccio.ramfee',\n 'dccio.saving',\n 'dccio.stake',\n 'dccio.token',\n 'dccio.vpay',\n]\n\ndef jsonArg(a):\n return \" '\" + json.dumps(a) + \"' \"\n\ndef run(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n if subprocess.call(args, shell=True):\n print('bios-boot-tutorial.py: exiting because of error')\n sys.exit(1)\n\ndef retry(args):\n while True:\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n if subprocess.call(args, shell=True):\n print('*** Retry')\n else:\n break\n\ndef background(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n return subprocess.Popen(args, shell=True)\n\ndef getOutput(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)\n return proc.communicate()[0].decode('utf-8')\n\ndef getJsonOutput(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)\n return json.loads(proc.communicate()[0])\n\ndef sleep(t):\n print('sleep', t, '...')\n time.sleep(t)\n print('resume')\n\ndef startWallet():\n run('rm -rf ' + os.path.abspath(args.wallet_dir))\n run('mkdir -p ' + os.path.abspath(args.wallet_dir))\n background(args.kdccd + ' --unlock-timeout %d --http-server-address 127.0.0.1:6666 --wallet-dir %s' % (unlockTimeout, os.path.abspath(args.wallet_dir)))\n sleep(.4)\n run(args.cldcc + 'wallet create --to-console')\n\ndef importKeys():\n run(args.cldcc + 'wallet import --private-key ' + args.private_key)\n keys = {}\n for a in accounts:\n key = a['pvt']\n if not key in keys:\n if len(keys) >= args.max_user_keys:\n break\n keys[key] = True\n run(args.cldcc + 'wallet import --private-key ' + key)\n for i in range(firstProducer, firstProducer + numProducers):\n a = accounts[i]\n key = a['pvt']\n if not key in keys:\n keys[key] = True\n run(args.cldcc + 'wallet import --private-key ' + key)\n\ndef startNode(nodeIndex, account):\n dir = args.nodes_dir + ('%02d-' % nodeIndex) + account['name'] + '/'\n run('rm -rf ' + dir)\n run('mkdir -p ' + dir)\n otherOpts = ''.join(list(map(lambda i: ' --p2p-peer-address localhost:' + str(9000 + i), range(nodeIndex))))\n if not nodeIndex: otherOpts += (\n ' --plugin dccio::history_plugin'\n ' --plugin dccio::history_api_plugin'\n )\n cmd = (\n args.noddcc +\n ' --max-irreversible-block-age -1'\n ' --contracts-console'\n ' --genesis-json ' + os.path.abspath(args.genesis) +\n ' --blocks-dir ' + os.path.abspath(dir) + '/blocks'\n ' --config-dir ' + os.path.abspath(dir) +\n ' --data-dir ' + os.path.abspath(dir) +\n ' --chain-state-db-size-mb 1024'\n ' --http-server-address 127.0.0.1:' + str(8000 + nodeIndex) +\n ' --p2p-listen-endpoint 127.0.0.1:' + str(9000 + nodeIndex) +\n ' --max-clients ' + str(maxClients) +\n ' --p2p-max-nodes-per-host ' + str(maxClients) +\n ' --enable-stale-production'\n ' --producer-name ' + account['name'] +\n ' --private-key \\'[\"' + account['pub'] + '\",\"' + account['pvt'] + '\"]\\''\n ' --plugin dccio::http_plugin'\n ' --plugin dccio::chain_api_plugin'\n ' --plugin dccio::producer_plugin' +\n 
otherOpts)\n with open(dir + 'stderr', mode='w') as f:\n f.write(cmd + '\\n\\n')\n background(cmd + ' 2>>' + dir + 'stderr')\n\ndef startProducers(b, e):\n for i in range(b, e):\n startNode(i - b + 1, accounts[i])\n\ndef createSystemAccounts():\n for a in systemAccounts:\n run(args.cldcc + 'create account dccio ' + a + ' ' + args.public_key)\n\ndef intToCurrency(i):\n return '%d.%04d %s' % (i // 10000, i % 10000, args.symbol)\n\ndef allocateFunds(b, e):\n dist = numpy.random.pareto(1.161, e - b).tolist() # 1.161 = 80/20 rule\n dist.sort()\n dist.reverse()\n factor = 1_000_000_000 / sum(dist)\n total = 0\n for i in range(b, e):\n funds = round(factor * dist[i - b] * 10000)\n if i >= firstProducer and i < firstProducer + numProducers:\n funds = max(funds, round(args.min_producer_funds * 10000))\n total += funds\n accounts[i]['funds'] = funds\n return total\n\ndef createStakedAccounts(b, e):\n ramFunds = round(args.ram_funds * 10000)\n configuredMinStake = round(args.min_stake * 10000)\n maxUnstaked = round(args.max_unstaked * 10000)\n for i in range(b, e):\n a = accounts[i]\n funds = a['funds']\n print('#' * 80)\n print('# %d/%d %s %s' % (i, e, a['name'], intToCurrency(funds)))\n print('#' * 80)\n if funds < ramFunds:\n print('skipping %s: not enough funds to cover ram' % a['name'])\n continue\n minStake = min(funds - ramFunds, configuredMinStake)\n unstaked = min(funds - ramFunds - minStake, maxUnstaked)\n stake = funds - ramFunds - unstaked\n stakeNet = round(stake / 2)\n stakeCpu = stake - stakeNet\n print('%s: total funds=%s, ram=%s, net=%s, cpu=%s, unstaked=%s' % (a['name'], intToCurrency(a['funds']), intToCurrency(ramFunds), intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(unstaked)))\n assert(funds == ramFunds + stakeNet + stakeCpu + unstaked)\n retry(args.cldcc + 'system newaccount --transfer dccio %s %s --stake-net \"%s\" --stake-cpu \"%s\" --buy-ram \"%s\" ' % \n (a['name'], a['pub'], intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(ramFunds)))\n if unstaked:\n retry(args.cldcc + 'transfer dccio %s \"%s\"' % (a['name'], intToCurrency(unstaked)))\n\ndef regProducers(b, e):\n for i in range(b, e):\n a = accounts[i]\n retry(args.cldcc + 'system regproducer ' + a['name'] + ' ' + a['pub'] + ' https://' + a['name'] + '.com' + '/' + a['pub'])\n\ndef listProducers():\n run(args.cldcc + 'system listproducers')\n\ndef vote(b, e):\n for i in range(b, e):\n voter = accounts[i]['name']\n prods = random.sample(range(firstProducer, firstProducer + numProducers), args.num_producers_vote)\n prods = ' '.join(map(lambda x: accounts[x]['name'], prods))\n retry(args.cldcc + 'system voteproducer prods ' + voter + ' ' + prods)\n\ndef claimRewards():\n table = getJsonOutput(args.cldcc + 'get table dccio dccio producers -l 100')\n times = []\n for row in table['rows']:\n if row['unpaid_blocks'] and not row['last_claim_time']:\n times.append(getJsonOutput(args.cldcc + 'system claimrewards -j ' + row['owner'])['processed']['elapsed'])\n print('Elapsed time for claimrewards:', times)\n\ndef proxyVotes(b, e):\n vote(firstProducer, firstProducer + 1)\n proxy = accounts[firstProducer]['name']\n retry(args.cldcc + 'system regproxy ' + proxy)\n sleep(1.0)\n for i in range(b, e):\n voter = accounts[i]['name']\n retry(args.cldcc + 'system voteproducer proxy ' + voter + ' ' + proxy)\n\ndef updateAuth(account, permission, parent, controller):\n run(args.cldcc + 'push action dccio updateauth' + jsonArg({\n 'account': account,\n 'permission': permission,\n 'parent': parent,\n 'auth': {\n 
'threshold': 1, 'keys': [], 'waits': [],\n 'accounts': [{\n 'weight': 1,\n 'permission': {'actor': controller, 'permission': 'active'}\n }]\n }\n }) + '-p ' + account + '@' + permission)\n\ndef resign(account, controller):\n updateAuth(account, 'owner', '', controller)\n updateAuth(account, 'active', 'owner', controller)\n sleep(1)\n run(args.cldcc + 'get account ' + account)\n\ndef randomTransfer(b, e):\n for j in range(20):\n src = accounts[random.randint(b, e - 1)]['name']\n dest = src\n while dest == src:\n dest = accounts[random.randint(b, e - 1)]['name']\n run(args.cldcc + 'transfer -f ' + src + ' ' + dest + ' \"0.0001 ' + args.symbol + '\"' + ' || true')\n\ndef msigProposeReplaceSystem(proposer, proposalName):\n requestedPermissions = []\n for i in range(firstProducer, firstProducer + numProducers):\n requestedPermissions.append({'actor': accounts[i]['name'], 'permission': 'active'})\n trxPermissions = [{'actor': 'dccio', 'permission': 'active'}]\n with open(fastUnstakeSystem, mode='rb') as f:\n setcode = {'account': 'dccio', 'vmtype': 0, 'vmversion': 0, 'code': f.read().hex()}\n run(args.cldcc + 'multisig propose ' + proposalName + jsonArg(requestedPermissions) + \n jsonArg(trxPermissions) + 'dccio setcode' + jsonArg(setcode) + ' -p ' + proposer)\n\ndef msigApproveReplaceSystem(proposer, proposalName):\n for i in range(firstProducer, firstProducer + numProducers):\n run(args.cldcc + 'multisig approve ' + proposer + ' ' + proposalName +\n jsonArg({'actor': accounts[i]['name'], 'permission': 'active'}) +\n '-p ' + accounts[i]['name'])\n\ndef msigExecReplaceSystem(proposer, proposalName):\n retry(args.cldcc + 'multisig exec ' + proposer + ' ' + proposalName + ' -p ' + proposer)\n\ndef msigReplaceSystem():\n run(args.cldcc + 'push action dccio buyrambytes' + jsonArg(['dccio', accounts[0]['name'], 200000]) + '-p dccio')\n sleep(1)\n msigProposeReplaceSystem(accounts[0]['name'], 'fast.unstake')\n sleep(1)\n msigApproveReplaceSystem(accounts[0]['name'], 'fast.unstake')\n msigExecReplaceSystem(accounts[0]['name'], 'fast.unstake')\n\ndef produceNewAccounts():\n with open('newusers', 'w') as f:\n for i in range(120_000, 200_000):\n x = getOutput(args.cldcc + 'create key --to-console')\n r = re.match('Private key: *([^ \\n]*)\\nPublic key: *([^ \\n]*)', x, re.DOTALL | re.MULTILINE)\n name = 'user'\n for j in range(7, -1, -1):\n name += chr(ord('a') + ((i >> (j * 4)) & 15))\n print(i, name)\n f.write(' {\"name\":\"%s\", \"pvt\":\"%s\", \"pub\":\"%s\"},\\n' % (name, r[1], r[2]))\n\ndef stepKillAll():\n run('killall kdccd noddcc || true')\n sleep(1.5)\ndef stepStartWallet():\n startWallet()\n importKeys()\ndef stepStartBoot():\n startNode(0, {'name': 'dccio', 'pvt': args.private_key, 'pub': args.public_key})\n sleep(1.5)\ndef stepInstallSystemContracts():\n run(args.cldcc + 'set contract dccio.token ' + args.contracts_dir + 'dccio.token/')\n run(args.cldcc + 'set contract dccio.msig ' + args.contracts_dir + 'dccio.msig/')\ndef stepCreateTokens():\n run(args.cldcc + 'push action dccio.token create \\'[\"dccio\", \"10000000000.0000 %s\"]\\' -p dccio.token' % (args.symbol))\n totalAllocation = allocateFunds(0, len(accounts))\n run(args.cldcc + 'push action dccio.token issue \\'[\"dccio\", \"%s\", \"memo\"]\\' -p dccio' % intToCurrency(totalAllocation))\n sleep(1)\ndef stepSetSystemContract():\n retry(args.cldcc + 'set contract dccio ' + args.contracts_dir + 'dccio.system/')\n sleep(1)\n run(args.cldcc + 'push action dccio setpriv' + jsonArg(['dccio.msig', 1]) + '-p dccio@active')\ndef 
stepCreateStakedAccounts():\n createStakedAccounts(0, len(accounts))\ndef stepRegProducers():\n regProducers(firstProducer, firstProducer + numProducers)\n sleep(1)\n listProducers()\ndef stepStartProducers():\n startProducers(firstProducer, firstProducer + numProducers)\n sleep(args.producer_sync_delay)\ndef stepVote():\n vote(0, 0 + args.num_voters)\n sleep(1)\n listProducers()\n sleep(5)\ndef stepProxyVotes():\n proxyVotes(0, 0 + args.num_voters)\ndef stepResign():\n resign('dccio', 'dccio.prods')\n for a in systemAccounts:\n resign(a, 'dccio')\ndef stepTransfer():\n while True:\n randomTransfer(0, args.num_senders)\ndef stepLog():\n run('tail -n 60 ' + args.nodes_dir + '00-dccio/stderr')\n\n# Command Line Arguments\n\nparser = argparse.ArgumentParser()\n\ncommands = [\n ('k', 'kill', stepKillAll, True, \"Kill all noddcc and kdccd processes\"),\n ('w', 'wallet', stepStartWallet, True, \"Start kdccd, create wallet, fill with keys\"),\n ('b', 'boot', stepStartBoot, True, \"Start boot node\"),\n ('s', 'sys', createSystemAccounts, True, \"Create system accounts (dccio.*)\"),\n ('c', 'contracts', stepInstallSystemContracts, True, \"Install system contracts (token, msig)\"),\n ('t', 'tokens', stepCreateTokens, True, \"Create tokens\"),\n ('S', 'sys-contract', stepSetSystemContract, True, \"Set system contract\"),\n ('T', 'stake', stepCreateStakedAccounts, True, \"Create staked accounts\"),\n ('p', 'reg-prod', stepRegProducers, True, \"Register producers\"),\n ('P', 'start-prod', stepStartProducers, True, \"Start producers\"),\n ('v', 'vote', stepVote, True, \"Vote for producers\"),\n ('R', 'claim', claimRewards, True, \"Claim rewards\"),\n ('x', 'proxy', stepProxyVotes, True, \"Proxy votes\"),\n ('q', 'resign', stepResign, True, \"Resign dccio\"),\n ('m', 'msg-replace', msigReplaceSystem, False, \"Replace system contract using msig\"),\n ('X', 'xfer', stepTransfer, False, \"Random transfer tokens (infinite loop)\"),\n ('l', 'log', stepLog, True, \"Show tail of node's log\"),\n]\n\nparser.add_argument('--public-key', metavar='', help=\"dccIO Public Key\", default='dcc8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr', dest=\"public_key\")\nparser.add_argument('--private-Key', metavar='', help=\"dccIO Private Key\", default='5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p', dest=\"private_key\")\nparser.add_argument('--cldcc', metavar='', help=\"Cldcc command\", default='../../build/programs/cldcc/cldcc --wallet-url http://127.0.0.1:6666 ')\nparser.add_argument('--noddcc', metavar='', help=\"Path to noddcc binary\", default='../../build/programs/noddcc/noddcc')\nparser.add_argument('--kdccd', metavar='', help=\"Path to kdccd binary\", default='../../build/programs/kdccd/kdccd')\nparser.add_argument('--contracts-dir', metavar='', help=\"Path to contracts directory\", default='../../build/contracts/')\nparser.add_argument('--nodes-dir', metavar='', help=\"Path to nodes directory\", default='./nodes/')\nparser.add_argument('--genesis', metavar='', help=\"Path to genesis.json\", default=\"./genesis.json\")\nparser.add_argument('--wallet-dir', metavar='', help=\"Path to wallet directory\", default='./wallet/')\nparser.add_argument('--log-path', metavar='', help=\"Path to log file\", default='./output.log')\nparser.add_argument('--symbol', metavar='', help=\"The dccio.system symbol\", default='SYS')\nparser.add_argument('--user-limit', metavar='', help=\"Max number of users. 
(0 = no limit)\", type=int, default=3000)\nparser.add_argument('--max-user-keys', metavar='', help=\"Maximum user keys to import into wallet\", type=int, default=10)\nparser.add_argument('--ram-funds', metavar='', help=\"How much funds for each user to spend on ram\", type=float, default=0.1)\nparser.add_argument('--min-stake', metavar='', help=\"Minimum stake before allocating unstaked funds\", type=float, default=0.9)\nparser.add_argument('--max-unstaked', metavar='', help=\"Maximum unstaked funds\", type=float, default=10)\nparser.add_argument('--producer-limit', metavar='', help=\"Maximum number of producers. (0 = no limit)\", type=int, default=0)\nparser.add_argument('--min-producer-funds', metavar='', help=\"Minimum producer funds\", type=float, default=1000.0000)\nparser.add_argument('--num-producers-vote', metavar='', help=\"Number of producers for which each user votes\", type=int, default=20)\nparser.add_argument('--num-voters', metavar='', help=\"Number of voters\", type=int, default=10)\nparser.add_argument('--num-senders', metavar='', help=\"Number of users to transfer funds randomly\", type=int, default=10)\nparser.add_argument('--producer-sync-delay', metavar='', help=\"Time (s) to sleep to allow producers to sync\", type=int, default=80)\nparser.add_argument('-a', '--all', action='store_true', help=\"Do everything marked with (*)\")\nparser.add_argument('-H', '--http-port', type=int, default=8000, metavar='', help='HTTP port for cldcc')\n\nfor (flag, command, function, inAll, help) in commands:\n prefix = ''\n if inAll: prefix += '*'\n if prefix: help = '(' + prefix + ') ' + help\n if flag:\n parser.add_argument('-' + flag, '--' + command, action='store_true', help=help, dest=command)\n else:\n parser.add_argument('--' + command, action='store_true', help=help, dest=command)\n \nargs = parser.parse_args()\n\nargs.cldcc += '--url http://127.0.0.1:%d ' % args.http_port\n\nlogFile = open(args.log_path, 'a')\n\nlogFile.write('\\n\\n' + '*' * 80 + '\\n\\n\\n')\n\nwith open('accounts.json') as f:\n a = json.load(f)\n if args.user_limit:\n del a['users'][args.user_limit:]\n if args.producer_limit:\n del a['producers'][args.producer_limit:]\n firstProducer = len(a['users'])\n numProducers = len(a['producers'])\n accounts = a['users'] + a['producers']\n\nmaxClients = numProducers + 10\n\nhaveCommand = False\nfor (flag, command, function, inAll, help) in commands:\n if getattr(args, command) or inAll and args.all:\n if function:\n haveCommand = True\n function()\nif not haveCommand:\n print('bios-boot-tutorial.py: Tell me what to do. -a does almost everything. -h shows options.')\n" ]
[ [ "numpy.random.pareto" ] ]
11BP11/inverse_problems_GAN
[ "1d8ece55f7de1610b5481d39945b083a4ed3fcc0" ]
[ "problems/center_inpainting.py" ]
[ "\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom problems.problem import *\r\n\r\nname = \"center inpainting\"\r\n \r\ng_tf_info_placeholder = tf.placeholder(tf.float32, [None], name='g_transform_info')\r\n \r\ndef problem_loss(x_tformed, g_tformed):\r\n return tf.reduce_mean(tf.abs(x_tformed-g_tformed),[1,2,3])\r\n\r\ndef merge(g_output, x_tformed, g_tform_info):\r\n h, w = x_tformed.shape[1:3]\r\n h4, w4 = h//6, w//6\r\n merged = np.copy(x_tformed)\r\n merged[:,h4:h-h4,w4:w-w4,:] = g_output[:,h4:h-h4,w4:w-w4,:]\r\n return merged\r\n\r\ndef transform_tf(x, g_tf_info):\r\n not_x = - tf.ones_like(x, dtype=tf.float32)\r\n mask = np.ones(x.get_shape(), dtype=np.float32)\r\n mask0 = np.zeros(x.get_shape(), dtype=np.float32)\r\n mask = merge(mask0, mask, None)\r\n output = mask * x + (1-mask) * not_x\r\n return output\r\n\r\n \r\ndef transform(x, g_tf_info):\r\n not_x = - np.ones_like(x, dtype=np.float32)\r\n output = merge(not_x, x, None)\r\n return output\r\n \r\ndef create_tform_info(args):\r\n return [0]*args.batch_size\r\n\r\ndef safe_format(tformed):\r\n return np.clip(tformed,0,1)\r\n \r\n " ]
[ [ "tensorflow.placeholder", "tensorflow.ones_like", "numpy.ones_like", "numpy.copy", "tensorflow.abs", "numpy.clip" ] ]
nicoguertler/leibnizgym
[ "2c1cb14fbfece09644445d58fe7ac28c41611e5f" ]
[ "leibnizgym/envs/trifinger/sample.py" ]
[ "\"\"\"\n@author Mayank Mittal\n@email [email protected]\n@brief Defines sampling stratergies.\n\n# TODO: These functions are generic. Can put in leibnizgym.utils.torch_utils module.\n\"\"\"\n\n# leibnizgym\nfrom leibnizgym.utils.torch_utils import quaternion_from_euler_xyz\n# python\nfrom typing import Union, List, Tuple\nimport numpy as np\nimport torch\nimport torch.nn.functional\n\n\"\"\"\nSampling of cuboidal object\n\"\"\"\n\n\[email protected]\ndef random_xy(num: int, max_com_distance_to_center: float, device: str) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Returns sampled uniform positions in circle (https://stackoverflow.com/a/50746409)\"\"\"\n # sample radius of circle\n radius = torch.sqrt(torch.rand(num, dtype=torch.float, device=device))\n radius *= max_com_distance_to_center\n # sample theta of point\n theta = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)\n # x,y-position of the cube\n x = radius * torch.cos(theta)\n y = radius * torch.sin(theta)\n\n return x, y\n\n\[email protected]\ndef random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor:\n \"\"\"Returns sampled height of the goal object.\"\"\"\n z = torch.rand(num, dtype=torch.float, device=device)\n z = (max_height - min_height) * z + min_height\n\n return z\n\n\[email protected]\ndef default_orientation(num: int, device: str) -> torch.Tensor:\n \"\"\"Returns identity rotation transform.\"\"\"\n quat = torch.zeros((num, 4,), dtype=torch.float, device=device)\n quat[..., -1] = 1.0\n\n return quat\n\n\[email protected]\ndef random_orientation(num: int, device: str) -> torch.Tensor:\n \"\"\"Returns sampled rotation in 3D as quaternion.\n Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html\n \"\"\"\n # sample random orientation from normal distribution\n quat = torch.randn((num, 4,), dtype=torch.float, device=device)\n # normalize the quaternion\n quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12)\n\n return quat\n\[email protected]\ndef random_angular_vel(num: int, device: str, magnitude_stdev: float) -> torch.Tensor:\n \"\"\"Samples a random angular velocity with standard deviation `magnitude_stdev`\"\"\"\n\n axis = torch.randn((num, 3,), dtype=torch.float, device=device)\n axis /= torch.norm(axis, p=2, dim=-1).view(-1, 1)\n magnitude = torch.randn((num, 1,), dtype=torch.float, device=device)\n magnitude *= magnitude_stdev\n return magnitude * axis\n\[email protected]\ndef random_yaw_orientation(num: int, device: str) -> torch.Tensor:\n \"\"\"Returns sampled rotation around z-axis.\"\"\"\n roll = torch.zeros(num, dtype=torch.float, device=device)\n pitch = torch.zeros(num, dtype=torch.float, device=device)\n yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)\n\n return quaternion_from_euler_xyz(roll, pitch, yaw)\n\n# EOF\n" ]
[ [ "torch.cos", "torch.randn", "torch.nn.functional.normalize", "torch.rand", "torch.norm", "torch.sin", "torch.zeros" ] ]
magne-max/zipline
[ "41172cd3a320806c4116bcfafa6a607fa300acde" ]
[ "zipline/pipeline/loaders/events.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom six import viewvalues\nfrom toolz import groupby, merge\n\nfrom .base import PipelineLoader\nfrom .frame import DataFrameLoader\nfrom zipline.pipeline.common import (\n EVENT_DATE_FIELD_NAME,\n SID_FIELD_NAME,\n TS_FIELD_NAME,\n)\nfrom zipline.pipeline.loaders.utils import (\n next_event_indexer,\n previous_event_indexer,\n)\n\n\ndef required_event_fields(next_value_columns, previous_value_columns):\n \"\"\"\n Compute the set of resource columns required to serve\n ``next_value_columns`` and ``previous_value_columns``.\n \"\"\"\n # These metadata columns are used to align event indexers.\n return {\n TS_FIELD_NAME,\n SID_FIELD_NAME,\n EVENT_DATE_FIELD_NAME,\n }.union(\n # We also expect any of the field names that our loadable columns\n # are mapped to.\n viewvalues(next_value_columns),\n viewvalues(previous_value_columns),\n )\n\n\ndef validate_column_specs(events, next_value_columns, previous_value_columns):\n \"\"\"\n Verify that the columns of ``events`` can be used by an EventsLoader to\n serve the BoundColumns described by ``next_value_columns`` and\n ``previous_value_columns``.\n \"\"\"\n required = {\n TS_FIELD_NAME,\n SID_FIELD_NAME,\n EVENT_DATE_FIELD_NAME,\n }.union(\n # We also expect any of the field names that our loadable columns\n # are mapped to.\n viewvalues(next_value_columns),\n viewvalues(previous_value_columns),\n )\n received = set(events.columns)\n missing = required - received\n if missing:\n raise ValueError(\n \"EventsLoader missing required columns {missing}.\\n\"\n \"Got Columns: {received}\\n\"\n \"Expected Columns: {required}\".format(\n missing=sorted(missing),\n received=sorted(received),\n required=sorted(required),\n )\n )\n\n\nclass EventsLoader(PipelineLoader):\n \"\"\"\n Base class for PipelineLoaders that supports loading the next and previous\n value of an event field.\n\n Does not currently support adjustments.\n\n Parameters\n ----------\n events : pd.DataFrame\n A DataFrame representing events (e.g. 
share buybacks or\n earnings announcements) associated with particular companies.\n\n ``events`` must contain at least three columns::\n sid : int64\n The asset id associated with each event.\n\n event_date : datetime64[ns]\n The date on which the event occurred.\n\n timestamp : datetime64[ns]\n The date on which we learned about the event.\n\n next_value_columns : dict[BoundColumn -> str]\n Map from dataset columns to raw field names that should be used when\n searching for a next event value.\n\n previous_value_columns : dict[BoundColumn -> str]\n Map from dataset columns to raw field names that should be used when\n searching for a previous event value.\n \"\"\"\n def __init__(self,\n events,\n next_value_columns,\n previous_value_columns):\n validate_column_specs(\n events,\n next_value_columns,\n previous_value_columns,\n )\n\n events = events[events[EVENT_DATE_FIELD_NAME].notnull()]\n\n # We always work with entries from ``events`` directly as numpy arrays,\n # so we coerce from a frame here.\n self.events = {\n name: np.asarray(series)\n for name, series in events.sort(EVENT_DATE_FIELD_NAME).iteritems()\n }\n\n # Columns to load with self.load_next_events.\n self.next_value_columns = next_value_columns\n\n # Columns to load with self.load_previous_events.\n self.previous_value_columns = previous_value_columns\n\n def split_next_and_previous_event_columns(self, requested_columns):\n \"\"\"\n Split requested columns into columns that should load the next known\n value and columns that should load the previous known value.\n\n Parameters\n ----------\n requested_columns : iterable[BoundColumn]\n\n Returns\n -------\n next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]\n ``requested_columns``, partitioned into sub-sequences based on\n whether the column should produce values from the next event or the\n previous event\n \"\"\"\n def next_or_previous(c):\n if c in self.next_value_columns:\n return 'next'\n elif c in self.previous_value_columns:\n return 'previous'\n\n raise ValueError(\n \"{c} not found in next_value_columns \"\n \"or previous_value_columns\".format(c=c)\n )\n groups = groupby(next_or_previous, requested_columns)\n return groups.get('next', ()), groups.get('previous', ())\n\n def next_event_indexer(self, dates, sids):\n return next_event_indexer(\n dates,\n sids,\n self.events[EVENT_DATE_FIELD_NAME],\n self.events[TS_FIELD_NAME],\n self.events[SID_FIELD_NAME],\n )\n\n def previous_event_indexer(self, dates, sids):\n return previous_event_indexer(\n dates,\n sids,\n self.events[EVENT_DATE_FIELD_NAME],\n self.events[TS_FIELD_NAME],\n self.events[SID_FIELD_NAME],\n )\n\n def load_next_events(self, columns, dates, sids, mask):\n if not columns:\n return {}\n\n return self._load_events(\n name_map=self.next_value_columns,\n indexer=self.next_event_indexer(dates, sids),\n columns=columns,\n dates=dates,\n sids=sids,\n mask=mask,\n )\n\n def load_previous_events(self, columns, dates, sids, mask):\n if not columns:\n return {}\n\n return self._load_events(\n name_map=self.previous_value_columns,\n indexer=self.previous_event_indexer(dates, sids),\n columns=columns,\n dates=dates,\n sids=sids,\n mask=mask,\n )\n\n def _load_events(self, name_map, indexer, columns, dates, sids, mask):\n def to_frame(array):\n return pd.DataFrame(array, index=dates, columns=sids)\n\n out = {}\n for c in columns:\n raw = self.events[name_map[c]][indexer]\n # indexer will be -1 for locations where we don't have a known\n # value.\n raw[indexer < 0] = c.missing_value\n\n # Delegate 
the actual array formatting logic to a DataFrameLoader.\n loader = DataFrameLoader(c, to_frame(raw), adjustments=None)\n out[c] = loader.load_adjusted_array([c], dates, sids, mask)[c]\n return out\n\n def load_adjusted_array(self, columns, dates, sids, mask):\n n, p = self.split_next_and_previous_event_columns(columns)\n return merge(\n self.load_next_events(n, dates, sids, mask),\n self.load_previous_events(p, dates, sids, mask),\n )\n" ]
[ [ "pandas.DataFrame", "numpy.asarray" ] ]
gmtpritam/stolgo
[ "8ced9b4c3ea2b0a89c929c2d2765ebc8593d00b2" ]
[ "lib/stolgo/nasdaq.py" ]
[ "import requests\nimport io\n\nfrom datetime import timedelta\nimport pandas as pd\n\nfrom stolgo.helper import get_date_range,get_formated_dateframe\nfrom stolgo.request import RequestUrl,Curl\n\n#default params for url connection\nDEFAULT_TIMEOUT = 5 # seconds\nMAX_RETRIES = 2\n#default periods\nDEFAULT_DAYS = 250\n\nclass NasdaqUrls:\n def __init__(self):\n self.STK_DATA_PRE_URL = r\"https://www.nasdaq.com/api/v1/historical/\"\n self.date_formats = {\"stock_data\":\"%Y-%m-%d\"}\n\n #historical data header\n self.header = {\n \"authority\":\"www.nasdaq.com\",\n \"method\":\"GET\",\n \"path\":\"/market-activity/stocks/aapl/historical\",\n \"scheme\":\"https\",\n \"accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"accept-encoding\":\"gzip, deflate, br\",\n \"accept-language\":\"en-GB,en-US;q=0.9,en;q=0.8\",\n \"cache-control\":\"max-age=0\",\n \"referer\":\"https://www.nasdaq.com/market-activity/quotes/historical\",\n \"sec-fetch-dest\":\"document\",\n \"sec-fetch-mode\":\"navigate\",\n \"sec-fetch-site\":\"same-origin\",\n \"sec-fetch-user\":\"?1\",\n \"upgrade-insecure-requests\":\"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\"\n }\n\n def get_data_url(self,symbol,start,end):\n try:\n start = start.strftime(self.date_formats[\"stock_data\"])\n end = end.strftime(self.date_formats[\"stock_data\"])\n url = self.STK_DATA_PRE_URL + symbol + r\"/stocks/\" + start + r\"/\" + end\n return url\n except Exception as err:\n raise Exception(\"Error occurred in URL constructions \", str(err))\n\nclass Nasdaq:\n \"\"\"Nasdaq class to get data from nasdaq\n \"\"\"\n def __init__(self,timeout=DEFAULT_TIMEOUT,max_retries=MAX_RETRIES,cloud_mode=False):\n if cloud_mode:\n self.requests = Curl(timeout,max_retries)\n else:\n self.requests = RequestUrl(timeout,max_retries)\n self.nasdaq_url = NasdaqUrls()\n\n def __get_data_adjusted(self,dfs,symbol,start=None,end=None,periods=None):\n if periods and (dfs.shape[0] < periods):\n new_periods = periods - dfs.shape[0]\n try:\n s_from = e_till = None\n #if only start, find till today\n if start and (not end):\n s_from = dfs.index[0] + timedelta(1)\n e_till = None\n #if not start, can go to past\n elif((end and (not start)) or periods):\n s_from = None\n e_till = dfs.index[-1] - timedelta(1)\n except IndexError as err:\n raise Exception(\"Nasdaq Access error.\")\n except Exception as exc:\n raise Exception(\"Nasdaq data error: \",str(exc))\n try:\n dfs_new = self.get_data(symbol,start = s_from,end = e_till,periods = new_periods)\n dfs = self.__join_dfs(dfs,dfs_new).sort_index(ascending=False)\n except Exception as exc:\n #Small part of data may not be available\n pass\n return dfs\n\n def __join_dfs(self,join,joiner):\n \"\"\"will append joiner to join for oi_dfs\n\n :param join: df which will be appended\n :type join: pandas.DataFrame\n :param joiner: df which we want to append\n :type joiner: pandas.DataFrame\n :return: merged data frame\n :rtype: pandas.DataFrame\n \"\"\"\n return join.append(joiner)\n\n def get_data(self,symbol,start=None,end=None,periods=None,dayfirst=False):\n \"\"\"get_data API to fetch data from nasdaq\n\n :param symbol: stock symbol\n :type symbol: string\n :param start: start date, defaults to None\n :type start: string, optional\n :param end: end date, defaults to None\n :type end: string, optional\n :param periods: number of days, defaults to None\n 
:type periods: integer, optional\n :param dayfirst: True if date format is european style DD/MM/YYYY, defaults to False\n :type dayfirst: bool, optional\n :raises ValueError: for invalid inputs\n :raises Exception: incase if no data found\n :return: stock data\n :rtype: pandas.DataFrame\n \"\"\"\n try:\n #Step1: get the date range\n s_from,e_till = get_date_range(start=start,end=end,periods=periods,dayfirst=dayfirst)\n\n if s_from > e_till:\n raise ValueError(\"End should grater than start.\")\n\n url = self.nasdaq_url.get_data_url(symbol=symbol,start=s_from,end=e_till)\n res = self.requests.get(url,headers=self.nasdaq_url.header)\n\n try:\n dfs = pd.read_csv(io.StringIO(res.content.decode('utf-8')))\n except Exception as err:\n #increase data range, nasdaq not returning for small set\n if e_till == get_formated_dateframe():\n raise Exception(\"Nasdaq not retruning data for this date range.\\\n Please, retry with other date ranges\")\n e_till = get_formated_dateframe()\n if (e_till - s_from).days < DEFAULT_DAYS:\n s_from = e_till - DEFAULT_DAYS\n dfs = self.get_data(symbol,start=s_from,end=e_till)\n\n dfs.set_index(\"Date\",inplace=True)\n #convert to datetime\n dfs.index = pd.to_datetime(dfs.index)\n dfs = self.__get_data_adjusted(dfs,symbol,start=start,end=end,periods=periods)\n return dfs\n except Exception as err:\n raise Exception(\"Error occurred while getting data :\", str(err))" ]
[ [ "pandas.to_datetime" ] ]
guoyang328/pytorch-dann
[ "1971cf1a7b9ecadc17932a8ecb3f0c34609751a3" ]
[ "datasets/mnist.py" ]
[ "\"\"\"Dataset setting and data loader for MNIST.\"\"\"\n\n\nimport torch\nfrom torchvision import datasets, transforms\nimport os\n\ndef get_mnist(dataset_root, batch_size, train):\n \"\"\"Get MNIST datasets loader.\"\"\"\n # image pre-processing\n pre_process = transforms.Compose([transforms.Resize(28), # different img size settings for mnist(28), usps(16) and svhn(32).\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.1307], # Mean of MNIST train data\n std=[0.3015] # std of MNIST train data\n )])\n\n # datasets and data loader\n mnist_dataset = datasets.MNIST(root=os.path.join(dataset_root),\n train=train,\n transform=pre_process,\n download=True)\n\n\n mnist_data_loader = torch.utils.data.DataLoader(\n dataset=mnist_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=8)\n\n return mnist_data_loader" ]
[ [ "torch.utils.data.DataLoader" ] ]
heiseApple/learn2learn
[ "df3c3291b4681440a80a69a7815090a4bd3cd661" ]
[ "examples/text/news_topic_classification.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport random\n\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\n\nimport learn2learn as l2l\n\n\nclass Net(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, num_classes, input_dim=768, inner_dim=200, pooler_dropout=0.3):\n super().__init__()\n self.dense = nn.Linear(input_dim, inner_dim)\n self.activation_fn = nn.ReLU()\n self.dropout = nn.Dropout(p=pooler_dropout)\n self.out_proj = nn.Linear(inner_dim, num_classes)\n\n def forward(self, x, **kwargs):\n x = self.dropout(x)\n x = self.dense(x)\n x = self.activation_fn(x)\n x = self.dropout(x)\n x = F.log_softmax(self.out_proj(x), dim=1)\n return x\n\n\ndef accuracy(predictions, targets):\n predictions = predictions.argmax(dim=1)\n acc = (predictions == targets).sum().float()\n acc /= len(targets)\n return acc.item()\n\n\ndef collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):\n \"\"\"Convert a list of 1d tensors into a padded 2d tensor.\"\"\"\n size = max(v.size(0) for v in values)\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res\n\nclass _BatchedDataset(torch.utils.data.Dataset):\n def __init__(self, batched):\n self.sents = [s for s in batched[0]]\n self.ys = [y for y in batched[1]]\n \n def __len__(self):\n return len(self.ys)\n \n def __getitem__(self, idx):\n return (self.sents[idx], self.ys[idx])\n\n\ndef compute_loss(task, roberta, device, learner, loss_func, batch=15):\n loss = 0.0\n acc = 0.0\n for i, (x, y) in enumerate(torch.utils.data.DataLoader(\n _BatchedDataset(task), batch_size=batch, shuffle=True, num_workers=0)):\n # RoBERTa ENCODING\n x = collate_tokens([roberta.encode(sent) for sent in x], pad_idx=1)\n with torch.no_grad():\n x = roberta.extract_features(x)\n x = x[:, 0, :]\n\n # Moving to device\n x, y = x.to(device), y.view(-1).to(device)\n\n output = learner(x)\n curr_loss = loss_func(output, y)\n acc += accuracy(output, y)\n loss += curr_loss / len(task)\n loss /= len(task)\n return loss, acc\n\n\ndef main(lr=0.005, maml_lr=0.01, iterations=1000, ways=5, shots=1, tps=32, fas=5, device=torch.device(\"cpu\"),\n download_location=\"/tmp/text\"):\n dataset = l2l.text.datasets.NewsClassification(root=download_location, download=True)\n dataset = l2l.data.MetaDataset(dataset)\n\n classes = list(range(len(dataset.labels))) # 41 classes\n random.shuffle(classes)\n\n train_dataset, validation_dataset, test_dataset = dataset, dataset, dataset\n\n train_gen = l2l.data.TaskDataset(\n train_dataset, num_tasks=20000, \n task_transforms=[\n l2l.data.transforms.FusedNWaysKShots(\n train_dataset, n=ways, k=shots, filter_labels=classes[:20]),\n l2l.data.transforms.LoadData(train_dataset),\n l2l.data.transforms.RemapLabels(train_dataset)],)\n\n validation_gen = l2l.data.TaskDataset(\n validation_dataset, num_tasks=20000, \n task_transforms=[\n l2l.data.transforms.FusedNWaysKShots(\n validation_dataset, n=ways, k=shots, filter_labels=classes[20:30]),\n l2l.data.transforms.LoadData(validation_dataset),\n l2l.data.transforms.RemapLabels(validation_dataset)],)\n\n test_gen = l2l.data.TaskDataset(\n test_dataset, num_tasks=20000, \n 
task_transforms=[\n l2l.data.transforms.FusedNWaysKShots(\n test_dataset, n=ways, k=shots, filter_labels=classes[30:]),\n l2l.data.transforms.LoadData(test_dataset),\n l2l.data.transforms.RemapLabels(test_dataset)],)\n\n torch.hub.set_dir(download_location)\n roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')\n roberta.eval()\n roberta.to(device)\n model = Net(num_classes=ways)\n model.to(device)\n meta_model = l2l.algorithms.MAML(model, lr=maml_lr)\n opt = optim.Adam(meta_model.parameters(), lr=lr)\n loss_func = nn.NLLLoss(reduction=\"sum\")\n\n tqdm_bar = tqdm(range(iterations))\n\n accs = []\n for _ in tqdm_bar:\n iteration_error = 0.0\n iteration_acc = 0.0\n for _ in range(tps):\n learner = meta_model.clone()\n train_task, valid_task = train_gen.sample(), validation_gen.sample()\n\n # Fast Adaptation\n for _ in range(fas):\n train_error, _ = compute_loss(train_task, roberta, device, learner, loss_func, batch=shots * ways)\n learner.adapt(train_error)\n\n # Compute validation loss\n valid_error, valid_acc = compute_loss(valid_task, roberta, device, learner, loss_func,\n batch=shots * ways)\n iteration_error += valid_error\n iteration_acc += valid_acc\n\n iteration_error /= tps\n iteration_acc /= tps\n tqdm_bar.set_description(\"Loss : {:.3f} Acc : {:.3f}\".format(iteration_error.item(), iteration_acc))\n accs.append(iteration_acc)\n # Take the meta-learning step\n opt.zero_grad()\n iteration_error.backward()\n opt.step()\n print (f'first and best validation accuracy: {accs[0]:.4f}, {max(accs):.4f}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn2Learn Text Classification Example')\n\n parser.add_argument('--ways', type=int, default=5, metavar='N',\n help='number of ways (default: 5)')\n parser.add_argument('--shots', type=int, default=1, metavar='N',\n help='number of shots (default: 1)')\n parser.add_argument('-tps', '--tasks-per-step', type=int, default=32, metavar='N',\n help='tasks per step (default: 32)')\n parser.add_argument('-fas', '--fast-adaption-steps', type=int, default=5, metavar='N',\n help='steps per fast adaption (default: 5)')\n\n parser.add_argument('--iterations', type=int, default=1000, metavar='N',\n help='number of iterations (default: 1000)')\n\n parser.add_argument('--lr', type=float, default=0.005, metavar='LR',\n help='learning rate (default: 0.005)')\n parser.add_argument('--maml-lr', type=float, default=0.01, metavar='LR',\n help='learning rate for MAML (default: 0.01)')\n\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n parser.add_argument('--download-location', type=str, default=\"/tmp/text\", metavar='S',\n help='download location for train data and roberta(default : /tmp/text')\n\n args = parser.parse_args()\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n main(lr=args.lr, maml_lr=args.maml_lr, iterations=args.iterations, ways=args.ways, shots=args.shots,\n tps=args.tasks_per_step, fas=args.fast_adaption_steps, device=device,\n download_location=args.download_location)\n" ]
[ [ "torch.nn.NLLLoss", "torch.nn.Linear", "torch.manual_seed", "torch.no_grad", "torch.nn.ReLU", "torch.cuda.is_available", "torch.hub.load", "torch.hub.set_dir", "torch.device", "torch.nn.Dropout" ] ]
taqtiqa-mark/pymarket
[ "2f8db92010d5f9407a72941788500351e92cbe81" ]
[ "pymarket/bids/demand_curves.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom typing import Tuple, Union\nfrom pymarket.bids import BidManager\n\n\ndef demand_curve_from_bids(\n bids: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Creates a demand curve from a set of buying bids.\n It is the inverse cumulative distribution of quantity\n as a function of price.\n\n Parameters\n ----------\n bids\n Collection of all the bids in the market. The algorithm\n filters only the buying bids.\n\n Returns\n ---------\n demand_curve: np.ndarray\n Stepwise constant demand curve represented as a collection\n of the N rightmost points of each interval (N-1 bids). It is stored\n as a (N, 2) matrix where the first column is the x-coordinate\n and the second column is the y-coordinate.\n An extra point is a))dded with x coordinate at infinity and\n price at 0 to represent the end of the curve.\n\n index : np.ndarray\n The order of the identifier of each bid in the demand\n curve.\n\n Examples\n ---------\n\n A minimal example, selling bid is ignored:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 1, 0, buying=True)\n 0\n >>> bm.add_bid(1, 1, 1, buying=False)\n 1\n >>> dc, index = pm.demand_curve_from_bids(bm.get_df())\n >>> dc\n array([[ 1., 1.],\n [inf, 0.]])\n >>> index\n array([0])\n\n A larger example with reordering of bids:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 1, 0, buying=True)\n 0\n >>> bm.add_bid(1, 1, 1, buying=False)\n 1\n >>> bm.add_bid(3, 0.5, 2, buying=True)\n 2\n >>> bm.add_bid(2.3, 0.1, 3, buying=True)\n 3\n >>> dc, index = pm.demand_curve_from_bids(bm.get_df())\n >>> dc\n array([[1. , 1. ],\n [4. , 0.5],\n [6.3, 0.1],\n [inf, 0. ]])\n >>> index\n array([0, 2, 3])\n\n \"\"\"\n buying = bids[bids.buying]\n buying = buying.sort_values('price', ascending=False)\n buying['acum'] = buying.quantity.cumsum()\n demand_curve = buying[['acum', 'price']].values\n demand_curve = np.vstack([demand_curve, [np.inf, 0]])\n index = buying.index.values\n return demand_curve, index\n\n\ndef supply_curve_from_bids(\n bids: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Creates a supply curve from a set of selling bids.\n It is the cumulative distribution of quantity\n as a function of price.\n\n Parameters\n ----------\n bids\n Collection of all the bids in the market. The algorithm\n filters only the selling bids.\n\n Returns\n ---------\n supply_curve: np.ndarray\n Stepwise constant demand curve represented as a collection\n of the N rightmost points of each interval (N-1 bids). It is stored\n as a (N, 2) matrix where the first column is the x-coordinate\n and the second column is the y-coordinate.\n An extra point is added with x coordinate at infinity and\n price at infinity to represent the end of the curve.\n\n index : np.ndarray\n The order of the identifier of each bid in the supply\n curve.\n\n Examples\n ---------\n\n A minimal example, selling bid is ignored:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 3, 0, False)\n 0\n >>> bm.add_bid(2.1, 3, 3, True)\n 1\n >>> sc, index = pm.supply_curve_from_bids(bm.get_df())\n >>> sc\n array([[ 1., 3.],\n [inf, inf]])\n >>> index\n array([0])\n\n A larger example with reordering:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 3, 0, False)\n 0\n >>> bm.add_bid(2.1, 3, 3, True)\n 1\n >>> bm.add_bid(0.2, 1, 3, False)\n 2\n >>> bm.add_bid(1.7, 6, 4, False)\n 3\n >>> sc, index = pm.supply_curve_from_bids(bm.get_df())\n >>> sc\n array([[0.2, 1. ],\n [1.2, 3. ],\n [2.9, 6. 
],\n [inf, inf]])\n >>> index\n array([2, 0, 3])\n\n\n \"\"\"\n selling = bids[bids.buying == False]\n selling = selling.sort_values('price')\n selling['acum'] = selling.quantity.cumsum()\n supply_curve = selling[['acum', 'price']].values\n supply_curve = np.vstack([supply_curve, [np.inf, np.inf]])\n index = selling.index.values\n return supply_curve, index\n\n\ndef get_value_stepwise(x: float, f: np.ndarray) -> Union[float, None]:\n \"\"\"\n Returns the value of a stepwise constant\n function defined by the right extrems\n of its interval\n Functions are assumed to be defined\n in (0, inf).\n\n Parameters\n ----------\n x\n Value in which the function is to be\n evaluated\n f\n Stepwise function represented as a 2 column\n matrix. Each row is the rightmost extreme\n point of each constant interval. The first column\n contains the x coordinate and is sorted increasingly.\n f is assumed to be defined only in the interval\n :math: (0, \\infty)\n Returns\n --------\n float or None\n The image of x under f: `f(x)`. If `x` is negative,\n then None is returned instead. If x is outside\n the range of the function (greater than `f[-1, 0]`),\n then the method returns None.\n\n Examples\n ---------\n >>> f = np.array([\n ... [1, 1],\n ... [3, 4]])\n >>> [pm.get_value_stepwise(x, f)\n ... for x in [-1, 0, 0.5, 1, 2, 3, 4]]\n [None, 1, 1, 1, 4, 4, None]\n\n \"\"\"\n if x < 0:\n return None\n\n for step in f:\n if x <= step[0]:\n return step[1]\n\n\ndef intersect_stepwise(\n f: np.ndarray,\n g: np.ndarray,\n k: float=0.5\n ) -> Tuple[\n Union[float, None],\n Union[int, None],\n Union[int, None],\n float]:\n \"\"\"\n Finds the intersection of\n two stepwise constants functions\n where f is assumed to be bigger at 0\n than g.\n If no intersection is found, None is returned.\n\n Parameters\n ----------\n f\n Stepwise constant function represented as\n a 2 column matrix where each row is the rightmost\n point of the constat interval. The first column\n is sorted increasingly.\n Preconditions: f is non-increasing.\n\n g\n Stepwise constant function represented as\n a 2 column matrix where each row is the rightmost\n point of the constat interval. The first column\n is sorted increasingly.\n Preconditions: g is non-decreasing and\n `f[0, 0] > g[0, 0]`\n k\n If the intersection is empty or an interval,\n a convex combination of the y-values of f and g\n will be returned and k will be used to determine\n hte final value. `k=1` will be the value of g\n while `k=0` will be the value of f.\n\n Returns\n --------\n x_ast : float or None\n Axis coordinate of the intersection of both\n functions. If the intersection is empty,\n then it returns None.\n f_ast : int or None\n Index of the rightmost extreme\n of the interval of `f` involved in the\n intersection. If the intersection is\n empty, returns None\n g_ast : int or None\n Index of the rightmost extreme\n of the interval of `g` involved in the\n intersection. 
If the intersection is\n empty, returns None.\n v : float or None\n Ordinate of the intersection if it\n is uniquely identified, otherwise\n the k-convex combination of the\n y values of `f` and `g` in the last\n point when they were both defined.\n\n Examples\n ---------\n Simple intersection with diferent domains\n\n >>> f = np.array([[1, 3], [3, 1]])\n >>> g = np.array([[2,2]])\n >>> pm.intersect_stepwise(f, g)\n (1, 0, 0, 2)\n\n Empty intersection, returning the middle value\n\n >>> f = np.array([[1,3], [2, 2.5]])\n >>> g = np.array([[1,1], [2, 2]])\n >>> pm.intersect_stepwise(f, g)\n (None, None, None, 2.25)\n \"\"\"\n x_max = np.min([f.max(axis=0)[0], g.max(axis=0)[0]])\n xs = sorted([x for x in set(g[:, 0]).union(set(f[:, 0])) if x <= x_max])\n fext = [get_value_stepwise(x, f) for x in xs]\n gext = [get_value_stepwise(x, g) for x in xs]\n x_ast = None\n for i in range(len(xs) - 1):\n if (fext[i] > gext[i]) and (fext[i + 1] < gext[i + 1]):\n x_ast = xs[i]\n\n f_ast = np.argmax(f[:, 0] >= x_ast) if x_ast is not None else None\n g_ast = np.argmax(g[:, 0] >= x_ast) if x_ast is not None else None\n\n g_val = g[g_ast, 1] if g_ast is not None else get_value_stepwise(xs[-1], g)\n f_val = f[f_ast, 1] if f_ast is not None else get_value_stepwise(xs[-1], f)\n\n intersect_domain_both = x_ast in f[:, 0] and x_ast in g[:, 0]\n if not (intersect_domain_both) and (x_ast is not None):\n v = g_val if x_ast in f[:, 0] else f_val\n else:\n v = g_val * k + (1 - k) * f_val\n\n return x_ast, f_ast, g_ast, v\n" ]
[ [ "numpy.vstack", "numpy.argmax" ] ]
ethanjli/liquid-handling-robotics
[ "999ab03c225b4c5382ab9fcac6a4988d0c232c67" ]
[ "lhrhost/robot/axes.py" ]
[ "\"\"\"Abstractions for the axes of a liquid-handling robot.\"\"\"\n\n# Standard imports\nimport logging\nfrom abc import abstractmethod\n\n# Local package imiports\nfrom lhrhost.protocol.linear_actuator import Receiver as LinearActuatorReceiver\nfrom lhrhost.util.containers import add_to_tree, get_from_tree\nfrom lhrhost.util.files import load_from_json, save_to_json\nfrom lhrhost.util.interfaces import InterfaceClass\n\n# External imports\nimport scipy.stats as stats\n\n# Logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\nclass RobotAxis(LinearActuatorReceiver, metaclass=InterfaceClass):\n \"\"\"High-level controller mixin interface for axes with physical position units.\"\"\"\n\n @property\n @abstractmethod\n def protocol(self):\n \"\"\"Return the associated linear actuator protocol.\"\"\"\n return None\n\n @abstractmethod\n def physical_to_sensor(self, physical_position):\n \"\"\"Convert a position in physical units to a unitless sensor position.\"\"\"\n pass\n\n @abstractmethod\n def sensor_to_physical(self, sensor_position):\n \"\"\"Convert a unitless sensor position to a position in physical units.\"\"\"\n pass\n\n @property\n @abstractmethod\n def physical_unit(self):\n \"\"\"Return a string representation of the physical units.\"\"\"\n pass\n\n def load_tunings_json(self, json_path=None):\n \"\"\"Load localized controller tunings from the provided JSON file path.\n\n Default path: 'calibrations/{}_tunings.json' where {} is replaced with\n the axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_tunings.json'.format(self.name)\n trees = load_from_json(json_path)\n self.default_tuning = trees['default']\n self.target_position_tunings = trees['target positions']\n return trees\n\n def save_tunings_json(self, json_path=None):\n \"\"\"Save a localized controller tunings tree to the provided JSON file path.\"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_tunings.json'.format(self.name)\n save_to_json({\n 'default': self.default_tuning,\n 'target positions': self.target_position_tunings\n }, json_path)\n\n async def go_to_sensor_position(\n self, sensor_position, apply_tunings=True, restore_tunings=True\n ):\n \"\"\"Go to the specified sensor position.\n\n Returns the final sensor position.\n \"\"\"\n if apply_tunings:\n current_tuning = self.default_tuning\n for tuning in self.target_position_tunings:\n if sensor_position >= tuning['min'] and sensor_position < tuning['max']:\n current_tuning = tuning\n else:\n logger.debug(\n 'PID tunings for sensor position {} unspecified, using defaults.'\n .format(int(sensor_position))\n )\n kp = current_tuning['pid']['kp']\n kd = current_tuning['pid']['kd']\n motor_limits = current_tuning['limits']['motor']\n duty_forwards_max = motor_limits['forwards']['max']\n duty_forwards_min = motor_limits['forwards']['min']\n duty_backwards_max = motor_limits['backwards']['max']\n duty_backwards_min = motor_limits['backwards']['min']\n (prev_kp, prev_kd, prev_ki) = await self.set_pid_gains(kp=kp, kd=kd)\n (\n prev_duty_forwards_max, prev_duty_forwards_min,\n prev_duty_backwards_max, prev_duty_backwards_min\n ) = await self.set_motor_limits(\n forwards_max=duty_forwards_max, forwards_min=duty_forwards_min,\n backwards_max=duty_backwards_max, backwards_min=duty_backwards_min\n )\n await self.protocol.feedback_controller.request_complete(\n int(sensor_position)\n )\n if apply_tunings and restore_tunings:\n await self.set_pid_gains(kp=prev_kp, kd=prev_kd, ki=prev_ki)\n await 
self.set_motor_limits(\n forwards_max=duty_forwards_max, forwards_min=duty_forwards_min,\n backwards_max=duty_backwards_max, backwards_min=duty_backwards_min\n )\n return self.protocol.position.last_response_payload\n\n async def go_to_low_end_position(self, speed=None):\n \"\"\"Go to the lowest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n if speed is None:\n speed = (\n self.protocol.feedback_controller.limits.motor\n .backwards.high.last_response_payload\n )\n await self.protocol.motor.request_complete(speed)\n await self.protocol.position.request()\n return self.protocol.position.last_response_payload\n\n async def go_to_high_end_position(self, speed=None):\n \"\"\"Go to the highest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n if speed is None:\n speed = (\n self.protocol.feedback_controller.limits.motor\n .forwards.high.last_response_payload\n )\n await self.protocol.motor.request_complete(speed)\n await self.protocol.position.request()\n return self.protocol.position.last_response_payload\n\n async def go_to_physical_position(self, physical_position):\n \"\"\"Go to the specified physical position.\n\n Returns the final physical position.\n \"\"\"\n sensor_position = self.physical_to_sensor(physical_position)\n sensor_position = await self.go_to_sensor_position(sensor_position)\n return self.sensor_to_physical(sensor_position)\n\n async def move_by_sensor_delta(self, sensor_delta):\n \"\"\"Go forwards/backwards by the specified sensor displacement.\n\n Returns the final physical displacement.\n \"\"\"\n position = await self.sensor_position\n target_position = position + sensor_delta\n final_position = await self.go_to_sensor_position(target_position)\n return final_position - position\n\n async def move_by_physical_delta(self, physical_delta):\n \"\"\"Go forwards/backwards by the specified physical displacement.\n\n Returns the final physical displacement.\n \"\"\"\n position = await self.physical_position\n target_position = position + physical_delta\n final_position = await self.go_to_physical_position(target_position)\n return final_position - position\n\n async def wait_until_initialized(self):\n \"\"\"Wait until the axis is ready to control.\"\"\"\n await self.protocol.initialized.wait()\n await self.protocol.position.initialized.wait()\n await self.protocol.motor.initialized.wait()\n\n async def synchronize_values(self):\n \"\"\"Request the values of all channels.\"\"\"\n await self.protocol.request_all()\n\n @property\n def name(self):\n \"\"\"Return the name of the axis.\"\"\"\n return self.protocol.node_name\n\n @property\n def last_position_limits(self):\n \"\"\"Get the last received position limits of the axis.\"\"\"\n return (\n self.protocol.feedback_controller.limits.position.low.last_response_payload,\n self.protocol.feedback_controller.limits.position.high.last_response_payload\n )\n\n @property\n async def sensor_position(self):\n \"\"\"Get the current sensor position of the axis.\"\"\"\n await self.protocol.position.request()\n return self.last_sensor_position\n\n @property\n def last_sensor_position(self):\n \"\"\"Get the last received sensor position of the axis.\"\"\"\n return self.protocol.position.last_response_payload\n\n @property\n async def physical_position(self):\n \"\"\"Get the current physical position of the axis.\"\"\"\n await self.protocol.position.request()\n return self.last_physical_position\n\n 
@property\n def last_physical_position(self):\n \"\"\"Get the last received physical position of the axis.\"\"\"\n return self.sensor_to_physical(self.last_sensor_position)\n\n async def set_pid_gains(self, kp=None, kd=None, ki=None, floating_point=True):\n \"\"\"Set values for the PID gains whose values are specified.\n\n Returns the previous values of the gains.\n \"\"\"\n pid_protocol = self.protocol.feedback_controller.pid\n prev_kp = pid_protocol.kp.last_response_payload\n prev_kd = pid_protocol.kd.last_response_payload\n prev_ki = pid_protocol.ki.last_response_payload\n if kp is not None and prev_kp != int(kp * 100 if floating_point else kp):\n await pid_protocol.kp.request(int(kp * 100 if floating_point else kp))\n if kd is not None and prev_kd != int(kd * 100 if floating_point else kp):\n await pid_protocol.kd.request(int(kd * 100 if floating_point else kp))\n if ki is not None and prev_ki != int(ki * 100 if floating_point else kp):\n await pid_protocol.ki.request(int(ki * 100 if floating_point else kp))\n return (\n prev_kp / 100 if floating_point else prev_kp,\n prev_kd / 100 if floating_point else prev_kd,\n prev_ki / 100 if floating_point else prev_ki\n )\n\n async def set_motor_limits(\n self, forwards_max=None, forwards_min=None, backwards_max=None, backwards_min=None\n ):\n \"\"\"Set values for the motor duty cycle limits where specified.\n\n Returns the previous values of the limits.\n \"\"\"\n limits_protocol = self.protocol.feedback_controller.limits.motor\n prev_forwards_max = limits_protocol.forwards.high.last_response_payload\n prev_forwards_min = limits_protocol.forwards.low.last_response_payload\n prev_backwards_max = -limits_protocol.backwards.high.last_response_payload\n prev_backwards_min = -limits_protocol.backwards.low.last_response_payload\n if forwards_max is not None and prev_forwards_max != int(forwards_max):\n await limits_protocol.forwards.high.request(int(forwards_max))\n if forwards_min is not None and prev_forwards_min != int(forwards_min):\n await limits_protocol.forwards.high.request(int(forwards_min))\n if backwards_max is not None and prev_backwards_max != int(backwards_max):\n await limits_protocol.backwards.high.request(int(-backwards_max))\n if backwards_min is not None and prev_backwards_min != int(backwards_min):\n await limits_protocol.backwards.high.request(int(-backwards_min))\n return (\n prev_forwards_max, prev_forwards_min,\n prev_backwards_max, prev_backwards_min\n )\n\n\nclass ContinuousRobotAxis(RobotAxis):\n \"\"\"High-level controller mixin interface for axes with continuous positions.\n\n Assumes a linear transformation exists between sensor and physical positions.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize member variables.\"\"\"\n super().__init__()\n self._calibration_samples = []\n self.linear_regression = None\n\n def clear_calibration_samples(self):\n \"\"\"Discard the stored calibration data.\"\"\"\n self._calibration_samples = []\n self.linear_regression = None\n\n def add_calibration_sample(self, sensor_position, physical_position):\n \"\"\"Add a (sensor, physical) position pair for calibration.\"\"\"\n self.linear_regression = None\n self._calibration_samples.append((sensor_position, physical_position))\n\n def fit_calibration_linear(self):\n \"\"\"Perform a linear regression on the calibration data and store results.\n\n Returns the regression slope, intercept, R-value, and standard error.\n The regression is for physical_position = slope * sensor_position + intercept.\n \"\"\"\n linear_regression = 
stats.linregress(self._calibration_samples)\n self.linear_regression = [\n linear_regression[0], linear_regression[1],\n linear_regression[2], linear_regression[4]\n ]\n return self.linear_regression\n\n @property\n def calibration_data(self):\n \"\"\"Return a JSON-exportable structure of calibration data.\"\"\"\n calibration_data = {\n 'parameters': {\n 'slope': self.linear_regression[0],\n 'intercept': self.linear_regression[1],\n 'rsquared': self.linear_regression[2],\n 'stderr': self.linear_regression[3]\n },\n 'physical unit': self.physical_unit,\n 'samples': [\n {\n 'sensor': calibration_sample[0],\n 'physical': calibration_sample[1]\n }\n for calibration_sample in self._calibration_samples\n ]\n }\n return calibration_data\n\n def load_calibration(self, calibration_data):\n \"\"\"Load a calibration from the provided calibration data structure.\"\"\"\n self._calibration_samples = [\n (calibration_sample['sensor'], calibration_sample['physical'])\n for calibration_sample in calibration_data['samples']\n ]\n self.fit_calibration_linear()\n\n def load_calibration_json(self, json_path=None):\n \"\"\"Load a calibration from a provided JSON file path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_physical.json'.format(self.name)\n self.load_calibration(load_from_json(json_path))\n\n def save_calibration_json(self, json_path=None):\n \"\"\"Save the calibration to the provided JSON file path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_physical.json'.format(self.name)\n save_to_json(self.calibration_data, json_path)\n\n @property\n def sensor_to_physical_scaling(self):\n \"\"\"Return the scaling factor from sensor to physical positions.\"\"\"\n if self.linear_regression is None:\n self._fit_calibration_linear()\n return self.linear_regression[0]\n\n @property\n def sensor_to_physical_offset(self):\n \"\"\"Return the post-scaling offset from sensor to physical positions.\"\"\"\n if self.linear_regression is None:\n self._fit_calibration_linear()\n return self.linear_regression[1]\n\n # Implement RobotAxis\n\n def physical_to_sensor(self, physical_position):\n \"\"\"Convert a position in physical units to a unitless integer sensor position.\"\"\"\n return (\n (physical_position - self.sensor_to_physical_offset) /\n self.sensor_to_physical_scaling\n )\n\n def sensor_to_physical(self, sensor_position):\n \"\"\"Convert a unitless sensor position to a position in physical units.\"\"\"\n return (\n self.sensor_to_physical_scaling * sensor_position +\n self.sensor_to_physical_offset\n )\n\n\nclass PresetRobotAxis(RobotAxis):\n \"\"\"High-level controller mixin for axes with preset positions.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize member variables.\"\"\"\n super().__init__()\n self.preset_sensor_position_tree = {}\n self.preset_physical_position_tree = {}\n self.current_preset_position = None\n\n def set_preset_sensor_position(self, preset_position, sensor_position):\n \"\"\"Associate a preset position with a sensor position.\"\"\"\n try:\n physical_position = self.preset_to_physical(\n preset_position, use_sensor_if_needed=False\n )\n except (AttributeError, KeyError):\n physical_position = None\n if physical_position is not None:\n raise KeyError(\n 'Preset position {} is already set to physical position {} {}!'\n .format(preset_position, physical_position, 
self.physical_units)\n )\n add_to_tree(\n self.preset_sensor_position_tree, preset_position,\n sensor_position\n )\n\n def set_preset_physical_position(self, preset_position, physical_position):\n \"\"\"Associate a preset position with a physical position.\"\"\"\n try:\n sensor_position = self.preset_to_sensor(\n preset_position, use_physical_if_needed=False\n )\n except KeyError:\n sensor_position = None\n if sensor_position is not None:\n raise KeyError(\n 'Preset position {} is already set to sensor position {}!'\n .format(preset_position, sensor_position)\n )\n add_to_tree(\n self.preset_physical_position_tree, preset_position,\n physical_position\n )\n\n def get_preset_position(self, presets_tree, preset_position):\n \"\"\"Get an actual position from a preset position tree node.\"\"\"\n position_node = get_from_tree(presets_tree, preset_position)\n if isinstance(position_node, dict):\n try:\n type = position_node['type']\n except KeyError:\n raise TypeError(\n 'Type-less preset position {}!'.format(preset_position)\n )\n if type == 'implicit':\n raise TypeError(\n 'Cannot use implicit preset position {}!'.format(preset_position)\n )\n if type == 'constants':\n raise TypeError(\n 'Cannot use partially-specified preset position {}!'\n .format(preset_position)\n )\n if type == 'constant':\n return position_node['value']\n raise NotImplementedError(\n 'Unknown type {} for preset position {}!'\n .format(type, preset_position)\n )\n return position_node\n\n def preset_to_sensor(self, preset_position, use_physical_if_needed=True):\n \"\"\"Convert a preset position to a sensor position.\"\"\"\n try:\n return self.get_preset_position(\n self.preset_sensor_position_tree, preset_position\n )\n except KeyError:\n if use_physical_if_needed:\n physical_position = self.preset_to_physical(preset_position, False)\n return self.physical_to_sensor(physical_position)\n else:\n raise\n\n def preset_to_physical(self, preset_position, use_sensor_if_needed=True):\n \"\"\"Convert a preset position to a physical position.\"\"\"\n try:\n return self.get_preset_position(\n self.preset_physical_position_tree, preset_position\n )\n except KeyError:\n if use_sensor_if_needed:\n sensor_position = self.preset_to_sensor(preset_position, False)\n return self.sensor_to_physical(sensor_position)\n else:\n raise\n\n async def go_to_preset_position(self, preset_position, force_go=False):\n \"\"\"Go to the specified preset position.\n\n Returns the physical position error between the desired physical position\n and the final physical position.\n \"\"\"\n if self.current_preset_position == preset_position and not force_go:\n return\n physical_position = self.preset_to_physical(preset_position)\n final_physical_position = await self.go_to_physical_position(physical_position)\n if isinstance(preset_position, str):\n preset_position = (preset_position,)\n self.current_preset_position = preset_position\n return physical_position - final_physical_position\n\n def load_preset_json(self, json_path=None):\n \"\"\"Load a preset positions tree from the provided JSON file path.\n\n Default path: 'calibrations/{}_preset.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_preset.json'.format(self.name)\n trees = load_from_json(json_path)\n self.preset_physical_position_tree = trees['physical']\n self.preset_sensor_position_tree = trees['sensor']\n return trees\n\n def save_preset_json(self, json_path=None):\n \"\"\"Save a preset positions tree to the provided JSON file 
path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_preset.json'.format(self.name)\n save_to_json({\n 'physical': self.preset_physical_position_tree,\n 'sensor': self.preset_sensor_position_tree\n }, json_path)\n\n # Implement RobotAxis\n\n async def go_to_sensor_position(self, sensor_position):\n \"\"\"Go to the specified sensor position.\n\n Returns the final sensor position.\n \"\"\"\n self.current_preset_position = None\n return await super().go_to_sensor_position(sensor_position)\n\n async def go_to_low_end_position(self, speed=None):\n \"\"\"Go to the lowest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n try:\n return await self.go_to_preset_position('low end')\n except (KeyError, TypeError):\n if self.current_preset_position == ('low end',):\n return\n self.current_preset_position = ('low end',)\n return await super().go_to_low_end_position(speed)\n\n async def go_to_high_end_position(self, speed=None):\n \"\"\"Go to the highest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n try:\n return await self.go_to_preset_position('high end')\n except (KeyError, TypeError):\n if self.current_preset_position == ('high end',):\n return\n self.current_preset_position = ('high end',)\n return await super().go_to_high_end_position(speed)\n\n\nclass AlignedRobotAxis(PresetRobotAxis):\n \"\"\"High-level controller mixin for axes with alignment.\"\"\"\n\n def at_alignment_hole(self):\n \"\"\"Return whether the axis is already at the alignment hole.\"\"\"\n return self.current_preset_position == ('alignment hole',)\n\n async def go_to_alignment_hole(self):\n \"\"\"Move to the alignment hole.\"\"\"\n if self.at_alignment_hole():\n return\n try:\n await self.go_to_preset_position('alignment hole')\n except TypeError:\n await self.go_to_physical_position(0)\n self.current_preset_position = ('alignment hole',)\n\n\nclass ManuallyAlignedRobotAxis(AlignedRobotAxis, ContinuousRobotAxis):\n \"\"\"High-level controller mixin for axes with manual alignment.\"\"\"\n\n async def set_alignment(self):\n \"\"\"Update the physical calibration to align against the current position.\"\"\"\n position = await self.sensor_position\n self.linear_regression[1] = -self.linear_regression[0] * position\n\n\nclass ModularRobotAxis(PresetRobotAxis):\n \"\"\"High-level controller mixin for axes with modular sets of positions.\"\"\"\n\n def at_module(self, module):\n \"\"\"Return whether the axis is already at the module.\n\n Module may be the module's name or type, depending on the axis.\n \"\"\"\n return self.current_preset_position[0] == module\n\n def at_module_position(self, module, position):\n \"\"\"Return whether the axis is already at the position for the module.\n\n Module may be the module's name or type, depending on the axis.\n \"\"\"\n return self.current_preset_position == (module, position)\n\n def get_indexed_offset(self, module_params, index, origin_index_key='origin index'):\n \"\"\"Return the physical offset for the provided module indexed preset position.\"\"\"\n def to_num(index):\n if isinstance(index, str) and len(index) == 1:\n return ord(index)\n return index\n index = to_num(index)\n min_index = to_num(module_params['min index'])\n max_index = to_num(module_params['max index'])\n origin_index = to_num(module_params[origin_index_key])\n if (index < 
min_index) or (max_index is not None and index > max_index):\n raise IndexError(\n 'Index {} is out of the range ({}, {})!'\n .format(index, min_index, max_index)\n )\n return (index - origin_index) * module_params['increment']\n\n def get_continuous_offset(self, module_params, offset):\n \"\"\"Return the physical offset for the provided module continuous preset position.\"\"\"\n min = module_params['min']\n max = module_params['max']\n if (offset < min) or (max is not None and offset > max):\n raise ValueError(\n 'Offset {} is out of the range ({}, {})!'\n .format(offset, min, max)\n )\n return offset\n\n def get_module_mount_position(self, presets_tree, module_type):\n \"\"\"Get the position of the module's mount.\"\"\"\n return self.get_preset_position(presets_tree, 'mount')\n\n def get_module_offset_position(self, module_params, offset):\n \"\"\"Get the position on the module relative to the module's origin.\"\"\"\n if module_params['type'] == 'indexed':\n return self.get_indexed_offset(module_params, offset)\n elif module_params['type'] == 'continuous':\n return self.get_continuous_offset(module_params, offset)\n else:\n raise NotImplementedError(\n 'Unknown module type {}!'.format(module_params['type'])\n )\n\n def get_module_position(self, presets_tree, module_params, preset_position):\n \"\"\"Get the actual position from a preset module position tree node.\"\"\"\n (module, offset) = preset_position\n return (\n self.get_module_mount_position(presets_tree, module) +\n module_params['origin'] +\n self.get_module_offset_position(module_params, offset)\n )\n\n async def go_to_module_position(self, module, position):\n \"\"\"Move to the position for the specified module.\"\"\"\n await self.go_to_preset_position((module, position))\n\n # Implement PresetRobotAxis\n\n def get_preset_position(self, presets_tree, preset_position):\n \"\"\"Get an actual position from a preset position tree node.\"\"\"\n try:\n return super().get_preset_position(presets_tree, preset_position)\n except KeyError:\n module_params = get_from_tree(presets_tree, preset_position[0])\n return self.get_module_position(presets_tree, module_params, preset_position)\n\n\nclass ConfigurableRobotAxis(ModularRobotAxis):\n \"\"\"High-level controller mixin for axes with reconfigurable sets of modules.\"\"\"\n\n def get_module_type(self, module_name):\n \"\"\"Return the module type of the named module.\"\"\"\n return self.configuration_tree[module_name]['type']\n\n def get_module_mount(self, module_name):\n \"\"\"Return the module type of the named module.\"\"\"\n return self.configuration_tree[module_name]['mount']\n\n # Implement PresetRobotAxis\n\n def load_preset_json(self, json_path=None):\n \"\"\"Load a preset positions tree from the provided JSON file path.\n\n Default path: 'calibrations/{}_preset.json' where {} is replaced with the\n axis name.\n \"\"\"\n trees = super().load_preset_json(json_path)\n if self.configuration is None:\n self.configuration = trees['default configuration']\n self.configurations = trees['configurations']\n self.configuration_tree = trees['configurations'][self.configuration]\n\n def save_preset_json(self, json_path=None):\n \"\"\"Save a preset positions tree to the provided JSON file path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_preset.json'.format(self.name)\n save_to_json({\n 'physical': self.preset_physical_position_tree,\n 'sensor': self.preset_sensor_position_tree,\n 
'default configuration': self.configuration,\n 'configurations': self.configurations\n }, json_path)\n\n def get_preset_position(self, presets_tree, preset_position):\n \"\"\"Get an actual position from a preset position tree node.\"\"\"\n try:\n return super().get_preset_position(presets_tree, preset_position)\n except KeyError:\n module_name = preset_position[0]\n module_type = self.get_module_type(module_name)\n module_params = get_from_tree(presets_tree, module_type)\n return self.get_module_position(presets_tree, module_params, preset_position)\n\n" ]
[ [ "scipy.stats.linregress" ] ]
vvmar/machine-learning-engineering-for-production-public
[ "0350e2e79eebc1dc2edb9e7b5e6f582b40fa74be" ]
[ "course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py" ]
[ "import pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n# rev 1\napp = FastAPI(title=\"Predicting Wine Class with batching\")\n\n# Open classifier in global scope\nwith open(\"models/wine-95-fixed.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\[email protected](\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n" ]
[ [ "numpy.array" ] ]
jiansowa/Paddle
[ "0ecf441af14d554c85f69a206e3e3a9bdd86fb13", "0ecf441af14d554c85f69a206e3e3a9bdd86fb13" ]
[ "python/paddle/fluid/trainer_factory.py", "python/paddle/dataset/uci_housing.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Defination of TrainerFactory.\"\"\"\n\nimport threading\nimport time\nimport logging\nimport numpy as np\nfrom paddle.fluid.log_helper import get_logger\n\nlocal_logger = get_logger(\n __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')\n\nfrom .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer, HeterXpuTrainer\nfrom .device_worker import Hogwild, DownpourSGD, Section, DownpourSGDOPT\nfrom .framework import Variable\nfrom multiprocessing import Process, Manager\n\n__all__ = [\"TrainerFactory\", \"FetchHandler\", \"FetchHandlerMonitor\"]\n\n\nclass TrainerFactory(object):\n \"\"\"\n Create trainer and device worker.\n If opt_info is not None, it will get configs from opt_info,\n otherwise create MultiTrainer and Hogwild.\n \"\"\"\n\n def __init__(self):\n pass\n\n def _create_trainer(self, opt_info=None):\n trainer = None\n device_worker = None\n if not opt_info:\n # default is MultiTrainer + Hogwild\n trainer = MultiTrainer()\n device_worker = Hogwild()\n trainer._set_device_worker(device_worker)\n else:\n trainer_class = opt_info[\"trainer\"]\n device_worker_class = opt_info[\"device_worker\"]\n trainer = globals()[trainer_class]()\n device_worker = globals()[device_worker_class]()\n\n # for debug tools\n if opt_info is not None:\n if opt_info.get(\"dump_slot\") is not None:\n trainer._set_dump_slot(opt_info[\"dump_slot\"])\n if opt_info.get(\"mpi_rank\") is not None:\n trainer._set_mpi_rank(opt_info[\"mpi_rank\"])\n if opt_info.get(\"mpi_size\") is not None:\n trainer._set_mpi_size(opt_info[\"mpi_size\"])\n if opt_info.get(\"dump_fields\") is not None and len(\n opt_info.get(\"dump_fields\")) != 0:\n trainer._set_dump_fields(opt_info[\"dump_fields\"])\n if opt_info.get(\"dump_fields_path\") is not None and len(\n opt_info.get(\"dump_fields_path\")) != 0:\n trainer._set_dump_fields_path(opt_info[\"dump_fields_path\"])\n if opt_info.get(\"dump_file_num\") is not None:\n trainer._set_dump_file_num(opt_info[\"dump_file_num\"])\n if opt_info.get(\"dump_converter\") is not None:\n trainer._set_dump_converter(opt_info[\"dump_converter\"])\n if opt_info.get(\"dump_param\") is not None and len(\n opt_info.get(\"dump_param\")) != 0:\n trainer._set_dump_param(opt_info[\"dump_param\"])\n if opt_info.get(\"worker_places\") is not None:\n trainer._set_worker_places(opt_info[\"worker_places\"])\n if opt_info.get(\"enable_random_dump\") is not None:\n trainer._set_enable_random_dump(opt_info[\n \"enable_random_dump\"])\n if opt_info.get(\"dump_interval\") is not None:\n trainer._set_dump_interval(opt_info[\"dump_interval\"])\n if opt_info.get(\"random_with_lineid\") is not None:\n trainer._set_random_with_lineid(opt_info[\n \"random_with_lineid\"])\n\n if \"fleet_desc\" in opt_info:\n device_worker._set_fleet_desc(opt_info[\"fleet_desc\"])\n trainer._set_fleet_desc(opt_info[\"fleet_desc\"])\n if opt_info.get(\"use_cvm\") is not None:\n 
trainer._set_use_cvm(opt_info[\"use_cvm\"])\n if opt_info.get(\"no_cvm\") is not None:\n trainer._set_no_cvm(opt_info[\"no_cvm\"])\n if opt_info.get(\"scale_datanorm\") is not None:\n trainer._set_scale_datanorm(opt_info[\"scale_datanorm\"])\n if opt_info.get(\"adjust_ins_weight\") is not None:\n trainer._set_adjust_ins_weight(opt_info[\n \"adjust_ins_weight\"])\n if opt_info.get(\"copy_table\") is not None:\n trainer._set_copy_table_config(opt_info[\"copy_table\"])\n if opt_info.get(\"check_nan_var_names\") is not None:\n trainer._set_check_nan_var_names(opt_info[\n \"check_nan_var_names\"])\n if opt_info.get(\"loss_names\") is not None:\n trainer._set_loss_names(opt_info[\"loss_names\"])\n trainer._set_device_worker(device_worker)\n return trainer\n\n\nclass FetchHandlerMonitor(object):\n \"\"\"\n Defination of FetchHandlerMonitor class,\n it's for fetch handler.\n \"\"\"\n\n def __init__(self, scope, handler):\n self.fetch_instance = handler\n self.fetch_thread = threading.Thread(\n target=self.handler_launch_func, args=(scope, self.fetch_instance))\n self.running_lock = threading.Lock()\n self.running = False\n\n def handler_launch_func(self, scope, handler):\n fetch_instance = handler\n period_secs = fetch_instance.period_secs\n var_name_to_key = {}\n for key in fetch_instance.var_dict:\n if isinstance(fetch_instance.var_dict[key], Variable):\n var_name_to_key[fetch_instance.var_dict[key].name] = key\n else:\n local_logger.warning(\"the value of {} is not a Variable\".format(\n key))\n var_name_to_key[\"None.var\"] = key\n elapsed_secs = 0\n while True:\n self.running_lock.acquire()\n if self.running == False:\n break\n if elapsed_secs < period_secs:\n # TODO(guru4elephant): needs customized condition\n time.sleep(1)\n elapsed_secs += 1\n else:\n elapsed_secs = 0\n fetch_dict = {}\n for key in var_name_to_key:\n var = scope.find_var(key)\n fetch_dict[key] = var\n if var == None:\n local_logger.warning(\"{} value currently not available\".\n format(var_name_to_key[key]))\n res_dict = {}\n for key in fetch_dict:\n user_name = var_name_to_key[key]\n if fetch_dict[key] == None:\n res_dict[user_name] = None\n continue\n else:\n res_dict[user_name] = fetch_dict[key].get_tensor()\n\n lod = res_dict[user_name].lod()\n if len(lod) > 0:\n raise RuntimeError(\"Some of your fetched tensors \\\n hold LoD information. \\\n They can not be completely cast \\\n to Python ndarray. We can \\\n not return LoDTensor itself directly, \\\n please choose another targets\")\n if res_dict[user_name]._is_initialized():\n res_dict[user_name] = np.array(res_dict[user_name])\n else:\n res_dict[user_name] = None\n fetch_instance.handler(res_dict)\n self.running_lock.release()\n\n def start(self):\n \"\"\"\n start monitor,\n it will start a monitor thread.\n \"\"\"\n self.running_lock.acquire()\n self.running = True\n self.running_lock.release()\n self.fetch_thread.setDaemon(True)\n self.fetch_thread.start()\n\n def stop(self):\n self.running_lock.acquire()\n self.running = False\n self.running_lock.release()\n", "# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUCI Housing dataset.\n\nThis module will download dataset from\nhttps://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and\nparse training set and test set into paddle reader creators.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\nimport tempfile\nimport tarfile\nimport os\nimport paddle.dataset.common\n\n__all__ = ['train', 'test']\n\nURL = 'http://paddlemodels.bj.bcebos.com/uci_housing/housing.data'\nMD5 = 'd4accdce7a25600298819f8e28e8d593'\nfeature_names = [\n 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',\n 'PTRATIO', 'B', 'LSTAT'\n]\n\nUCI_TRAIN_DATA = None\nUCI_TEST_DATA = None\n\nFLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fluid/fit_a_line.fluid.tar'\nFLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b'\n\n\ndef feature_range(maximums, minimums):\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n feature_num = len(maximums)\n ax.bar(list(range(feature_num)),\n maximums - minimums,\n color='r',\n align='center')\n ax.set_title('feature scale')\n plt.xticks(list(range(feature_num)), feature_names)\n plt.xlim([-1, feature_num])\n fig.set_figheight(6)\n fig.set_figwidth(10)\n if not os.path.exists('./image'):\n os.makedirs('./image')\n fig.savefig('image/ranges.png', dpi=48)\n plt.close(fig)\n\n\ndef load_data(filename, feature_num=14, ratio=0.8):\n global UCI_TRAIN_DATA, UCI_TEST_DATA\n if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:\n return\n\n data = np.fromfile(filename, sep=' ')\n data = data.reshape(data.shape[0] // feature_num, feature_num)\n maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(\n axis=0) / data.shape[0]\n # if you want to print the distribution of input data, you could use function of feature_range\n #feature_range(maximums[:-1], minimums[:-1])\n for i in six.moves.range(feature_num - 1):\n data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])\n offset = int(data.shape[0] * ratio)\n UCI_TRAIN_DATA = data[:offset]\n UCI_TEST_DATA = data[offset:]\n\n\ndef train():\n \"\"\"\n UCI_HOUSING training set creator.\n\n It returns a reader creator, each sample in the reader is features after\n normalization and price number.\n\n :return: Training reader creator\n :rtype: callable\n \"\"\"\n global UCI_TRAIN_DATA\n load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))\n\n def reader():\n for d in UCI_TRAIN_DATA:\n yield d[:-1], d[-1:]\n\n return reader\n\n\ndef test():\n \"\"\"\n UCI_HOUSING test set creator.\n\n It returns a reader creator, each sample in the reader is features after\n normalization and price number.\n\n :return: Test reader creator\n :rtype: callable\n \"\"\"\n global UCI_TEST_DATA\n load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))\n\n def reader():\n for d in UCI_TEST_DATA:\n yield d[:-1], d[-1:]\n\n return reader\n\n\ndef 
fluid_model():\n parameter_tar = paddle.dataset.common.download(\n FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar')\n\n tar = tarfile.TarFile(parameter_tar, mode='r')\n dirpath = tempfile.mkdtemp()\n tar.extractall(path=dirpath)\n\n return dirpath\n\n\ndef predict_reader():\n \"\"\"\n It returns just one tuple data to do inference.\n\n :return: one tuple data\n :rtype: tuple\n \"\"\"\n global UCI_TEST_DATA\n load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))\n return (UCI_TEST_DATA[0][:-1], )\n\n\ndef fetch():\n paddle.dataset.common.download(URL, 'uci_housing', MD5)\n" ]
[ [ "numpy.array" ], [ "numpy.fromfile", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.close", "matplotlib.use" ] ]
rishikksh20/TalkNet2-pytorch
[ "baa6bf90c054634185932ed4b17a6ce8866feaba" ]
[ "model.py" ]
[ "\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom utils import get_mask_from_lengths\nfrom embedding import GaussianEmbedding\nfrom quartznet import QuartzNet5x5, QuartzNet9x5\nfrom module import MaskedInstanceNorm1d, StyleResidual, Postnet\n\n\n\nclass GraphemeDuration(nn.Module):\n\n def __init__(self, idim, embed_dim=64, padding_idx=0):\n super(GraphemeDuration, self).__init__()\n self.embed = nn.Embedding(idim, embedding_dim=embed_dim, padding_idx=padding_idx)\n self.predictor = QuartzNet5x5(embed_dim, 32)\n self.projection = nn.Conv1d(32, 1, kernel_size=1)\n\n def forward(self, text, text_len, is_mask=True):\n x, x_len = self.embed(text).transpose(1, 2), text_len\n if is_mask:\n mask = get_mask_from_lengths(x_len)\n else:\n mask = None\n out = self.predictor(x, mask)\n out = self.projection(out).squeeze(1)\n\n return out\n\n @staticmethod\n def _metrics(true_durs, true_text_len, pred_durs):\n loss = F.mse_loss(pred_durs, (true_durs + 1).float().log(), reduction='none')\n mask = get_mask_from_lengths(true_text_len)\n loss *= mask.float()\n loss = loss.sum() / mask.sum()\n\n durs_pred = pred_durs.exp() - 1\n durs_pred[durs_pred < 0.0] = 0.0\n durs_pred = durs_pred.round().long()\n\n acc = ((true_durs == durs_pred) * mask).sum().float() / mask.sum() * 100\n acc_dist_1 = (((true_durs - durs_pred).abs() <= 1) * mask).sum().float() / mask.sum() * 100\n acc_dist_3 = (((true_durs - durs_pred).abs() <= 3) * mask).sum().float() / mask.sum() * 100\n\n return loss, acc, acc_dist_1, acc_dist_3\n\n\nclass PitchPredictor(nn.Module):\n\n def __init__(self, idim, embed_dim=64):\n super(PitchPredictor, self).__init__()\n self.embed = GaussianEmbedding(idim, embed_dim)\n self.predictor = QuartzNet5x5(embed_dim, 32)\n self.sil_proj = nn.Conv1d(32, 1, kernel_size=1)\n self.body_proj = nn.Conv1d(32, 1, kernel_size=1)\n\n def forward(self, text, durs, is_mask=True):\n x, x_len = self.embed(text, durs).transpose(1, 2), durs.sum(-1)\n if is_mask:\n mask = get_mask_from_lengths(x_len)\n else:\n mask = None\n out = self.predictor(x, mask)\n uv = self.sil_proj(out).squeeze(1)\n value = self.body_proj(out).squeeze(1)\n\n return uv, value\n\n def _metrics(self, true_f0, true_f0_mask, pred_f0_sil, pred_f0_body):\n sil_mask = true_f0 < 1e-5\n sil_gt = sil_mask.long()\n sil_loss = F.binary_cross_entropy_with_logits(input=pred_f0_sil, target=sil_gt.float(), reduction='none', )\n sil_loss *= true_f0_mask.type_as(sil_loss)\n sil_loss = sil_loss.sum() / true_f0_mask.sum()\n sil_acc = ((torch.sigmoid(pred_f0_sil) > 0.5).long() == sil_gt).float() # noqa\n sil_acc *= true_f0_mask.type_as(sil_acc)\n sil_acc = sil_acc.sum() / true_f0_mask.sum()\n\n body_mse = F.mse_loss(pred_f0_body, (true_f0 - self.f0_mean) / self.f0_std, reduction='none')\n body_mask = ~sil_mask\n body_mse *= body_mask.type_as(body_mse) # noqa\n body_mse = body_mse.sum() / body_mask.sum() # noqa\n body_mae = ((pred_f0_body * self.f0_std + self.f0_mean) - true_f0).abs()\n body_mae *= body_mask.type_as(body_mae) # noqa\n body_mae = body_mae.sum() / body_mask.sum() # noqa\n\n loss = sil_loss + body_mse\n\n return loss, sil_acc, body_mae\n\n\nclass TalkNet2(nn.Module):\n\n def __init__(self, idim, odim=80, embed_dim=256, postnet_layers = 0):\n super(TalkNet2, self).__init__()\n self.embed = GaussianEmbedding(idim, embed_dim)\n self.norm_f0 = MaskedInstanceNorm1d(1)\n self.res_f0 = StyleResidual(embed_dim, 1, kernel_size=3)\n\n self.generator = QuartzNet9x5(embed_dim, odim)\n\n # define postnet\n self.postnet = (\n None\n if postnet_layers 
== 0\n else Postnet(\n odim=odim,\n n_layers=postnet_layers,\n n_chans=256,\n n_filts=5,\n use_batch_norm=True,\n dropout_rate=0.5,\n )\n )\n\n\n def forward(self, text, durs, f0, is_mask=True):\n x, x_len = self.embed(text, durs).transpose(1, 2), durs.sum(-1)\n f0, f0_mask = f0.clone(), f0 > 0.0\n f0 = self.norm_f0(f0.unsqueeze(1), f0_mask)\n f0[~f0_mask.unsqueeze(1)] = 0.0\n x = self.res_f0(x, f0)\n if is_mask:\n mask = get_mask_from_lengths(x_len)\n else:\n mask = None\n\n before_outs = self.generator(x, mask)\n if self.postnet is None:\n return before_outs, None\n else:\n after_outs = before_outs + self.postnet(\n before_outs\n )\n return before_outs, after_outs\n\n\n\n @staticmethod\n def _metrics(true_mel, true_mel_len, pred_mel):\n loss = F.mse_loss(pred_mel, true_mel, reduction='none').mean(dim=-2)\n mask = get_mask_from_lengths(true_mel_len)\n loss *= mask.float()\n loss = loss.sum() / mask.sum()\n return loss\n" ]
[ [ "torch.nn.Conv1d", "torch.nn.functional.mse_loss", "torch.nn.Embedding" ] ]
kmaasrud/vmc-fys4411
[ "e96e2f6b1403118ee48ad5b5ff38582310ba4d2a" ]
[ "vmc/python/SGD_alphas.py" ]
[ "import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\n\nimport numpy as np\n\n\n#location for files and plots\n\nPLOT_DIR = \"../plots/\"\nDATA_DIR = \"../data\"\nFILENAME_PLOT = 'SGD_alphas'\nPLOT_DIR = \"./\"\n\n\n#figure size and resolution\nfig = plt.figure()\nplt.style.use(\"seaborn\")\n#colour, linewith, linestyle\n#boundaries\n#plt.xlim(min(x)*1.1, max(x)*1.1)\nplt.ylim(0.15, 0.95)\n#legend\nplt.legend(loc = 'best', prop = {'size':14}, frameon = False)\nplt.rc('font', size=10)\nplt.rc('axes', titlesize=12)\nplt.xlabel(\"Iterations\")\nplt.ylabel(r\"$\\alpha$\")\nplt.title(r\"SGD: Start $\\alpha$\")\n\n\n\n\nstart_alphas = [\"0.2\",\"0.3\", \"0.4\", \"0.5\", \"0.6\", \"0.7\", \"0.8\", \"0.9\"]\n\nfor start_alpha in start_alphas:\n dim = 3\n\n DATA_DIR = f\"../data/sgd_noninteracting/start-alpha/start-alpha_{start_alpha}.csv\"\n\n df = pd.read_csv(DATA_DIR)\n\n energy = df[\"Energy\"]\n \n alpha = df[\"Alpha\"]\n x = np.linspace(0, len(alpha), len(alpha))\n plt.plot(x, alpha, label = start_alpha, linewidth = 2)\n\nplt.legend()\nplt.draw()\nplt.show()\n#plt.save_fig(PLOT_DIR + \"SGD_start-alpha.png\")\n\n\n" ]
[ [ "matplotlib.pyplot.style.use", "matplotlib.pyplot.legend", "matplotlib.pyplot.draw", "matplotlib.pyplot.rc", "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
idies/RMHDConverter
[ "24ff6e5ae2767b3aac4d24e9d6f1a116ef002eba" ]
[ "py/plot_slices.py" ]
[ "########################################################################\n#\n# Copyright 2015 Johns Hopkins University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Contact: [email protected]\n# Website: http://turbulence.pha.jhu.edu/\n#\n########################################################################\n\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport subprocess\n\ndef slice_plotter(n0 = 4, n1 = 8):\n ax = plt.figure(figsize = (8, 8)).add_axes([.0, .0, 1., 1.])\n ax.set_axis_off()\n for i in range(n0, n1, 4):\n print('plotting frame ', i)\n data = np.load(\n '/export/scratch0/clalescu/RMHD/2D_slices/data_rs2_u_t{0:0>4x}.npy'.format(i))\n for j in range(3):\n for k in range(2):\n ax.cla()\n ax.set_axis_off()\n ax.imshow(data[j, :, :, k])\n plt.gcf().savefig(\n 'figs/u{0}_{1}_t{2:0>4}.png'.format(j, k, i),\n dpi = max(data[j, :, :, k].shape)//8,\n format = 'png')\n return None\n\n\ngenerate_png = True\ngenerate_gif = False\n\nif generate_png:\n slice_plotter(n0 = 128, n1 = 0x280)\n\nif generate_gif:\n for j in range(3):\n for k in range(2):\n subprocess.call(['convert',\n 'figs/u{0}_{1}_t*.png'.format(j, k),\n 'figs/u{0}_{1}.gif'.format(j, k)])\n\n\n\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.gcf" ] ]
jessica-dl/2XB3-ML-Training
[ "aa82d64c7b8b35eb79060a7bd7d22d09323b1c06" ]
[ "trainer/dataset.py" ]
[ "from tensorflow.python.lib.io import file_io\nimport h5py\nimport numpy as np\n\n\nclass Dataset:\n\n def __init__(self, path, local):\n \"\"\"\n Initialize the dataset\n :param path: Path to the hdf5 dataset file\n :param local: True if the path is to a local file, False otherwise\n \"\"\"\n self.path = path\n self.local = local\n\n if not local:\n with file_io.FileIO(path, mode='rb') as dataset_f:\n with open('dataset.h5', 'wb') as local_dataset:\n local_dataset.write(dataset_f.read())\n path = 'dataset.h5'\n\n hf = h5py.File(path, 'r')\n self.x = hf.get('x')[:]\n self.y = hf.get('y')[:]\n hf.close()\n\n self.x = (self.x.astype(np.float32) - 127.5) / 127.5\n\n # self.__make_overfit()\n\n print('Loaded dataset')\n print('X:', self.x.shape)\n print('Y:', self.y.shape)\n\n def __make_overfit(self):\n \"\"\"\n Modify dataset for overfitting by only including 3 samples from each class\n :return:\n \"\"\"\n minimal_x = self.x[:1]\n minimal_y = self.y[:1]\n\n per_class = 3\n\n i = 1\n found = np.array([0 for _ in range(self.y.shape[-1])])\n found[np.argmax(minimal_y[0])] += 1\n\n while sum(found) < self.y.shape[-1] * per_class:\n for c in range(self.y.shape[-1]):\n if found[np.argmax(self.y[i])] < per_class:\n minimal_x = np.concatenate([minimal_x, self.x[i:i+1]])\n minimal_y = np.concatenate([minimal_y, self.y[i:i+1]])\n found[np.argmax(self.y[i])] += 1\n i += 1\n\n self.x = minimal_x\n self.y = minimal_y\n\n\n\n\n\n" ]
[ [ "numpy.concatenate", "numpy.argmax", "tensorflow.python.lib.io.file_io.FileIO" ] ]
SigfriedHache/euler-project
[ "7c38deee65a793a441830a6d0916da61e86b8cf7" ]
[ "Solutions/Q0005_Smallest_Evenly_Divisible_Number.py" ]
[ "\"\"\"\nhttps://projecteuler.net/problem=5\n2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\"\"\"\nfrom numpy import prod\n\nfrom Common.Logger import get_logger, init_logger\nfrom Common.Numbers import prime_factorization\nfrom Common.Utilities import performance_run\n\nPERFORMANCE_RUNS = 100_000\nUPPER_BOUND = 20\n\n\ndef fastest(ceiling: int = UPPER_BOUND) -> int:\n \"\"\"\n This algorithm finds the prime factorization of every number between 1 and ceiling, maintains the greatest counts of\n each prime factor found, and then multiplies them out at the end\n :param ceiling: The upper bound (inclusive) of the factor for which to find the evenly-divided quotient\n :return: The quotient evenly-divisible by every number from 1 to ceiling\n \"\"\"\n return prime_tally(ceiling)\n\n\ndef prime_tally(ceiling: int = UPPER_BOUND) -> int:\n \"\"\"\n This algorithm finds the prime factorization of every number between 1 and ceiling, maintains the greatest counts of\n each prime factor found, and then multiplies them out at the end\n --> benchmark: 104 ms/run\n :param ceiling: The upper bound (inclusive) of the factor for which to find the evenly-divided quotient\n :return: The quotient evenly-divisible by every number from 1 to ceiling\n \"\"\"\n factorization_merge = []\n\n for number in range(1, ceiling+1):\n number_factorization = prime_factorization(number)\n for factor in set(number_factorization):\n factor_count_difference = number_factorization.count(factor) - factorization_merge.count(factor)\n if factor_count_difference > 0:\n factorization_merge += [factor] * factor_count_difference\n\n return prod(factorization_merge)\n\n\nif __name__ == \"__main__\":\n # Log stuff\n init_logger()\n logger = get_logger()\n\n # Performance run\n # performance_run(prime_tally, iterations=PERFORMANCE_RUNS)()\n print(fastest(UPPER_BOUND))\n" ]
[ [ "numpy.prod" ] ]
garrettmflynn/sortingview
[ "0bb3df40d5d031ec651c4821f928787bbee71fbb" ]
[ "sortingview/helpers/prepare_snippets_h5_old.py" ]
[ "from typing import Dict, Union\n\nimport os\nimport hither2 as hi\nimport kachery_client as kc\nimport numpy as np\nimport spikeextractors as se\nfrom sortingview.extractors import LabboxEphysSortingExtractor, LabboxEphysRecordingExtractor\nfrom .SubsampledSortingExtractor import SubsampledSortingExtractor\nfrom .find_unit_peak_channels import find_unit_peak_channels\nfrom .find_unit_neighborhoods import find_unit_neighborhoods\nfrom .get_unit_waveforms import get_unit_waveforms\n\[email protected](\n 'prepare_snippets_h5', '0.2.7',\n image=hi.RemoteDockerImage('docker://magland/labbox-ephys-processing:0.3.19'),\n modules=['sortingview']\n)\ndef prepare_snippets_h5(\n recording_object,\n sorting_object,\n start_frame=None,\n end_frame=None,\n max_events_per_unit=None,\n max_neighborhood_size=15,\n snippet_len=(50, 80)\n):\n if recording_object['recording_format'] == 'snippets1':\n return recording_object['data']['snippets_h5_uri']\n\n recording = LabboxEphysRecordingExtractor(recording_object)\n sorting = LabboxEphysSortingExtractor(sorting_object)\n\n with kc.TemporaryDirectory() as tmpdir:\n save_path = tmpdir + '/snippets.h5'\n prepare_snippets_h5_from_extractors(\n recording=recording,\n sorting=sorting,\n output_h5_path=save_path,\n start_frame=start_frame,\n end_frame=end_frame,\n max_events_per_unit=max_events_per_unit,\n max_neighborhood_size=max_neighborhood_size,\n snippet_len=snippet_len\n )\n return kc.store_file(save_path)\n\ndef prepare_snippets_h5_from_extractors(\n recording: se.RecordingExtractor,\n sorting: se.SortingExtractor,\n output_h5_path: str,\n start_frame,\n end_frame,\n max_neighborhood_size: int,\n max_events_per_unit: Union[None, int]=None,\n snippet_len=(50, 80)\n):\n import h5py\n if start_frame is not None:\n recording = se.SubRecordingExtractor(parent_recording=recording, start_frame=start_frame, end_frame=end_frame)\n sorting = se.SubSortingExtractor(parent_sorting=sorting, start_frame=start_frame, end_frame=end_frame)\n\n unit_ids = sorting.get_unit_ids()\n samplerate = recording.get_sampling_frequency()\n \n # Use this optimized function rather than spiketoolkit's version\n # for efficiency with long recordings and/or many channels, units or spikes\n # we should submit this to the spiketoolkit project as a PR\n print('Subsampling sorting')\n if max_events_per_unit is not None:\n sorting_subsampled = SubsampledSortingExtractor(parent_sorting=sorting, max_events_per_unit=max_events_per_unit, method='random')\n else:\n sorting_subsampled = sorting\n print('Finding unit peak channels')\n peak_channels_by_unit = find_unit_peak_channels(recording=recording, sorting=sorting, unit_ids=unit_ids)\n print('Finding unit neighborhoods')\n channel_ids_by_unit = find_unit_neighborhoods(recording=recording, peak_channels_by_unit=peak_channels_by_unit, max_neighborhood_size=max_neighborhood_size)\n print(f'Getting unit waveforms for {len(unit_ids)} units')\n unit_waveforms = get_unit_waveforms(\n recording=recording,\n sorting=sorting_subsampled,\n unit_ids=unit_ids,\n channel_ids_by_unit=channel_ids_by_unit,\n snippet_len=snippet_len\n )\n # unit_waveforms = st.postprocessing.get_unit_waveforms(\n # recording=recording,\n # sorting=sorting,\n # unit_ids=unit_ids,\n # ms_before=1,\n # ms_after=1.5,\n # max_spikes_per_unit=500\n # )\n\n save_path = output_h5_path\n with h5py.File(save_path, 'w') as f:\n f.create_dataset('unit_ids', data=np.array(unit_ids).astype(np.int32))\n f.create_dataset('sampling_frequency', data=np.array([samplerate]).astype(np.float64))\n 
f.create_dataset('channel_ids', data=np.array(recording.get_channel_ids()))\n f.create_dataset('num_frames', data=np.array([recording.get_num_frames()]).astype(np.int32))\n channel_locations = recording.get_channel_locations()\n f.create_dataset(f'channel_locations', data=np.array(channel_locations))\n for ii, unit_id in enumerate(unit_ids):\n x = sorting.get_unit_spike_train(unit_id=unit_id)\n f.create_dataset(f'unit_spike_trains/{unit_id}', data=np.array(x).astype(np.float64))\n f.create_dataset(f'unit_waveforms/{unit_id}/waveforms', data=unit_waveforms[ii].astype(np.float32))\n f.create_dataset(f'unit_waveforms/{unit_id}/channel_ids', data=np.array(channel_ids_by_unit[int(unit_id)]).astype(int))\n f.create_dataset(f'unit_waveforms/{unit_id}/spike_train', data=np.array(sorting_subsampled.get_unit_spike_train(unit_id=unit_id)).astype(np.float64))\n" ]
[ [ "numpy.array" ] ]
YimiaoSun/network-slimming
[ "8ab3e6932fc3febd893faf83e23bee1aeb28be13" ]
[ "models/preresnet.py" ]
[ "# 本resnet结构选自:https://arxiv.org/pdf/1603.05027.pdf\n# 是原作者在resnet上的更新版本(实际用的没有原始版本广,认可度有质疑)\n\nfrom __future__ import absolute_import\nimport math\nimport torch.nn as nn\nfrom .channel_selection import channel_selection\n\n\n__all__ = ['resnet']\n\n\"\"\"\npreactivation resnet with bottleneck design.\n\"\"\"\n\nclass Bottleneck(nn.Module):\n expansion = 4\n # self.inplanes = 16, 64, 128 (planes * 4)\n # planes = 16, 32, 64\n # stride=1, 2, 2\n def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.select = channel_selection(inplanes)\n self.conv1 = nn.Conv2d(cfg[0], cfg[1], kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(cfg[1])\n self.conv2 = nn.Conv2d(cfg[1], cfg[2], kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(cfg[2])\n # 从conv3看出,planes*4应该=inplances\n self.conv3 = nn.Conv2d(cfg[2], planes * 4, kernel_size=1, bias=False)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.bn1(x)\n out = self.select(out)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out = self.bn3(out)\n out = self.relu(out)\n out = self.conv3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n return out\n\nclass resnet(nn.Module):\n def __init__(self, depth=164, dataset='cifar10', cfg=None):\n # depth=164 is too big to me. Let me change it to 20.\n super(resnet, self).__init__()\n assert (depth - 2) % 9 == 0, 'depth should be 9n+2'\n\n n = (depth - 2) // 9 # n = blocks = 2\n block = Bottleneck\n\n # cfg目的是针对prune后生成相应channel的新的network(cfg中的个数来源于bn.weight.data.gt(thre),定义于resprune.py)\n if cfg is None:\n # Construct config variable.\n cfg = [[16, 16, 16], [64, 16, 16]*(n-1), [64, 32, 32], [128, 32, 32]*(n-1), [128, 64, 64], [256, 64, 64]*(n-1), [256]]\n # 此行:拆掉最内层维度,使cfg变成一维list\n cfg = [item for sub_list in cfg for item in sub_list]\n\n self.inplanes = 16\n\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,\n bias=False)\n self.layer1 = self._make_layer(block, 16, n, cfg=cfg[0:3*n])\n self.layer2 = self._make_layer(block, 32, n, cfg=cfg[3*n:6*n], stride=2)\n self.layer3 = self._make_layer(block, 64, n, cfg=cfg[6*n:9*n], stride=2)\n self.bn = nn.BatchNorm2d(64 * block.expansion)\n self.select = channel_selection(64 * block.expansion) # select必须接在batchnorm后面\n self.relu = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(8)\n\n if dataset == 'cifar10':\n self.fc = nn.Linear(cfg[-1], 10)\n elif dataset == 'cifar100':\n self.fc = nn.Linear(cfg[-1], 100)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(0.5)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, cfg, stride=1):\n \"\"\"\n 做resnet的基本组件(层,layer)\n :param block: 原始resnet有两种。Basic和Bottleneck。区别在于Basic适用于短的resnet(34层以下);bottleneck是50层以上的。\n Basic是2个3x3 conv,Bottleneck是1x1+3x3+1x1。不过它们彼此channel是兼容的,所以其实可以混用。\n 这里为了简化,全部采用Bottleneck。\n :param planes: 根据3层(layer),具体分别为: 16, 32, 64.\n :param blocks: 每层中多少个block,我用的是depth 20,所以blocks个数是2((depth - 2) // 9 = 2)\n :param cfg: 每次batchnorm需要保留的channel个数\n :param stride: layer1=1,layer2/3=2\n :return:\n \"\"\"\n downsample = None\n # self.inplanes = 16, 64, 128 (planes * 4)\n # planes = 16, 32, 64\n # block.expansion = 4\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, cfg[0:3], stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, cfg[3*i: 3*(i+1)]))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n\n x = self.layer1(x) # 32x32\n x = self.layer2(x) # 16x16\n x = self.layer3(x) # 8x8\n x = self.bn(x)\n x = self.select(x)\n x = self.relu(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.ReLU" ] ]
amroid/ibis
[ "9df27958f305a901728b540200bd8fa2820d4625" ]
[ "ibis/expr/tests/test_value_exprs.py" ]
[ "import functools\nimport operator\nimport os\nfrom collections import OrderedDict\nfrom datetime import date, datetime, time\nfrom operator import methodcaller\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport toolz\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.analysis as L\nimport ibis.expr.api as api\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.rules as rlz\nimport ibis.expr.types as ir\nfrom ibis import literal\nfrom ibis.common.exceptions import IbisTypeError\nfrom ibis.expr.signature import Argument as Arg\nfrom ibis.tests.util import assert_equal\n\n\ndef test_null():\n expr = ibis.literal(None)\n assert isinstance(expr, ir.NullScalar)\n assert isinstance(expr.op(), ops.NullLiteral)\n assert expr._arg.value is None\n\n expr2 = ibis.null()\n assert_equal(expr, expr2)\n\n assert expr is expr2\n assert expr.type() is dt.null\n assert expr2.type() is dt.null\n\n\[email protected](\n raises=AssertionError,\n reason='UTF-8 support in Impala non-existent at the moment?',\n)\ndef test_unicode():\n assert False\n\n\[email protected](\n ['value', 'expected_type'],\n [\n (5, 'int8'),\n (127, 'int8'),\n (128, 'int16'),\n (32767, 'int16'),\n (32768, 'int32'),\n (2147483647, 'int32'),\n (2147483648, 'int64'),\n (-5, 'int8'),\n (-128, 'int8'),\n (-129, 'int16'),\n (-32769, 'int32'),\n (-2147483649, 'int64'),\n (1.5, 'double'),\n ('foo', 'string'),\n ([1, 2, 3], 'array<int8>'),\n ],\n)\ndef test_literal_with_implicit_type(value, expected_type):\n expr = ibis.literal(value)\n\n assert isinstance(expr, ir.ScalarExpr)\n assert expr.type() == dt.dtype(expected_type)\n\n assert isinstance(expr.op(), ops.Literal)\n assert expr.op().value is value\n\n\npointA = (1, 2)\npointB = (-3, 4)\npointC = (5, 19)\nlineAB = [pointA, pointB]\nlineBC = [pointB, pointC]\nlineCA = [pointC, pointA]\npolygon1 = [lineAB, lineBC, lineCA]\npolygon2 = [lineAB, lineBC, lineCA]\nmultipolygon1 = [polygon1, polygon2]\n\n\[email protected](\n ['value', 'expected_type'],\n [\n (5, 'int16'),\n (127, 'double'),\n (128, 'int64'),\n (32767, 'double'),\n (32768, 'float'),\n (2147483647, 'int64'),\n (-5, 'int16'),\n (-128, 'int32'),\n (-129, 'int64'),\n (-32769, 'float'),\n (-2147483649, 'double'),\n (1.5, 'double'),\n ('foo', 'string'),\n (list(pointA), 'point'),\n (tuple(pointA), 'point'),\n (list(lineAB), 'linestring'),\n (tuple(lineAB), 'linestring'),\n (list(polygon1), 'polygon'),\n (tuple(polygon1), 'polygon'),\n (list(multipolygon1), 'multipolygon'),\n (tuple(multipolygon1), 'multipolygon'),\n ],\n)\ndef test_literal_with_explicit_type(value, expected_type):\n expr = ibis.literal(value, type=expected_type)\n assert expr.type().equals(dt.validate_type(expected_type))\n\n\[email protected](\n ['value', 'expected_type', 'expected_class'],\n [\n (list('abc'), 'array<string>', ir.ArrayScalar),\n ([1, 2, 3], 'array<int8>', ir.ArrayScalar),\n ({'a': 1, 'b': 2, 'c': 3}, 'map<string, int8>', ir.MapScalar),\n ({1: 2, 3: 4, 5: 6}, 'map<int8, int8>', ir.MapScalar),\n (\n {'a': [1.0, 2.0], 'b': [], 'c': [3.0]},\n 'map<string, array<double>>',\n ir.MapScalar,\n ),\n (\n OrderedDict(\n [\n ('a', 1),\n ('b', list('abc')),\n ('c', OrderedDict([('foo', [1.0, 2.0])])),\n ]\n ),\n 'struct<a: int8, b: array<string>, c: struct<foo: array<double>>>',\n ir.StructScalar,\n ),\n ],\n)\ndef test_literal_complex_types(value, expected_type, expected_class):\n expr = ibis.literal(value)\n expr_type = expr.type()\n assert expr_type.equals(dt.validate_type(expected_type))\n 
assert isinstance(expr, expected_class)\n assert isinstance(expr.op(), ops.Literal)\n assert expr.op().value is value\n\n\ndef test_struct_operations():\n value = OrderedDict(\n [\n ('a', 1),\n ('b', list('abc')),\n ('c', OrderedDict([('foo', [1.0, 2.0])])),\n ]\n )\n expr = ibis.literal(value)\n assert isinstance(expr, ir.StructValue)\n assert isinstance(expr.b, ir.ArrayValue)\n assert isinstance(expr.a.op(), ops.StructField)\n\n\ndef test_simple_map_operations():\n value = {'a': [1.0, 2.0], 'b': [], 'c': [3.0]}\n value2 = {'a': [1.0, 2.0], 'c': [3.0], 'd': [4.0, 5.0]}\n expr = ibis.literal(value)\n expr2 = ibis.literal(value2)\n assert isinstance(expr, ir.MapValue)\n assert isinstance(expr.length().op(), ops.MapLength)\n assert isinstance((expr + expr2).op(), ops.MapConcat)\n assert isinstance((expr2 + expr).op(), ops.MapConcat)\n\n default = ibis.literal([0.0])\n assert isinstance(expr.get('d', default).op(), ops.MapValueOrDefaultForKey)\n\n # test for an invalid default type, nulls are ok\n with pytest.raises(IbisTypeError):\n expr.get('d', ibis.literal('foo'))\n\n assert isinstance(\n expr.get('d', ibis.literal(None)).op(), ops.MapValueOrDefaultForKey\n )\n\n assert isinstance(expr['b'].op(), ops.MapValueForKey)\n assert isinstance(expr.keys().op(), ops.MapKeys)\n assert isinstance(expr.values().op(), ops.MapValues)\n\n\[email protected](\n ['value', 'expected_type'],\n [\n (32767, 'int8'),\n (32768, 'int16'),\n (2147483647, 'int16'),\n (2147483648, 'int32'),\n ('foo', 'double'),\n ],\n)\ndef test_literal_with_non_coercible_type(value, expected_type):\n expected_msg = 'Value .* cannot be safely coerced to .*'\n with pytest.raises(TypeError, match=expected_msg):\n ibis.literal(value, type=expected_type)\n\n\ndef test_non_inferrable_literal():\n expected_msg = (\n 'The datatype of value .* cannot be inferred, try '\n 'passing it explicitly with the `type` keyword.'\n )\n\n value = tuple(pointA)\n\n with pytest.raises(TypeError, match=expected_msg):\n ibis.literal(value)\n\n point = ibis.literal(value, type='point')\n assert point.type() == dt.point\n\n\ndef test_literal_list():\n what = [1, 2, 1000]\n expr = api.literal(what)\n\n assert isinstance(expr, ir.ArrayScalar)\n\n # it works!\n repr(expr)\n\n\ndef test_literal_array():\n what = []\n expr = api.literal(what)\n assert isinstance(expr, ir.ArrayValue)\n assert expr.type().equals(dt.Array(dt.null))\n\n\ndef test_mixed_arity(table):\n what = [\"bar\", table.g, \"foo\"]\n expr = api.as_value_expr(what)\n\n values = expr.op().values\n assert isinstance(values[1], ir.StringColumn)\n\n # it works!\n repr(expr)\n\n\[email protected]('container', [list, tuple, set, frozenset])\ndef test_isin_notin_list(table, container):\n values = container([1, 2, 3, 4])\n\n expr = table.a.isin(values)\n not_expr = table.a.notin(values)\n\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.Contains)\n\n assert isinstance(not_expr, ir.BooleanColumn)\n assert isinstance(not_expr.op(), ops.NotContains)\n\n\ndef test_value_counts(table, string_col):\n bool_clause = table[string_col].notin(['1', '4', '7'])\n expr = table[bool_clause][string_col].value_counts()\n assert isinstance(expr, ir.TableExpr)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_not_comparable():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_array_expr():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_invalid_cases():\n # For example, array expression in a 
list of values, where the inner\n # array values originate from some other table\n assert False\n\n\ndef test_isin_notin_scalars():\n a, b, c = [ibis.literal(x) for x in [1, 1, 2]]\n\n result = a.isin([1, 2])\n assert isinstance(result, ir.BooleanScalar)\n\n result = a.notin([b, c, 3])\n assert isinstance(result, ir.BooleanScalar)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_null():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_negate_isin():\n # Should yield a NotContains\n assert False\n\n\ndef test_scalar_isin_list_with_array(table):\n val = ibis.literal(2)\n\n options = [table.a, table.b, table.c]\n\n expr = val.isin(options)\n assert isinstance(expr, ir.BooleanColumn)\n\n not_expr = val.notin(options)\n assert isinstance(not_expr, ir.BooleanColumn)\n\n\ndef test_distinct_basic(functional_alltypes):\n expr = functional_alltypes.distinct()\n assert isinstance(expr.op(), ops.Distinct)\n assert isinstance(expr, ir.TableExpr)\n assert expr.op().table is functional_alltypes\n\n expr = functional_alltypes.string_col.distinct()\n assert isinstance(expr.op(), ops.DistinctColumn)\n\n assert isinstance(expr, ir.StringColumn)\n\n\[email protected](reason='NYT')\ndef test_distinct_array_interactions(functional_alltypes):\n # array cardinalities / shapes are likely to be different.\n a = functional_alltypes.int_col.distinct()\n b = functional_alltypes.bigint_col\n\n with pytest.raises(ir.RelationError):\n a + b\n\n\[email protected]('where', [lambda t: None, lambda t: t.int_col != 0])\ndef test_distinct_count(functional_alltypes, where):\n result = functional_alltypes.string_col.distinct().count(\n where=where(functional_alltypes)\n )\n assert isinstance(result.op(), ops.CountDistinct)\n\n expected = functional_alltypes.string_col.nunique(\n where=where(functional_alltypes)\n ).name('count')\n assert result.equals(expected)\n\n\ndef test_distinct_unnamed_array_expr():\n table = ibis.table(\n [('year', 'int32'), ('month', 'int32'), ('day', 'int32')], 'foo'\n )\n\n # it works!\n expr = (\n ibis.literal('-')\n .join(\n [\n table.year.cast('string'),\n table.month.cast('string'),\n table.day.cast('string'),\n ]\n )\n .distinct()\n )\n repr(expr)\n\n\ndef test_distinct_count_numeric_types(functional_alltypes):\n metric = (\n functional_alltypes.bigint_col.distinct()\n .count()\n .name('unique_bigints')\n )\n functional_alltypes.group_by('string_col').aggregate(metric)\n\n\ndef test_nunique(functional_alltypes):\n expr = functional_alltypes.string_col.nunique()\n assert isinstance(expr.op(), ops.CountDistinct)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_project_with_distinct():\n assert False\n\n\ndef test_isnull(table):\n expr = table['g'].isnull()\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.IsNull)\n\n expr = ibis.literal('foo').isnull()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.IsNull)\n\n\ndef test_notnull(table):\n expr = table['g'].notnull()\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.NotNull)\n\n expr = ibis.literal('foo').notnull()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.NotNull)\n\n\[email protected]('column', ['e', 'f'], ids=['float', 'double'])\ndef test_isnan_isinf_column(table, column):\n expr = table[column].isnan()\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.IsNan)\n\n expr = table[column].isinf()\n assert 
isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.IsInf)\n\n\[email protected]('value', [1.3, np.nan, np.inf, -np.inf])\ndef test_isnan_isinf_scalar(value):\n expr = ibis.literal(value).isnan()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.IsNan)\n\n expr = ibis.literal(value).isinf()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.IsInf)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_null_literal():\n assert False\n\n\[email protected](\n ['column', 'operation'],\n [\n ('d', 'cumsum'),\n ('d', 'cummean'),\n ('d', 'cummin'),\n ('d', 'cummax'),\n ('h', 'cumany'),\n ('h', 'cumall'),\n ],\n)\ndef test_cumulative_yield_array_types(table, column, operation):\n expr = getattr(getattr(table, column), operation)()\n assert isinstance(expr, ir.ColumnExpr)\n\n\[email protected](params=['ln', 'log', 'log2', 'log10'])\ndef log(request):\n return operator.methodcaller(request.param)\n\n\[email protected]('column', list('abcdef'))\ndef test_log(table, log, column):\n result = log(table[column])\n assert isinstance(result, ir.FloatingColumn)\n\n # is this what we want?\n # assert result.get_name() == c\n\n\ndef test_log_string(table):\n g = table.g\n\n with pytest.raises(IbisTypeError):\n ops.Log(g, None).to_expr()\n\n\[email protected]('klass', [ops.Ln, ops.Log2, ops.Log10])\ndef test_log_variants_string(table, klass):\n g = table.g\n\n with pytest.raises(IbisTypeError):\n klass(g).to_expr()\n\n\ndef test_log_boolean(table, log):\n # boolean not implemented for these\n h = table['h']\n with pytest.raises(IbisTypeError):\n log(h)\n\n\ndef test_log_literal(log):\n assert isinstance(log(ibis.literal(5)), ir.FloatingScalar)\n assert isinstance(log(ibis.literal(5.5)), ir.FloatingScalar)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_exp():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_sqrt():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_trig_functions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_round():\n assert False\n\n\ndef test_cast_same_type_noop(table):\n c = table.g\n assert c.cast('string') is c\n\n i = ibis.literal(5)\n assert i.cast('int8') is i\n\n\[email protected]('type', ['int8', 'int32', 'double', 'float'])\ndef test_string_to_number(table, type):\n casted = table.g.cast(type)\n casted_literal = ibis.literal('5').cast(type).name('bar')\n\n assert isinstance(casted, ir.ColumnExpr)\n assert casted.type() == dt.dtype(type)\n\n assert isinstance(casted_literal, ir.ScalarExpr)\n assert casted_literal.type() == dt.dtype(type)\n assert casted_literal.get_name() == 'bar'\n\n\[email protected]('col', list('abcdefh'))\ndef test_number_to_string_column(table, col):\n casted = table[col].cast('string')\n assert isinstance(casted, ir.StringColumn)\n\n\ndef test_number_to_string_scalar():\n casted_literal = ibis.literal(5).cast('string').name('bar')\n assert isinstance(casted_literal, ir.StringScalar)\n assert casted_literal.get_name() == 'bar'\n\n\ndef test_casted_exprs_are_named(table):\n expr = table.f.cast('string')\n assert expr.get_name() == 'cast(f, string)'\n\n # it works! 
per GH #396\n expr.value_counts()\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_nonzero():\n assert False\n\n\[email protected]('col', list('abcdefh'))\ndef test_negate(table, col):\n c = table[col]\n result = -c\n assert isinstance(result, type(c))\n assert isinstance(result.op(), ops.Negate)\n\n\ndef test_negate_boolean_scalar():\n result = -ibis.literal(False)\n assert isinstance(result, ir.BooleanScalar)\n assert isinstance(result.op(), ops.Negate)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isnull_notnull():\n assert False\n\n\[email protected]('column', ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])\[email protected]('how', [None, 'first', 'last', 'heavy'])\[email protected]('condition_fn', [lambda t: None, lambda t: t.a > 8])\ndef test_arbitrary(table, column, how, condition_fn):\n col = table[column]\n where = condition_fn(table)\n expr = col.arbitrary(how=how, where=where)\n assert expr.type() == col.type()\n assert isinstance(expr, ir.ScalarExpr)\n assert L.is_reduction(expr)\n\n\[email protected](\n ['column', 'operation'],\n [\n ('h', lambda column: column.any()),\n ('h', lambda column: column.notany()),\n ('h', lambda column: column.all()),\n ('c', lambda column: (column == 0).any()),\n ('c', lambda column: (column == 0).all()),\n ],\n)\ndef test_any_all_notany(table, column, operation):\n expr = operation(table[column])\n assert isinstance(expr, ir.BooleanScalar)\n assert L.is_reduction(expr)\n\n\[email protected](\n 'operation',\n [\n operator.lt,\n operator.gt,\n operator.ge,\n operator.le,\n operator.eq,\n operator.ne,\n ],\n)\[email protected]('column', list('abcdef'))\[email protected]('case', [2, 2 ** 9, 2 ** 17, 2 ** 33, 1.5])\ndef test_numbers_compare_numeric_literal(table, operation, column, case):\n ex_op_class = {\n operator.eq: ops.Equals,\n operator.ne: ops.NotEquals,\n operator.le: ops.LessEqual,\n operator.lt: ops.Less,\n operator.ge: ops.GreaterEqual,\n operator.gt: ops.Greater,\n }\n\n col = table[column]\n\n result = operation(col, case)\n assert isinstance(result, ir.BooleanColumn)\n assert isinstance(result.op(), ex_op_class[operation])\n\n\ndef test_boolean_comparisons(table):\n bool_col = table.h\n\n result = bool_col == True # noqa\n assert isinstance(result, ir.BooleanColumn)\n\n result = bool_col == False # noqa\n assert isinstance(result, ir.BooleanColumn)\n\n\[email protected](\n 'operation',\n [\n operator.lt,\n operator.gt,\n operator.ge,\n operator.le,\n operator.eq,\n operator.ne,\n ],\n)\ndef test_string_comparisons(table, operation):\n string_col = table.g\n result = operation(string_col, 'foo')\n assert isinstance(result, ir.BooleanColumn)\n\n\[email protected](\n 'operation', [operator.xor, operator.or_, operator.and_]\n)\ndef test_boolean_logical_ops(table, operation):\n expr = table.a > 0\n\n result = operation(expr, table.h)\n assert isinstance(result, ir.BooleanColumn)\n\n result = operation(expr, True)\n refl_result = operation(True, expr)\n assert isinstance(result, ir.BooleanColumn)\n assert isinstance(refl_result, ir.BooleanColumn)\n\n true = ibis.literal(True)\n false = ibis.literal(False)\n\n result = operation(true, false)\n assert isinstance(result, ir.BooleanScalar)\n\n\ndef test_null_column():\n t = ibis.table([('a', 'string')], name='t')\n s = t.mutate(b=ibis.NA)\n assert s.b.type() == dt.null\n assert isinstance(s.b, ir.NullColumn)\n\n\ndef test_null_column_union():\n s = ibis.table([('a', 'string'), ('b', 'double')])\n t = ibis.table([('a', 'string')])\n with 
pytest.raises(ibis.common.exceptions.RelationError):\n s.union(t.mutate(b=ibis.NA)) # needs a type\n assert s.union(t.mutate(b=ibis.NA.cast('double'))).schema() == s.schema()\n\n\ndef test_string_compare_numeric_array(table):\n with pytest.raises(TypeError):\n table.g == table.f\n\n with pytest.raises(TypeError):\n table.g == table.c\n\n\ndef test_string_compare_numeric_literal(table):\n with pytest.raises(TypeError):\n table.g == ibis.literal(1.5)\n\n with pytest.raises(TypeError):\n table.g == ibis.literal(5)\n\n\ndef test_between(table):\n result = table.f.between(0, 1)\n\n assert isinstance(result, ir.BooleanColumn)\n assert isinstance(result.op(), ops.Between)\n\n # it works!\n result = table.g.between('a', 'f')\n assert isinstance(result, ir.BooleanColumn)\n\n result = ibis.literal(1).between(table.a, table.c)\n assert isinstance(result, ir.BooleanColumn)\n\n result = ibis.literal(7).between(5, 10)\n assert isinstance(result, ir.BooleanScalar)\n\n # Cases where between should immediately fail, e.g. incomparables\n with pytest.raises(TypeError):\n table.f.between('0', '1')\n\n with pytest.raises(TypeError):\n table.f.between(0, '1')\n\n with pytest.raises(TypeError):\n table.f.between('0', 1)\n\n\ndef test_chained_comparisons_not_allowed(table):\n with pytest.raises(ValueError):\n 0 < table.f < 1\n\n\[email protected](\n 'operation', [operator.add, operator.mul, operator.truediv, operator.sub]\n)\ndef test_binop_string_type_error(table, operation):\n # Strings are not valid for any numeric arithmetic\n ints = table.d\n strs = table.g\n\n with pytest.raises(TypeError):\n operation(ints, strs)\n\n with pytest.raises(TypeError):\n operation(strs, ints)\n\n\[email protected](\n ['op', 'name', 'case', 'ex_type'],\n [\n (operator.add, 'a', 0, 'int8'),\n (operator.add, 'a', 5, 'int16'),\n (operator.add, 'a', 100000, 'int32'),\n (operator.add, 'a', -100000, 'int32'),\n (operator.add, 'a', 1.5, 'double'),\n (operator.add, 'b', 0, 'int16'),\n (operator.add, 'b', 5, 'int32'),\n (operator.add, 'b', -5, 'int32'),\n (operator.add, 'c', 0, 'int32'),\n (operator.add, 'c', 5, 'int64'),\n (operator.add, 'c', -5, 'int64'),\n # technically this can overflow, but we allow it\n (operator.add, 'd', 5, 'int64'),\n (operator.mul, 'a', 0, 'int8'),\n (operator.mul, 'a', 5, 'int16'),\n (operator.mul, 'a', 2 ** 24, 'int32'),\n (operator.mul, 'a', -2 ** 24 + 1, 'int32'),\n (operator.mul, 'a', 1.5, 'double'),\n (operator.mul, 'b', 0, 'int16'),\n (operator.mul, 'b', 5, 'int32'),\n (operator.mul, 'b', -5, 'int32'),\n (operator.mul, 'c', 0, 'int32'),\n (operator.mul, 'c', 5, 'int64'),\n (operator.mul, 'c', -5, 'int64'),\n # technically this can overflow, but we allow it\n (operator.mul, 'd', 5, 'int64'),\n (operator.sub, 'a', 5, 'int16'),\n (operator.sub, 'a', 100000, 'int32'),\n (operator.sub, 'a', -100000, 'int32'),\n (operator.sub, 'a', 1.5, 'double'),\n (operator.sub, 'b', 5, 'int32'),\n (operator.sub, 'b', -5, 'int32'),\n (operator.sub, 'c', 5, 'int64'),\n (operator.sub, 'c', -5, 'int64'),\n # technically this can overflow, but we allow it\n (operator.sub, 'd', 5, 'int64'),\n (operator.truediv, 'a', 5, 'double'),\n (operator.truediv, 'a', 1.5, 'double'),\n (operator.truediv, 'b', 5, 'double'),\n (operator.truediv, 'b', -5, 'double'),\n (operator.truediv, 'c', 5, 'double'),\n (operator.pow, 'a', 0, 'double'),\n (operator.pow, 'b', 0, 'double'),\n (operator.pow, 'c', 0, 'double'),\n (operator.pow, 'd', 0, 'double'),\n (operator.pow, 'e', 0, 'float'),\n (operator.pow, 'f', 0, 'double'),\n (operator.pow, 'a', 2, 
'double'),\n (operator.pow, 'b', 2, 'double'),\n (operator.pow, 'c', 2, 'double'),\n (operator.pow, 'd', 2, 'double'),\n (operator.pow, 'a', 1.5, 'double'),\n (operator.pow, 'b', 1.5, 'double'),\n (operator.pow, 'c', 1.5, 'double'),\n (operator.pow, 'd', 1.5, 'double'),\n (operator.pow, 'e', 2, 'float'),\n (operator.pow, 'f', 2, 'double'),\n (operator.pow, 'a', -2, 'double'),\n (operator.pow, 'b', -2, 'double'),\n (operator.pow, 'c', -2, 'double'),\n (operator.pow, 'd', -2, 'double'),\n ],\n ids=lambda arg: str(getattr(arg, '__name__', arg)),\n)\ndef test_literal_promotions(table, op, name, case, ex_type):\n col = table[name]\n\n result = op(col, case)\n assert result.type() == dt.dtype(ex_type)\n\n result = op(case, col)\n assert result.type() == dt.dtype(ex_type)\n\n\[email protected](\n ('op', 'left_fn', 'right_fn', 'ex_type'),\n [\n (operator.sub, lambda t: t['a'], lambda t: 0, 'int8'),\n (operator.sub, lambda t: 0, lambda t: t['a'], 'int16'),\n (operator.sub, lambda t: t['b'], lambda t: 0, 'int16'),\n (operator.sub, lambda t: 0, lambda t: t['b'], 'int32'),\n (operator.sub, lambda t: t['c'], lambda t: 0, 'int32'),\n (operator.sub, lambda t: 0, lambda t: t['c'], 'int64'),\n ],\n ids=lambda arg: str(getattr(arg, '__name__', arg)),\n)\ndef test_zero_subtract_literal_promotions(\n table, op, left_fn, right_fn, ex_type\n):\n # in case of zero subtract the order of operands matters\n left, right = left_fn(table), right_fn(table)\n result = op(left, right)\n\n assert result.type() == dt.dtype(ex_type)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_add_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_subtract_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_multiply_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_divide_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_string_add_concat():\n assert False\n\n\[email protected]\ndef expr():\n exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]\n\n return ibis.expr_list(exprs)\n\n\ndef test_names(expr):\n assert expr.names() == ['a', 'b']\n\n\ndef test_prefix(expr):\n prefixed = expr.prefix('foo_')\n result = prefixed.names()\n assert result == ['foo_a', 'foo_b']\n\n\ndef test_rename(expr):\n renamed = expr.rename(lambda x: 'foo({0})'.format(x))\n result = renamed.names()\n assert result == ['foo(a)', 'foo(b)']\n\n\ndef test_suffix(expr):\n suffixed = expr.suffix('.x')\n result = suffixed.names()\n assert result == ['a.x', 'b.x']\n\n\ndef test_concat():\n exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]\n exprs2 = [ibis.literal(3).name('c'), ibis.literal(4).name('d')]\n\n list1 = ibis.expr_list(exprs)\n list2 = ibis.expr_list(exprs2)\n\n result = list1.concat(list2)\n expected = ibis.expr_list(exprs + exprs2)\n assert_equal(result, expected)\n\n\ndef test_substitute_dict():\n table = ibis.table([('foo', 'string'), ('bar', 'string')], 't1')\n subs = {'a': 'one', 'b': table.bar}\n\n result = table.foo.substitute(subs)\n expected = (\n table.foo.case()\n .when('a', 'one')\n .when('b', table.bar)\n .else_(table.foo)\n .end()\n )\n assert_equal(result, expected)\n\n result = table.foo.substitute(subs, else_=ibis.NA)\n expected = (\n table.foo.case()\n .when('a', 'one')\n .when('b', table.bar)\n .else_(ibis.NA)\n .end()\n )\n assert_equal(result, expected)\n\n\[email protected](\n 'typ',\n 
[\n 'array<map<string, array<array<double>>>>',\n 'string',\n 'double',\n 'float',\n 'int64',\n ],\n)\ndef test_not_without_boolean(typ):\n t = ibis.table([('a', typ)], name='t')\n c = t.a\n with pytest.raises(TypeError):\n ~c\n\n\[email protected](\n ('position', 'names'),\n [\n (0, 'foo'),\n (1, 'bar'),\n ([0], ['foo']),\n ([1], ['bar']),\n ([0, 1], ['foo', 'bar']),\n ([1, 0], ['bar', 'foo']),\n ],\n)\[email protected](\n 'expr_func',\n [\n lambda t, args: t[args],\n lambda t, args: t.sort_by(args),\n lambda t, args: t.group_by(args).aggregate(bar_avg=t.bar.mean()),\n ],\n)\ndef test_table_operations_with_integer_column(position, names, expr_func):\n t = ibis.table([('foo', 'string'), ('bar', 'double')])\n result = expr_func(t, position)\n expected = expr_func(t, names)\n assert result.equals(expected)\n\n\[email protected]('value', ['abcdefg', ['a', 'b', 'c'], [1, 2, 3]])\[email protected](\n 'operation', ['pow', 'sub', 'truediv', 'floordiv', 'mod']\n)\ndef test_generic_value_api_no_arithmetic(value, operation):\n func = getattr(operator, operation)\n expr = ibis.literal(value)\n with pytest.raises(TypeError):\n func(expr, expr)\n\n\[email protected](\n ('value', 'expected'), [(5, dt.int8), (5.4, dt.double), ('abc', dt.string)]\n)\ndef test_fillna_null(value, expected):\n assert ibis.NA.fillna(value).type().equals(expected)\n\n\[email protected](\n ('left', 'right'),\n [\n (literal('2017-04-01'), date(2017, 4, 2)),\n (date(2017, 4, 2), literal('2017-04-01')),\n (literal('2017-04-01 01:02:33'), datetime(2017, 4, 1, 1, 3, 34)),\n (datetime(2017, 4, 1, 1, 3, 34), literal('2017-04-01 01:02:33')),\n ],\n)\[email protected](\n 'op',\n [\n operator.eq,\n operator.ne,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n lambda left, right: ibis.timestamp('2017-04-01 00:02:34').between(\n left, right\n ),\n lambda left, right: ibis.timestamp('2017-04-01')\n .cast(dt.date)\n .between(left, right),\n ],\n)\ndef test_string_temporal_compare(op, left, right):\n result = op(left, right)\n assert result.type().equals(dt.boolean)\n\n\[email protected](\n ('value', 'type', 'expected_type_class'),\n [\n (2.21, 'decimal', dt.Decimal),\n (3.14, 'double', dt.Double),\n (4.2, 'int64', dt.Double),\n (4, 'int64', dt.Int64),\n ],\n)\ndef test_decimal_modulo_output_type(value, type, expected_type_class):\n t = ibis.table([('a', type)])\n expr = t.a % value\n assert isinstance(expr.type(), expected_type_class)\n\n\[email protected](\n ('left', 'right'),\n [(literal('10:00'), time(10, 0)), (time(10, 0), literal('10:00'))],\n)\[email protected](\n 'op',\n [\n operator.eq,\n operator.ne,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n ],\n)\ndef test_time_compare(op, left, right):\n result = op(left, right)\n assert result.type().equals(dt.boolean)\n\n\[email protected](\n ('left', 'right'),\n [\n (literal('10:00'), date(2017, 4, 2)),\n (literal('10:00'), datetime(2017, 4, 2, 1, 1)),\n (literal('10:00'), literal('2017-04-01')),\n ],\n)\[email protected](\n 'op', [operator.eq, operator.lt, operator.le, operator.gt, operator.ge]\n)\ndef test_time_timestamp_invalid_compare(op, left, right):\n result = op(left, right)\n assert result.type().equals(dt.boolean)\n\n\ndef test_scalar_parameter_set():\n value = ibis.param({dt.int64})\n\n assert isinstance(value.op(), ops.ScalarParameter)\n assert value.type().equals(dt.Set(dt.int64))\n\n\ndef test_scalar_parameter_repr():\n value = ibis.param(dt.timestamp).name('value')\n assert repr(value) == 'value = ScalarParameter[timestamp]'\n\n value_op = 
value.op()\n assert repr(value_op) == \"ScalarParameter(type=timestamp)\"\n\n\[email protected](\n ('left', 'right', 'expected'),\n [\n (\n # same value type, same name\n ibis.param(dt.timestamp),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # different value type, same name\n ibis.param(dt.date),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # same value type, different name\n ibis.param(dt.timestamp),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # different value type, different name\n ibis.param(dt.date),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # different Python class, left side is param\n ibis.param(dt.timestamp),\n dt.date,\n False,\n ),\n (\n # different Python class, right side is param\n dt.date,\n ibis.param(dt.timestamp),\n False,\n ),\n ],\n)\ndef test_scalar_parameter_compare(left, right, expected):\n assert left.equals(right) == expected\n\n\[email protected](\n ('case', 'creator'),\n [\n (datetime.now(), toolz.compose(methodcaller('time'), ibis.timestamp)),\n ('now', toolz.compose(methodcaller('time'), ibis.timestamp)),\n (datetime.now().time(), ibis.time),\n ('10:37', ibis.time),\n ],\n)\[email protected](\n ('left', 'right'), [(1, 'a'), ('a', 1), (1.0, 2.0), (['a'], [1])]\n)\ndef test_between_time_failure_time(case, creator, left, right):\n value = creator(case)\n with pytest.raises(TypeError):\n value.between(left, right)\n\n\ndef test_custom_type_binary_operations():\n class Foo(ir.ValueExpr):\n def __add__(self, other):\n op = self.op()\n return type(op)(op.value + other).to_expr()\n\n __radd__ = __add__\n\n class FooNode(ops.ValueOp):\n value = Arg(rlz.integer)\n\n def output_type(self):\n return functools.partial(Foo, dtype=dt.int64)\n\n left = ibis.literal(2)\n right = FooNode(3).to_expr()\n result = left + right\n assert isinstance(result, Foo)\n assert isinstance(result.op(), FooNode)\n\n left = FooNode(3).to_expr()\n right = ibis.literal(2)\n result = left + right\n assert isinstance(result, Foo)\n assert isinstance(result.op(), FooNode)\n\n\ndef test_empty_array_as_argument():\n class Foo(ir.Expr):\n pass\n\n class FooNode(ops.ValueOp):\n value = Arg(rlz.value(dt.Array(dt.int64)))\n\n def output_type(self):\n return Foo\n\n node = FooNode([])\n value = node.value\n expected = literal([]).cast(dt.Array(dt.int64))\n\n assert value.type().equals(dt.Array(dt.null))\n assert value.cast(dt.Array(dt.int64)).equals(expected)\n\n\ndef test_struct_field_dir():\n t = ibis.table([('struct_col', 'struct<my_field: string>')])\n assert 'struct_col' in dir(t)\n assert 'my_field' in dir(t.struct_col)\n\n\ndef test_nullable_column_propagated():\n t = ibis.table(\n [\n ('a', dt.Int32(nullable=True)),\n ('b', dt.Int32(nullable=False)),\n ('c', dt.String(nullable=False)),\n ('d', dt.double), # nullable by default\n ('f', dt.Double(nullable=False)),\n ]\n )\n\n assert t.a.type().nullable is True\n assert t.b.type().nullable is False\n assert t.c.type().nullable is False\n assert t.d.type().nullable is True\n assert t.f.type().nullable is False\n\n s = t.a + t.d\n assert s.type().nullable is True\n\n s = t.b + t.d\n assert s.type().nullable is True\n\n s = t.b + t.f\n assert s.type().nullable is False\n\n\[email protected](\n 'base_expr',\n [\n ibis.table([('interval_col', dt.Interval(unit='D'))]).interval_col,\n ibis.interval(seconds=42),\n ],\n)\ndef test_interval_negate(base_expr):\n expr = -base_expr\n expr2 = base_expr.negate()\n expr3 = ibis.negate(base_expr)\n assert isinstance(expr.op(), ops.Negate)\n assert expr.equals(expr2)\n assert 
expr.equals(expr3)\n\n\ndef test_large_timestamp():\n expr = ibis.timestamp('4567-02-03')\n expected = datetime(year=4567, month=2, day=3)\n result = expr.op().value\n assert result == expected\n\n\[email protected]('tz', [None, 'UTC'])\ndef test_timestamp_with_timezone(tz):\n expr = ibis.timestamp('2017-01-01', timezone=tz)\n expected = pd.Timestamp('2017-01-01', tz=tz)\n result = expr.op().value\n assert expected == result\n\n\[email protected]('tz', [None, 'UTC'])\ndef test_timestamp_timezone_type(tz):\n expr = ibis.timestamp('2017-01-01', timezone=tz)\n expected = dt.Timestamp(timezone=tz)\n assert expected == expr.op().dtype\n\n\ndef test_map_get_broadcast():\n t = ibis.table([('a', 'string')], name='t')\n lookup_table = ibis.literal({'a': 1, 'b': 2})\n expr = lookup_table.get(t.a)\n assert isinstance(expr, ir.IntegerColumn)\n\n\ndef test_map_getitem_broadcast():\n t = ibis.table([('a', 'string')], name='t')\n lookup_table = ibis.literal({'a': 1, 'b': 2})\n expr = lookup_table[t.a]\n assert isinstance(expr, ir.IntegerColumn)\n\n\ndef test_map_keys_output_type():\n mapping = ibis.literal({'a': 1, 'b': 2})\n assert mapping.keys().type() == dt.Array(dt.string)\n\n\ndef test_map_values_output_type():\n mapping = ibis.literal({'a': 1, 'b': 2})\n assert mapping.values().type() == dt.Array(dt.int8)\n\n\ndef test_scalar_isin_map_keys():\n mapping = ibis.literal({'a': 1, 'b': 2})\n key = ibis.literal('a')\n expr = key.isin(mapping.keys())\n assert isinstance(expr, ir.BooleanScalar)\n\n\ndef test_column_isin_map_keys():\n t = ibis.table([('a', 'string')], name='t')\n mapping = ibis.literal({'a': 1, 'b': 2})\n expr = t.a.isin(mapping.keys())\n assert isinstance(expr, ir.BooleanColumn)\n\n\ndef test_map_get_with_compatible_value_smaller():\n value = ibis.literal({'A': 1000, 'B': 2000})\n expr = value.get('C', 3)\n assert value.type() == dt.Map(dt.string, dt.int16)\n assert expr.type() == dt.int16\n\n\ndef test_map_get_with_compatible_value_bigger():\n value = ibis.literal({'A': 1, 'B': 2})\n expr = value.get('C', 3000)\n assert value.type() == dt.Map(dt.string, dt.int8)\n assert expr.type() == dt.int16\n\n\ndef test_map_get_with_incompatible_value_different_kind():\n value = ibis.literal({'A': 1000, 'B': 2000})\n with pytest.raises(IbisTypeError):\n value.get('C', 3.0)\n\n\[email protected]('null_value', [None, ibis.NA])\ndef test_map_get_with_null_on_not_nullable(null_value):\n map_type = dt.Map(dt.string, dt.Int16(nullable=False))\n value = ibis.literal({'A': 1000, 'B': 2000}).cast(map_type)\n assert value.type() == map_type\n with pytest.raises(IbisTypeError):\n assert value.get('C', null_value)\n\n\[email protected]('null_value', [None, ibis.NA])\ndef test_map_get_with_null_on_nullable(null_value):\n value = ibis.literal({'A': 1000, 'B': None})\n result = value.get('C', null_value)\n assert result.type().nullable\n\n\[email protected]('null_value', [None, ibis.NA])\ndef test_map_get_with_null_on_null_type_with_null(null_value):\n value = ibis.literal({'A': None, 'B': None})\n result = value.get('C', null_value)\n assert result.type().nullable\n\n\ndef test_map_get_with_null_on_null_type_with_non_null():\n value = ibis.literal({'A': None, 'B': None})\n assert value.get('C', 1).type() == dt.int8\n\n\ndef test_map_get_with_incompatible_value():\n value = ibis.literal({'A': 1000, 'B': 2000})\n with pytest.raises(IbisTypeError):\n value.get('C', ['A'])\n\n\[email protected](\n ('value', 'expected_type'),\n [\n (datetime.now(), dt.timestamp),\n (datetime.now().date(), dt.date),\n 
(datetime.now().time(), dt.time),\n ],\n)\ndef test_invalid_negate(value, expected_type):\n expr = ibis.literal(value)\n assert expr.type() == expected_type\n with pytest.raises(TypeError):\n -expr\n\n\[email protected](\n 'type',\n [\n np.float16,\n np.float32,\n np.float64,\n np.int16,\n np.int32,\n np.int64,\n np.int64,\n np.int8,\n np.timedelta64,\n np.uint16,\n np.uint32,\n np.uint64,\n np.uint64,\n np.uint8,\n float,\n int,\n ],\n)\ndef test_valid_negate(type):\n value = type(1)\n expr = ibis.literal(value)\n assert -expr is not None\n\n\[email protected](\n reason='Type not supported in most backends', raises=TypeError\n)\[email protected](\n os.name == 'nt', reason='np.float128 not appear to exist on windows'\n)\ndef test_valid_negate_float128():\n value = np.float128(1)\n expr = ibis.literal(value)\n assert -expr is not None\n\n\[email protected](\n ('kind', 'begin', 'end'),\n [\n ('preceding', None, None),\n ('preceding', 1, None),\n ('preceding', -1, 1),\n ('preceding', 1, -1),\n ('preceding', -1, -1),\n ('following', None, None),\n ('following', None, 1),\n ('following', -1, 1),\n ('following', 1, -1),\n ('following', -1, -1),\n ],\n)\ndef test_window_unbounded_invalid(kind, begin, end):\n kwargs = {kind: (begin, end)}\n with pytest.raises(com.IbisInputError):\n ibis.window(**kwargs)\n\n\[email protected](\n ('left', 'right', 'expected'),\n [\n (ibis.literal(1), ibis.literal(1.0), dt.float64),\n (ibis.literal('a'), ibis.literal('b'), dt.string),\n (ibis.literal(1.0), ibis.literal(1), dt.float64),\n (ibis.literal(1), ibis.literal(1), dt.int8),\n (ibis.literal(1), ibis.literal(1000), dt.int16),\n (ibis.literal(2 ** 16), ibis.literal(2 ** 17), dt.int32),\n (ibis.literal(2 ** 50), ibis.literal(1000), dt.int64),\n (ibis.literal([1, 2]), ibis.literal([1, 2]), dt.Array(dt.int8)),\n (ibis.literal(['a']), ibis.literal([]), dt.Array(dt.string)),\n (ibis.literal([]), ibis.literal(['a']), dt.Array(dt.string)),\n (ibis.literal([]), ibis.literal([]), dt.Array(dt.null)),\n ],\n)\ndef test_nullif_type(left, right, expected):\n assert left.nullif(right).type() == expected\n\n\[email protected](\n ('left', 'right'), [(ibis.literal(1), ibis.literal('a'))]\n)\ndef test_nullif_fail(left, right):\n with pytest.raises(com.IbisTypeError):\n left.nullif(right)\n with pytest.raises(com.IbisTypeError):\n right.nullif(left)\n\n\[email protected](\n \"join_method\",\n [\n \"left_join\",\n pytest.param(\n \"right_join\",\n marks=pytest.mark.xfail(\n raises=AttributeError, reason=\"right_join is not an ibis API\"\n ),\n ),\n \"inner_join\",\n \"outer_join\",\n \"asof_join\",\n pytest.param(\n \"semi_join\",\n marks=pytest.mark.xfail(\n raises=com.IbisTypeError,\n reason=(\n \"semi_join only gives access to the left table's \"\n \"columns\"\n ),\n ),\n ),\n ],\n)\[email protected](\n raises=(com.IbisError, AttributeError),\n reason=\"Select from unambiguous joins not implemented\",\n)\ndef test_select_on_unambiguous_join(join_method):\n t = ibis.table([(\"a0\", dt.int64), (\"b1\", dt.string)], name=\"t\")\n s = ibis.table([(\"a1\", dt.int64), (\"b2\", dt.string)], name=\"s\")\n method = getattr(t, join_method)\n join = method(s, t.b1 == s.b2)\n expr1 = join[\"a0\", \"a1\"]\n expr2 = join[[\"a0\", \"a1\"]]\n expr3 = join.select([\"a0\", \"a1\"])\n assert expr1.equals(expr2)\n assert expr1.equals(expr3)\n\n\ndef test_chained_select_on_join():\n t = ibis.table([(\"a\", dt.int64)], name=\"t\")\n s = ibis.table([(\"a\", dt.int64), (\"b\", dt.string)], name=\"s\")\n join = t.join(s)[t.a, s.b]\n expr1 = join[\"a\", 
\"b\"]\n expr2 = join.select([\"a\", \"b\"])\n assert expr1.equals(expr2)\n\n\ndef test_repr_list_of_lists():\n lit = ibis.literal([[1]])\n result = repr(lit)\n expected = \"\"\"\\\nLiteral[array<array<int8>>]\n [[1]]\"\"\"\n assert result == expected\n\n\ndef test_repr_list_of_lists_in_table():\n t = ibis.table([('a', 'int64')], name='t')\n lit = ibis.literal([[1]])\n expr = t[t, lit.name('array_of_array')]\n result = repr(expr)\n expected = \"\"\"\\\nref_0\nUnboundTable[table]\n name: t\n schema:\n a : int64\n\nSelection[table]\n table:\n Table: ref_0\n selections:\n Table: ref_0\n array_of_array = Literal[array<array<int8>>]\n [[1]]\"\"\"\n assert result == expected\n" ]
[ [ "pandas.Timestamp", "numpy.float128" ] ]
NamiKaze7/FinQA
[ "cf61ae2611ae205b62574e2b4264cb0318dd7202" ]
[ "code/generator/Model_new.py" ]
[ "import torch\nfrom torch import nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport math\nimport numpy as np\nfrom config import parameters as conf\n\nif conf.pretrained_model == \"bert\":\n from transformers import BertModel\nelif conf.pretrained_model == \"roberta\":\n from transformers import RobertaModel\nelif conf.pretrained_model == \"finbert\":\n from transformers import BertModel\nelif conf.pretrained_model == \"longformer\":\n from transformers import LongformerModel\n\n\nclass Bert_model(nn.Module):\n\n def __init__(self, num_decoder_layers, hidden_size, dropout_rate, input_length,\n program_length, op_list, const_list):\n\n super(Bert_model, self).__init__()\n\n self.op_list_size = len(op_list)\n self.const_list_size = len(const_list)\n self.reserved_token_size = self.op_list_size + self.const_list_size\n self.program_length = program_length\n self.hidden_size = hidden_size\n self.const_list = const_list\n self.op_list = op_list\n self.input_length = input_length\n\n self.reserved_ind = nn.Parameter(torch.arange(\n 0, self.reserved_token_size), requires_grad=False)\n self.reserved_go = nn.Parameter(torch.arange(op_list.index(\n 'GO'), op_list.index('GO') + 1), requires_grad=False)\n\n self.reserved_para = nn.Parameter(torch.arange(op_list.index(\n ')'), op_list.index(')') + 1), requires_grad=False)\n\n # masking for decoidng for test time\n op_ones = nn.Parameter(torch.ones(\n self.op_list_size), requires_grad=False)\n op_zeros = nn.Parameter(torch.zeros(\n self.op_list_size), requires_grad=False)\n other_ones = nn.Parameter(torch.ones(\n input_length + self.const_list_size), requires_grad=False)\n other_zeros = nn.Parameter(torch.zeros(\n input_length + self.const_list_size), requires_grad=False)\n self.op_only_mask = nn.Parameter(\n torch.cat((op_ones, other_zeros), 0), requires_grad=False)\n self.seq_only_mask = nn.Parameter(\n torch.cat((op_zeros, other_ones), 0), requires_grad=False)\n\n # for \")\"\n para_before_ones = nn.Parameter(torch.ones(\n op_list.index(')')), requires_grad=False)\n para_after_ones = nn.Parameter(torch.ones(\n input_length + self.reserved_token_size - op_list.index(')') - 1), requires_grad=False)\n para_zero = nn.Parameter(torch.zeros(1), requires_grad=False)\n self.para_mask = nn.Parameter(torch.cat(\n (para_before_ones, para_zero, para_after_ones), 0), requires_grad=False)\n\n # for step embedding\n # self.step_masks = []\n all_tmp_list = self.op_list + self.const_list\n self.step_masks = nn.Parameter(torch.zeros(\n conf.max_step_ind, input_length + self.reserved_token_size), requires_grad=False)\n for i in range(conf.max_step_ind):\n this_step_mask_ind = all_tmp_list.index(\"#\" + str(i))\n self.step_masks[i, this_step_mask_ind] = 1.0\n\n # self.step_mask_eye = torch.eye(conf.max_step_ind)\n\n if conf.pretrained_model == \"bert\":\n self.bert = BertModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n elif conf.pretrained_model == \"roberta\":\n self.bert = RobertaModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n elif conf.pretrained_model == \"finbert\":\n self.bert = BertModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n elif conf.pretrained_model == \"longformer\":\n self.bert = LongformerModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n\n self.cls_prj = nn.Linear(hidden_size, hidden_size, bias=True)\n self.cls_dropout = nn.Dropout(dropout_rate)\n\n self.seq_prj = nn.Linear(hidden_size, hidden_size, bias=True)\n self.seq_dropout = 
nn.Dropout(dropout_rate)\n\n self.reserved_token_embedding = nn.Embedding(\n self.reserved_token_size, hidden_size)\n\n # attentions\n self.decoder_history_attn_prj = nn.Linear(\n hidden_size, hidden_size, bias=True)\n self.decoder_history_attn_dropout = nn.Dropout(dropout_rate)\n\n self.question_attn_prj = nn.Linear(hidden_size, hidden_size, bias=True)\n self.question_attn_dropout = nn.Dropout(dropout_rate)\n\n self.question_summary_attn_prj = nn.Linear(\n hidden_size, hidden_size, bias=True)\n self.question_summary_attn_dropout = nn.Dropout(dropout_rate)\n\n if conf.sep_attention:\n self.input_embeddings_prj = nn.Linear(\n hidden_size*3, hidden_size, bias=True)\n else:\n self.input_embeddings_prj = nn.Linear(\n hidden_size*2, hidden_size, bias=True)\n self.input_embeddings_layernorm = nn.LayerNorm([1, hidden_size])\n\n self.option_embeddings_prj = nn.Linear(\n hidden_size*2, hidden_size, bias=True)\n\n # decoder lstm\n self.rnn = torch.nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,\n num_layers=conf.num_decoder_layers, batch_first=True)\n\n # step vector\n self.decoder_step_proj = nn.Linear(\n 3*hidden_size, hidden_size, bias=True)\n self.decoder_step_proj_dropout = nn.Dropout(dropout_rate)\n\n self.step_mix_proj = nn.Linear(\n hidden_size*2, hidden_size, bias=True)\n\n def forward(self, is_training, input_ids, input_mask, segment_ids, option_mask, program_ids, program_mask, device):\n\n bert_outputs = self.bert(\n input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)\n\n bert_sequence_output = bert_outputs.last_hidden_state\n bert_pooled_output = bert_sequence_output[:, 0, :]\n batch_size, seq_length, bert_dim = list(bert_sequence_output.size())\n\n split_program_ids = torch.split(program_ids, 1, dim=1)\n # print(self.program_length)\n # print(program_ids.size())\n # print(split_program_ids[0].size())\n\n pooled_output = self.cls_prj(bert_pooled_output)\n pooled_output = self.cls_dropout(pooled_output)\n\n option_size = self.reserved_token_size + seq_length\n\n sequence_output = self.seq_prj(bert_sequence_output)\n sequence_output = self.seq_dropout(sequence_output)\n\n op_embeddings = self.reserved_token_embedding(self.reserved_ind)\n op_embeddings = op_embeddings.repeat(batch_size, 1, 1)\n\n logits = []\n\n init_decoder_output = self.reserved_token_embedding(self.reserved_go)\n decoder_output = init_decoder_output.repeat(batch_size, 1, 1)\n\n # [batch, op + seq len, hidden]\n initial_option_embeddings = torch.cat(\n [op_embeddings, sequence_output], dim=1)\n\n if conf.sep_attention:\n decoder_history = decoder_output\n else:\n decoder_history = torch.unsqueeze(pooled_output, dim=-1)\n\n decoder_state_h = torch.zeros(\n 1, batch_size, self.hidden_size, device=device)\n decoder_state_c = torch.zeros(\n 1, batch_size, self.hidden_size, device=device)\n\n float_input_mask = input_mask.float()\n float_input_mask = torch.unsqueeze(float_input_mask, dim=-1)\n\n this_step_new_op_emb = initial_option_embeddings\n\n for cur_step in range(self.program_length):\n\n # decoder history att\n decoder_history_attn_vec = self.decoder_history_attn_prj(\n decoder_output)\n decoder_history_attn_vec = self.decoder_history_attn_dropout(\n decoder_history_attn_vec)\n\n decoder_history_attn_w = torch.matmul(\n decoder_history, torch.transpose(decoder_history_attn_vec, 1, 2))\n decoder_history_attn_w = F.softmax(decoder_history_attn_w, dim=1)\n\n decoder_history_ctx_embeddings = torch.matmul(\n torch.transpose(decoder_history_attn_w, 1, 2), decoder_history)\n\n if 
conf.sep_attention:\n # input seq att\n question_attn_vec = self.question_attn_prj(decoder_output)\n question_attn_vec = self.question_attn_dropout(\n question_attn_vec)\n\n question_attn_w = torch.matmul(\n sequence_output, torch.transpose(question_attn_vec, 1, 2))\n question_attn_w -= 1e6 * (1 - float_input_mask)\n question_attn_w = F.softmax(question_attn_w, dim=1)\n\n question_ctx_embeddings = torch.matmul(\n torch.transpose(question_attn_w, 1, 2), sequence_output)\n\n # another input seq att\n question_summary_vec = self.question_summary_attn_prj(\n decoder_output)\n question_summary_vec = self.question_summary_attn_dropout(\n question_summary_vec)\n\n question_summary_w = torch.matmul(\n sequence_output, torch.transpose(question_summary_vec, 1, 2))\n question_summary_w -= 1e6 * (1 - float_input_mask)\n question_summary_w = F.softmax(question_summary_w, dim=1)\n\n question_summary_embeddings = torch.matmul(\n torch.transpose(question_summary_w, 1, 2), sequence_output)\n\n if conf.sep_attention:\n concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,\n question_ctx_embeddings,\n decoder_output], dim=-1)\n else:\n concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,\n decoder_output], dim=-1)\n\n input_embeddings = self.input_embeddings_prj(\n concat_input_embeddings)\n\n if conf.layer_norm:\n input_embeddings = self.input_embeddings_layernorm(\n input_embeddings)\n\n question_option_vec = this_step_new_op_emb * question_summary_embeddings\n option_embeddings = torch.cat(\n [this_step_new_op_emb, question_option_vec], dim=-1)\n\n option_embeddings = self.option_embeddings_prj(option_embeddings)\n option_logits = torch.matmul(\n option_embeddings, torch.transpose(input_embeddings, 1, 2))\n option_logits = torch.squeeze(\n option_logits, dim=2) # [batch, op + seq_len]\n option_logits -= 1e6 * (1 - option_mask)\n logits.append(option_logits)\n\n if is_training:\n program_index = torch.unsqueeze(\n split_program_ids[cur_step], dim=1)\n else:\n # constrain decoding\n if cur_step % 4 == 0 or (cur_step + 1) % 4 == 0:\n # op round\n option_logits -= 1e6 * self.seq_only_mask\n else:\n # number round\n option_logits -= 1e6 * self.op_only_mask\n\n if (cur_step + 1) % 4 == 0:\n # \")\" round\n option_logits -= 1e6 * self.para_mask\n # print(program_index)\n\n program_index = torch.argmax(\n option_logits, axis=-1, keepdim=True)\n\n program_index = torch.unsqueeze(\n program_index, dim=1\n )\n\n if (cur_step + 1) % 4 == 0:\n\n # update op embeddings\n this_step_index = cur_step // 4\n this_step_list_index = (\n self.op_list + self.const_list).index(\"#\" + str(this_step_index))\n this_step_mask = self.step_masks[this_step_index, :]\n\n decoder_step_vec = self.decoder_step_proj(\n concat_input_embeddings)\n decoder_step_vec = self.decoder_step_proj_dropout(\n decoder_step_vec)\n decoder_step_vec = torch.squeeze(decoder_step_vec)\n\n this_step_new_emb = decoder_step_vec # [batch, hidden]\n\n this_step_new_emb = torch.unsqueeze(this_step_new_emb, 1)\n this_step_new_emb = this_step_new_emb.repeat(\n 1, self.reserved_token_size+self.input_length, 1) # [batch, op seq, hidden]\n\n this_step_mask = torch.unsqueeze(\n this_step_mask, 0) # [1, op seq]\n # print(this_step_mask)\n\n this_step_mask = torch.unsqueeze(\n this_step_mask, 2) # [1, op seq, 1]\n this_step_mask = this_step_mask.repeat(\n batch_size, 1, self.hidden_size) # [batch, op seq, hidden]\n\n this_step_new_op_emb = torch.where(\n this_step_mask > 0, this_step_new_emb, initial_option_embeddings)\n\n # 
print(program_index.size())\n program_index = torch.repeat_interleave(\n program_index, self.hidden_size, dim=2) # [batch, 1, hidden]\n\n input_program_embeddings = torch.gather(\n option_embeddings, dim=1, index=program_index)\n\n decoder_output, (decoder_state_h, decoder_state_c) = self.rnn(\n input_program_embeddings, (decoder_state_h, decoder_state_c))\n decoder_history = torch.cat(\n [decoder_history, input_program_embeddings], dim=1)\n\n logits = torch.stack(logits, dim=1)\n return logits\n" ]
[ [ "torch.stack", "torch.nn.functional.softmax", "torch.cat", "torch.nn.Dropout", "torch.repeat_interleave", "torch.gather", "torch.nn.LayerNorm", "torch.arange", "torch.unsqueeze", "torch.ones", "torch.nn.LSTM", "torch.argmax", "torch.transpose", "torch.nn.Linear", "torch.split", "torch.nn.Embedding", "torch.where", "torch.zeros", "torch.squeeze" ] ]
anishvaidya/MIT-Indoor-Scene-Recognition
[ "04b2c35b25996d420c6fe90c480b86635f3baffd" ]
[ "model_inception_v2_67class.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport imageio\nimport skimage\nimport skimage.io\nimport skimage.transform\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.optimizers import Adam,RMSprop,SGD\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_resnet_v2 import preprocess_input, decode_predictions\nimport matplotlib.pylab as plt\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\n# Using pre-trained model\nconv_base = InceptionResNetV2(include_top = False, weights = '/home/vanish/prgs/MLandDL/MITTest/Models/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5', input_shape = (200,200,3)) #150,150\nconv_base.summary()\n\n# build on top of imported model\nmodel = Sequential()\nmodel.add(conv_base)\nmodel.add(Flatten())\n#model.add(Dense(512,activation='relu'))\n#model.add(Dropout(0.5))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dense(67, activation='softmax'))\n\nmodel.compile(Adam(lr=0.0001),loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n#model.compile(SGD(lr=0.0001),loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\nmodel.summary()\n\n\ntrain_data_dir = 'Dataset/trainingset/'\nimg_width = 200\nimg_height = 200\nbatch_size = 8\nnb_epochs = 10\ntrain_datagen = ImageDataGenerator(rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n validation_split=0.1) # set validation split\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical',\n subset='training') # set as training data\n\nvalidation_generator = train_datagen.flow_from_directory(\n train_data_dir, # same directory as training data\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical',\n subset='validation') # set as validation data\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch = train_generator.samples // batch_size,\n validation_data = validation_generator, \n validation_steps = validation_generator.samples // batch_size,\n epochs = nb_epochs)\n\nmodel.save('inception_v2_200px.h5') \nmodel.save_weights('Weightsinception_v2_200px.h5')\n\nfrom keras.models import load_model\nmodel = load_model('inception_v2_200px.h5')\n\n# check classification mapping\ndict = train_generator.class_indices\n\n# Graphs\nprint(history.history.keys())\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(acc) + 1)\n\nplt.title('Training and validation accuracy')\nplt.plot(epochs, acc, 'red', label='Training acc')\nplt.plot(epochs, val_acc, 'blue', label='Validation acc')\nplt.legend()\n\nplt.figure()\nplt.title('Training and validation loss')\nplt.plot(epochs, loss, 'red', label='Training loss')\nplt.plot(epochs, val_loss, 'blue', label='Validation loss')\n\nplt.legend()\nplt.show()\n\nimport time\nimport numpy as np\nfrom keras.preprocessing import image\ntest_image = image.load_img('/home/vanish/prgs/MLandDL/MITIndoor/Dataset/trainingset/bathroom/b1.jpg', target_size = (200, 200))\ntest_image = image.img_to_array(test_image)\ntest_image = 
np.expand_dims(test_image, axis = 0)\ntest_image = preprocess_input(test_image) # added to check same preds issue\nstart_time = time.time()\nresult = model.predict(test_image)\n#decode_predictions(result)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nfor i in range (0,dict.__len__()):\n if result[0][i] >= 0.05:\n listOfKeys = [key for (key, value) in dict.items() if value == i]\n for key in listOfKeys:\n print(key) \n break\n" ]
[ [ "matplotlib.pylab.title", "matplotlib.pylab.legend", "matplotlib.pylab.figure", "matplotlib.pylab.show", "numpy.expand_dims", "matplotlib.pylab.plot" ] ]
veritaass/pig_mmdet
[ "6bb348a002695e83b2f16b84173ce0aebbb20e60" ]
[ "topdown_coco_tiny_dataset.py" ]
[ "import json\nimport os\nimport os.path as osp\n#from collections import OrderedDict\nimport tempfile\n\nimport numpy as np\n\nfrom mmpose.core.evaluation.top_down_eval import (keypoint_nme,\n keypoint_pck_accuracy)\nfrom mmpose.datasets.builder import DATASETS\nfrom mmpose.datasets.datasets.base import Kpt2dSviewRgbImgTopDownDataset\n\n######\n\nfrom collections import OrderedDict, defaultdict\n\nimport json_tricks as json\nimport numpy as np\nfrom mmcv import Config, deprecated_api_warning\nfrom xtcocotools.cocoeval import COCOeval\n\nfrom ....core.post_processing import oks_nms, soft_oks_nms\nfrom ...builder import DATASETS\nfrom ..base import Kpt2dSviewRgbImgTopDownDataset\n\n#####\n\[email protected]_module()\nclass TopDownCOCOTinyDataset(Kpt2dSviewRgbImgTopDownDataset):\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n dataset_info=None,\n test_mode=False):\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, dataset_info, coco_style=False, test_mode=test_mode)\n\n # flip_pairs, upper_body_ids and lower_body_ids will be used\n # in some data augmentations like random flip\n self.ann_info['flip_pairs'] = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],\n [11, 12], [13, 14], [15, 16]]\n self.ann_info['upper_body_ids'] = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n self.ann_info['lower_body_ids'] = (11, 12, 13, 14, 15, 16)\n\n self.ann_info['joint_weights'] = None\n self.ann_info['use_different_joint_weights'] = False\n\n self.dataset_name = 'coco_tiny'\n self.db = self._get_db()\n\n def _get_db(self):\n with open(self.ann_file) as f:\n anns = json.load(f)\n\n db = []\n for idx, ann in enumerate(anns):\n # get image path\n image_file = osp.join(self.img_prefix, ann['image_file'])\n # get bbox\n bbox = ann['bbox']\n center, scale = self._xywh2cs(*bbox)\n # get keypoints\n keypoints = np.array(\n ann['keypoints'], dtype=np.float32).reshape(-1, 3)\n num_joints = keypoints.shape[0]\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d[:, :2] = keypoints[:, :2]\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])\n\n sample = {\n 'image_file': image_file,\n 'center': center,\n 'scale': scale,\n 'bbox': bbox,\n 'rotation': 0,\n 'joints_3d': joints_3d,\n 'joints_3d_visible': joints_3d_visible,\n 'bbox_score': 1,\n 'bbox_id': idx,\n }\n db.append(sample)\n\n return db\n\n def _xywh2cs(self, x, y, w, h):\n \"\"\"This encodes bbox(x, y, w, h) into (center, scale)\n Args:\n x, y, w, h\n Returns:\n tuple: A tuple containing center and scale.\n - center (np.ndarray[float32](2,)): center of the bbox (x, y).\n - scale (np.ndarray[float32](2,)): scale of the bbox w & h.\n \"\"\"\n aspect_ratio = self.ann_info['image_size'][0] / self.ann_info[\n 'image_size'][1]\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\n if w > aspect_ratio * h:\n h = w * 1.0 / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n\n # pixel std is 200.0\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\n # padding to include proper amount of context\n scale = scale * 1.25\n return center, scale\n\n @deprecated_api_warning(name_dict=dict(outputs='results'))\n def evaluate(self, results, res_folder=None, metric='mAP', **kwargs):\n \"\"\"Evaluate coco keypoint results. 
The pose prediction results will be\n saved in ``${res_folder}/result_keypoints.json``.\n\n Note:\n - batch_size: N\n - num_keypoints: K\n - heatmap height: H\n - heatmap width: W\n\n Args:\n results (list[dict]): Testing results containing the following\n items:\n\n - preds (np.ndarray[N,K,3]): The first two dimensions are \\\n coordinates, score is the third dimension of the array.\n - boxes (np.ndarray[N,6]): [center[0], center[1], scale[0], \\\n scale[1],area, score]\n - image_paths (list[str]): For example, ['data/coco/val2017\\\n /000000393226.jpg']\n - heatmap (np.ndarray[N, K, H, W]): model output heatmap\n - bbox_id (list(int)).\n res_folder (str, optional): The folder to save the testing\n results. If not specified, a temp folder will be created.\n Default: None.\n metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['mAP']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n if res_folder is not None:\n tmp_folder = None\n res_file = osp.join(res_folder, 'result_keypoints.json')\n else:\n tmp_folder = tempfile.TemporaryDirectory()\n res_file = osp.join(tmp_folder.name, 'result_keypoints.json')\n\n kpts = defaultdict(list)\n\n for result in results:\n preds = result['preds']\n boxes = result['boxes']\n image_paths = result['image_paths']\n bbox_ids = result['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n kpts[image_id].append({\n 'keypoints': preds[i],\n 'center': boxes[i][0:2],\n 'scale': boxes[i][2:4],\n 'area': boxes[i][4],\n 'score': boxes[i][5],\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n # rescoring and oks nms\n num_joints = self.ann_info['num_joints']\n vis_thr = self.vis_thr\n oks_thr = self.oks_thr\n valid_kpts = []\n for image_id in kpts.keys():\n img_kpts = kpts[image_id]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > vis_thr:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n\n if self.use_nms:\n nms = soft_oks_nms if self.soft_nms else oks_nms\n keep = nms(img_kpts, oks_thr, sigmas=self.sigmas)\n valid_kpts.append([img_kpts[_keep] for _keep in keep])\n else:\n valid_kpts.append(img_kpts)\n\n self._write_coco_keypoint_results(valid_kpts, res_file)\n\n info_str = self._do_python_keypoint_eval(res_file)\n name_value = OrderedDict(info_str)\n\n if tmp_folder is not None:\n tmp_folder.cleanup()\n\n return name_value\n\n def _write_coco_keypoint_results(self, keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n data_pack = [{\n 'cat_id': self._class_to_coco_ind[cls],\n 'cls_ind': cls_ind,\n 'cls': cls,\n 'ann_type': 'keypoints',\n 'keypoints': keypoints\n } for cls_ind, cls in enumerate(self.classes)\n if not cls == '__background__']\n\n results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n\n def _coco_keypoint_results_one_category_kernel(self, data_pack):\n \"\"\"Get coco keypoint results.\"\"\"\n cat_id = data_pack['cat_id']\n keypoints = 
data_pack['keypoints']\n cat_results = []\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n _key_points = np.array(\n [img_kpt['keypoints'] for img_kpt in img_kpts])\n key_points = _key_points.reshape(-1,\n self.ann_info['num_joints'] * 3)\n\n result = [{\n 'image_id': img_kpt['image_id'],\n 'category_id': cat_id,\n 'keypoints': key_point.tolist(),\n 'score': float(img_kpt['score']),\n 'center': img_kpt['center'].tolist(),\n 'scale': img_kpt['scale'].tolist()\n } for img_kpt, key_point in zip(img_kpts, key_points)]\n\n cat_results.extend(result)\n\n return cat_results\n\n\n def _do_python_keypoint_eval(self, res_file):\n \"\"\"Keypoint evaluation using COCOAPI.\"\"\"\n coco_det = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_det, 'keypoints', self.sigmas)\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n stats_names = [\n 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',\n 'AR .75', 'AR (M)', 'AR (L)'\n ]\n\n info_str = list(zip(stats_names, coco_eval.stats))\n\n return info_str\n\n def _sort_and_unique_bboxes(self, kpts, key='bbox_id'):\n \"\"\"sort kpts and remove the repeated ones.\"\"\"\n for img_id, persons in kpts.items():\n num = len(persons)\n kpts[img_id] = sorted(kpts[img_id], key=lambda x: x[key])\n for i in range(num - 1, 0, -1):\n if kpts[img_id][i][key] == kpts[img_id][i - 1][key]:\n del kpts[img_id][i]\n\n return kpts\n\n\n def evaluate1111(self, results, res_folder=None, metric='PCK', **kwargs):\n# def evaluate(self, results, res_folder=None, metric='PCK', **kwargs):\n \"\"\"Evaluate keypoint detection results. The pose prediction results will\n be saved in `${res_folder}/result_keypoints.json`.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n results (list(preds, boxes, image_path, output_heatmap))\n :preds (np.ndarray[N,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n :boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n :image_paths (list[str]): For example, ['Test/source/0.jpg']\n :output_heatmap (np.ndarray[N, K, H, W]): model outputs.\n\n res_folder (str, optional): The folder to save the testing\n results. 
If not specified, a temp folder will be created.\n Default: None.\n metric (str | list[str]): Metric to be performed.\n Options: 'PCK', 'NME'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCK', 'NME']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n if res_folder is not None:\n tmp_folder = None\n res_file = osp.join(res_folder, 'result_keypoints.json')\n else:\n tmp_folder = tempfile.TemporaryDirectory()\n res_file = osp.join(tmp_folder.name, 'result_keypoints.json')\n\n kpts = []\n for result in results:\n preds = result['preds']\n boxes = result['boxes']\n image_paths = result['image_paths']\n bbox_ids = result['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n kpts.append({\n 'keypoints': preds[i].tolist(),\n 'center': boxes[i][0:2].tolist(),\n 'scale': boxes[i][2:4].tolist(),\n 'area': float(boxes[i][4]),\n 'score': float(boxes[i][5]),\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file, metrics)\n name_value = OrderedDict(info_str)\n\n if tmp_folder is not None:\n tmp_folder.cleanup()\n\n return name_value\n\n def _report_metric(self, res_file, metrics, pck_thr=0.3):\n \"\"\"Keypoint evaluation.\n\n Args:\n res_file (str): Json file stored prediction results.\n metrics (str | list[str]): Metric to be performed.\n Options: 'PCK', 'NME'.\n pck_thr (float): PCK threshold, default: 0.3.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n info_str = []\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n assert len(preds) == len(self.db)\n\n outputs = []\n gts = []\n masks = []\n\n for pred, item in zip(preds, self.db):\n outputs.append(np.array(pred['keypoints'])[:, :-1])\n gts.append(np.array(item['joints_3d'])[:, :-1])\n masks.append((np.array(item['joints_3d_visible'])[:, 0]) > 0)\n\n outputs = np.array(outputs)\n gts = np.array(gts)\n masks = np.array(masks)\n\n normalize_factor = self._get_normalize_factor(gts)\n\n if 'PCK' in metrics:\n _, pck, _ = keypoint_pck_accuracy(outputs, gts, masks, pck_thr,\n normalize_factor)\n info_str.append(('PCK', pck))\n\n if 'NME' in metrics:\n info_str.append(\n ('NME', keypoint_nme(outputs, gts, masks, normalize_factor)))\n\n return info_str\n\n @staticmethod\n def _write_keypoint_results(keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n @staticmethod\n def _sort_and_unique_bboxes(kpts, key='bbox_id'):\n \"\"\"sort kpts and remove the repeated ones.\"\"\"\n kpts = sorted(kpts, key=lambda x: x[key])\n num = len(kpts)\n for i in range(num - 1, 0, -1):\n if kpts[i][key] == kpts[i - 1][key]:\n del kpts[i]\n\n return kpts\n\n @staticmethod\n def _get_normalize_factor(gts):\n \"\"\"Get inter-ocular distance as the normalize factor, measured as the\n Euclidean distance between the outer corners of the eyes.\n\n Args:\n gts (np.ndarray[N, K, 2]): Groundtruth keypoint location.\n\n Return:\n np.ndarray[N, 2]: normalized factor\n \"\"\"\n\n interocular = np.linalg.norm(\n gts[:, 0, :] - gts[:, 1, :], axis=1, keepdims=True)\n return np.tile(interocular, [1, 2])\n" ]
[ [ "numpy.tile", "numpy.zeros", "numpy.array", "numpy.linalg.norm", "numpy.minimum" ] ]
guilhermealvess/emotions
[ "a65df8f44a9ff4c25421e4b5bf8dc5d918dbd38c" ]
[ "core/gabor.py" ]
[ "\n\nimport numpy as np\nfrom skimage.filters import gabor_kernel\nimport cv2\n\n\nclass KernelParams:\n def __init__(self, wavelength, orientation):\n self.wavelength = wavelength\n self.orientation = orientation\n\n def __hash__(self):\n return hash((self.wavelength, self.orientation))\n\n def __eq__(self, other):\n return (self.wavelength, self.orientation) == \\\n (other.wavelength, other.orientation)\n\n def __ne__(self, other):\n return not(self == other)\n\n\nclass GaborBank:\n def __init__(self, w = [4, 7, 10, 13],\n o = [i for i in np.arange(0, np.pi, np.pi / 8)]):\n self._wavelengths = w\n self._orientations = o\n self._kernels = {}\n for wavelength in self._wavelengths:\n for orientation in self._orientations:\n frequency = 1 / wavelength\n kernel = gabor_kernel(frequency, orientation)\n par = KernelParams(wavelength, orientation)\n self._kernels[par] = kernel\n\n def filter(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n responses = []\n for wavelength in self._wavelengths:\n for orientation in self._orientations:\n frequency = 1 / wavelength\n par = KernelParams(wavelength, orientation)\n kernel = self._kernels[par]\n real = cv2.filter2D(image, cv2.CV_32F, kernel.real)\n imag = cv2.filter2D(image, cv2.CV_32F, kernel.imag)\n mag = cv2.magnitude(real, imag)\n cv2.normalize(mag, mag, -1, 1, cv2.NORM_MINMAX)\n responses.append(mag)\n return np.array(responses)" ]
[ [ "numpy.array", "numpy.arange" ] ]
mihir135/deep_learning_nanodegree
[ "018bf9228d72a8c0580eb82070223cf5225ffd4a" ]
[ "3. Generate TV scripts/helper.py" ]
[ "import os\nimport pickle\nimport torch\n\n\nSPECIAL_WORDS = {'PADDING': '<PAD>'}\n\n\ndef load_data(path):\n \"\"\"\n Load Dataset from File\n \"\"\"\n input_file = os.path.join(path)\n with open(input_file, \"r\") as f:\n data = f.read()\n\n return data\n\n\ndef preprocess_and_save_data(dataset_path, token_lookup, create_lookup_tables):\n \"\"\"\n Preprocess Text Data\n \"\"\"\n text = load_data(dataset_path)\n \n # Ignore notice, since we don't use it for analysing the data\n text = text[81:]\n\n token_dict = token_lookup()\n for key, token in token_dict.items():\n text = text.replace(key, ' {} '.format(token))\n\n text = text.lower()\n text = text.split()\n\n vocab_to_int, int_to_vocab = create_lookup_tables(text + list(SPECIAL_WORDS.values()))\n int_text = [vocab_to_int[word] for word in text]\n pickle.dump((int_text, vocab_to_int, int_to_vocab, token_dict), open('preprocess.p', 'wb'))\n\n\ndef load_preprocess():\n \"\"\"\n Load the Preprocessed Training data and return them in batches of <batch_size> or less\n \"\"\"\n return pickle.load(open('preprocess.p', mode='rb'))\n\n\ndef save_model(filename, decoder):\n save_filename = os.path.splitext(os.path.basename(filename))[0] + '.pt'\n torch.save(decoder, save_filename)\n\n\ndef load_model(filename):\n save_filename = os.path.splitext(os.path.basename(filename))[0] + '.pt'\n print(save_filename)\n return torch.load(save_filename)\n" ]
[ [ "torch.save", "torch.load" ] ]
HumanCompatibleAI/malmo
[ "147dad058c00574e57205833159decc91c8adfd1" ]
[ "MalmoEnv/malmoenv/core.py" ]
[ "# ------------------------------------------------------------------------------------------------\n# Copyright (c) 2018 Microsoft Corporation\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute,\n# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all copies or\n# substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT\n# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# ------------------------------------------------------------------------------------------------\n\nfrom lxml import etree\nimport struct\nimport socket\nimport time\nimport random\nimport numpy as np\nfrom malmoenv import comms\nfrom malmoenv.commands import CommandParser\nimport uuid\nimport gym.spaces\nfrom malmoenv.comms import retry\nfrom malmoenv.version import malmo_version\n\n\nclass StringActionSpace(gym.spaces.Discrete):\n \"\"\"Malmo actions as their strings.\"\"\"\n def __init__(self):\n gym.spaces.Discrete.__init__(self, 1)\n\n def __getitem__(self, action):\n return action\n\n\nclass ActionSpace(gym.spaces.Discrete):\n \"\"\"Malmo actions as gym action space\"\"\"\n def __init__(self, actions):\n self.actions = actions\n gym.spaces.Discrete.__init__(self, len(self.actions))\n\n def sample(self):\n return random.randint(1, len(self.actions)) - 1\n\n def __getitem__(self, action):\n return self.actions[action]\n\n def __len__(self):\n return len(self.actions)\n\n\nclass VisualObservationSpace(gym.spaces.Box):\n \"\"\"Space for visual observations: width x height x depth as a flat array.\n Where depth is 3 or 4 if encoding scene depth.\n \"\"\"\n def __init__(self, width, height, depth):\n gym.spaces.Box.__init__(self,\n low=np.iinfo(np.int8).min, high=np.iinfo(np.int8).max,\n shape=(height, width, depth), dtype=np.int8)\n\n\nclass EnvException(Exception):\n def __init__(self, message):\n super(EnvException, self).__init__(message)\n\n\nclass MissionInitException(Exception):\n def __init__(self, message):\n super(MissionInitException, self).__init__(message)\n\n\nMAX_WAIT = 60 * 3\n\n\nclass Env:\n \"\"\"Malmo \"Env\" open ai gym compatible environment API\"\"\"\n def __init__(self):\n self.action_space = None\n self.observation_space = None\n self.xml = None\n self.integratedServerPort = 0\n self.role = 0\n self.agent_count = 0\n self.resets = 0\n self.ns = '{http://ProjectMalmo.microsoft.com}'\n self.client_socket = None\n self.server = 'localhost' # The mission server\n self.port = 9000 # The mission server port\n self.server2 = self.server # optional server for agent (role <> 0)\n self.port2 = self.port + self.role # optional server port for agent\n self.resync_period = 0\n self.turn_key = \"\"\n self.exp_uid = \"\"\n self.done = True\n self.synchronous = False\n 
self.step_options = None\n self.width = 0\n\n \n self.height = 0\n self.depth = 0\n\n def init(self, xml, port, server=None,\n server2=None, port2=None,\n role=0, exp_uid=None, episode=0,\n action_filter=None, resync=0, step_options=0, action_space=None, synchronous=False):\n \"\"\"\"Initialize a Malmo environment.\n xml - the mission xml.\n port - the MalmoEnv service's port.\n server - the MalmoEnv service address. Default is localhost.\n server2 - the MalmoEnv service address for given role if not 0.\n port2 - the MalmoEnv service port for given role if not 0.\n role - the agent role (0..N-1) for missions with N agents. Defaults to 0.\n exp_uid - the experiment's unique identifier. Generated if not given.\n episode - the \"reset\" start count for experiment re-starts. Defaults to 0.\n action_filter - an optional list of valid actions to filter by. Defaults to simple commands.\n step_options - encodes withTurnKey and withInfo in step messages. Defaults to info included,\n turn if required.\n \"\"\"\n self.synchronous = synchronous\n\n if action_filter is None:\n action_filter = {\"move\", \"turn\", \"use\", \"attack\"}\n\n if not xml.startswith('<Mission'):\n i = xml.index(\"<Mission\")\n if i == -1:\n raise EnvException(\"Mission xml must contain <Mission> tag.\")\n xml = xml[i:]\n\n self.xml = etree.fromstring(xml)\n self.role = role\n if exp_uid is None:\n self.exp_uid = str(uuid.uuid4())\n else:\n self.exp_uid = exp_uid\n\n command_parser = CommandParser(action_filter)\n commands = command_parser.get_commands_from_xml(self.xml, self.role)\n actions = command_parser.get_actions(commands)\n # print(\"role \" + str(self.role) + \" actions \" + str(actions)\n\n if action_space:\n self.action_space = action_space\n else:\n self.action_space = ActionSpace(actions)\n\n self.port = port\n if server is not None:\n self.server = server\n if server2 is not None:\n self.server2 = server2\n else:\n self.server2 = self.server\n if port2 is not None:\n self.port2 = port2\n else:\n self.port2 = self.port + self.role\n\n self.agent_count = len(self.xml.findall(self.ns + 'AgentSection'))\n turn_based = self.xml.find('.//' + self.ns + 'TurnBasedCommands') is not None\n if turn_based:\n self.turn_key = 'AKWozEre'\n else:\n self.turn_key = \"\"\n if step_options is None:\n self.step_options = 0 if not turn_based else 2\n else:\n self.step_options = step_options\n self.done = True\n # print(\"agent count \" + str(self.agent_count) + \" turn based \" + turn_based)\n self.resync_period = resync\n self.resets = episode\n\n e = etree.fromstring(\"\"\"<MissionInit xmlns=\"http://ProjectMalmo.microsoft.com\" \n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" \n SchemaVersion=\"\" PlatformVersion=\"\"\" + '\\\"' + malmo_version + '\\\"' +\n \"\"\">\n <ExperimentUID></ExperimentUID>\n <ClientRole>0</ClientRole>\n <ClientAgentConnection>\n <ClientIPAddress>127.0.0.1</ClientIPAddress>\n <ClientMissionControlPort>0</ClientMissionControlPort>\n <ClientCommandsPort>0</ClientCommandsPort>\n <AgentIPAddress>127.0.0.1</AgentIPAddress>\n <AgentMissionControlPort>0</AgentMissionControlPort>\n <AgentVideoPort>0</AgentVideoPort>\n <AgentDepthPort>0</AgentDepthPort>\n <AgentLuminancePort>0</AgentLuminancePort>\n <AgentObservationsPort>0</AgentObservationsPort>\n <AgentRewardsPort>0</AgentRewardsPort>\n <AgentColourMapPort>0</AgentColourMapPort>\n </ClientAgentConnection>\n </MissionInit>\"\"\")\n e.insert(0, self.xml)\n self.xml = e\n self.xml.find(self.ns + 'ClientRole').text = str(self.role)\n self.xml.find(self.ns 
+ 'ExperimentUID').text = self.exp_uid\n if self.role != 0 and self.agent_count > 1:\n e = etree.Element(self.ns + 'MinecraftServerConnection',\n attrib={'address': self.server,\n 'port': str(0)\n })\n self.xml.insert(2, e)\n\n video_producers = self.xml.findall('.//' + self.ns + 'VideoProducer')\n assert len(video_producers) == self.agent_count\n video_producer = video_producers[self.role]\n self.width = int(video_producer.find(self.ns + 'Width').text)\n self.height = int(video_producer.find(self.ns + 'Height').text)\n want_depth = video_producer.attrib[\"want_depth\"]\n self.depth = 4 if want_depth is not None and (want_depth == \"true\" or want_depth == \"1\") else 3\n # print(str(self.width) + \"x\" + str(self.height) + \"x\" + str(self.depth))\n self.observation_space = VisualObservationSpace(self.width, self.height, self.depth)\n # print(etree.tostring(self.xml))\n\n @staticmethod\n def _hello(sock):\n comms.send_message(sock, (\"<MalmoEnv\" + malmo_version + \"/>\").encode())\n\n def reset(self):\n \"\"\"gym api reset\"\"\"\n\n if self.resync_period > 0 and (self.resets + 1) % self.resync_period == 0:\n self.exit_resync()\n\n while not self.done:\n self.done = self._quit_episode()\n if not self.done:\n time.sleep(0.1)\n\n return self._start_up()\n\n @retry\n def _start_up(self):\n self.resets += 1\n if self.role != 0:\n self._find_server()\n if not self.client_socket:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n # print(\"connect \" + self.server2 + \":\" + str(self.port2))\n sock.connect((self.server2, self.port2))\n self._hello(sock)\n self.client_socket = sock # Now retries will use connected socket.\n self._init_mission()\n self.done = False\n return self._peek_obs()\n\n def _peek_obs(self):\n obs = None\n start_time = time.time()\n while not self.done and (obs is None or len(obs) == 0):\n peek_message = \"<Peek/>\"\n comms.send_message(self.client_socket, peek_message.encode())\n obs = comms.recv_message(self.client_socket)\n info = comms.recv_message(self.client_socket).decode('utf-8')\n reply = comms.recv_message(self.client_socket)\n done, = struct.unpack('!b', reply)\n self.done = done == 1\n if obs is None or len(obs) == 0:\n if time.time() - start_time > MAX_WAIT:\n self.client_socket.close()\n self.client_socket = None\n raise MissionInitException('too long waiting for first observation')\n time.sleep(0.1)\n\n obs = np.frombuffer(obs, dtype=np.uint8)\n\n if obs is None or len(obs) == 0:\n obs = np.zeros((self.height, self.width, self.depth), dtype=np.int8)\n return obs, info\n\n def _quit_episode(self):\n comms.send_message(self.client_socket, \"<Quit/>\".encode())\n reply = comms.recv_message(self.client_socket)\n ok, = struct.unpack('!I', reply)\n return ok != 0\n\n def render(self):\n \"\"\"gym api render\"\"\"\n pass\n\n def seed(self):\n pass\n\n def step(self, action):\n \"\"\"gym api step\"\"\"\n obs = None\n reward = None\n info = None\n turn = True\n withturnkey = self.step_options < 2\n print(withturnkey)\n withinfo = self.step_options == 0 or self.step_options == 2\n\n while not self.done and \\\n ((obs is None or len(obs) == 0) or\n (withinfo and info is None) or turn):\n step_message = \"<Step\" + str(self.step_options) + \">\" + \\\n self.action_space[action] + \\\n \"</Step\" + str(self.step_options) + \" >\"\n t0 = time.time()\n comms.send_message(self.client_socket, step_message.encode())\n print(\"send action {}\".format(time.time() - t0)); t0 = time.time()\n if withturnkey:\n 
comms.send_message(self.client_socket, self.turn_key.encode())\n obs = comms.recv_message(self.client_socket)\n\n reply = comms.recv_message(self.client_socket)\n reward, done, sent = struct.unpack('!dbb', reply)\n print(\"recieve reward {}\".format(time.time() - t0)); t0 = time.time()\n self.done = done == 1\n if withinfo:\n info = comms.recv_message(self.client_socket).decode('utf-8')\n\n turn_key = comms.recv_message(self.client_socket).decode('utf-8') if withturnkey else \"\"\n # print(\"[\" + str(self.role) + \"] TK \" + turn_key + \" self.TK \" + str(self.turn_key))\n if turn_key != \"\":\n if sent != 0:\n turn = False\n # Done turns if: turn = self.turn_key == turn_key\n self.turn_key = turn_key\n else:\n turn = sent == 0\n\n # if (obs is None or len(obs) == 0) or turn:\n # time.sleep(0.1)\n print(\"turnkeyprocessor {}\".format(time.time() - t0)); t0 = time.time()\n obs = np.frombuffer(obs, dtype=np.uint8)\n print(\"creating obs from buffer {}\".format(time.time() - t0)); t0 = time.time()\n return obs, reward, self.done, info\n\n def close(self):\n \"\"\"gym api close\"\"\"\n try:\n # Purge last token from head node with <Close> message.\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server, self.port))\n self._hello(sock)\n\n comms.send_message(sock, (\"<Close>\" + self._get_token() + \"</Close>\").encode())\n reply = comms.recv_message(sock)\n ok, = struct.unpack('!I', reply)\n assert ok\n sock.close()\n except Exception as e:\n self._log_error(e)\n if self.client_socket:\n self.client_socket.close()\n self.client_socket = None\n\n def reinit(self):\n \"\"\"Use carefully to reset the episode count to 0.\"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server, self.port))\n self._hello(sock)\n\n comms.send_message(sock, (\"<Init>\" + self._get_token() + \"</Init>\").encode())\n reply = comms.recv_message(sock)\n sock.close()\n ok, = struct.unpack('!I', reply)\n return ok != 0\n\n def status(self, head):\n \"\"\"Get status from server.\n head - Ping the the head node if True.\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if head:\n sock.connect((self.server, self.port))\n else:\n sock.connect((self.server2, self.port2))\n self._hello(sock)\n\n comms.send_message(sock, \"<Status/>\".encode())\n status = comms.recv_message(sock).decode('utf-8')\n sock.close()\n return status\n\n def exit(self):\n \"\"\"Use carefully to cause the Minecraft service to exit (and hopefully restart).\n Likely to throw communication errors so wrap in exception handler.\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server2, self.port2))\n self._hello(sock)\n\n comms.send_message(sock, (\"<Exit>\" + self._get_token() + \"</Exit>\").encode())\n reply = comms.recv_message(sock)\n sock.close()\n ok, = struct.unpack('!I', reply)\n return ok != 0\n\n def resync(self):\n \"\"\"make sure we can ping the head and assigned node.\n Possibly after an env.exit()\"\"\"\n success = 0\n for head in [True, False]:\n for _ in range(30):\n try:\n self.status(head)\n success += 1\n break\n except Exception as e:\n self._log_error(e)\n time.sleep(10)\n\n if success != 2:\n raise EnvException(\"Failed to contact service\" + (\" head\" if success == 0 else \"\"))\n\n def exit_resync(self):\n \"\"\"Exit the current Minecraft and wait for new one to replace it.\"\"\"\n print(\"********** exit & resync **********\")\n try:\n if self.client_socket:\n self.client_socket.close()\n self.client_socket = 
None\n try:\n self.exit()\n except Exception as e:\n self._log_error(e)\n print(\"Pause for exit(s) ...\")\n time.sleep(60)\n except (socket.error, ConnectionError):\n pass\n self.resync()\n\n def _log_error(self, exn):\n pass # Keeping pylint happy\n\n def _find_server(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server, self.port))\n self._hello(sock)\n\n start_time = time.time()\n port = 0\n while port == 0:\n comms.send_message(sock, (\"<Find>\" + self._get_token() + \"</Find>\").encode())\n reply = comms.recv_message(sock)\n port, = struct.unpack('!I', reply)\n if port == 0:\n if time.time() - start_time > MAX_WAIT:\n if self.client_socket:\n self.client_socket.close()\n self.client_socket = None\n raise MissionInitException('too long finding mission to join')\n time.sleep(1)\n sock.close()\n # print(\"Found mission integrated server port \" + str(port))\n self.integratedServerPort = port\n e = self.xml.find(self.ns + 'MinecraftServerConnection')\n if e is not None:\n e.attrib['port'] = str(self.integratedServerPort)\n\n def _init_mission(self):\n ok = 0\n while ok != 1:\n xml = etree.tostring(self.xml)\n # syncticking always ;))))))))))))))))))))))))))))))))))))))))))))))))))))\n token = (self._get_token() + \":\" + str(self.agent_count) + \":\" + str(self.synchronous).lower()).encode()\n # print(xml.decode())\n comms.send_message(self.client_socket, xml)\n comms.send_message(self.client_socket, token)\n\n reply = comms.recv_message(self.client_socket)\n ok, = struct.unpack('!I', reply)\n self.turn_key = comms.recv_message(self.client_socket).decode('utf-8')\n if ok != 1:\n time.sleep(1)\n\n def _get_token(self):\n return self.exp_uid + \":\" + str(self.role) + \":\" + str(self.resets)\n\n\ndef make():\n return Env()\n" ]
[ [ "numpy.zeros", "numpy.iinfo", "numpy.frombuffer" ] ]
ThompsonJ314/openmc
[ "173c85c2cbb1784e49edf51d2d379a0f981de4e3" ]
[ "openmc/filter.py" ]
[ "from abc import ABCMeta\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\nimport hashlib\nfrom itertools import product\nfrom numbers import Real, Integral\nfrom xml.etree import ElementTree as ET\n\nimport numpy as np\nimport pandas as pd\n\nimport openmc\nimport openmc.checkvalue as cv\nfrom .cell import Cell\nfrom .material import Material\nfrom .mixin import IDManagerMixin\nfrom .surface import Surface\nfrom .universe import Universe\n\n\n_FILTER_TYPES = (\n 'universe', 'material', 'cell', 'cellborn', 'surface', 'mesh', 'energy',\n 'energyout', 'mu', 'polar', 'azimuthal', 'distribcell', 'delayedgroup',\n 'energyfunction', 'cellfrom', 'legendre', 'spatiallegendre',\n 'sphericalharmonics', 'zernike', 'zernikeradial', 'particle', 'cellinstance'\n)\n\n_CURRENT_NAMES = (\n 'x-min out', 'x-min in', 'x-max out', 'x-max in',\n 'y-min out', 'y-min in', 'y-max out', 'y-max in',\n 'z-min out', 'z-min in', 'z-max out', 'z-max in'\n)\n\n_PARTICLES = {'neutron', 'photon', 'electron', 'positron'}\n\n\nclass FilterMeta(ABCMeta):\n \"\"\"Metaclass for filters that ensures class names are appropriate.\"\"\"\n\n def __new__(cls, name, bases, namespace, **kwargs):\n # Check the class name.\n required_suffix = 'Filter'\n if not name.endswith(required_suffix):\n raise ValueError(\"All filter class names must end with 'Filter'\")\n\n # Create a 'short_name' attribute that removes the 'Filter' suffix.\n namespace['short_name'] = name[:-len(required_suffix)]\n\n # Subclass methods can sort of inherit the docstring of parent class\n # methods. If a function is defined without a docstring, most (all?)\n # Python interpreters will search through the parent classes to see if\n # there is a docstring for a function with the same name, and they will\n # use that docstring. However, Sphinx does not have that functionality.\n # This chunk of code handles this docstring inheritance manually so that\n # the autodocumentation will pick it up.\n if name != required_suffix:\n # Look for newly-defined functions that were also in Filter.\n for func_name in namespace:\n if func_name in Filter.__dict__:\n # Inherit the docstring from Filter if not defined.\n if isinstance(namespace[func_name],\n (classmethod, staticmethod)):\n new_doc = namespace[func_name].__func__.__doc__\n old_doc = Filter.__dict__[func_name].__func__.__doc__\n if new_doc is None and old_doc is not None:\n namespace[func_name].__func__.__doc__ = old_doc\n else:\n new_doc = namespace[func_name].__doc__\n old_doc = Filter.__dict__[func_name].__doc__\n if new_doc is None and old_doc is not None:\n namespace[func_name].__doc__ = old_doc\n\n # Make the class.\n return super().__new__(cls, name, bases, namespace, **kwargs)\n\n\ndef _repeat_and_tile(bins, repeat_factor, data_size):\n filter_bins = np.repeat(bins, repeat_factor)\n tile_factor = data_size // len(filter_bins)\n return np.tile(filter_bins, tile_factor)\n\n\nclass Filter(IDManagerMixin, metaclass=FilterMeta):\n \"\"\"Tally modifier that describes phase-space and other characteristics.\n\n Parameters\n ----------\n bins : Integral or Iterable of Integral or Iterable of Real\n The bins for the filter. This takes on different meaning for different\n filters. 
See the docstrings for sublcasses of this filter or the online\n documentation for more details.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Integral or Iterable of Integral or Iterable of Real\n The bins for the filter\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n\n next_id = 1\n used_ids = set()\n\n def __init__(self, bins, filter_id=None):\n self.bins = bins\n self.id = filter_id\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n elif len(self.bins) != len(other.bins):\n return False\n else:\n return np.allclose(self.bins, other.bins)\n\n def __gt__(self, other):\n if type(self) is not type(other):\n if self.short_name in _FILTER_TYPES and \\\n other.short_name in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.short_name) - \\\n _FILTER_TYPES.index(other.short_name)\n return delta > 0\n else:\n return False\n else:\n return max(self.bins) > max(other.bins)\n\n def __lt__(self, other):\n return not self > other\n\n def __hash__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tBins', self.bins)\n return hash(string)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tBins', self.bins)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @classmethod\n def _recursive_subclasses(cls):\n \"\"\"Return all subclasses and their subclasses, etc.\"\"\"\n all_subclasses = []\n\n for subclass in cls.__subclasses__():\n all_subclasses.append(subclass)\n all_subclasses.extend(subclass._recursive_subclasses())\n\n return all_subclasses\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n \"\"\"Construct a new Filter instance from HDF5 data.\n\n Parameters\n ----------\n group : h5py.Group\n HDF5 group to read from\n\n Keyword arguments\n -----------------\n meshes : dict\n Dictionary mapping integer IDs to openmc.MeshBase objects. Only\n used for openmc.MeshFilter objects.\n\n \"\"\"\n\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n # If the HDF5 'type' variable matches this class's short_name, then\n # there is no overriden from_hdf5 method. Pass the bins to __init__.\n if group['type'][()].decode() == cls.short_name.lower():\n out = cls(group['bins'][()], filter_id=filter_id)\n out._num_bins = group['n_bins'][()]\n return out\n\n # Search through all subclasses and find the one matching the HDF5\n # 'type'. 
Call that class's from_hdf5 method.\n for subclass in cls._recursive_subclasses():\n if group['type'][()].decode() == subclass.short_name.lower():\n return subclass.from_hdf5(group, **kwargs)\n\n raise ValueError(\"Unrecognized Filter class: '\"\n + group['type'][()].decode() + \"'\")\n\n @property\n def bins(self):\n return self._bins\n\n @bins.setter\n def bins(self, bins):\n self.check_bins(bins)\n self._bins = bins\n\n @property\n def num_bins(self):\n return len(self.bins)\n\n def check_bins(self, bins):\n \"\"\"Make sure given bins are valid for this filter.\n\n Raises\n ------\n TypeError\n ValueError\n\n \"\"\"\n\n pass\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = ET.Element('filter')\n element.set('id', str(self.id))\n element.set('type', self.short_name.lower())\n\n subelement = ET.SubElement(element, 'bins')\n subelement.text = ' '.join(str(b) for b in self.bins)\n\n return element\n\n def can_merge(self, other):\n \"\"\"Determine if filter can be merged with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to compare with\n\n Returns\n -------\n bool\n Whether the filter can be merged\n\n \"\"\"\n return type(self) is type(other)\n\n def merge(self, other):\n \"\"\"Merge this filter with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to merge with\n\n Returns\n -------\n merged_filter : openmc.Filter\n Filter resulting from the merge\n\n \"\"\"\n\n if not self.can_merge(other):\n msg = 'Unable to merge \"{0}\" with \"{1}\" '.format(\n type(self), type(other))\n raise ValueError(msg)\n\n # Merge unique filter bins\n merged_bins = np.concatenate((self.bins, other.bins))\n merged_bins = np.unique(merged_bins, axis=0)\n\n # Create a new filter with these bins and a new auto-generated ID\n return type(self)(merged_bins)\n\n def is_subset(self, other):\n \"\"\"Determine if another filter is a subset of this filter.\n\n If all of the bins in the other filter are included as bins in this\n filter, then it is a subset of this filter.\n\n Parameters\n ----------\n other : openmc.Filter\n The filter to query as a subset of this filter\n\n Returns\n -------\n bool\n Whether or not the other filter is a subset of this filter\n\n \"\"\"\n\n if type(self) is not type(other):\n return False\n\n for b in other.bins:\n if b not in self.bins:\n return False\n\n return True\n\n def get_bin_index(self, filter_bin):\n \"\"\"Returns the index in the Filter for some bin.\n\n Parameters\n ----------\n filter_bin : int or tuple\n The bin is the integer ID for 'material', 'surface', 'cell',\n 'cellborn', and 'universe' Filters. The bin is an integer for the\n cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of\n floats for 'energy' and 'energyout' filters corresponding to the\n energy boundaries of the bin of interest. 
The bin is an (x,y,z)\n 3-tuple for 'mesh' filters corresponding to the mesh cell of\n interest.\n\n Returns\n -------\n filter_index : int\n The index in the Tally data array for this filter bin.\n\n \"\"\"\n\n if filter_bin not in self.bins:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n\n if isinstance(self.bins, np.ndarray):\n return np.where(self.bins == filter_bin)[0][0]\n else:\n return self.bins.index(filter_bin)\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Keyword arguments\n -----------------\n paths : bool\n Only used for DistribcellFilter. If True (default), expand\n distribcell indices into multi-index columns describing the path\n to that distribcell through the CSG tree. NOTE: This option assumes\n that all distribcell paths are of the same length and do not have\n the same universes and cells but different lattice cell indices.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with columns of strings that characterize the\n filter's bins. The number of rows in the DataFrame is the same as\n the total number of bins in the corresponding tally, with the filter\n bin appropriately tiled to map to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n filter_bins = np.repeat(self.bins, stride)\n tile_factor = data_size // len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n df = pd.concat([df, pd.DataFrame(\n {self.short_name.lower(): filter_bins})])\n\n return df\n\n\nclass WithIDFilter(Filter):\n \"\"\"Abstract parent for filters of types with IDs (Cell, Material, etc.).\"\"\"\n def __init__(self, bins, filter_id=None):\n bins = np.atleast_1d(bins)\n\n # Make sure bins are either integers or appropriate objects\n cv.check_iterable_type('filter bins', bins,\n (Integral, self.expected_type))\n\n # Extract ID values\n bins = np.array([b if isinstance(b, Integral) else b.id\n for b in bins])\n super().__init__(bins, filter_id)\n\n def check_bins(self, bins):\n # Check the bin values.\n for edge in bins:\n cv.check_greater_than('filter bin', edge, 0, equality=True)\n\n\nclass UniverseFilter(WithIDFilter):\n \"\"\"Bins tally event locations based on the Universe they occured in.\n\n Parameters\n ----------\n bins : openmc.Universe, int, or iterable thereof\n The Universes to tally. Either openmc.Universe objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Universe IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Universe\n\n\nclass MaterialFilter(WithIDFilter):\n \"\"\"Bins tally event locations based on the Material they occured in.\n\n Parameters\n ----------\n bins : openmc.Material, Integral, or iterable thereof\n The Materials to tally. 
Either openmc.Material objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Material IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Material\n\n\nclass CellFilter(WithIDFilter):\n \"\"\"Bins tally event locations based on the Cell they occured in.\n\n Parameters\n ----------\n bins : openmc.Cell, int, or iterable thereof\n The cells to tally. Either openmc.Cell objects or their ID numbers can\n be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Cell IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Cell\n\n\nclass CellFromFilter(WithIDFilter):\n \"\"\"Bins tally on which Cell the neutron came from.\n\n Parameters\n ----------\n bins : openmc.Cell, Integral, or iterable thereof\n The Cell(s) to tally. Either openmc.Cell objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Integral or Iterable of Integral\n openmc.Cell IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Cell\n\n\nclass CellbornFilter(WithIDFilter):\n \"\"\"Bins tally events based on which Cell the neutron was born in.\n\n Parameters\n ----------\n bins : openmc.Cell, Integral, or iterable thereof\n The birth Cells to tally. Either openmc.Cell objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Cell IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Cell\n\n\nclass CellInstanceFilter(Filter):\n \"\"\"Bins tally events based on which cell instance a particle is in.\n\n This filter is similar to :class:`DistribcellFilter` but allows one to\n select particular instances to be tallied (instead of obtaining *all*\n instances by default) and allows instances from different cells to be\n specified in a single filter.\n\n .. versionadded:: 0.12\n\n Parameters\n ----------\n bins : iterable of 2-tuples or numpy.ndarray\n The cell instances to tally, given as 2-tuples. 
For the first value in\n the tuple, either openmc.Cell objects or their integral ID numbers can\n be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : numpy.ndarray\n 2D numpy array of cell IDs and instances\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n See Also\n --------\n DistribcellFilter\n\n \"\"\"\n def __init__(self, bins, filter_id=None):\n self.bins = bins\n self.id = filter_id\n\n @Filter.bins.setter\n def bins(self, bins):\n pairs = np.empty((len(bins), 2), dtype=int)\n for i, (cell, instance) in enumerate(bins):\n cv.check_type('cell', cell, (openmc.Cell, Integral))\n cv.check_type('instance', instance, Integral)\n pairs[i, 0] = cell if isinstance(cell, Integral) else cell.id\n pairs[i, 1] = instance\n self._bins = pairs\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with a multi-index column for the cell instance.\n The number of rows in the DataFrame is the same as the total number\n of bins in the corresponding tally, with the filter bin appropriately\n tiled to map to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Repeat and tile bins as necessary to account for other filters.\n bins = np.repeat(self.bins, stride, axis=0)\n tile_factor = data_size // len(bins)\n bins = np.tile(bins, (tile_factor, 1))\n\n columns = pd.MultiIndex.from_product([[self.short_name.lower()],\n ['cell', 'instance']])\n return pd.DataFrame(bins, columns=columns)\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = ET.Element('filter')\n element.set('id', str(self.id))\n element.set('type', self.short_name.lower())\n\n subelement = ET.SubElement(element, 'bins')\n subelement.text = ' '.join(str(i) for i in self.bins.ravel())\n return element\n\n\nclass SurfaceFilter(WithIDFilter):\n \"\"\"Filters particles by surface crossing\n\n Parameters\n ----------\n bins : openmc.Surface, int, or iterable of Integral\n The surfaces to tally over. Either openmc.Surface objects or their ID\n numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n The surfaces to tally over. 
Either openmc.Surface objects or their ID\n numbers can be used.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Surface\n\n\nclass ParticleFilter(Filter):\n \"\"\"Bins tally events based on the Particle type.\n\n Parameters\n ----------\n bins : str, or iterable of str\n The particles to tally represented as strings ('neutron', 'photon',\n 'electron', 'positron').\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n The Particles to tally\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n elif len(self.bins) != len(other.bins):\n return False\n else:\n return np.all(self.bins == other.bins)\n\n __hash__ = Filter.__hash__\n\n @Filter.bins.setter\n def bins(self, bins):\n bins = np.atleast_1d(bins)\n cv.check_iterable_type('filter bins', bins, str)\n for edge in bins:\n cv.check_value('filter bin', edge, _PARTICLES)\n self._bins = bins\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n particles = [b.decode() for b in group['bins'][()]]\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n return cls(particles, filter_id=filter_id)\n\n\nclass MeshFilter(Filter):\n \"\"\"Bins tally event locations onto a regular, rectangular mesh.\n\n Parameters\n ----------\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n id : int\n Unique identifier for the filter\n bins : list of tuple\n A list of mesh indices for each filter bin, e.g. 
[(1, 1, 1), (2, 1, 1),\n ...]\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n\n def __init__(self, mesh, filter_id=None):\n self.mesh = mesh\n self.id = filter_id\n\n def __hash__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tMesh ID', self.mesh.id)\n return hash(string)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tMesh ID', self.mesh.id)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n if 'meshes' not in kwargs:\n raise ValueError(cls.__name__ + \" requires a 'meshes' keyword \"\n \"argument.\")\n\n mesh_id = group['bins'][()]\n mesh_obj = kwargs['meshes'][mesh_id]\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n out = cls(mesh_obj, filter_id=filter_id)\n\n return out\n\n @property\n def mesh(self):\n return self._mesh\n\n @mesh.setter\n def mesh(self, mesh):\n cv.check_type('filter mesh', mesh, openmc.MeshBase)\n self._mesh = mesh\n if isinstance(mesh, openmc.UnstructuredMesh):\n self.bins = list(range(len(mesh.volumes)))\n else:\n self.bins = list(mesh.indices)\n\n def can_merge(self, other):\n # Mesh filters cannot have more than one bin\n return False\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with three columns describing the x,y,z mesh\n cell indices corresponding to each filter bin. 
The number of rows\n in the DataFrame is the same as the total number of bins in the\n corresponding tally, with the filter bin appropriately tiled to map\n to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n # Initialize dictionary to build Pandas Multi-index column\n filter_dict = {}\n\n # Append mesh ID as outermost index of multi-index\n mesh_key = 'mesh {}'.format(self.mesh.id)\n\n # Find mesh dimensions - use 3D indices for simplicity\n n_dim = len(self.mesh.dimension)\n if n_dim == 3:\n nx, ny, nz = self.mesh.dimension\n elif n_dim == 2:\n nx, ny = self.mesh.dimension\n nz = 1\n else:\n nx = self.mesh.dimension\n ny = nz = 1\n\n # Generate multi-index sub-column for x-axis\n filter_dict[mesh_key, 'x'] = _repeat_and_tile(\n np.arange(1, nx + 1), stride, data_size)\n\n # Generate multi-index sub-column for y-axis\n filter_dict[mesh_key, 'y'] = _repeat_and_tile(\n np.arange(1, ny + 1), nx * stride, data_size)\n\n # Generate multi-index sub-column for z-axis\n filter_dict[mesh_key, 'z'] = _repeat_and_tile(\n np.arange(1, nz + 1), nx * ny * stride, data_size)\n\n # Initialize a Pandas DataFrame from the mesh dictionary\n df = pd.concat([df, pd.DataFrame(filter_dict)])\n\n return df\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = super().to_xml_element()\n element[0].text = str(self.mesh.id)\n return element\n\n\nclass MeshSurfaceFilter(MeshFilter):\n \"\"\"Filter events by surface crossings on a regular, rectangular mesh.\n\n Parameters\n ----------\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Integral\n The mesh ID\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n id : int\n Unique identifier for the filter\n bins : list of tuple\n\n A list of mesh indices / surfaces for each filter bin, e.g. [(1, 1,\n 'x-min out'), (1, 1, 'x-min in'), ...]\n\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n\n @MeshFilter.mesh.setter\n def mesh(self, mesh):\n cv.check_type('filter mesh', mesh, openmc.MeshBase)\n self._mesh = mesh\n\n # Take the product of mesh indices and current names\n n_dim = mesh.n_dimension\n self.bins = [mesh_tuple + (surf,) for mesh_tuple, surf in\n product(mesh.indices, _CURRENT_NAMES[:4*n_dim])]\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with three columns describing the x,y,z mesh\n cell indices corresponding to each filter bin. 
The number of rows\n in the DataFrame is the same as the total number of bins in the\n corresponding tally, with the filter bin appropriately tiled to map\n to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n # Initialize dictionary to build Pandas Multi-index column\n filter_dict = {}\n\n # Append mesh ID as outermost index of multi-index\n mesh_key = 'mesh {}'.format(self.mesh.id)\n\n # Find mesh dimensions - use 3D indices for simplicity\n n_surfs = 4 * len(self.mesh.dimension)\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n elif len(self.mesh.dimension) == 2:\n nx, ny = self.mesh.dimension\n nz = 1\n else:\n nx = self.mesh.dimension\n ny = nz = 1\n\n # Generate multi-index sub-column for x-axis\n filter_dict[mesh_key, 'x'] = _repeat_and_tile(\n np.arange(1, nx + 1), n_surfs * stride, data_size)\n\n # Generate multi-index sub-column for y-axis\n if len(self.mesh.dimension) > 1:\n filter_dict[mesh_key, 'y'] = _repeat_and_tile(\n np.arange(1, ny + 1), n_surfs * nx * stride, data_size)\n\n # Generate multi-index sub-column for z-axis\n if len(self.mesh.dimension) > 2:\n filter_dict[mesh_key, 'z'] = _repeat_and_tile(\n np.arange(1, nz + 1), n_surfs * nx * ny * stride, data_size)\n\n # Generate multi-index sub-column for surface\n filter_dict[mesh_key, 'surf'] = _repeat_and_tile(\n _CURRENT_NAMES[:n_surfs], stride, data_size)\n\n # Initialize a Pandas DataFrame from the mesh dictionary\n return pd.concat([df, pd.DataFrame(filter_dict)])\n\n\nclass RealFilter(Filter):\n \"\"\"Tally modifier that describes phase-space and other characteristics\n\n Parameters\n ----------\n values : iterable of float\n A list of values for which each successive pair constitutes a range of\n values for a single bin\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n values for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of values indicating a\n filter bin range\n num_bins : int\n The number of filter bins\n\n \"\"\"\n def __init__(self, values, filter_id=None):\n self.values = np.asarray(values)\n self.bins = np.vstack((self.values[:-1], self.values[1:])).T\n self.id = filter_id\n\n def __gt__(self, other):\n if type(self) is type(other):\n # Compare largest/smallest bin edges in filters\n # This logic is used when merging tallies with real filters\n return self.values[0] >= other.values[-1]\n else:\n return super().__gt__(other)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tValues', self.values)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @Filter.bins.setter\n def bins(self, bins):\n Filter.bins.__set__(self, np.asarray(bins))\n\n def check_bins(self, bins):\n for v0, v1 in bins:\n # Values should be real\n cv.check_type('filter value', v0, Real)\n cv.check_type('filter value', v1, Real)\n\n # Make sure that each tuple has values that are increasing\n if v1 < v0:\n raise ValueError('Values {} and {} appear to be out of order'\n .format(v0, v1))\n\n for pair0, pair1 in zip(bins[:-1], bins[1:]):\n # Successive pairs should be ordered\n if pair1[1] < pair0[1]:\n raise ValueError('Values {} and {} appear to be out of order'\n .format(pair1[1], 
pair0[1]))\n\n def can_merge(self, other):\n if type(self) is not type(other):\n return False\n\n if self.bins[0, 0] == other.bins[-1][1]:\n # This low edge coincides with other's high edge\n return True\n elif self.bins[-1][1] == other.bins[0, 0]:\n # This high edge coincides with other's low edge\n return True\n else:\n return False\n\n def merge(self, other):\n if not self.can_merge(other):\n msg = 'Unable to merge \"{0}\" with \"{1}\" ' \\\n 'filters'.format(type(self), type(other))\n raise ValueError(msg)\n\n # Merge unique filter bins\n merged_values = np.concatenate((self.values, other.values))\n merged_values = np.unique(merged_values)\n\n # Create a new filter with these bins and a new auto-generated ID\n return type(self)(sorted(merged_values))\n\n def is_subset(self, other):\n \"\"\"Determine if another filter is a subset of this filter.\n\n If all of the bins in the other filter are included as bins in this\n filter, then it is a subset of this filter.\n\n Parameters\n ----------\n other : openmc.Filter\n The filter to query as a subset of this filter\n\n Returns\n -------\n bool\n Whether or not the other filter is a subset of this filter\n\n \"\"\"\n\n if type(self) is not type(other):\n return False\n elif self.num_bins != other.num_bins:\n return False\n else:\n return np.allclose(self.values, other.values)\n\n def get_bin_index(self, filter_bin):\n i = np.where(self.bins[:, 1] == filter_bin[1])[0]\n if len(i) == 0:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n else:\n return i[0]\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with one column of the lower energy bound and one\n column of upper energy bound for each filter bin. 
The number of\n rows in the DataFrame is the same as the total number of bins in the\n corresponding tally, with the filter bin appropriately tiled to map\n to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n # Extract the lower and upper energy bounds, then repeat and tile\n # them as necessary to account for other filters.\n lo_bins = np.repeat(self.bins[:, 0], stride)\n hi_bins = np.repeat(self.bins[:, 1], stride)\n tile_factor = data_size // len(lo_bins)\n lo_bins = np.tile(lo_bins, tile_factor)\n hi_bins = np.tile(hi_bins, tile_factor)\n\n # Add the new energy columns to the DataFrame.\n if hasattr(self, 'units'):\n units = ' [{}]'.format(self.units)\n else:\n units = ''\n\n df.loc[:, self.short_name.lower() + ' low' + units] = lo_bins\n df.loc[:, self.short_name.lower() + ' high' + units] = hi_bins\n\n return df\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = super().to_xml_element()\n element[0].text = ' '.join(str(x) for x in self.values)\n return element\n\n\nclass EnergyFilter(RealFilter):\n \"\"\"Bins tally events based on incident particle energy.\n\n Parameters\n ----------\n values : Iterable of Real\n A list of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of energies in [eV]\n for a single filter bin\n num_bins : int\n The number of filter bins\n\n \"\"\"\n units = 'eV'\n\n def get_bin_index(self, filter_bin):\n # Use lower energy bound to find index for RealFilters\n deltas = np.abs(self.bins[:, 1] - filter_bin[1]) / filter_bin[1]\n min_delta = np.min(deltas)\n if min_delta < 1E-3:\n return deltas.argmin()\n else:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for v0, v1 in bins:\n cv.check_greater_than('filter value', v0, 0., equality=True)\n cv.check_greater_than('filter value', v1, 0., equality=True)\n\n\nclass EnergyoutFilter(EnergyFilter):\n \"\"\"Bins tally events based on outgoing particle energy.\n\n Parameters\n ----------\n values : Iterable of Real\n A list of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of energies in [eV]\n for a single filter bin\n num_bins : int\n The number of filter bins\n\n \"\"\"\n\n\ndef _path_to_levels(path):\n \"\"\"Convert distribcell path to list of levels\n\n Parameters\n ----------\n path : str\n Distribcell path\n\n Returns\n -------\n list\n List of levels in path\n\n \"\"\"\n # Split path into universes/cells/lattices\n 
path_items = path.split('->')\n\n # Pair together universe and cell information from the same level\n idx = [i for i, item in enumerate(path_items) if item.startswith('u')]\n for i in reversed(idx):\n univ_id = int(path_items.pop(i)[1:])\n cell_id = int(path_items.pop(i)[1:])\n path_items.insert(i, ('universe', univ_id, cell_id))\n\n # Reformat lattice into tuple\n idx = [i for i, item in enumerate(path_items) if isinstance(item, str)]\n for i in idx:\n item = path_items.pop(i)[1:-1]\n lat_id, lat_xyz = item.split('(')\n lat_id = int(lat_id)\n lat_xyz = tuple(int(x) for x in lat_xyz.split(','))\n path_items.insert(i, ('lattice', lat_id, lat_xyz))\n\n return path_items\n\n\nclass DistribcellFilter(Filter):\n \"\"\"Bins tally event locations on instances of repeated cells.\n\n This filter provides a separate score for each unique instance of a repeated\n cell in a geometry. Note that only one cell can be specified in this filter.\n The related :class:`CellInstanceFilter` allows one to obtain scores for\n particular cell instances as well as instances from different cells.\n\n Parameters\n ----------\n cell : openmc.Cell or Integral\n The distributed cell to tally. Either an openmc.Cell or an Integral\n cell ID number can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n An iterable with one element---the ID of the distributed Cell.\n id : int\n Unique identifier for the filter\n num_bins : int\n The number of filter bins\n paths : list of str\n The paths traversed through the CSG tree to reach each distribcell\n instance (for 'distribcell' filters only)\n\n See Also\n --------\n CellInstanceFilter\n\n \"\"\"\n\n def __init__(self, cell, filter_id=None):\n self._paths = None\n super().__init__(cell, filter_id)\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n out = cls(group['bins'][()], filter_id=filter_id)\n out._num_bins = group['n_bins'][()]\n\n return out\n\n @property\n def num_bins(self):\n # Need to handle number of bins carefully -- for distribcell tallies, we\n # need to know how many instances of the cell there are\n return self._num_bins\n\n @property\n def paths(self):\n return self._paths\n\n @Filter.bins.setter\n def bins(self, bins):\n # Format the bins as a 1D numpy array.\n bins = np.atleast_1d(bins)\n\n # Make sure there is only 1 bin.\n if not len(bins) == 1:\n msg = 'Unable to add bins \"{0}\" to a DistribcellFilter since ' \\\n 'only a single distribcell can be used per tally'.format(bins)\n raise ValueError(msg)\n\n # Check the type and extract the id, if necessary.\n cv.check_type('distribcell bin', bins[0], (Integral, openmc.Cell))\n if isinstance(bins[0], openmc.Cell):\n bins = np.atleast_1d(bins[0].id)\n\n self._bins = bins\n\n @paths.setter\n def paths(self, paths):\n cv.check_iterable_type('paths', paths, str)\n self._paths = paths\n\n def can_merge(self, other):\n # Distribcell filters cannot have more than one bin\n return False\n\n def get_bin_index(self, filter_bin):\n # Filter bins for distribcells are indices of each unique placement of\n # the Cell in the Geometry (consecutive integers starting at 0).\n return filter_bin\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a 
Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Keyword arguments\n -----------------\n paths : bool\n If True (default), expand distribcell indices into multi-index\n columns describing the path to that distribcell through the CSG\n tree. NOTE: This option assumes that all distribcell paths are of\n the same length and do not have the same universes and cells but\n different lattice cell indices.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with columns describing distributed cells. The\n dataframe will have either:\n\n 1. a single column with the cell instance IDs (without summary info)\n 2. separate columns for the cell IDs, universe IDs, and lattice IDs\n and x,y,z cell indices corresponding to each (distribcell paths).\n\n The number of rows in the DataFrame is the same as the total number\n of bins in the corresponding tally, with the filter bin\n appropriately tiled to map to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n level_df = None\n\n paths = kwargs.setdefault('paths', True)\n\n # Create Pandas Multi-index columns for each level in CSG tree\n if paths:\n\n # Distribcell paths require linked metadata from the Summary\n if self.paths is None:\n msg = 'Unable to construct distribcell paths since ' \\\n 'the Summary is not linked to the StatePoint'\n raise ValueError(msg)\n\n # Make copy of array of distribcell paths to use in\n # Pandas Multi-index column construction\n num_offsets = len(self.paths)\n paths = [_path_to_levels(p) for p in self.paths]\n\n # Loop over CSG levels in the distribcell paths\n num_levels = len(paths[0])\n for i_level in range(num_levels):\n # Use level key as first index in Pandas Multi-index column\n level_key = 'level {}'.format(i_level + 1)\n\n # Create a dictionary for this level for Pandas Multi-index\n level_dict = OrderedDict()\n\n # Use the first distribcell path to determine if level\n # is a universe/cell or lattice level\n path = paths[0]\n if path[i_level][0] == 'lattice':\n # Initialize prefix Multi-index keys\n lat_id_key = (level_key, 'lat', 'id')\n lat_x_key = (level_key, 'lat', 'x')\n lat_y_key = (level_key, 'lat', 'y')\n lat_z_key = (level_key, 'lat', 'z')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[lat_id_key] = np.empty(num_offsets)\n level_dict[lat_x_key] = np.empty(num_offsets)\n level_dict[lat_y_key] = np.empty(num_offsets)\n if len(path[i_level][2]) == 3:\n level_dict[lat_z_key] = np.empty(num_offsets)\n\n else:\n # Initialize prefix Multi-index keys\n univ_key = (level_key, 'univ', 'id')\n cell_key = (level_key, 'cell', 'id')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[univ_key] = np.empty(num_offsets)\n level_dict[cell_key] = np.empty(num_offsets)\n\n # Populate Multi-index arrays with all distribcell paths\n for i, path in enumerate(paths):\n\n level = path[i_level]\n if level[0] == 'lattice':\n # Assign entry to Lattice Multi-index column\n level_dict[lat_id_key][i] = level[1]\n 
level_dict[lat_x_key][i] = level[2][0]\n level_dict[lat_y_key][i] = level[2][1]\n if len(level[2]) == 3:\n level_dict[lat_z_key][i] = level[2][2]\n\n else:\n # Assign entry to Universe, Cell Multi-index columns\n level_dict[univ_key][i] = level[1]\n level_dict[cell_key][i] = level[2]\n\n # Tile the Multi-index columns\n for level_key, level_bins in level_dict.items():\n level_dict[level_key] = _repeat_and_tile(\n level_bins, stride, data_size)\n\n # Initialize a Pandas DataFrame from the level dictionary\n if level_df is None:\n level_df = pd.DataFrame(level_dict)\n else:\n level_df = pd.concat([level_df, pd.DataFrame(level_dict)],\n axis=1)\n\n # Create DataFrame column for distribcell instance IDs\n # NOTE: This is performed regardless of whether the user\n # requests Summary geometric information\n filter_bins = _repeat_and_tile(\n np.arange(self.num_bins), stride, data_size)\n df = pd.DataFrame({self.short_name.lower() : filter_bins})\n\n # Concatenate with DataFrame of distribcell instance IDs\n if level_df is not None:\n level_df = level_df.dropna(axis=1, how='all')\n level_df = level_df.astype(np.int)\n df = pd.concat([level_df, df], axis=1)\n\n return df\n\n\nclass MuFilter(RealFilter):\n \"\"\"Bins tally events based on particle scattering angle.\n\n Parameters\n ----------\n values : int or Iterable of Real\n A grid of scattering angles which events will binned into. Values\n represent the cosine of the scattering angle. If an iterable is given,\n the values will be used explicitly as grid points. If a single int is\n given, the range [-1, 1] will be divided up equally into that number of\n bins.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n scattering angle cosines for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of scattering angle\n cosines for a single filter bin\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n def __init__(self, values, filter_id=None):\n if isinstance(values, Integral):\n values = np.linspace(-1., 1., values + 1)\n super().__init__(values, filter_id)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for x in np.ravel(bins):\n if not np.isclose(x, -1.):\n cv.check_greater_than('filter value', x, -1., equality=True)\n if not np.isclose(x, 1.):\n cv.check_less_than('filter value', x, 1., equality=True)\n\n\nclass PolarFilter(RealFilter):\n \"\"\"Bins tally events based on the incident particle's direction.\n\n Parameters\n ----------\n values : int or Iterable of Real\n A grid of polar angles which events will binned into. Values represent\n an angle in radians relative to the z-axis. If an iterable is given, the\n values will be used explicitly as grid points. 
If a single int is given,\n the range [0, pi] will be divided up equally into that number of bins.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n polar angles in [rad] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of polar angles for a\n single filter bin\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n units = 'rad'\n\n def __init__(self, values, filter_id=None):\n if isinstance(values, Integral):\n values = np.linspace(0., np.pi, values + 1)\n super().__init__(values, filter_id)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for x in np.ravel(bins):\n if not np.isclose(x, 0.):\n cv.check_greater_than('filter value', x, 0., equality=True)\n if not np.isclose(x, np.pi):\n cv.check_less_than('filter value', x, np.pi, equality=True)\n\n\nclass AzimuthalFilter(RealFilter):\n \"\"\"Bins tally events based on the incident particle's direction.\n\n Parameters\n ----------\n values : int or Iterable of Real\n A grid of azimuthal angles which events will binned into. Values\n represent an angle in radians relative to the x-axis and perpendicular\n to the z-axis. If an iterable is given, the values will be used\n explicitly as grid points. If a single int is given, the range\n [-pi, pi) will be divided up equally into that number of bins.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n azimuthal angles in [rad] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of azimuthal angles\n for a single filter bin\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n units = 'rad'\n\n def __init__(self, values, filter_id=None):\n if isinstance(values, Integral):\n values = np.linspace(-np.pi, np.pi, values + 1)\n super().__init__(values, filter_id)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for x in np.ravel(bins):\n if not np.isclose(x, -np.pi):\n cv.check_greater_than('filter value', x, -np.pi, equality=True)\n if not np.isclose(x, np.pi):\n cv.check_less_than('filter value', x, np.pi, equality=True)\n\n\nclass DelayedGroupFilter(Filter):\n \"\"\"Bins fission events based on the produced neutron precursor groups.\n\n Parameters\n ----------\n bins : iterable of int\n The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses\n 6 precursor groups so a tally with all groups will have bins =\n [1, 2, 3, 4, 5, 6].\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : iterable of int\n The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses\n 6 precursor groups so a tally with all groups will have bins =\n [1, 2, 3, 4, 5, 6].\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n def check_bins(self, bins):\n # Check the bin values.\n for g in bins:\n cv.check_greater_than('delayed group', g, 0)\n\n\nclass EnergyFunctionFilter(Filter):\n \"\"\"Multiplies tally scores by an arbitrary function of incident energy.\n\n The arbitrary function is described by a piecewise linear-linear\n interpolation of energy and y values. 
Values outside of the given energy\n range will be evaluated as zero.\n\n Parameters\n ----------\n energy : Iterable of Real\n A grid of energy values in [eV]\n y : iterable of Real\n A grid of interpolant values in [eV]\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n energy : Iterable of Real\n A grid of energy values in [eV]\n y : iterable of Real\n A grid of interpolant values in [eV]\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins (always 1 for this filter)\n\n \"\"\"\n\n def __init__(self, energy, y, filter_id=None):\n self.energy = energy\n self.y = y\n self.id = filter_id\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n elif not all(self.energy == other.energy):\n return False\n else:\n return all(self.y == other.y)\n\n def __gt__(self, other):\n if type(self) is not type(other):\n if self.short_name in _FILTER_TYPES and \\\n other.short_name in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.short_name) - \\\n _FILTER_TYPES.index(other.short_name)\n return delta > 0\n else:\n return False\n else:\n return False\n\n def __lt__(self, other):\n if type(self) is not type(other):\n if self.short_name in _FILTER_TYPES and \\\n other.short_name in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.short_name) - \\\n _FILTER_TYPES.index(other.short_name)\n return delta < 0\n else:\n return False\n else:\n return False\n\n def __hash__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tEnergy', self.energy)\n string += '{: <16}=\\t{}\\n'.format('\\tInterpolant', self.y)\n return hash(string)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tEnergy', self.energy)\n string += '{: <16}=\\t{}\\n'.format('\\tInterpolant', self.y)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n energy = group['energy'][()]\n y = group['y'][()]\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n return cls(energy, y, filter_id=filter_id)\n\n @classmethod\n def from_tabulated1d(cls, tab1d):\n \"\"\"Construct a filter from a Tabulated1D object.\n\n Parameters\n ----------\n tab1d : openmc.data.Tabulated1D\n A linear-linear Tabulated1D object with only a single interpolation\n region.\n\n Returns\n -------\n EnergyFunctionFilter\n\n \"\"\"\n cv.check_type('EnergyFunctionFilter tab1d', tab1d,\n openmc.data.Tabulated1D)\n if tab1d.n_regions > 1:\n raise ValueError('Only Tabulated1Ds with a single interpolation '\n 'region are supported')\n if tab1d.interpolation[0] != 2:\n raise ValueError('Only linear-linar Tabulated1Ds are supported')\n\n return cls(tab1d.x, tab1d.y)\n\n @property\n def energy(self):\n return self._energy\n\n @property\n def y(self):\n return self._y\n\n @property\n def bins(self):\n raise AttributeError('EnergyFunctionFilters have no bins.')\n\n @property\n def num_bins(self):\n return 1\n\n @energy.setter\n def energy(self, energy):\n # Format the bins as a 1D numpy array.\n energy = np.atleast_1d(energy)\n\n # Make sure the values are Real and positive.\n cv.check_type('filter energy grid', energy, Iterable, Real)\n for E in energy:\n cv.check_greater_than('filter energy grid', E, 0, 
equality=True)\n\n self._energy = energy\n\n @y.setter\n def y(self, y):\n # Format the bins as a 1D numpy array.\n y = np.atleast_1d(y)\n\n # Make sure the values are Real.\n cv.check_type('filter interpolant values', y, Iterable, Real)\n\n self._y = y\n\n @bins.setter\n def bins(self, bins):\n raise RuntimeError('EnergyFunctionFilters have no bins.')\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = ET.Element('filter')\n element.set('id', str(self.id))\n element.set('type', self.short_name.lower())\n\n subelement = ET.SubElement(element, 'energy')\n subelement.text = ' '.join(str(e) for e in self.energy)\n\n subelement = ET.SubElement(element, 'y')\n subelement.text = ' '.join(str(y) for y in self.y)\n\n return element\n\n def can_merge(self, other):\n return False\n\n def is_subset(self, other):\n return self == other\n\n def get_bin_index(self, filter_bin):\n # This filter only has one bin. Always return 0.\n return 0\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with a column that is filled with a hash of this\n filter. EnergyFunctionFilters have only 1 bin so the purpose of this\n DataFrame column is to differentiate the filter from other\n EnergyFunctionFilters. The number of rows in the DataFrame is the\n same as the total number of bins in the corresponding tally.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n df = pd.DataFrame()\n\n # There is no clean way of sticking all the energy, y data into a\n # DataFrame so instead we'll just make a column with the filter name\n # and fill it with a hash of the __repr__. We want a hash that is\n # reproducible after restarting the interpreter so we'll use hashlib.md5\n # rather than the intrinsic hash().\n hash_fun = hashlib.md5()\n hash_fun.update(repr(self).encode('utf-8'))\n out = hash_fun.hexdigest()\n\n # The full 16 bytes make for a really wide column. Just 7 bytes (14\n # hex characters) of the digest are probably sufficient.\n out = out[:14]\n\n filter_bins = _repeat_and_tile(out, stride, data_size)\n df = pd.concat([df, pd.DataFrame(\n {self.short_name.lower(): filter_bins})])\n\n return df\n" ]
[ [ "numpy.isclose", "numpy.asarray", "numpy.vstack", "numpy.allclose", "numpy.abs", "numpy.where", "numpy.linspace", "numpy.unique", "numpy.tile", "numpy.repeat", "numpy.arange", "numpy.all", "numpy.min", "pandas.concat", "numpy.empty", "pandas.DataFrame", "numpy.atleast_1d", "numpy.ravel", "numpy.concatenate" ] ]
xiaxin2000/OpenCDA-Documents
[ "1ad4b368d4287dae8b282bac1665816a496d57c6" ]
[ "opencda/core/plan/spline.py" ]
[ "\"\"\"\nCubic spline planner\n\nAuthor: Atsushi Sakai(@Atsushi_twi)\n\n\"\"\"\nimport math\nimport numpy as np\nimport bisect\n\n\nclass Spline:\n \"\"\"\n Cubic Spline class for calculte curvature (Author: Atsushi Sakai(@Atsushi_twi)).\n\n Parameters\n -x : float\n The x coordinate.\n -y : float\n The y coordinate.\n \n Attributes\n -b : float\n The spline coefficient b.\n -c : float\n The spline coefficient c.\n -d : float\n The spline coefficient d.\n -w : float\n The spline coefficient w.\n -nx : float\n The dimension of x.\n -h : float \n The n-th discrete difference along the x-axis.\n \"\"\"\n\n def __init__(self, x, y):\n self.b, self.c, self.d, self.w = [], [], [], []\n\n self.x = x\n self.y = y\n\n self.nx = len(x) # dimension of x\n h = np.diff(x)\n\n # calc coefficient c\n self.a = [iy for iy in y]\n\n # calc coefficient c\n A = self.__calc_A(h)\n B = self.__calc_B(h)\n self.c = np.linalg.solve(A, B)\n # print(self.c1)\n\n # calc spline coefficient b and d\n for i in range(self.nx - 1):\n self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))\n tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \\\n (self.c[i + 1] + 2.0 * self.c[i]) / 3.0\n self.b.append(tb)\n\n def calc(self, t):\n \"\"\"\n Calc position\n\n Args:\n - t (float): if t is outside of the input x, return None\n Returns:\n - result (float): The calcualtion result of position. If t is outside the range of x, return None.\n\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n return result\n\n def calcd(self, t):\n \"\"\"\n Calc first derivative. If t is outside of the input x, return None.\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0\n return result\n\n def calcdd(self, t):\n \"\"\"\n Calc second derivative, If t is outside of the input x, return None.\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx\n return result\n\n def __search_index(self, x):\n \"\"\"\n Search data segment index.\n \"\"\"\n return bisect.bisect(self.x, x) - 1\n\n def __calc_A(self, h):\n \"\"\"\n Calculate matrix A for spline coefficient a.\n \"\"\"\n A = np.zeros((self.nx, self.nx))\n A[0, 0] = 1.0\n for i in range(self.nx - 1):\n if i != (self.nx - 2):\n A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])\n A[i + 1, i] = h[i]\n A[i, i + 1] = h[i]\n\n A[0, 1] = 0.0\n A[self.nx - 1, self.nx - 2] = 0.0\n A[self.nx - 1, self.nx - 1] = 1.0\n # print(A)\n return A\n\n def __calc_B(self, h):\n \"\"\"\n Calculate matrix B for spline coefficient b.\n \"\"\"\n B = np.zeros(self.nx)\n for i in range(self.nx - 2):\n B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \\\n h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]\n return B\n\n\nclass Spline2D:\n \"\"\"\n 2D Cubic Spline class for calculte curvature (Author: Atsushi Sakai(@Atsushi_twi)).\n\n Parameters\n -x : float\n The x coordinate.\n -y : float\n The y coordinate.\n \n Attributes\n -b : float\n The spline coefficient b.\n -c : float\n The spline coefficient c.\n -d : float\n The spline coefficient d.\n -w : float\n The spline coefficient w.\n -nx : float\n The dimension of x.\n -h : float \n The n-th discrete difference along the 
x-axis.\n\n \"\"\"\n\n def __init__(self, x, y):\n self.s = self.__calc_s(x, y)\n self.sx = Spline(self.s, x)\n self.sy = Spline(self.s, y)\n\n def __calc_s(self, x, y):\n dx = np.diff(x)\n dy = np.diff(y)\n self.ds = np.hypot(dx, dy)\n s = [0]\n s.extend(np.cumsum(self.ds))\n return s\n\n def calc_position(self, s):\n \"\"\"\n Calculate position.\n \"\"\"\n x = self.sx.calc(s)\n y = self.sy.calc(s)\n\n return x, y\n\n def calc_curvature(self, s):\n \"\"\"\n Calculate curvature.\n \"\"\"\n dx = self.sx.calcd(s)\n ddx = self.sx.calcdd(s)\n dy = self.sy.calcd(s)\n ddy = self.sy.calcdd(s)\n k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))\n return k\n\n def calc_yaw(self, s):\n \"\"\"\n Calculate yaw.\n \"\"\"\n dx = self.sx.calcd(s)\n dy = self.sy.calcd(s)\n yaw = math.atan2(dy, dx)\n return yaw\n\n\ndef calc_spline_course(x, y, ds=0.1):\n \"\"\"\n Caculate 2D splice course.\n\n Args: \n -x (float): The x coordinate of the input point. \n -y (float): The y coordinate of the input point.\n -ds (flost): The s step value. Default value equals to 0.1.\n\n Returns:\n -rx (list): List of spline course points' x coordinates.\n -ry (list): List of spline course points' y coordinates.\n -ryaw (list): List of spline course points' yaw angles.\n -rk (list): List of spline course points' curvatures.\n -s (list): List of spline course points' s values.\n \"\"\"\n sp = Spline2D(x, y)\n s = list(np.arange(0, sp.s[-1], ds))\n\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = sp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(sp.calc_yaw(i_s))\n rk.append(sp.calc_curvature(i_s))\n\n return rx, ry, ryaw, rk, s\n\n\ndef main(): \n \"\"\"\n Main function to calculate spline and visulize the results.\n \"\"\"\n print(\"Spline 2D test\")\n import matplotlib.pyplot as plt\n x = [-135, -131, -131, -131]\n y = [6.43, 10.83, 100.38, 131]\n ds = 0.1 # [m] distance of each intepolated points\n\n sp = Spline2D(x, y)\n s = np.arange(0, sp.s[-1], ds)\n\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = sp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(sp.calc_yaw(i_s))\n rk.append(sp.calc_curvature(i_s))\n\n plt.subplots(1)\n plt.plot(x, y, \"xb\", label=\"input\")\n plt.plot(rx, ry, \"-r\", label=\"spline\")\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel(\"x[m]\")\n plt.ylabel(\"y[m]\")\n plt.legend()\n\n plt.subplots(1)\n plt.plot(s, [np.rad2deg(iyaw) for iyaw in ryaw], \"-r\", label=\"yaw\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"yaw angle[deg]\")\n\n plt.subplots(1)\n plt.plot(s, rk, \"-r\", label=\"curvature\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"curvature [1/m]\")\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linalg.solve", "numpy.cumsum", "numpy.zeros", "numpy.diff", "numpy.hypot", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "numpy.rad2deg", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
Taylor-Liu/rrt-algorithms
[ "54be136b71d63f8e3ff37afadf267da49080100b" ]
[ "examples/rrt_star/rrt_star_3d.py" ]
[ "# This file is subject to the terms and conditions defined in\n# file 'LICENSE', which is part of this source code package.\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) +\n \"/../../\")\n\nimport numpy as np\n\nfrom src.rrt.rrt_star import RRTStar\nfrom src.search_space.search_space import SearchSpace\nfrom src.utilities.plotting import Plot\n\nX_dimensions = np.array([(0, 100), (0, 100), (0, 100)]) # dimensions of Search Space\n# obstacles\nObstacles = np.array(\n [(20, 20, 20, 40, 40, 40), (20, 20, 60, 40, 40, 80), (20, 60, 20, 40, 80, 40), (60, 60, 20, 80, 80, 40),\n (60, 20, 20, 80, 40, 40), (60, 20, 60, 80, 40, 80), (20, 60, 60, 40, 80, 80), (60, 60, 60, 80, 80, 80)])\nx_init = (0, 0, 0) # starting location\nx_goal = (100, 100, 100) # goal location\n\nQ = np.array([(8, 4)]) # length of tree edges\nr = 1 # length of smallest edge to check for intersection with obstacles\nmax_samples = 1024 # max number of samples to take before timing out\nrewire_count = 32 # optional, number of nearby branches to rewire\nprc = 0.1 # probability of checking for a connection to goal\n\n# create Search Space\nX = SearchSpace(X_dimensions, Obstacles)\n\n# create rrt_search\nrrt = RRTStar(X, Q, x_init, x_goal, max_samples, r, prc, rewire_count)\npath = rrt.rrt_star()\n\n# plot\nplot = Plot(\"rrt_star_3d\")\nplot.plot_tree(X, rrt.trees)\nif path is not None:\n plot.plot_path(X, path)\nplot.plot_obstacles(X, Obstacles)\nplot.plot_start(X, x_init)\nplot.plot_goal(X, x_goal)\nplot.draw(auto_open=True)\n" ]
[ [ "numpy.array" ] ]
vorticityxyz/Gaia-api
[ "04e2a9ee2448830df72156aecf432eda0c6eb504" ]
[ "gaia.py" ]
[ "# Description:\n#\n# WARNING!!! This file is a critical component of Vorticity Gaia API for seismic imaging\n# PLEASE DO NOT MODIFY\n#\n# (C) Vorticity Inc. Mountain View, CA 2021\n# Licence: MIT\n\nimport numpy as np\nimport grpc\nimport time\nimport os\nimport sys\nimport gaia_pb2\nimport gaia_pb2_grpc\nimport dispatch_pb2\nimport dispatch_pb2_grpc\nimport validate\n\nimport tokens\nimport codes\n\nCHUNK_SIZE = 1024 * 1024 # 1MB\n\nIN_FILE = \"_gaia_input.npz\"\nEL_IN_FILE = \"_egaia_input.npz\"\nRTM_FILE = \"_gaia_rtm.npz\"\nEL_RTM_FILE = \"_gaia_ertm.npz\"\nBF_FILE = \"_gaia_block.npz\"\nOUT_FILE = \"_shot_record.npy\"\nEL_OUT_FILE = \"_eshot_record.npz\"\nUPDATE_FILE = \"_rtm_update.npy\"\nEL_UPDATE_FILE = \"_ertm_update.npz\"\nSANITY_FILE = \"_parameters.npy\"\nEL_SANITY_FILE = \"_eparameters.npy\"\nBF_SANILTY_FILE = \"_bfparameters.npy\"\nRBF_SETUP_FILE = '_rbf_setup.npz'\n\nDISPATCH_SERVER = 'vorticity.cloud:443'\n\ndef get_file_chunks(filename):\n with open(filename, 'rb') as f:\n size = 0\n while True:\n piece = f.read(CHUNK_SIZE);\n size += sys.getsizeof(piece)\n if len(piece) == 0:\n print()\n return\n yield gaia_pb2.Chunk(buffer=piece)\n sys.stdout.write('\\r')\n sys.stdout.write('Uploading %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n\ndef get_file_chunks_nv(filename):\n with open(filename, 'rb') as f:\n size = 0\n while True:\n piece = f.read(CHUNK_SIZE);\n size += sys.getsizeof(piece)\n if len(piece) == 0:\n return\n yield gaia_pb2.Chunk(buffer=piece)\n\ndef save_chunks_to_file(chunks, filename):\n size = 0\n with open(filename, 'wb') as f:\n for chunk in chunks:\n f.write(chunk.buffer)\n size += sys.getsizeof(chunk.buffer)\n sys.stdout.write('\\r')\n sys.stdout.write('Downloading %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n \n print()\n\ndef show_progress(responses):\n final_progress_value = 0.0\n for response in responses:\n sys.stdout.write('\\r')\n sys.stdout.write('%.2f%% complete' % (response.progress * 100,))\n sys.stdout.flush()\n final_progress_value = response.progress\n print()\n return final_progress_value\n\n\nclass DispatchClient:\n def __init__(self, address):\n with open('server.crt', 'rb') as f:\n creds = grpc.ssl_channel_credentials(f.read())\n channel = grpc.secure_channel(address, creds, \n options = (('grpc.ssl_target_name_override', 'localhost'), \n ('grpc.default_authority', 'localhost')),\n compression=grpc.Compression.Gzip)\n self.stub = dispatch_pb2_grpc.DispatchServerStub(channel)\n\n def DispatchServerAddressRequest(self, token):\n request = dispatch_pb2.AddressRequest()\n request.token = token\n response = self.stub.DispatchServerAddressRequest(request)\n\n return response\n\nclass GaiaClient:\n def __init__(self, token):\n dispatch_client = DispatchClient(DISPATCH_SERVER)\n response = dispatch_client.DispatchServerAddressRequest(token)\n if (response.status == codes.SUCCESS):\n address = response.address\n with open('server.crt', 'rb') as f:\n creds = grpc.ssl_channel_credentials(f.read())\n channel = grpc.secure_channel(address, creds,\n options = (('grpc.ssl_target_name_override', 'localhost'), \n ('grpc.default_authority', 'localhost')),\n compression=grpc.Compression.Gzip)\n self.stub = gaia_pb2_grpc.GaiaServerStub(channel)\n else:\n raise Exception(\"We could not verify this account. 
Please contact Vorticity.\")\n\n def StatusCheck(self, token):\n request = gaia_pb2.StatusRequest()\n request.token = token\n response = self.stub.StatusCheck(request)\n return response.status\n\n def SanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.SanityCheck(chunks_generator)\n return response.status\n\n def rtmSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.rtmSanityCheck(chunks_generator)\n return response.status\n\n def eForwardSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.eForwardSanityCheck(chunks_generator)\n return response.status\n \n def eRTMSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.eRTMSanityCheck(chunks_generator)\n return response.status\n\n def Upload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.Upload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def rtmUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.rtmUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def eForwardUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.eForwardUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def eRTMUpload(self, file_name):\n print(\"Uploading...\")\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.eRTMUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def Execute(self, sent_token):\n print(\"Forward processing.\")\n start = time.time()\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.Execute(request)\n final_progress_value = show_progress(responses)\n end = time.time()\n process_time = end - start\n print(\"Processing time:\", \"{:.2f}\".format(process_time), 's')\n return final_progress_value\n\n def rtmExecute(self, sent_token):\n print(\"RTM processing.\")\n start = time.time()\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.rtmExecute(request)\n final_progress_value = show_progress(responses)\n end = time.time()\n process_time = end - start\n print(\"Processing time:\", \"{:.2f}\".format(process_time), 's')\n return final_progress_value\n\n def eForwardExecute(self, sent_token):\n print(\"Elastic forward processing.\")\n start = time.time()\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.eForwardExecute(request)\n final_progress_value = show_progress(responses)\n end = time.time()\n 
process_time = end - start\n print(\"Processing time:\", \"{:.2f}\".format(process_time), 's')\n return final_progress_value\n\n def eRTMExecute(self, sent_token):\n print(\"Elastic RTM processing.\") \n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.eRTMExecute(request)\n final_progress_value = show_progress(responses)\n return final_progress_value\n\n def Download(self, sent_token, out_file_name):\n print(\"Downloading results\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.Download(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def rtmDownload(self, sent_token, out_file_name):\n print(\"Downloading results\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.rtmDownload(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def eForwardDownload(self, sent_token, out_file_name):\n print(\"Downloading results\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.eForwardDownload(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def eRTMDownload(self, sent_token, out_file_name):\n print(\"downloading...\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.eRTMDownload(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def CleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.CleanUp(request)\n return response.status\n\n def rtmCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.rtmCleanUp(request)\n return response.status\n\n def eForwardCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.eForwardCleanUp(request)\n return response.status\n\n def eRTMCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.eRTMCleanUp(request)\n return response.status\n\n def BatchForwardSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.BatchForwardSanityCheck(chunks_generator)\n return response.status\n\n def BatchForwardStatus(self, token, filename):\n request = gaia_pb2.BatchStatusRequest()\n request.token = token\n request.filename = filename\n response = self.stub.BatchForwardStatus(request)\n return response\n\n def BatchForwardUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n 
response = self.stub.BatchForwardUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def BatchForwardInitExec(self, sent_token):\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n response = self.stub.BatchForwardInitExec(request)\n return response.status\n\n def BatchForwardDownload(self, sent_token, sent_filename, out_filename):\n #print(\"Downloading\", sent_filename)\n start = time.time()\n request = gaia_pb2.BatchDownloadRequest()\n request.token = sent_token\n request.filename = sent_filename\n responses = self.stub.BatchForwardDownload(request)\n size = 0\n with open(out_filename, 'wb') as f:\n for response in responses:\n f.write(response.buffer)\n size += sys.getsizeof(response.buffer)\n sys.stdout.write('\\r')\n sys.stdout.write(out_filename + ' - %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n\n #print()\n end = time.time()\n download_time = end - start\n print(\" Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_filename)/download_time/1024/1024), 'MB/s' )\n\n def BatchForwardCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.BatchForwardCleanUp(request)\n return response.status\n\n def rUploadSanityCheck(self, sent_token, filename, filesize):\n request = gaia_pb2.RemoteUploadSanityRequest()\n request.token = sent_token\n request.filename = filename\n request.filesize = filesize\n response = self.stub.rUploadSanityCheck(request)\n return response.status\n\n def rUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.rUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def rForwardUpload(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.rForwardUpload(chunks_generator)\n return response\n\n def rForwardInitExec(self, sent_token):\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n response = self.stub.rForwardInitExec(request)\n return response.status\n\n def rForwardStatus(self, token, filename):\n request = gaia_pb2.BatchStatusRequest()\n request.token = token\n request.filename = filename\n response = self.stub.rForwardStatus(request)\n return response\n\n def rForwardDownload(self, sent_token, sent_filename, out_filename):\n start = time.time()\n request = gaia_pb2.BatchDownloadRequest()\n request.token = sent_token\n request.filename = sent_filename\n responses = self.stub.rForwardDownload(request)\n size = 0\n with open(out_filename, 'wb') as f:\n for response in responses:\n f.write(response.buffer)\n size += sys.getsizeof(response.buffer)\n sys.stdout.write('\\r')\n sys.stdout.write(out_filename + ' - %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n\n #print()\n end = time.time()\n download_time = end - start\n print(\" Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_filename)/download_time/1024/1024), 'MB/s' )\n\n def rForwardCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = 
self.stub.rForwardCleanUp(request)\n return response.status\n\n def rDelete(self, sent_token, filename):\n request = gaia_pb2.DeleteRequest()\n request.token = sent_token\n request.filename = filename\n response = self.stub.rDelete(request)\n return response.status\n\n def Reset(self, sent_token):\n request = gaia_pb2.ResetRequest()\n request.token = sent_token\n response = self.stub.Reset(request)\n return response.status\n\ndef reset_server():\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n status = client.Reset(token)\n if (status == codes.SUCCESS):\n print(\"Server reset successful!\")\n else:\n print(\"Error resetting server. Contact Vorticity.\")\n\n# Forward model operator\ndef f28(model, shot, shotxyz, recxxyyz, deltas):\n\n # temporal accuracy 2, spacial accuracy 8, no abc\n act = 2 \n acs = 8\n abc = 0\n cnum = 1 # num accelerator cards\n\n # no pml\n pmlw = 0\n pmla = 0\n\n # Validate that user input is usable\n validate.model(model)\n validate.shot(shot)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc, pmlw, pmla, cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(IN_FILE, model=np.square(model), shot=shot, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.SanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, receiver size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.Upload(IN_FILE)\n if (file_length != os.path.getsize(IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.Execute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.Download(token, OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. 
If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.CleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. Contact Vorticity.\")\n\n # return data to user\n shot_record = np.load(OUT_FILE)\n os.remove(IN_FILE)\n os.remove(OUT_FILE)\n os.remove(SANITY_FILE)\n\n return shot_record\n\n# Forward model operator with pml\ndef f28pml(model, shot, shotxyz, recxxyyz, deltas, pml):\n\n # temporal accuracy 2, spacial accuracy 8, pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 1 # num accelerator cards\n\n # Validate that user input is usable\n validate.model(model)\n validate.shot(shot)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1], # no pml\n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(IN_FILE, model=np.square(model), shot=shot, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.SanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, receiver size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.Upload(IN_FILE)\n if (file_length != os.path.getsize(IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.Execute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.Download(token, OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.CleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n shot_record = np.load(OUT_FILE)\n os.remove(IN_FILE)\n os.remove(OUT_FILE)\n os.remove(SANITY_FILE)\n\n return shot_record\n\n# Multi accelerator card forward model operator\ndef mf28pml(model, shot, shotxyz, recxxyyz, deltas, pml):\n\n # temporal accuracy 2, spacial accuracy 8, pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 2 # num accelerator cards\n\n # Validate that user input is usable\n validate.multicard_model(model, cnum)\n validate.shot(shot)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1], \n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(IN_FILE, model=np.square(model), shot=shot, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.SanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, receiver size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.Upload(IN_FILE)\n if (file_length != os.path.getsize(IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.Execute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.Download(token, OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.CleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n shot_record = np.load(OUT_FILE)\n os.remove(IN_FILE)\n os.remove(OUT_FILE)\n os.remove(SANITY_FILE)\n\n return shot_record\n\n# Acoustic RTM operator\ndef rtm28pml(model, shot, traces, shotxyz, recxxyyz, deltas, pml):\n \n # temporal accuracy 2, spacial accuracy 8, with pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 1 # number of accelerator cards to use\n\n # Validate that user input is usable\n validate.model(model)\n validate.shot(shot)\n validate.traces(traces, shot, model)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1],\n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(RTM_FILE, model=np.square(model), shot=shot, traces=traces, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.rtmSanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.rtmUpload(RTM_FILE)\n if (file_length != os.path.getsize(RTM_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.RTM_READY):\n final_progress_value = client.rtmExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.rtmDownload(token, UPDATE_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.rtmCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n update = np.load(UPDATE_FILE)\n os.remove(RTM_FILE)\n os.remove(UPDATE_FILE)\n os.remove(SANITY_FILE)\n\n return update\n\n# Acoustic multi-card RTM operator\ndef mrtm28pml(model, shot, traces, shotxyz, recxxyyz, deltas, pml):\n \n # temporal accuracy 2, spacial accuracy 8, with pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 4 # number of accelerator cards to use\n\n # Validate that user input is usable\n validate.multicard_model(model, cnum)\n validate.shot(shot)\n validate.traces(traces, shot, model)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1],\n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(RTM_FILE, model=np.square(model), shot=shot, traces=traces, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.rtmSanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.rtmUpload(RTM_FILE)\n if (file_length != os.path.getsize(RTM_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.RTM_READY):\n final_progress_value = client.rtmExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.rtmDownload(token, UPDATE_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.rtmCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n update = np.load(UPDATE_FILE)\n os.remove(RTM_FILE)\n os.remove(UPDATE_FILE)\n os.remove(SANITY_FILE)\n\n return update\n\n\n\n# Elastic forward model operator\ndef ef18abc(vp, vs, rho, shot, shotxyz, recxxyyz, deltas, abc):\n\n # temporal accuracy 2, spacial accuracy 8, with sponge\n temportal_ac = 1\n spacial_ac = 8\n abc_type = 2\n\n # Validate that user input is usable\n validate.emodel(vp, vs, rho)\n validate.shot(shot)\n validate.shotxyz(vp, shotxyz)\n validate.recxxyyz(vp, recxxyyz)\n validate.deltas(deltas)\n validate.abc(vp, abc)\n\n sanity_data = np.array([vp.shape[0], vp.shape[1], vp.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n temportal_ac, abc_type], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n temportal_ac, spacial_ac, abc_type,\n abc[0], abc[1],\n ], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(EL_IN_FILE, vp=np.square(vp), vs=np.square(vs), rho=rho, shot=shot, config_int=config_int, config_float=config_float)\n np.save(EL_SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.eForwardSanityCheck(EL_SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources with the current Vorticity instance.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.eForwardUpload(EL_IN_FILE)\n if (file_length != os.path.getsize(EL_IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.eForwardExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download vx, vy and vz records\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.eForwardDownload(token, EL_OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.eForwardCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n \n with np.load(EL_OUT_FILE) as results:\n vx_traces = results['vx']\n vy_traces = results['vy']\n vz_traces = results['vz']\n\n os.remove(EL_IN_FILE)\n os.remove(EL_OUT_FILE)\n os.remove(EL_SANITY_FILE)\n\n return vx_traces, vy_traces, vz_traces\n\n# Elastic RTM operator\ndef ertm18abc(vp, vs, rho, shot, vx, vy, vz, shotxyz, recxxyyz, deltas, abc):\n # temporal accuracy 2, spacial accuracy 8, with sponge\n temportal_ac = 1\n spacial_ac = 8\n abc_type = 2\n\n # Validate that user input is usable\n validate.emodel(vp, vs, rho)\n validate.shot(shot)\n validate.traces(vx, shot, vp)\n validate.traces(vy, shot, vp)\n validate.traces(vz, shot, vp)\n validate.shotxyz(vp, shotxyz)\n validate.recxxyyz(vp, recxxyyz)\n validate.deltas(deltas)\n validate.abc(vp, abc)\n\n sanity_data = np.array([vp.shape[0], vp.shape[1], vp.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n temportal_ac, abc_type], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n temportal_ac, spacial_ac, abc_type,\n abc[0], abc[1],\n ], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n np.savez(EL_RTM_FILE, vp=np.square(vp), vs=np.square(vs), rho=rho, shot=shot, vx=vx, vy=vy, vz=vz, config_int=config_int, config_float=config_float)\n np.save(EL_SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.eRTMSanityCheck(EL_SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.eRTMUpload(EL_RTM_FILE)\n if (file_length != os.path.getsize(EL_RTM_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.RTM_READY):\n final_progress_value = client.eRTMExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.eRTMDownload(token, EL_UPDATE_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\")\n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.eRTMCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # Load update data\n with np.load(EL_UPDATE_FILE) as data:\n dvp = data['dvp']\n dvs = data['dvs']\n\n # remove all temp files\n os.remove(EL_RTM_FILE)\n os.remove(EL_UPDATE_FILE)\n os.remove(EL_SANITY_FILE)\n\n return dvp, dvs\n\n# Batch forward model operator\ndef batchf28pml(block, shotbox, sweep, shot, shotxyz, recxxyyz, deltas, pml, destination):\n # simulation parameters\n act = 2\n acs = 8 # spacial accuracy\n # absorbing boundary conditions\n # 0 - none, 1 - pml\n abc = 1\n cnum = 2 # number of accelerator cards to use\n\n # Validate that user input is usable\n validate.block(block)\n validate.shotbox(block, shotbox)\n validate.sweep(block, shotbox, sweep)\n validate.shot(shot)\n\n shotbox_nx = shotbox[0]\n shotbox_ny = shotbox[1]\n shotbox_nz = shotbox[2]\n\n ghost_model = np.empty((shotbox_nx, shotbox_ny, shotbox_nz))\n\n validate.shotxyz(ghost_model, shotxyz)\n validate.recxxyyz(ghost_model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(ghost_model, pml)\n\n nt = shot.shape[0]\n xt1 = recxxyyz[0]\n xt2 = recxxyyz[1]\n yt1 = recxxyyz[2]\n yt2 = recxxyyz[3]\n zt = recxxyyz[4]\n\n x_start = sweep[0]\n x_end = sweep[1]\n x_step = sweep[2]\n y_start = sweep[3]\n y_end = sweep[4]\n y_step = sweep[5]\n\n sim = np.array([act, acs, abc, cnum], dtype=np.int32)\n sanity_data = np.array([shotbox_nx, shotbox_ny, shotbox_nz, nt, xt1, xt2, yt1, yt2, act, abc, cnum])\n\n print(\"Starting gaia process.\")\n np.save(BF_SANILTY_FILE, sanity_data)\n np.savez(BF_FILE, model=block, shotbox=shotbox, sweep=sweep, shot=shot, shotxyz=shotxyz, recxxyyz=recxxyyz, deltas=deltas, sim=sim, pml=pml)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.BatchForwardSanityCheck(BF_SANILTY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.BatchForwardUpload(BF_FILE)\n if (file_length != os.path.getsize(BF_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n \n # Now instigate execution\n status = client.BatchForwardInitExec(token)\n if (status == codes.ERROR):\n raise Exception(\"Something went wrong when instigating batch execution. 
Try again after resetting the server or if problem persists, contact Vorticity.\")\n\n y_offset = y_start\n x_offset = x_start\n\n while (y_offset <= y_end):\n while (x_offset <= x_end):\n filename = \"shot-\" + str(y_offset) + \"-\" + str(x_offset) + \".npy\"\n drop_point = destination + filename\n\n while(True):\n response = client.BatchForwardStatus(token, filename)\n sys.stdout.write('\\r')\n sys.stdout.write('Processing shot %d of %d | %.2f%%' % (response.shot, response.total, response.progress * 100,))\n sys.stdout.flush()\n if (response.progress == 1.0):\n break\n #time.sleep(0.02)\n \n print()\n while(True):\n response = client.BatchForwardStatus(token, filename)\n if (response.fileExists == True):\n break\n #time.sleep(0.02)\n\n client.BatchForwardDownload(token, filename, drop_point)\n \n if (x_step == 0):\n break\n x_offset += x_step\n \n if (y_step == 0):\n break\n x_offset = x_start\n y_offset += y_step\n\n status = client.BatchForwardCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. Contact Vorticity.\")\n\n # remove all temp files\n os.remove(BF_FILE)\n os.remove(BF_SANILTY_FILE)\n\n# remote upload operator\ndef remoteUpload(local_filename, remote_filename):\n\n with open(local_filename, 'rb') as fobj:\n version = np.lib.format.read_magic(fobj)\n if version[0] == 1:\n shape, fortran_order, dtype = np.lib.format.read_array_header_1_0(fobj)\n else:\n shape, fortran_order, dtype = np.lib.format.read_array_header_2_0(fobj)\n\n # Validate that user input is usable\n validate.remote_model(shape, dtype)\n\n print(\"Starting gaia process.\")\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n file_size = os.path.getsize(local_filename)\n sanity_status = client.rUploadSanityCheck(token, remote_filename, file_size)\n\n if (sanity_status == codes.ERROR):\n raise Exception(\"Unable to upload. Not enough free space on remote server.\")\n\n response = client.rUpload(local_filename)\n\n if (response != file_size):\n raise Exception(\"Something went wrong with data upload to server. 
Try a gaia reset or if problem persists, contact Vorticity.\")\n\n print(\"Process complete!\")\n\n# remote batch forward operator\ndef rbf28pml(modelfile, shotbox, sweep, shot, shotxyz, recxxyyz, deltas, pml, destination):\n # simulation parameters\n act = 2\n acs = 8 # spacial accuracy\n # absorbing boundary conditions\n # 0 - none, 1 - pml\n abc = 1\n cnum = 2 # number of accelerator cards to use\n\n validate.shot(shot)\n snx = shotbox[0]\n sny = shotbox[1]\n snz = shotbox[2]\n\n ghost_model = np.empty((snx, sny, snz))\n\n validate.shotxyz(ghost_model, shotxyz)\n validate.recxxyyz(ghost_model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(ghost_model, pml)\n\n xs = shotxyz[0]\n ys = shotxyz[1]\n zs = shotxyz[2]\n\n nt = shot.shape[0]\n xt1 = recxxyyz[0]\n xt2 = recxxyyz[1]\n yt1 = recxxyyz[2]\n yt2 = recxxyyz[3]\n zt = recxxyyz[4]\n\n xsrt = sweep[0]\n xend = sweep[1]\n xstp = sweep[2]\n ysrt = sweep[3]\n yend = sweep[4]\n ystp = sweep[5]\n\n sim = np.array([act, acs, abc, cnum], dtype=np.int32)\n\n print(\"Starting gaia process.\")\n np.savez(RBF_SETUP_FILE, \n modelfile=modelfile, shotbox=shotbox, sweep=sweep, shot=shot, shotxyz=shotxyz, recxxyyz=recxxyyz, deltas=deltas, sim=sim, pml=pml)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n response = client.rForwardUpload(RBF_SETUP_FILE)\n if (response.status != codes.SUCCESS):\n raise Exception(\"Simulation was rejected by the server. Possible incorrect setup.\")\n\n if (response.length != os.path.getsize(RBF_SETUP_FILE)): \n raise Exception(\"Error uploading simulation. Reset server and try again. If problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.rForwardInitExec(token)\n if (status == codes.ERROR):\n raise Exception(\"Something went wrong when instigating execution. Try again after resetting the server or if problem persists, contact Vorticity.\")\n\n y_offset = ysrt\n x_offset = xsrt\n\n while (y_offset <= yend):\n while (x_offset <= xend):\n filename = \"shot-\" + str(x_offset + xs) + \"-\" + str(y_offset + ys) + \".npy\"\n drop_point = destination + filename\n\n while(True):\n response = client.rForwardStatus(token, filename)\n if (response.fileExists == True):\n break\n sys.stdout.write('\\r')\n sys.stdout.write('Processing shot %d of %d | %.2f%%' % (response.shot, response.total, response.progress * 100,))\n sys.stdout.flush()\n if (response.progress == 1.0):\n print()\n break\n\n while(True):\n response = client.rForwardStatus(token, filename)\n if (response.fileExists == True):\n break\n #time.sleep(0.02)\n\n client.rForwardDownload(token, filename, drop_point)\n\n if (xstp == 0):\n break\n x_offset += xstp\n \n if (ystp == 0):\n break\n x_offset = xsrt\n y_offset += ystp\n\n status = client.rForwardCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n    # remove all temp files\n    os.remove(RBF_SETUP_FILE)\n\n# remote delete operator\ndef remoteDelete(remote_filename):\n    print(\"Starting gaia process.\")\n\n    # get the token for identification to server\n    token = tokens.USER_TOKEN\n\n    # Launch client\n    client = GaiaClient(token)\n\n    print(\"Deleting remote earth model.\")\n    status = client.rDelete(token, remote_filename)\n\n    if (status == codes.SUCCESS):\n        print(\"Process complete!\")\n    else:\n        print(\"Process did not complete as intended. Contact Vorticity.\")" ]
[ [ "numpy.save", "numpy.load", "numpy.empty", "numpy.lib.format.read_array_header_2_0", "numpy.lib.format.read_array_header_1_0", "numpy.savez", "numpy.lib.format.read_magic", "numpy.array", "numpy.square" ] ]
klDen/flink
[ "a2c737891afde0c63c1a453b1ee164b80b6a702c", "a2c737891afde0c63c1a453b1ee164b80b6a702c" ]
[ "flink-python/pyflink/fn_execution/operations.py", "flink-python/pyflink/table/tests/test_pandas_conversion.py" ]
[ "################################################################################\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\nimport abc\nimport time\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import List, Tuple, Any, Dict\n\nfrom apache_beam.coders import PickleCoder\n\nfrom pyflink.datastream.state import ValueStateDescriptor, ValueState, ListStateDescriptor, \\\n ListState, MapStateDescriptor, MapState, ReducingStateDescriptor, ReducingState\nfrom pyflink.datastream import TimeDomain\nfrom pyflink.datastream.functions import RuntimeContext, TimerService, ProcessFunction, \\\n KeyedProcessFunction\nfrom pyflink.fn_execution import flink_fn_execution_pb2, operation_utils\nfrom pyflink.fn_execution.aggregate import extract_data_view_specs\nfrom pyflink.fn_execution.beam.beam_coders import DataViewFilterCoder\nfrom pyflink.fn_execution.operation_utils import extract_user_defined_aggregate_function\nfrom pyflink.fn_execution.state_impl import RemoteKeyedStateBackend\n\ntry:\n from pyflink.fn_execution.aggregate_fast import RowKeySelector, SimpleAggsHandleFunction, \\\n GroupAggFunction, DistinctViewDescriptor, SimpleTableAggsHandleFunction, \\\n GroupTableAggFunction\nexcept ImportError:\n from pyflink.fn_execution.aggregate_slow import RowKeySelector, SimpleAggsHandleFunction, \\\n GroupAggFunction, DistinctViewDescriptor, SimpleTableAggsHandleFunction,\\\n GroupTableAggFunction\n\nfrom pyflink.metrics.metricbase import GenericMetricGroup\nfrom pyflink.table import FunctionContext, Row\n\n\n# table operations\nSCALAR_FUNCTION_URN = \"flink:transform:scalar_function:v1\"\nTABLE_FUNCTION_URN = \"flink:transform:table_function:v1\"\nSTREAM_GROUP_AGGREGATE_URN = \"flink:transform:stream_group_aggregate:v1\"\nSTREAM_GROUP_TABLE_AGGREGATE_URN = \"flink:transform:stream_group_table_aggregate:v1\"\nPANDAS_AGGREGATE_FUNCTION_URN = \"flink:transform:aggregate_function:arrow:v1\"\nPANDAS_BATCH_OVER_WINDOW_AGGREGATE_FUNCTION_URN = \\\n \"flink:transform:batch_over_window_aggregate_function:arrow:v1\"\n\n# datastream operations\nDATA_STREAM_STATELESS_FUNCTION_URN = \"flink:transform:datastream_stateless_function:v1\"\nPROCESS_FUNCTION_URN = \"flink:transform:process_function:v1\"\nKEYED_PROCESS_FUNCTION_URN = \"flink:transform:keyed_process_function:v1\"\n\n\nclass Operation(abc.ABC):\n def __init__(self, spec):\n super(Operation, self).__init__()\n self.spec = spec\n self.func, self.user_defined_funcs = self.generate_func(self.spec.serialized_fn)\n if self.spec.serialized_fn.metric_enabled:\n self.base_metric_group = GenericMetricGroup(None, None)\n else:\n self.base_metric_group = None\n\n def open(self):\n for user_defined_func in 
self.user_defined_funcs:\n if hasattr(user_defined_func, 'open'):\n user_defined_func.open(FunctionContext(self.base_metric_group))\n\n def close(self):\n for user_defined_func in self.user_defined_funcs:\n if hasattr(user_defined_func, 'close'):\n user_defined_func.close()\n\n def finish(self):\n self._update_gauge(self.base_metric_group)\n\n def _update_gauge(self, base_metric_group):\n if base_metric_group is not None:\n for name in base_metric_group._flink_gauge:\n flink_gauge = base_metric_group._flink_gauge[name]\n beam_gauge = base_metric_group._beam_gauge[name]\n beam_gauge.set(flink_gauge())\n for sub_group in base_metric_group._sub_groups:\n self._update_gauge(sub_group)\n\n @abc.abstractmethod\n def generate_func(self, serialized_fn) -> Tuple:\n pass\n\n\nclass ScalarFunctionOperation(Operation):\n def __init__(self, spec):\n super(ScalarFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn):\n \"\"\"\n Generates a lambda function based on udfs.\n :param serialized_fn: serialized function which contains a list of the proto\n representation of the Python :class:`ScalarFunction`\n :return: the generated lambda function\n \"\"\"\n scalar_functions, variable_dict, user_defined_funcs = reduce(\n lambda x, y: (\n ','.join([x[0], y[0]]),\n dict(chain(x[1].items(), y[1].items())),\n x[2] + y[2]),\n [operation_utils.extract_user_defined_function(udf) for udf in serialized_fn.udfs])\n generate_func = eval('lambda value: [%s]' % scalar_functions, variable_dict)\n return generate_func, user_defined_funcs\n\n\nclass TableFunctionOperation(Operation):\n def __init__(self, spec):\n super(TableFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn):\n \"\"\"\n Generates a lambda function based on udtfs.\n :param serialized_fn: serialized function which contains the proto representation of\n the Python :class:`TableFunction`\n :return: the generated lambda function\n \"\"\"\n table_function, variable_dict, user_defined_funcs = \\\n operation_utils.extract_user_defined_function(serialized_fn.udfs[0])\n generate_func = eval('lambda value: %s' % table_function, variable_dict)\n return generate_func, user_defined_funcs\n\n\nclass PandasAggregateFunctionOperation(Operation):\n def __init__(self, spec):\n super(PandasAggregateFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn):\n pandas_functions, variable_dict, user_defined_funcs = reduce(\n lambda x, y: (\n ','.join([x[0], y[0]]),\n dict(chain(x[1].items(), y[1].items())),\n x[2] + y[2]),\n [operation_utils.extract_user_defined_function(udf, True)\n for udf in serialized_fn.udfs])\n variable_dict['wrap_pandas_result'] = operation_utils.wrap_pandas_result\n generate_func = eval('lambda value: wrap_pandas_result([%s])' %\n pandas_functions, variable_dict)\n return generate_func, user_defined_funcs\n\n\nclass PandasBatchOverWindowAggregateFunctionOperation(Operation):\n def __init__(self, spec):\n super(PandasBatchOverWindowAggregateFunctionOperation, self).__init__(spec)\n self.windows = [window for window in self.spec.serialized_fn.windows]\n # the index among all the bounded range over window\n self.bounded_range_window_index = [-1 for _ in range(len(self.windows))]\n # Whether the specified position window is a bounded range window.\n self.is_bounded_range_window = []\n window_types = flink_fn_execution_pb2.OverWindow\n\n bounded_range_window_nums = 0\n for i, window in enumerate(self.windows):\n window_type = window.window_type\n if (window_type is 
window_types.RANGE_UNBOUNDED_PRECEDING) or (\n window_type is window_types.RANGE_UNBOUNDED_FOLLOWING) or (\n window_type is window_types.RANGE_SLIDING):\n self.bounded_range_window_index[i] = bounded_range_window_nums\n self.is_bounded_range_window.append(True)\n bounded_range_window_nums += 1\n else:\n self.is_bounded_range_window.append(False)\n\n def generate_func(self, serialized_fn):\n user_defined_funcs = []\n self.window_indexes = []\n self.mapper = []\n for udf in serialized_fn.udfs:\n pandas_agg_function, variable_dict, user_defined_func, window_index = \\\n operation_utils.extract_over_window_user_defined_function(udf)\n user_defined_funcs.extend(user_defined_func)\n self.window_indexes.append(window_index)\n self.mapper.append(eval('lambda value: %s' % pandas_agg_function, variable_dict))\n return self.wrapped_over_window_function, user_defined_funcs\n\n def wrapped_over_window_function(self, boundaries_series):\n import pandas as pd\n OverWindow = flink_fn_execution_pb2.OverWindow\n input_series = boundaries_series[-1]\n # the row number of the arrow format data\n input_cnt = len(input_series[0])\n results = []\n # loop every agg func\n for i in range(len(self.window_indexes)):\n window_index = self.window_indexes[i]\n # the over window which the agg function belongs to\n window = self.windows[window_index]\n window_type = window.window_type\n func = self.mapper[i]\n result = []\n if self.is_bounded_range_window[window_index]:\n window_boundaries = boundaries_series[\n self.bounded_range_window_index[window_index]]\n if window_type is OverWindow.RANGE_UNBOUNDED_PRECEDING:\n # range unbounded preceding window\n for j in range(input_cnt):\n end = window_boundaries[j]\n series_slices = [s.iloc[:end] for s in input_series]\n result.append(func(series_slices))\n elif window_type is OverWindow.RANGE_UNBOUNDED_FOLLOWING:\n # range unbounded following window\n for j in range(input_cnt):\n start = window_boundaries[j]\n series_slices = [s.iloc[start:] for s in input_series]\n result.append(func(series_slices))\n else:\n # range sliding window\n for j in range(input_cnt):\n start = window_boundaries[j * 2]\n end = window_boundaries[j * 2 + 1]\n series_slices = [s.iloc[start:end] for s in input_series]\n result.append(func(series_slices))\n else:\n # unbounded range window or unbounded row window\n if (window_type is OverWindow.RANGE_UNBOUNDED) or (\n window_type is OverWindow.ROW_UNBOUNDED):\n series_slices = [s.iloc[:] for s in input_series]\n func_result = func(series_slices)\n result = [func_result for _ in range(input_cnt)]\n elif window_type is OverWindow.ROW_UNBOUNDED_PRECEDING:\n # row unbounded preceding window\n window_end = window.upper_boundary\n for j in range(input_cnt):\n end = min(j + window_end + 1, input_cnt)\n series_slices = [s.iloc[: end] for s in input_series]\n result.append(func(series_slices))\n elif window_type is OverWindow.ROW_UNBOUNDED_FOLLOWING:\n # row unbounded following window\n window_start = window.lower_boundary\n for j in range(input_cnt):\n start = max(j + window_start, 0)\n series_slices = [s.iloc[start: input_cnt] for s in input_series]\n result.append(func(series_slices))\n else:\n # row sliding window\n window_start = window.lower_boundary\n window_end = window.upper_boundary\n for j in range(input_cnt):\n start = max(j + window_start, 0)\n end = min(j + window_end + 1, input_cnt)\n series_slices = [s.iloc[start: end] for s in input_series]\n result.append(func(series_slices))\n results.append(pd.Series(result))\n return results\n\n\nclass 
StatefulFunctionOperation(Operation):\n\n def __init__(self, spec, keyed_state_backend):\n self.keyed_state_backend = keyed_state_backend\n super(StatefulFunctionOperation, self).__init__(spec)\n\n def finish(self):\n super().finish()\n if self.keyed_state_backend:\n self.keyed_state_backend.commit()\n\n\nTRIGGER_TIMER = 1\n\n\nclass AbstractStreamGroupAggregateOperation(StatefulFunctionOperation):\n\n def __init__(self, spec, keyed_state_backend):\n self.generate_update_before = spec.serialized_fn.generate_update_before\n self.grouping = [i for i in spec.serialized_fn.grouping]\n self.group_agg_function = None\n # If the upstream generates retract message, we need to add an additional count1() agg\n # to track current accumulated messages count. If all the messages are retracted, we need\n # to send a DELETE message to downstream.\n self.index_of_count_star = spec.serialized_fn.index_of_count_star\n self.count_star_inserted = spec.serialized_fn.count_star_inserted\n self.state_cache_size = spec.serialized_fn.state_cache_size\n self.state_cleaning_enabled = spec.serialized_fn.state_cleaning_enabled\n self.data_view_specs = extract_data_view_specs(spec.serialized_fn.udfs)\n super(AbstractStreamGroupAggregateOperation, self).__init__(spec, keyed_state_backend)\n\n def open(self):\n self.group_agg_function.open(FunctionContext(self.base_metric_group))\n\n def close(self):\n self.group_agg_function.close()\n\n def generate_func(self, serialized_fn):\n user_defined_aggs = []\n input_extractors = []\n filter_args = []\n # stores the indexes of the distinct views which the agg functions used\n distinct_indexes = []\n # stores the indexes of the functions which share the same distinct view\n # and the filter args of them\n distinct_info_dict = {}\n for i in range(len(serialized_fn.udfs)):\n user_defined_agg, input_extractor, filter_arg, distinct_index = \\\n extract_user_defined_aggregate_function(\n i, serialized_fn.udfs[i], distinct_info_dict)\n user_defined_aggs.append(user_defined_agg)\n input_extractors.append(input_extractor)\n filter_args.append(filter_arg)\n distinct_indexes.append(distinct_index)\n distinct_view_descriptors = {}\n for agg_index_list, filter_arg_list in distinct_info_dict.values():\n if -1 in filter_arg_list:\n # If there is a non-filter call, we don't need to check filter or not before\n # writing the distinct data view.\n filter_arg_list = []\n # use the agg index of the first function as the key of shared distinct view\n distinct_view_descriptors[agg_index_list[0]] = DistinctViewDescriptor(\n input_extractors[agg_index_list[0]], filter_arg_list)\n\n key_selector = RowKeySelector(self.grouping)\n if len(self.data_view_specs) > 0:\n state_value_coder = DataViewFilterCoder(self.data_view_specs)\n else:\n state_value_coder = PickleCoder()\n\n self.group_agg_function = self.create_process_function(\n user_defined_aggs, input_extractors, filter_args, distinct_indexes,\n distinct_view_descriptors, key_selector, state_value_coder)\n\n return self.process_element_or_timer, []\n\n def process_element_or_timer(self, input_datas: List[Tuple[int, Row, int, Row]]):\n # the structure of the input data:\n # [element_type, element(for process_element), timestamp(for timer), key(for timer)]\n # all the fields are nullable except the \"element_type\"\n for input_data in input_datas:\n if input_data[0] != TRIGGER_TIMER:\n self.group_agg_function.process_element(input_data[1])\n else:\n self.group_agg_function.on_timer(input_data[3])\n return self.group_agg_function.finish_bundle()\n\n 
@abc.abstractmethod\n def create_process_function(self, user_defined_aggs, input_extractors, filter_args,\n distinct_indexes, distinct_view_descriptors, key_selector,\n state_value_coder):\n pass\n\n\nclass StreamGroupAggregateOperation(AbstractStreamGroupAggregateOperation):\n\n def __init__(self, spec, keyed_state_backend):\n super(StreamGroupAggregateOperation, self).__init__(spec, keyed_state_backend)\n\n def create_process_function(self, user_defined_aggs, input_extractors, filter_args,\n distinct_indexes, distinct_view_descriptors, key_selector,\n state_value_coder):\n aggs_handler_function = SimpleAggsHandleFunction(\n user_defined_aggs,\n input_extractors,\n self.index_of_count_star,\n self.count_star_inserted,\n self.data_view_specs,\n filter_args,\n distinct_indexes,\n distinct_view_descriptors)\n\n return GroupAggFunction(\n aggs_handler_function,\n key_selector,\n self.keyed_state_backend,\n state_value_coder,\n self.generate_update_before,\n self.state_cleaning_enabled,\n self.index_of_count_star)\n\n\nclass StreamGroupTableAggregateOperation(AbstractStreamGroupAggregateOperation):\n def __init__(self, spec, keyed_state_backend):\n super(StreamGroupTableAggregateOperation, self).__init__(spec, keyed_state_backend)\n\n def create_process_function(self, user_defined_aggs, input_extractors, filter_args,\n distinct_indexes, distinct_view_descriptors, key_selector,\n state_value_coder):\n aggs_handler_function = SimpleTableAggsHandleFunction(\n user_defined_aggs,\n input_extractors,\n self.data_view_specs,\n filter_args,\n distinct_indexes,\n distinct_view_descriptors)\n return GroupTableAggFunction(\n aggs_handler_function,\n key_selector,\n self.keyed_state_backend,\n state_value_coder,\n self.generate_update_before,\n self.state_cleaning_enabled,\n self.index_of_count_star)\n\n\nclass DataStreamStatelessFunctionOperation(Operation):\n\n def __init__(self, spec):\n super(DataStreamStatelessFunctionOperation, self).__init__(spec)\n\n def open(self):\n for user_defined_func in self.user_defined_funcs:\n if hasattr(user_defined_func, 'open'):\n runtime_context = RuntimeContext(\n self.spec.serialized_fn.runtime_context.task_name,\n self.spec.serialized_fn.runtime_context.task_name_with_subtasks,\n self.spec.serialized_fn.runtime_context.number_of_parallel_subtasks,\n self.spec.serialized_fn.runtime_context.max_number_of_parallel_subtasks,\n self.spec.serialized_fn.runtime_context.index_of_this_subtask,\n self.spec.serialized_fn.runtime_context.attempt_number,\n {p.key: p.value for p in self.spec.serialized_fn.runtime_context.job_parameters}\n )\n user_defined_func.open(runtime_context)\n\n def generate_func(self, serialized_fn):\n func, user_defined_func = operation_utils.extract_data_stream_stateless_function(\n serialized_fn)\n return func, [user_defined_func]\n\n\nclass InternalRuntimeContext(RuntimeContext):\n\n def __init__(self,\n task_name: str,\n task_name_with_subtasks: str,\n number_of_parallel_subtasks: int,\n max_number_of_parallel_subtasks: int,\n index_of_this_subtask: int,\n attempt_number: int,\n job_parameters: Dict[str, str],\n keyed_state_backend: RemoteKeyedStateBackend):\n super(InternalRuntimeContext, self).__init__(\n task_name, task_name_with_subtasks, number_of_parallel_subtasks,\n max_number_of_parallel_subtasks, index_of_this_subtask, attempt_number,\n job_parameters)\n self._keyed_state_backend = keyed_state_backend\n\n def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:\n return 
self._keyed_state_backend.get_value_state(state_descriptor.name, PickleCoder())\n\n def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:\n return self._keyed_state_backend.get_list_state(state_descriptor.name, PickleCoder())\n\n def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:\n return self._keyed_state_backend.get_map_state(state_descriptor.name, PickleCoder(),\n PickleCoder())\n\n def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:\n return self._keyed_state_backend.get_reducing_state(\n state_descriptor.get_name(), PickleCoder(), state_descriptor.get_reduce_function())\n\n\nclass ProcessFunctionOperation(DataStreamStatelessFunctionOperation):\n\n def __init__(self, spec):\n self.timer_service = ProcessFunctionOperation.InternalTimerService()\n self.function_context = ProcessFunctionOperation.InternalProcessFunctionContext(\n self.timer_service)\n super(ProcessFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn) -> tuple:\n func, proc_func = operation_utils.extract_process_function(\n serialized_fn, self.function_context)\n return func, [proc_func]\n\n class InternalProcessFunctionContext(ProcessFunction.Context):\n \"\"\"\n Internal implementation of ProcessFunction.Context.\n \"\"\"\n\n def __init__(self, timer_service: TimerService):\n self._timer_service = timer_service\n self._timestamp = None\n\n def timer_service(self):\n return self._timer_service\n\n def timestamp(self) -> int:\n return self._timestamp\n\n def set_timestamp(self, ts: int):\n self._timestamp = ts\n\n class InternalTimerService(TimerService):\n \"\"\"\n Internal implementation of TimerService.\n \"\"\"\n def __init__(self):\n self._current_watermark = None\n\n def current_processing_time(self) -> int:\n return int(time.time() * 1000)\n\n def current_watermark(self):\n return self._current_watermark\n\n def set_current_watermark(self, wm):\n self._current_watermark = wm\n\n def register_processing_time_timer(self, t: int):\n raise Exception(\"Register timers is only supported on a keyed stream.\")\n\n def register_event_time_timer(self, t: int):\n raise Exception(\"Register timers is only supported on a keyed stream.\")\n\n\nclass KeyedProcessFunctionOperation(StatefulFunctionOperation):\n\n def __init__(self, spec, keyed_state_backend):\n self._collector = KeyedProcessFunctionOperation.InternalCollector()\n internal_timer_service = KeyedProcessFunctionOperation.InternalTimerService(\n self._collector, keyed_state_backend)\n self.function_context = KeyedProcessFunctionOperation.InternalKeyedProcessFunctionContext(\n internal_timer_service)\n self.on_timer_ctx = KeyedProcessFunctionOperation\\\n .InternalKeyedProcessFunctionOnTimerContext(internal_timer_service)\n super(KeyedProcessFunctionOperation, self).__init__(spec, keyed_state_backend)\n\n def generate_func(self, serialized_fn) -> Tuple:\n func, proc_func = operation_utils.extract_keyed_process_function(\n serialized_fn, self.function_context, self.on_timer_ctx, self._collector,\n self.keyed_state_backend)\n return func, [proc_func]\n\n def open(self):\n for user_defined_func in self.user_defined_funcs:\n if hasattr(user_defined_func, 'open'):\n runtime_context = InternalRuntimeContext(\n self.spec.serialized_fn.runtime_context.task_name,\n self.spec.serialized_fn.runtime_context.task_name_with_subtasks,\n self.spec.serialized_fn.runtime_context.number_of_parallel_subtasks,\n 
self.spec.serialized_fn.runtime_context.max_number_of_parallel_subtasks,\n self.spec.serialized_fn.runtime_context.index_of_this_subtask,\n self.spec.serialized_fn.runtime_context.attempt_number,\n {p.key: p.value for p in\n self.spec.serialized_fn.runtime_context.job_parameters},\n self.keyed_state_backend)\n user_defined_func.open(runtime_context)\n\n class InternalCollector(object):\n \"\"\"\n Internal implementation of the Collector. It uses a buffer list to store data to be emitted.\n There will be a header flag for each data type. 0 means it is a proc time timer registering\n request, while 1 means it is an event time timer and 2 means it is a normal data. When\n registering a timer, it must take along with the corresponding key for it.\n \"\"\"\n\n def __init__(self):\n self.buf = []\n\n def collect_reg_proc_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.REGISTER_PROC_TIMER.value,\n a, key, None))\n\n def collect_reg_event_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.REGISTER_EVENT_TIMER.value,\n a, key, None))\n\n def collect_del_proc_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.DEL_PROC_TIMER.value,\n a, key, None))\n\n def collect_del_event_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.DEL_EVENT_TIMER.value,\n a, key, None))\n\n def collect(self, a: Any):\n self.buf.append((operation_utils.KeyedProcessFunctionOutputFlag.NORMAL_DATA.value, a))\n\n def clear(self):\n self.buf.clear()\n\n class InternalKeyedProcessFunctionOnTimerContext(KeyedProcessFunction.OnTimerContext):\n \"\"\"\n Internal implementation of ProcessFunction.OnTimerContext.\n \"\"\"\n\n def __init__(self, timer_service: TimerService):\n self._timer_service = timer_service\n self._time_domain = None\n self._timestamp = None\n self._current_key = None\n\n def get_current_key(self):\n return self._current_key\n\n def set_current_key(self, current_key):\n self._current_key = current_key\n\n def timer_service(self) -> TimerService:\n return self._timer_service\n\n def timestamp(self) -> int:\n return self._timestamp\n\n def set_timestamp(self, ts: int):\n self._timestamp = ts\n\n def time_domain(self) -> TimeDomain:\n return self._time_domain\n\n def set_time_domain(self, td: TimeDomain):\n self._time_domain = td\n\n class InternalKeyedProcessFunctionContext(KeyedProcessFunction.Context):\n \"\"\"\n Internal implementation of KeyedProcessFunction.Context.\n \"\"\"\n\n def __init__(self, timer_service: TimerService):\n self._timer_service = timer_service\n self._timestamp = None\n self._current_key = None\n\n def get_current_key(self):\n return self._current_key\n\n def set_current_key(self, current_key):\n self._current_key = current_key\n\n def timer_service(self) -> TimerService:\n return self._timer_service\n\n def timestamp(self) -> int:\n return self._timestamp\n\n def set_timestamp(self, ts: int):\n self._timestamp = ts\n\n class InternalTimerService(TimerService):\n \"\"\"\n Internal implementation of TimerService.\n \"\"\"\n\n def __init__(self, collector, keyed_state_backend):\n self._collector = collector\n self._keyed_state_backend = keyed_state_backend\n self._current_watermark = None\n\n def current_processing_time(self) -> int:\n return int(time.time() * 1000)\n\n def current_watermark(self) -> int:\n return self._current_watermark\n\n def set_current_watermark(self, wm):\n 
self._current_watermark = wm\n\n def register_processing_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_reg_proc_timer(t, current_key)\n\n def register_event_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_reg_event_timer(t, current_key)\n\n def delete_processing_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_del_proc_timer(t, current_key)\n\n def delete_event_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_del_event_timer(t, current_key)\n", "################################################################################\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\nimport datetime\nimport decimal\n\nfrom pandas.util.testing import assert_frame_equal\n\nfrom pyflink.common import Row\nfrom pyflink.table.types import DataTypes\nfrom pyflink.testing import source_sink_utils\nfrom pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \\\n PyFlinkBlinkStreamTableTestCase, PyFlinkOldStreamTableTestCase\n\n\nclass PandasConversionTestBase(object):\n\n @classmethod\n def setUpClass(cls):\n super(PandasConversionTestBase, cls).setUpClass()\n cls.data = [(1, 1, 1, 1, True, 1.1, 1.2, 'hello', bytearray(b\"aaa\"),\n decimal.Decimal('1000000000000000000.01'), datetime.date(2014, 9, 13),\n datetime.time(hour=1, minute=0, second=1),\n datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],\n Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),\n d=[1, 2])),\n (1, 2, 2, 2, False, 2.1, 2.2, 'world', bytearray(b\"bbb\"),\n decimal.Decimal('1000000000000000000.02'), datetime.date(2014, 9, 13),\n datetime.time(hour=1, minute=0, second=1),\n datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],\n Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),\n d=[1, 2]))]\n cls.data_type = DataTypes.ROW(\n [DataTypes.FIELD(\"f1\", DataTypes.TINYINT()),\n DataTypes.FIELD(\"f2\", DataTypes.SMALLINT()),\n DataTypes.FIELD(\"f3\", DataTypes.INT()),\n DataTypes.FIELD(\"f4\", DataTypes.BIGINT()),\n DataTypes.FIELD(\"f5\", DataTypes.BOOLEAN()),\n DataTypes.FIELD(\"f6\", DataTypes.FLOAT()),\n DataTypes.FIELD(\"f7\", DataTypes.DOUBLE()),\n DataTypes.FIELD(\"f8\", DataTypes.STRING()),\n DataTypes.FIELD(\"f9\", DataTypes.BYTES()),\n DataTypes.FIELD(\"f10\", DataTypes.DECIMAL(38, 18)),\n DataTypes.FIELD(\"f11\", DataTypes.DATE()),\n DataTypes.FIELD(\"f12\", DataTypes.TIME()),\n DataTypes.FIELD(\"f13\", DataTypes.TIMESTAMP(3)),\n DataTypes.FIELD(\"f14\", 
DataTypes.ARRAY(DataTypes.STRING())),\n DataTypes.FIELD(\"f15\", DataTypes.ROW(\n [DataTypes.FIELD(\"a\", DataTypes.INT()),\n DataTypes.FIELD(\"b\", DataTypes.STRING()),\n DataTypes.FIELD(\"c\", DataTypes.TIMESTAMP(3)),\n DataTypes.FIELD(\"d\", DataTypes.ARRAY(DataTypes.INT()))]))], False)\n cls.pdf = cls.create_pandas_data_frame()\n\n @classmethod\n def create_pandas_data_frame(cls):\n data_dict = {}\n for j, name in enumerate(cls.data_type.names):\n data_dict[name] = [cls.data[i][j] for i in range(len(cls.data))]\n # need convert to numpy types\n import numpy as np\n data_dict[\"f1\"] = np.int8(data_dict[\"f1\"])\n data_dict[\"f2\"] = np.int16(data_dict[\"f2\"])\n data_dict[\"f3\"] = np.int32(data_dict[\"f3\"])\n data_dict[\"f4\"] = np.int64(data_dict[\"f4\"])\n data_dict[\"f6\"] = np.float32(data_dict[\"f6\"])\n data_dict[\"f7\"] = np.float64(data_dict[\"f7\"])\n data_dict[\"f15\"] = [row.as_dict() for row in data_dict[\"f15\"]]\n import pandas as pd\n return pd.DataFrame(data=data_dict,\n index=[2., 3.],\n columns=['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',\n 'f10', 'f11', 'f12', 'f13', 'f14', 'f15'])\n\n\nclass PandasConversionTests(PandasConversionTestBase):\n\n def test_from_pandas_with_incorrect_schema(self):\n fields = self.data_type.fields.copy()\n fields[0], fields[7] = fields[7], fields[0] # swap str with tinyint\n wrong_schema = DataTypes.ROW(fields) # should be DataTypes.STRING()\n with self.assertRaisesRegex(Exception, \"Expected a string.*got int8\"):\n self.t_env.from_pandas(self.pdf, schema=wrong_schema)\n\n def test_from_pandas_with_names(self):\n # skip decimal as currently only decimal(38, 18) is supported\n pdf = self.pdf.drop(['f10', 'f11', 'f12', 'f13', 'f14', 'f15'], axis=1)\n new_names = list(map(str, range(len(pdf.columns))))\n table = self.t_env.from_pandas(pdf, schema=new_names)\n self.assertEqual(new_names, table.get_schema().get_field_names())\n table = self.t_env.from_pandas(pdf, schema=tuple(new_names))\n self.assertEqual(new_names, table.get_schema().get_field_names())\n\n def test_from_pandas_with_types(self):\n new_types = self.data_type.field_types()\n new_types[0] = DataTypes.BIGINT()\n table = self.t_env.from_pandas(self.pdf, schema=new_types)\n self.assertEqual(new_types, table.get_schema().get_field_data_types())\n table = self.t_env.from_pandas(self.pdf, schema=tuple(new_types))\n self.assertEqual(new_types, table.get_schema().get_field_data_types())\n\n\nclass PandasConversionITTests(PandasConversionTestBase):\n\n def test_from_pandas(self):\n table = self.t_env.from_pandas(self.pdf, self.data_type, 5)\n self.assertEqual(self.data_type, table.get_schema().to_row_data_type())\n\n table = table.filter(table.f2 < 2)\n table_sink = source_sink_utils.TestAppendSink(\n self.data_type.field_names(),\n self.data_type.field_types())\n self.t_env.register_table_sink(\"Results\", table_sink)\n table.execute_insert(\"Results\").wait()\n actual = source_sink_utils.results()\n self.assert_equals(actual,\n [\"+I[1, 1, 1, 1, true, 1.1, 1.2, hello, [97, 97, 97], \"\n \"1000000000000000000.010000000000000000, 2014-09-13, 01:00:01, \"\n \"1970-01-01 00:00:00.123, [hello, 中文], +I[1, hello, \"\n \"1970-01-01 00:00:00.123, [1, 2]]]\"])\n\n def test_to_pandas(self):\n table = self.t_env.from_pandas(self.pdf, self.data_type)\n result_pdf = table.to_pandas()\n result_pdf.index = self.pdf.index\n self.assertEqual(2, len(result_pdf))\n assert_frame_equal(self.pdf, result_pdf)\n\n def test_empty_to_pandas(self):\n table = self.t_env.from_pandas(self.pdf, 
self.data_type)\n pdf = table.filter(table.f1 < 0).to_pandas()\n self.assertTrue(pdf.empty)\n\n def test_to_pandas_for_retract_table(self):\n table = self.t_env.from_pandas(self.pdf, self.data_type)\n result_pdf = table.group_by(table.f1).select(table.f2.max.alias('f2')).to_pandas()\n import pandas as pd\n import numpy as np\n assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int16([2])}))\n\n result_pdf = table.group_by(\"f2\").select(\"max(f1) as f2\").to_pandas()\n assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int8([1, 1])}))\n\n\nclass StreamPandasConversionTests(PandasConversionITTests,\n PyFlinkOldStreamTableTestCase):\n pass\n\n\nclass BlinkBatchPandasConversionTests(PandasConversionTests,\n PandasConversionITTests,\n PyFlinkBlinkBatchTableTestCase):\n pass\n\n\nclass BlinkStreamPandasConversionTests(PandasConversionITTests,\n PyFlinkBlinkStreamTableTestCase):\n def test_to_pandas_with_event_time(self):\n self.t_env.get_config().get_configuration().set_string(\"parallelism.default\", \"1\")\n # create source file path\n import tempfile\n import os\n tmp_dir = tempfile.gettempdir()\n data = [\n '2018-03-11 03:10:00',\n '2018-03-11 03:10:00',\n '2018-03-11 03:10:00',\n '2018-03-11 03:40:00',\n '2018-03-11 04:20:00',\n '2018-03-11 03:30:00'\n ]\n source_path = tmp_dir + '/test_to_pandas_with_event_time.csv'\n with open(source_path, 'w') as fd:\n for ele in data:\n fd.write(ele + '\\n')\n\n self.t_env.get_config().get_configuration().set_string(\n \"pipeline.time-characteristic\", \"EventTime\")\n\n source_table = \"\"\"\n create table source_table(\n rowtime TIMESTAMP(3),\n WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE\n ) with(\n 'connector.type' = 'filesystem',\n 'format.type' = 'csv',\n 'connector.path' = '%s',\n 'format.ignore-first-line' = 'false',\n 'format.field-delimiter' = ','\n )\n \"\"\" % source_path\n self.t_env.execute_sql(source_table)\n t = self.t_env.from_path(\"source_table\")\n result_pdf = t.to_pandas()\n import pandas as pd\n os.remove(source_path)\n assert_frame_equal(result_pdf, pd.DataFrame(\n data={\"rowtime\": [\n datetime.datetime(2018, 3, 11, 3, 10),\n datetime.datetime(2018, 3, 11, 3, 10),\n datetime.datetime(2018, 3, 11, 3, 10),\n datetime.datetime(2018, 3, 11, 3, 40),\n datetime.datetime(2018, 3, 11, 4, 20),\n datetime.datetime(2018, 3, 11, 3, 30),\n ]}))\n" ]
[ [ "pandas.Series" ], [ "numpy.int8", "pandas.DataFrame", "numpy.float32", "numpy.int64", "numpy.int32", "pandas.util.testing.assert_frame_equal", "numpy.int16", "numpy.float64" ] ]
ggoh29/Simplicial-neural-network-benchmark
[ "9a12bcd054251790d85e3971f5473dcffaa5664b" ]
[ "planetoid_dgi_benchmark.py" ]
[ "from Planetoid.PlanetoidDataset import PlanetoidSCDataset\nfrom models import planetoid_GCN, planetoid_GAT, planetoid_SCN, planetoid_SCConv, planetoid_SAN, planetoid_SAT\nimport torch.nn as nn\nimport torch\nfrom Planetoid.DGI import DGI\nfrom Planetoid.logreg import LogReg\nfrom constants import DEVICE\n\n2708, 79\ndataset = 'fake'\ndataset_features_dct = {'Cora' : 1433, 'CiteSeer' : 3703, 'PubMed' : 500, 'fake' : 2708}\ndataset_classes_dct = {'Cora' : 7, 'CiteSeer' : 6, 'PubMed' : 3 , 'fake' : 3}\ninput_size = dataset_features_dct[dataset]\noutput_size = 512\nnb_epochs = 200\ntest_epochs = 50\nlr = 0.001\nl2_coef = 0.0\npatience = 20\n\nnn_mod = planetoid_GCN\n# nn_mod = planetoid_GAT\n# nn_mod = planetoid_SCN\n# nn_mod = planetoid_SCConv\n# nn_mod = planetoid_SAT\n# nn_mod = planetoid_SAN\n\nprocessor_type = nn_mod[0]\nmodel = nn_mod[1]\n\ndgi = DGI(input_size, output_size, model)\noptimiser = torch.optim.Adam(dgi.parameters(), lr=lr, weight_decay=l2_coef)\nb_xent = nn.BCEWithLogitsLoss()\nxent = nn.CrossEntropyLoss()\n\nif __name__ == \"__main__\":\n\n data = PlanetoidSCDataset('./data', dataset, processor_type)\n data_full, b1, b2 = data.get_full()\n\n cnt_wait = 0\n best = 1e9\n best_t = 0\n bl = False\n b1 = b1.to(DEVICE)\n b2 = b2.to(DEVICE)\n for epoch in range(nb_epochs):\n dgi.train()\n optimiser.zero_grad()\n\n nb_nodes = data_full.X0.shape[0]\n lbl_1 = torch.ones(1, nb_nodes)\n lbl_2 = torch.zeros(1, nb_nodes)\n\n lbl = torch.cat((lbl_1, lbl_2), 1).to(DEVICE)\n\n logits = dgi(data_full, b1, b2, processor_type)\n\n loss = b_xent(logits, lbl)\n\n print('Loss:', loss)\n\n if loss < best:\n best = loss\n best_t = epoch\n cnt_wait = 0\n torch.save(dgi.state_dict(), f'./data/{model.__name__}_dgi.pkl')\n if epoch != 0:\n bl = True\n else:\n if bl:\n cnt_wait += 1\n\n if cnt_wait == patience:\n print('Early stopping!')\n break\n\n loss.backward()\n optimiser.step()\n\n print('Loading {}th epoch'.format(best_t))\n dgi.load_state_dict(torch.load(f'./data/{model.__name__}_dgi.pkl'))\n\n embeds, _ = dgi.embed(data_full, b1, b2)\n # embeds = data_full.X0.to(DEVICE)\n # output_size = 79\n # with open(\"./embeddings.py\", 'w') as f:\n # f.write(f'embeddings = {embeds.tolist()}')\n # with open(\"./labels.py\", 'w') as f:\n # f.write(f'labels = {data.get_labels().tolist()}')\n train_embs = data.get_train_embeds(embeds)\n val_embs = data.get_val_embeds(embeds)\n test_embs = data.get_test_embeds(embeds)\n\n train_lbls = data.get_train_labels().to(DEVICE)\n x_unique = train_lbls.unique(sorted=True)\n x_unique_count = torch.stack([(train_lbls == x_u).sum() for x_u in x_unique])\n val_lbls = data.get_val_labels().to(DEVICE)\n test_lbls = data.get_test_labels().to(DEVICE)\n\n tot = torch.zeros(1).to(DEVICE)\n\n accs = []\n\n for _ in range(test_epochs):\n log = LogReg(output_size, dataset_classes_dct[dataset])\n opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)\n log.to(DEVICE)\n\n pat_steps = 0\n best_acc = torch.zeros(1)\n best_acc = best_acc.to(DEVICE)\n\n for _ in range(100):\n log.train()\n opt.zero_grad()\n\n logits = log(train_embs)\n loss = xent(logits, train_lbls)\n\n loss.backward()\n opt.step()\n\n logits = log(test_embs)\n preds = torch.argmax(logits, dim=1)\n acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]\n accs.append(acc * 100)\n print(model.__name__)\n print(acc)\n tot += acc\n\n print('Average accuracy:', tot / test_epochs)\n\n accs = torch.stack(accs)\n print(accs.mean())\n print(accs.std())\n" ]
[ [ "torch.sum", "torch.ones", "torch.stack", "torch.load", "torch.argmax", "torch.nn.CrossEntropyLoss", "torch.nn.BCEWithLogitsLoss", "torch.zeros", "torch.cat" ] ]
JohnLauFoo/clc_packages_Yu
[ "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991" ]
[ "matplotlib-3.4.3/matplotlib-3.4.3/examples/mplot3d/lines3d.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/hat_graph.py" ]
[ "\"\"\"\n================\nParametric Curve\n================\n\nThis example demonstrates plotting a parametric curve in 3D.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nax = plt.figure().add_subplot(projection='3d')\n\n# Prepare arrays x, y, z\ntheta = np.linspace(-4 * np.pi, 4 * np.pi, 100)\nz = np.linspace(-2, 2, 100)\nr = z**2 + 1\nx = r * np.sin(theta)\ny = r * np.cos(theta)\n\nax.plot(x, y, z, label='parametric curve')\nax.legend()\n\nplt.show()\n", "\"\"\"\n=========\nHat graph\n=========\nThis example shows how to create a `hat graph`_ and how to annotate it with\nlabels.\n\n.. _hat graph: https://doi.org/10.1186/s41235-019-0182-3\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef hat_graph(ax, xlabels, values, group_labels):\n \"\"\"\n Create a hat graph.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n The Axes to plot into.\n xlabels : list of str\n The category names to be displayed on the x-axis.\n values : (M, N) array-like\n The data values.\n Rows are the groups (len(group_labels) == M).\n Columns are the categories (len(xlabels) == N).\n group_labels : list of str\n The group labels displayed in the legend.\n \"\"\"\n\n def label_bars(heights, rects):\n \"\"\"Attach a text label on top of each bar.\"\"\"\n for height, rect in zip(heights, rects):\n ax.annotate(f'{height}',\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 4), # 4 points vertical offset.\n textcoords='offset points',\n ha='center', va='bottom')\n\n values = np.asarray(values)\n x = np.arange(values.shape[1])\n ax.set_xticks(x)\n ax.set_xticklabels(xlabels)\n spacing = 0.3 # spacing between hat groups\n width = (1 - spacing) / values.shape[0]\n heights0 = values[0]\n for i, (heights, group_label) in enumerate(zip(values, group_labels)):\n style = {'fill': False} if i == 0 else {'edgecolor': 'black'}\n rects = ax.bar(x - spacing/2 + i * width, heights - heights0,\n width, bottom=heights0, label=group_label, **style)\n label_bars(heights, rects)\n\n\n# initialise labels and a numpy array make sure you have\n# N labels of N number of values in the array\nxlabels = ['I', 'II', 'III', 'IV', 'V']\nplayerA = np.array([5, 15, 22, 20, 25])\nplayerB = np.array([25, 32, 34, 30, 27])\n\nfig, ax = plt.subplots()\nhat_graph(ax, xlabels, [playerA, playerB], ['Player A', 'Player B'])\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_xlabel('Games')\nax.set_ylabel('Score')\nax.set_ylim(0, 60)\nax.set_title('Scores by number of game and players')\nax.legend()\n\nfig.tight_layout()\nplt.show()\n#############################################################################\n#\n# .. admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `matplotlib.axes.Axes.bar` / `matplotlib.pyplot.bar`\n# - `matplotlib.axes.Axes.annotate` / `matplotlib.pyplot.annotate`\n" ]
[ [ "matplotlib.pyplot.figure", "numpy.cos", "matplotlib.pyplot.show", "numpy.sin", "numpy.linspace" ], [ "numpy.asarray", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "numpy.array" ] ]
AhmetCanSolak/aydin
[ "e8bc81ee88c96e0f34986df30a63c96468a45f70" ]
[ "aydin/it/demo/n2s/nn/2D_generic.py" ]
[ "# flake8: noqa\nimport os\nimport time\n\nimport numpy\nimport numpy as np\nfrom skimage.data import camera\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\nfrom skimage.metrics import structural_similarity as ssim\n\nfrom aydin.features.standard_features import StandardFeatureGenerator\nfrom aydin.io.datasets import newyork, pollen, normalise, add_noise, lizard, characters\nfrom aydin.it.fgr import ImageTranslatorFGR\nfrom aydin.regression.perceptron import PerceptronRegressor\n\n\"\"\"\n Demo for self-supervised denoising using camera image with synthetic noise\n\"\"\"\n\n\ndef demo(image, name):\n image = normalise(image.astype(np.float32))\n noisy = add_noise(image)\n # noisy=image\n\n start_time = time.time()\n\n generator = StandardFeatureGenerator()\n regressor = PerceptronRegressor()\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy)\n\n elapsedtime = time.time() - start_time\n print(f\"time elapsed: {elapsedtime} s\")\n\n start = time.time()\n denoised = it.translate(noisy)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n denoised = numpy.clip(denoised, 0, 1)\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\"denoised:\", psnr_denoised, ssim_denoised)\n\n import matplotlib.pyplot as plt\n\n plt.figure(figsize=(2.7 * 5, 5), dpi=300)\n plt.subplot(1, 3, 1)\n plt.imshow(normalise(noisy), cmap='gray')\n plt.axis('off')\n plt.title(f'Noisy \\nPSNR: {psnr_noisy:.3f}, SSIM: {ssim_noisy:.3f}')\n plt.subplot(1, 3, 2)\n plt.imshow(normalise(denoised), cmap='gray')\n plt.axis('off')\n plt.title(f'Denoised \\nPSNR: {psnr_denoised:.3f}, SSIM: {ssim_denoised:.3f}')\n plt.subplot(1, 3, 3)\n plt.imshow(normalise(image), cmap='gray')\n plt.axis('off')\n plt.title('Original')\n plt.subplots_adjust(left=0.01, right=0.99, top=0.95, bottom=0.01, hspace=0.1)\n os.makedirs(\"../../../demo_results\", exist_ok=True)\n plt.savefig(f'../../demo_results/n2s_nn_2D_{name}.png')\n\n plt.clf()\n plt.plot(regressor.loss_history[0]['training'], 'r')\n plt.plot(regressor.loss_history[0]['validation'], 'b')\n plt.legend(['training', 'validation'])\n plt.show()\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(normalise(image), name='image')\n viewer.add_image(normalise(noisy), name='noisy')\n viewer.add_image(normalise(denoised), name='denoised')\n\n\nif __name__ == \"__main__\":\n camera_image = camera()\n demo(camera_image, \"camera\")\n lizard_image = lizard()\n demo(lizard_image, \"lizard\")\n pollen_image = pollen()\n demo(pollen_image, \"pollen\")\n newyork_image = newyork()\n demo(newyork_image, \"newyork\")\n characters_image = characters()\n demo(characters_image, \"characters\")\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "numpy.clip", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "matplotlib.pyplot.plot" ] ]
xhuohai/kendryte-model-compiler
[ "b6ef72b5db83a3b421046150ff3e77843c2be5bb" ]
[ "layer_list_to_darknet.py" ]
[ "'''\n * Copyright 2018 Canaan Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n '''\n\nimport tensor_list_to_layer_list\nimport numpy\n\n\ndef gen_config_file(layers):\n ret = []\n for layer in layers:\n assert (isinstance(layer, tensor_list_to_layer_list.LayerBase))\n ret.append('[' + layer.name + ']')\n for k, v in layer.config.items():\n ret.append(str(k) + '=' + str(v))\n ret.append('')\n\n return '\\n'.join(ret)\n\n\ndef gen_weights(layers):\n ret = [numpy.array([0, 2, 0, 0], 'int32').tobytes()] # header\n\n for layer in layers:\n assert (isinstance(layer, tensor_list_to_layer_list.LayerBase))\n if type(layer) in (\n tensor_list_to_layer_list.LayerNet,\n tensor_list_to_layer_list.LayerPool\n ):\n pass\n elif isinstance(layer, tensor_list_to_layer_list.LayerConvolutional) or \\\n isinstance(layer, tensor_list_to_layer_list.LayerDepthwiseConvolutional):\n if str(layer.config['batch_normalize']) != '0':\n gamma = numpy.array(layer.batch_normalize_gamma, 'float32')\n beta = numpy.array(layer.batch_normalize_beta, 'float32')\n bias = numpy.array(layer.batch_normalize_moving_mean, 'float32')\n if layer.bias is not None:\n bias = bias - numpy.array(layer.bias, 'float32')\n variance = numpy.array(layer.batch_normalize_moving_variance, 'float32')\n\n ret.append(beta.tobytes())\n ret.append(gamma.tobytes())\n ret.append(bias.tobytes())\n ret.append(variance.tobytes())\n else:\n bias = numpy.array(layer.bias, 'float32')\n ret.append(bias.tobytes())\n\n weights = numpy.array(layer.weights, 'float32')\n weights_trans = numpy.transpose(weights, [3, 2, 0, 1])\n ret.append(weights_trans.tobytes())\n else:\n print('unknown layer:', layer.name, type(layer))\n\n return b''.join(ret)\n" ]
[ [ "numpy.array", "numpy.transpose" ] ]
837477/COMTRIS_AI
[ "2cb49a9a9c5de785d6b1a864abf8d5eeb6db3302" ]
[ "src/comtris.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom pymongo import MongoClient\n\n\nclass Net(nn.Module):\n    def __init__(self, D_in, D_out):\n        super(Net,self).__init__()\n        self.layer_1 = nn.Linear(D_in, D_out*2)\n        self.layer_out = nn.Linear(D_out*2, D_out)\n        self.relu = nn.ReLU()\n\n    def forward(self, x):\n        x = self.layer_1(x)\n        x = self.relu(x)\n        x = self.layer_out(x) \n        return x\n\n\nclass Comtris():\n    def __init__(self):\n        self.db = MongoClient(os.environ['COMTRIS_MONGODB_URI'])['COMTRIS']\n        self.model = {\n            \"CPU\": torch.load(\"./model/CPU\"),\n            \"VGA\": torch.load(\"./model/VGA\"),\n            \"M/B\": torch.load(\"./model/MB\"),\n            \"RAM\": torch.load(\"./model/RAM\"),\n            \"SSD\": torch.load(\"./model/SSD\"),\n            \"POWER\": torch.load(\"./model/POWER\"),\n        }\n        for part in self.model:\n            self.model[part].eval() \n        self.index_dict = {\n            \"CPU\": self.db['master_config'].find_one({\"key\": \"CPU_dict\"})['value'],\n            \"VGA\": self.db['master_config'].find_one({\"key\": \"VGA_dict\"})['value'],\n            \"M/B\": self.db['master_config'].find_one({\"key\": \"M/B_dict\"})['value'],\n            \"RAM\": self.db['master_config'].find_one({\"key\": \"RAM_dict\"})['value'],\n            \"SSD\": self.db['master_config'].find_one({\"key\": \"SSD_dict\"})['value'],\n            \"POWER\": self.db['master_config'].find_one({\"key\": \"POWER_dict\"})['value']\n        }\n        self.part_needs = self.db['master_config'].find_one({\"key\": \"needs\"})['value']\n        self.index = {}\n        for part in self.index_dict:\n            for p_i in self.index_dict[part][\"part_to_index\"]:\n                self.index_dict[part][\"part_to_index\"][p_i] = int(self.index_dict[part][\"part_to_index\"][p_i])\n            self.index.update(self.index_dict[part][\"part_to_index\"])\n    \n    def part(self):\n        part = {\n            \"CPU\": list(self.index_dict['CPU']['part_to_index'].keys()),\n            \"VGA\": list(self.index_dict['VGA']['part_to_index'].keys()),\n            \"M/B\": list(self.index_dict['M/B']['part_to_index'].keys()),\n            \"RAM\": list(self.index_dict['RAM']['part_to_index'].keys()),\n            \"SSD\": list(self.index_dict['SSD']['part_to_index'].keys()),\n            \"POWER\": list(self.index_dict['POWER']['part_to_index'].keys())\n        }\n        return part\n    \n    def needs(self):\n        return self.part_needs\n\n    def prediction(self, parts, target):\n        # Check the number of prediction inputs\n        if len(parts) != len(self.part_needs[target]):\n            return False\n        \n        if target not in {\"CPU\", \"VGA\", \"M/B\", \"RAM\", \"SSD\", \"POWER\"}:\n            return False\n        \n        # Preprocess the prediction inputs\n        x = []\n        for part in parts:\n            x.append(self.index[part])\n        x = torch.FloatTensor(x)\n        \n        # Extract the predicted value\n        y = list(self.model[target](x))\n        y = y.index(max(y))\n        result = self.index_dict[target]['index_to_part'][str(y)]\n        \n        return result\n\n\nif __name__ == \"__main__\":\n    CT = Comtris()\n\n    # The order is very important!!\n    # [\"AMD 3100\", \"ASROCK A320M\", \"ASROCK RX570\", \"3200 8G\", \"500GB\", \"600W\"]\n    # [CPU, M/B, VGA, RAM, SSD, POWER]\n\n    needs = CT.needs()\n    part = CT.part()\n    # CPU TEST\n    '''\n    for i in range(5):\n        x = []\n        for p in part:\n            if p not in needs['CPU']:\n                continue\n            x.append(np.random.choice(part[p]))\n        result = CT.prediction(x, \"CPU\")\n        print(x)\n        print(result)\n    print(\"#\" * 100)\n    # VGA TEST\n    for i in range(5):\n        x = []\n        for p in part:\n            if p not in needs['VGA']:\n                continue\n            x.append(np.random.choice(part[p]))\n        result = CT.prediction(x, \"VGA\")\n        print(x)\n        print(result)\n    print(\"#\" * 100)\n    '''\n\n    result = CT.prediction([\"GTX1660SUPER ASUS\", \"A320 ASUS\", \"3200 16GB\", \"1TB\", \"600W\"], \"CPU\")\n    print(result)\n" ]
[ [ "torch.nn.ReLU", "torch.FloatTensor", "torch.nn.Linear", "torch.load" ] ]
tum-db/partitioned-filters
[ "56c20102715a442cbec9ecb732d41de15b31c828" ]
[ "python/benchmark_plotter/latexify.py" ]
[ "import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator, FuncFormatter\nimport numpy as np\n\n\ndef latexify(fig_width=None, fig_height=None, columns=1):\n \"\"\"Set up matplotlib's RC params for LaTeX plotting.\n Call this before plotting a figure.\n\n Parameters\n ----------\n fig_width : float, optional, inches\n fig_height : float, optional, inches\n columns : {1, 2}\n \"\"\"\n\n # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples\n\n # Width and max height in inches for IEEE journals taken from\n # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf\n\n assert (columns in [1, 2])\n\n if fig_width is None:\n fig_width = 3.39 if columns == 1 else 6.9 # width in inches\n\n if fig_height is None:\n golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio\n fig_height = fig_width * golden_mean # height in inches\n\n MAX_HEIGHT_INCHES = 32.0\n if fig_height > MAX_HEIGHT_INCHES:\n print(\"WARNING: fig_height too large:\" + fig_height +\n \"so will reduce to\" + MAX_HEIGHT_INCHES + \"inches.\")\n fig_height = MAX_HEIGHT_INCHES\n\n params = {'backend': 'ps',\n 'pgf.rcfonts': False,\n 'axes.labelsize': 8, # fontsize for x and y labels (was 10)\n 'axes.titlesize': 8,\n 'font.size': 8, # was 10\n 'legend.fontsize': 6, # was 10\n 'legend.handlelength': 1.5,\n 'legend.handletextpad': 0.3,\n 'legend.labelspacing': 0.3, # was 0.1\n 'legend.columnspacing': 0.3,\n 'legend.borderpad': 0.3,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'axes.labelpad': 1,\n 'axes.titlepad': 2,\n 'text.usetex': True,\n 'figure.figsize': [fig_width, fig_height],\n 'font.family': 'serif',\n 'text.latex.preamble': r'\\usepackage{amssymb} \\usepackage{ifsym}'\n }\n\n matplotlib.rcParams.update(params)\n\n\ndef format_axes(ax):\n spine_color = 'black'\n for spine in ['top', 'right']:\n ax.spines[spine].set_visible(False)\n\n for spine in ['left', 'bottom']:\n ax.spines[spine].set_color(spine_color)\n ax.spines[spine].set_linewidth(0.5)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for axis in [ax.xaxis, ax.yaxis]:\n axis.set_tick_params(direction='out', color=spine_color)\n\n ax.yaxis.set_minor_locator(AutoMinorLocator(n=2))\n ax.yaxis.grid(True)\n ax.yaxis.grid(b=True, which='minor', linestyle=':')\n ax.tick_params(axis='both', which='major', pad=0.5)\n\n return ax\n\n\ndef barAxes(ax):\n ax.set_axisbelow(True)\n\n\ndef cm2inch(value):\n return value / 2.54\n\n\ndef reorderLegend(ax=None, order=None, unique=False):\n if ax is None: ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) # sort both labels and handles by labels\n if order is not None: # Sort according to a given list (not necessarily complete)\n keys = dict(zip(order, range(len(order))))\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t, keys=keys: keys.get(t[0], np.inf)))\n if unique:\n labels, handles = zip(*unique_everseen(zip(labels, handles), key=labels)) # Keep only the first of each handle\n return handles, labels\n\n\ndef unique_everseen(seq, key=None):\n seen = set()\n seen_add = seen.add\n return [x for x, k in zip(seq, key) if not (k in seen or seen_add(k))]\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.gca", "matplotlib.ticker.AutoMinorLocator", "matplotlib.rcParams.update" ] ]
parneetk/PyTorch-Style-Transfer
[ "f38ec4b1cd57cee4304787b054a6e6c9ce3b00ff" ]
[ "experiments/net/mynn.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Hang Zhang\n## ECE Department, Rutgers University\n## Email: [email protected]\n## Copyright (c) 2017\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nclass MultConst(nn.Module):\n\tdef forward(self, input):\n\t\treturn 255*input\n\n\nclass GramMatrix(nn.Module):\n\tdef forward(self, y):\n\t\t(b, ch, h, w) = y.size()\n\t\tfeatures = y.view(b, ch, w * h)\n\t\tfeatures_t = features.transpose(1, 2)\n\t\tgram = features.bmm(features_t) / (ch * h * w)\n\t\treturn gram\n\t\n\nclass InstanceNormalization(nn.Module):\n\t\"\"\"InstanceNormalization\n\tImproves convergence of neural-style.\n\tref: https://arxiv.org/pdf/1607.08022.pdf\n\t\"\"\"\n\n\tdef __init__(self, dim, eps=1e-5):\n\t\tsuper(InstanceNormalization, self).__init__()\n\t\tself.weight = nn.Parameter(torch.FloatTensor(dim))\n\t\tself.bias = nn.Parameter(torch.FloatTensor(dim))\n\t\tself.eps = eps\n\t\tself._reset_parameters()\n\n\tdef _reset_parameters(self):\n\t\tself.weight.data.uniform_()\n\t\tself.bias.data.zero_()\n\n\tdef forward(self, x):\n\t\tn = x.size(2) * x.size(3)\n\t\tt = x.view(x.size(0), x.size(1), n)\n\t\tmean = torch.mean(t, 2).unsqueeze(2).expand_as(x)\n\t\t# Calculate the biased var. torch.var returns unbiased var\n\t\tvar = torch.var(t, 2).unsqueeze(2).expand_as(x) * ((n - 1) / float(n))\n\t\tscale_broadcast = self.weight.unsqueeze(1).unsqueeze(1).unsqueeze(0)\n\t\tscale_broadcast = scale_broadcast.expand_as(x)\n\t\tshift_broadcast = self.bias.unsqueeze(1).unsqueeze(1).unsqueeze(0)\n\t\tshift_broadcast = shift_broadcast.expand_as(x)\n\t\tout = (x - mean) / torch.sqrt(var + self.eps)\n\t\tout = out * scale_broadcast + shift_broadcast\n\t\treturn out\n\n\nclass Basicblock(nn.Module):\n\tdef __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):\n\t\tsuper(Basicblock, self).__init__()\n\t\tself.downsample = downsample\n\t\tif self.downsample is not None:\n\t\t\tself.residual_layer = nn.Conv2d(inplanes, planes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=stride)\n\t\tconv_block=[]\n\t\tconv_block+=[norm_layer(inplanes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tConvLayer(inplanes, planes, kernel_size=3, stride=stride),\n\t\t\t\t\t\t\t\tnorm_layer(planes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tConvLayer(planes, planes, kernel_size=3, stride=1),\n\t\t\t\t\t\t\t\tnorm_layer(planes)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\t\n\tdef forward(self, input):\n\t\tif self.downsample is not None:\n\t\t\tresidual = self.residual_layer(input)\n\t\telse:\n\t\t\tresidual = input\n\t\treturn residual + self.conv_block(input)\n\t\t\t\n\nclass UpBasicblock(nn.Module):\n\t\"\"\" Up-sample residual block (from MSG-Net paper)\n\tEnables passing identity all the way through the generator\n\tref https://arxiv.org/abs/1703.06953\n\t\"\"\"\n\tdef __init__(self, inplanes, planes, stride=2, norm_layer=nn.BatchNorm2d):\n\t\tsuper(UpBasicblock, self).__init__()\n\t\tself.residual_layer = UpsampleConvLayer(inplanes, planes,\n \t\t\t \t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=1, 
upsample=stride)\n\t\tconv_block=[]\n\t\tconv_block+=[norm_layer(inplanes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tUpsampleConvLayer(inplanes, planes, kernel_size=3, stride=1, upsample=stride),\n\t\t\t\t\t\t\t\tnorm_layer(planes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tConvLayer(planes, planes, kernel_size=3, stride=1)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\t\n\tdef forward(self, input):\n\t\treturn self.residual_layer(input) + self.conv_block(input)\n\n\nclass Bottleneck(nn.Module):\n\t\"\"\" Pre-activation residual block\n\tIdentity Mapping in Deep Residual Networks\n\tref https://arxiv.org/abs/1603.05027\n\t\"\"\"\n\tdef __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):\n\t\tsuper(Bottleneck, self).__init__()\n\t\tself.expansion = 4\n\t\tself.downsample = downsample\n\t\tif self.downsample is not None:\n\t\t\tself.residual_layer = nn.Conv2d(inplanes, planes * self.expansion,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=stride)\n\t\tconv_block = []\n\t\tconv_block += [norm_layer(inplanes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(inplanes, planes, kernel_size=1, stride=1)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tConvLayer(planes, planes, kernel_size=3, stride=stride)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(planes, planes * self.expansion, kernel_size=1, stride=1)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\t\t\n\tdef forward(self, x):\n\t\tif self.downsample is not None:\n\t\t\tresidual = self.residual_layer(x)\n\t\telse:\n\t\t\tresidual = x\n\t\treturn residual + self.conv_block(x)\n\n\nclass UpBottleneck(nn.Module):\n\t\"\"\" Up-sample residual block (from MSG-Net paper)\n\tEnables passing identity all the way through the generator\n\tref https://arxiv.org/abs/1703.06953\n\t\"\"\"\n\tdef __init__(self, inplanes, planes, stride=2, norm_layer=nn.BatchNorm2d):\n\t\tsuper(UpBottleneck, self).__init__()\n\t\tself.expansion = 4\n\t\tself.residual_layer = UpsampleConvLayer(inplanes, planes * self.expansion,\n \t\t\t \t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=1, upsample=stride)\n\t\tconv_block = []\n\t\tconv_block += [norm_layer(inplanes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(inplanes, planes, kernel_size=1, stride=1)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tUpsampleConvLayer(planes, planes, kernel_size=3, stride=1, upsample=stride)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(planes, planes * self.expansion, kernel_size=1, stride=1)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\n\tdef forward(self, x):\n\t\treturn self.residual_layer(x) + self.conv_block(x)\n\n\nclass ConvLayer(torch.nn.Module):\n\tdef __init__(self, in_channels, out_channels, kernel_size, stride):\n\t\tsuper(ConvLayer, self).__init__()\n\t\treflection_padding = int(np.floor(kernel_size / 2))\n\t\tself.reflection_pad = nn.ReflectionPad2d(reflection_padding)\n\t\tself.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n\n\tdef forward(self, x):\n\t\tout = self.reflection_pad(x)\n\t\tout = self.conv2d(out)\n\t\treturn out\n\nclass UpsampleConvLayer(torch.nn.Module):\n\t\"\"\"UpsampleConvLayer\n\tUpsamples the input and then does a convolution. 
This method gives better results\n\tcompared to ConvTranspose2d.\n\tref: http://distill.pub/2016/deconv-checkerboard/\n\t\"\"\"\n\n\tdef __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):\n\t\tsuper(UpsampleConvLayer, self).__init__()\n\t\tself.upsample = upsample\n\t\tif upsample:\n\t\t\tself.upsample_layer = torch.nn.UpsamplingNearest2d(scale_factor=upsample)\n\t\tself.reflection_padding = int(np.floor(kernel_size / 2))\n\t\tif self.reflection_padding != 0:\n\t\t\tself.reflection_pad = nn.ReflectionPad2d(self.reflection_padding)\n\t\tself.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n\n\tdef forward(self, x):\n\t\tif self.upsample:\n\t\t\tx = self.upsample_layer(x)\n\t\tif self.reflection_padding != 0:\n\t\t\tx = self.reflection_pad(x)\n\t\tout = self.conv2d(x)\n\t\treturn out\n\n" ]
[ [ "torch.FloatTensor", "torch.var", "torch.sqrt", "numpy.floor", "torch.nn.UpsamplingNearest2d", "torch.nn.Conv2d", "torch.nn.ReflectionPad2d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.mean" ] ]
dennisbappert/sod-using-vit
[ "24ed0692d8eb09adf2f74e69a132f267a4137b68" ]
[ "train.py" ]
[ "import datetime\nimport os\nimport random\nimport time\nimport warnings\n\nimport hydra\nimport torch\nfrom hydra.utils import instantiate\nfrom omegaconf import DictConfig, OmegaConf\nfrom torch.cuda.amp import autocast, GradScaler\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, WeightedRandomSampler\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom lib.data import ToDeviceFunction, PrefetchLoader\nfrom lib.utils import print_torch_setup, mkdir, save_on_master, MetricLogger, flatten_dict, SmoothedValue, torchvision\nfrom lib.utils.denormalize import denormalize\nfrom lib.utils.smoothing import gaussian_blur\n\n\[email protected](config_path='conf', config_name='config')\ndef main(cfg: DictConfig) -> None:\n if cfg.trainer.print_torch_setup is True:\n print_torch_setup()\n\n if cfg.trainer.seed is not None:\n random.seed(cfg.trainer.seed)\n torch.manual_seed(cfg.trainer.seed)\n torch.backends.cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n assert torch.cuda.is_available(), 'This code requires a GPU to train'\n torch.backends.cudnn.benchmark = True\n assert cfg.trainer.output_dir, 'You need to specify an output directory'\n\n mkdir(cfg.trainer.output_dir)\n experiment_name = time.strftime(\"%Y%m%d-%H%M%S\")\n print(f'The current experiment will be tracked as {experiment_name}')\n output_dir = os.path.join(cfg.trainer.output_dir, experiment_name)\n print(f'Results will be saved in {output_dir}')\n writer = SummaryWriter(output_dir)\n\n # this is just a workaround for now\n # hparams logging to a file and as text into tensorboard\n # it is certainly not perfect... 
:/\n hparams = flatten_dict(OmegaConf.to_container(cfg, resolve=True))\n hparams_as_str = [str(k) + ' >>> ' + str(v) + '\\n' for k, v in hparams.items()]\n # TODO: this seems to not work properly!\n # writer.add_hparams(hparams, metric_dict={'acc': 1}, run_name=experiment_name)\n with open(os.path.join(output_dir, 'hparams.txt'), 'w', encoding='utf-8') as hparams_file:\n for line in hparams_as_str:\n hparams_file.write(line)\n writer.add_text('hparams', '\\r\\n'.join(hparams_as_str), global_step=0)\n\n device = torch.device(cfg.trainer.device)\n assert device.type == 'cuda', 'Only GPU based training is supported'\n\n dataset = instantiate(cfg.dataset.train)\n\n assert cfg.dataset.val_split is not None, 'Handling a separate validation set is not implemented as of now!'\n train_size = int((1 - cfg.dataset.val_split) * len(dataset))\n val_size = len(dataset) - train_size\n train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])\n\n train_sampler_weights = dataset.make_weights_for_dataset_sampling(train_dataset)\n sampler = WeightedRandomSampler(train_sampler_weights, num_samples=cfg.dataset.train_samples_per_epoch,\n replacement=True)\n train_collate_fn = dataset.get_collate_fn(mode='train', channels_last=cfg.trainer.channels_last)\n train_dataloader = instantiate(cfg.dataloader.train,\n dataset=train_dataset,\n collate_fn=train_collate_fn,\n sampler=sampler)\n\n val_collate_fn = dataset.get_collate_fn(mode='val', channels_last=cfg.trainer.channels_last)\n val_dataloader = instantiate(cfg.dataloader.val,\n dataset=val_dataset,\n collate_fn=val_collate_fn)\n\n # this handler moves a batch to the GPU as uint8, casts it to a float after transferring it\n # and normalizes the images\n to_device_handler = ToDeviceFunction(device=device, mean=cfg.dataset.mean, std=cfg.dataset.std)\n\n # the prefetch loader prefetches the next batch onto the GPU which makes up a couple\n # of percent in the training loop\n train_dataloader = PrefetchLoader(loader=train_dataloader,\n to_device_handler=to_device_handler)\n\n # val_dataloader = PrefetchLoader(loader=val_dataloader,\n # to_device_handler=to_device_handler)\n\n model = instantiate(cfg.models.model,\n device=device\n ).to(device)\n\n if cfg.trainer.channels_last is True:\n model = model.to(memory_format=torch.channels_last)\n\n if cfg.trainer.anomaly_detection is True:\n torch.autograd.set_detect_anomaly(mode=True)\n\n params_to_optimize = [\n {\"params\": [p for p in model.parameters()\n if p.requires_grad]}\n ]\n\n optimizer = instantiate(cfg.optimizer, params_to_optimize)\n\n scaler = GradScaler(enabled=cfg.trainer.amp)\n\n if cfg.trainer.resume is not None:\n if os.path.isfile(cfg.trainer.resume):\n print(\"Trying to load checkpoint '{}'\".format(cfg.trainer.resume))\n\n if cfg.trainer.from_u2net_checkpoint is True:\n checkpoint = torch.load(cfg.trainer.resume, map_location=device)\n model.load_state_dict(checkpoint)\n else:\n checkpoint = torch.load(cfg.trainer.resume, map_location=device)\n model.load_state_dict(checkpoint['model'])\n\n if cfg.trainer.weights_only is False:\n cfg.trainer.start_epoch = checkpoint['epoch']\n optimizer.load_state_dict(checkpoint['optimizer'])\n scaler.load_state_dict(checkpoint['scaler'])\n\n print(f'Loaded checkpoint {cfg.trainer.resume}. 
Resuming training at epoch {cfg.trainer.start_epoch}')\n else:\n warnings.warn(f'Checkpoint f{cfg.trainer.resume} not found!')\n\n print(\"Start training...\")\n start_time = time.time()\n\n if cfg.trainer.dry_run is True:\n print(\"Doing dry run, running val on train dataset...\")\n # validate_one_epoch(writer, model, train_dataloader, device, 0, cfg.trainer.print_freq)\n return\n\n for epoch in range(cfg.trainer.start_epoch, cfg.trainer.epochs):\n train_one_epoch(writer, device, model, optimizer, scaler, train_dataloader, epoch, cfg)\n # validate_one_epoch(writer, model, val_dataloader, epoch, cfg)\n\n checkpoint = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scaler': scaler.state_dict(),\n 'epoch': epoch,\n 'cfg': cfg}\n save_on_master(\n checkpoint,\n os.path.join(output_dir, 'model_{}.pth'.format(epoch)))\n save_on_master(\n checkpoint,\n os.path.join(output_dir, 'checkpoint.pth'))\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\ndef create_metric_logger(train, epoch, writer):\n if train:\n prefix = 'train'\n else:\n prefix = 'val'\n\n metric_logger = MetricLogger(epoch=epoch, delimiter=\" \", writer=writer, experiment_prefix=prefix)\n\n if train:\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'), log=False)\n metric_logger.add_meter('samples/s', SmoothedValue(window_size=10, fmt='{value}'), log=True, log_value='median',\n title='samples per second')\n metric_logger.add_meter('loss', SmoothedValue(), log=True, log_value='global_avg',\n title='loss')\n\n return metric_logger\n\n\ndef criterion(aux, y, metadata, device):\n # aux ^= [d0, d1, d2, d3, d4, d5, d6]\n\n def masked_l1_loss(y_hat, y, mask):\n loss = F.l1_loss(y_hat, y, reduction='none')\n loss = (loss * mask.float()).sum()\n non_zero_elements = mask.sum()\n return loss / non_zero_elements\n\n mask = y[:, 0]\n smoothed_mask = gaussian_blur(\n mask.unsqueeze(dim=1), (9, 9), (2.5, 2.5)).squeeze(dim=1)\n unknown_mask = y[:, 1]\n\n l1_mask = torch.ones(mask.shape, device=device)\n l1_details_mask = torch.zeros(mask.shape, device=device)\n\n # i synthesised some detailed masks using pymatting.github.io\n # by synthesising trimaps from segmentation masks and use these\n # in an additional loss to let the model learn the unknown areas\n # between foreground and background. 
this is not perfect as the generated\n # trimaps and masks are not super accurate, but it seems to go in the right\n # direction.\n detailed_masks = [x['detailed_masks'] for x in metadata]\n for idx, detailed_mask in enumerate(detailed_masks):\n if not detailed_mask:\n l1_mask[idx] = l1_mask[idx] - unknown_mask[idx]\n else:\n l1_details_mask[idx] = unknown_mask[idx]\n\n loss = 0\n for output in aux:\n loss += 2 * masked_l1_loss(output, mask, l1_mask)\n # this loss should give some learning signals to focus on unknown areas\n loss += 3 * masked_l1_loss(output, mask, l1_details_mask)\n # i'm not quite sure if this loss gives the right incentive, the idea\n # is to blur the segmentation mask a bit to reduce background bleeding\n # caused by bad labels, preliminary results seem to be quite ok.\n loss += F.mse_loss(output, smoothed_mask)\n\n aux = {\n 'l1_mask': l1_mask,\n 'l1_detailed_mask': l1_details_mask,\n 'mask': mask,\n 'smoothed_mask': smoothed_mask\n }\n\n return loss, aux\n\n\ndef train_one_epoch(writer, device, model, optimizer, scaler, data_loader, epoch, cfg):\n model.train()\n\n metric_logger = create_metric_logger(train=True, epoch=epoch, writer=writer)\n\n for x, y, metadata in metric_logger.log_every(data_loader, cfg.trainer.print_freq):\n start_time = time.time()\n\n with autocast(enabled=cfg.trainer.amp):\n y_hat, aux_outputs = model(x)\n loss, aux = criterion(aux_outputs, y, metadata, device)\n\n optimizer.zero_grad()\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n metric_logger.update(\n loss=loss.item(),\n lr=optimizer.param_groups[0][\"lr\"])\n\n metric_logger.meters['samples/s'].update(x.size(0) / (time.time() - start_time))\n\n if random.random() < .1:\n sample = denormalize(x[:4], mean=cfg.dataset.mean, std=cfg.dataset.std)\n sample_foreground = y_hat[:4].unsqueeze(dim=1).repeat(1,3,1, 1) * sample\n\n writer.add_image(\n f'train-metrics/sample',\n torchvision.utils.make_grid(\n [torchvision.utils.make_grid(sample, nrow=4),\n torchvision.utils.make_grid(sample_foreground),\n torchvision.utils.make_grid(y_hat[:4].unsqueeze(dim=1), nrow=4)], nrow=1),\n metric_logger.global_step)\n\n writer.add_image(\n f'train-metrics/loss insights',\n torchvision.utils.make_grid(\n [torchvision.utils.make_grid(aux['l1_mask'][:4].unsqueeze(dim=1), nrow=4),\n torchvision.utils.make_grid(aux['l1_detailed_mask'][:4].unsqueeze(dim=1), nrow=4),\n torchvision.utils.make_grid(aux['smoothed_mask'][:4].unsqueeze(dim=1), nrow=4),\n torchvision.utils.make_grid(aux['mask'][:4].unsqueeze(dim=1), nrow=4)], nrow=1),\n metric_logger.global_step)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.ones", "torch.nn.functional.mse_loss", "torch.cuda.amp.GradScaler", "torch.load", "torch.nn.functional.l1_loss", "torch.manual_seed", "torch.zeros", "torch.autograd.set_detect_anomaly", "torch.cuda.is_available", "torch.utils.data.random_split", "torch.cuda.amp.autocast", "torch.utils.tensorboard.SummaryWriter", "torch.utils.data.WeightedRandomSampler", "torch.device" ] ]
npabon/ProDy
[ "390322d9b7688809f91656bc1cadfdb66cd0a9b3" ]
[ "lib/prody/atomic/atom.py" ]
[ "# -*- coding: utf-8 -*-\n# ProDy: A Python Package for Protein Dynamics Analysis\n# \n# Copyright (C) 2010-2012 Ahmet Bakan\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n\n\"\"\"This module defines classes to handle individual atoms.\"\"\"\n\n__author__ = 'Ahmet Bakan'\n__copyright__ = 'Copyright (C) 2010-2012 Ahmet Bakan'\n\nimport numpy as np\n\nfrom . import flags\nfrom .fields import ATOMIC_FIELDS, READONLY\nfrom .fields import wrapGetMethod, wrapSetMethod\nfrom .pointer import AtomPointer\nfrom .bond import Bond\n\n__all__ = ['Atom']\n\n\nclass Atom(AtomPointer):\n \n \"\"\"A class for handling individual atoms in an :class:`.AtomGroup`.\"\"\"\n \n __slots__ = ['_ag', '_acsi', '_index']\n \n def __init__(self, ag, index, acsi):\n AtomPointer.__init__(self, ag, acsi)\n self._index = int(index)\n \n def __repr__(self):\n\n n_csets = self._ag.numCoordsets()\n if n_csets == 1:\n return '<Atom: {0} from {1} (index {2})>'.format(\n self.getName(), self._ag.getTitle(), self._index)\n elif n_csets > 1:\n return ('<Atom: {0} from {1} (index {2}; active #{3} of '\n '{4} coordsets)>').format(self.getName(), \n self._ag.getTitle(), self._index, self.getACSIndex(), \n n_csets)\n else:\n return ('<Atom: {0} from {1} (index {2}; no coordinates)>'\n ).format(self.getName(), self._ag.getTitle(), self._index)\n\n def __str__(self):\n\n return 'Atom {0} (index {1})'.format(self.getName(), self._index)\n\n def __len__(self):\n \n return 1\n \n def __int__(self):\n \n return self._index\n \n def numAtoms(self, flag=None):\n \"\"\"Return number of atoms, or number of atoms with given *flag*.\"\"\"\n \n return len(self._getSubset(flag)) if flag else 1\n \n def getIndex(self):\n \"\"\"Return index of the atom.\"\"\"\n \n return self._index\n \n def getIndices(self):\n \"\"\"Return index of the atom in an :class:`numpy.ndarray`.\"\"\"\n \n return np.array([self._index])\n \n _getIndices = getIndices\n \n def iterAtoms(self):\n \"\"\"Yield atoms.\"\"\"\n\n yield Atom(ag=self._ag, index=self._index, acsi=self.getACSIndex())\n\n __iter__ = iterAtoms\n \n def getCoords(self):\n \"\"\"Return a copy of coordinates of the atom from the active coordinate \n set.\"\"\"\n \n if self._ag._coords is not None:\n return self._ag._coords[self.getACSIndex(), self._index].copy()\n \n def _getCoords(self):\n \"\"\"Return a view of coordinates of the atom from the active coordinate \n set.\"\"\"\n \n if self._ag._coords is not None:\n return self._ag._coords[self.getACSIndex(), self._index]\n \n def setCoords(self, coords):\n \"\"\"Set coordinates of the atom in the active coordinate set.\"\"\"\n \n acsi = self.getACSIndex()\n self._ag._coords[acsi, self._index] = coords\n self._ag._setTimeStamp(acsi)\n \n def getCoordsets(self, indices=None):\n \"\"\"Return a copy of coordinate set(s) at given *indices*.\"\"\"\n \n if self._ag._coords is None:\n return None\n \n if indices is None:\n return self._ag._coords[:, self._index].copy()\n 
\n if isinstance(indices, (int, slice)):\n return self._ag._coords[indices, self._index].copy()\n \n if isinstance(indices, (list, np.ndarray)):\n return self._ag._coords[indices, self._index]\n \n raise IndexError('indices must be an integer, a list/array of '\n 'integers, a slice, or None')\n \n def _getCoordsets(self, indices=None): \n \"\"\"Return a view of coordinate set(s) at given *indices*.\"\"\"\n \n if self._ag._coords is None:\n return None\n \n if indices is None:\n indices = slice(None)\n\n return self._ag._coords[indices, self._index]\n\n def iterCoordsets(self):\n \"\"\"Yield copies of coordinate sets.\"\"\"\n \n for i in range(self.numCoordsets()):\n yield self._ag._coords[i, self._index].copy()\n\n def _iterCoordsets(self):\n \"\"\"Yield views of coordinate sets.\"\"\"\n \n for i in range(self.numCoordsets()):\n yield self._ag._coords[i, self._index]\n \n def getData(self, label):\n \"\"\"Return a copy of data associated with *label*, if it is present.\"\"\"\n \n try:\n data = self._ag._getData(label)\n except KeyError:\n pass\n else:\n if data.ndim > 1:\n return data[self._index]\n else:\n return data[self._index].copy()\n \n _getData = getData\n \n def setData(self, label, data):\n \"\"\"Update *data* associated with *label*.\n \n :raise AttributeError: when *label* is not in use or read-only\"\"\"\n \n if label in READONLY:\n raise AttributeError('{0} is read-only'.format(repr(label)))\n if label in ATOMIC_FIELDS:\n getattr(self, 'set' + ATOMIC_FIELDS[label].meth)(data)\n else:\n try:\n self._ag._data[label][self._index] = data \n except KeyError:\n raise AttributeError('data with label {0} must be set for'\n ' AtomGroup first'.format(repr(label)))\n \n def getFlag(self, label):\n \"\"\"Return atom flag.\"\"\"\n \n return self._ag._getFlags(label)[self._index]\n \n def setFlag(self, label, value):\n \"\"\"Update flag associated with *label*.\n \n :raise AttributeError: when *label* is not in use or read-only\"\"\"\n \n if label in flags.PLANTERS:\n raise AttributeError('flag {0} cannot be changed by user'\n .format(repr(label)))\n flags = self._ag._getFlags(label)\n if flags is None:\n raise AttributeError('flags with label {0} must be set for '\n 'AtomGroup first'.format(repr(label)))\n flags[self._index] = value\n \n def getSelstr(self):\n \"\"\"Return selection string that will select this atom.\"\"\"\n \n return 'index {0}'.format(self._index)\n\n def numBonds(self):\n \"\"\"Return number of bonds formed by this atom. Bonds must be set first\n using :meth:`.AtomGroup.setBonds`.\"\"\"\n \n numbonds = self._ag._data.get('numbonds')\n if numbonds is not None:\n return numbonds[self._index]\n \n def iterBonds(self):\n \"\"\"Yield bonds formed by the atom. Use :meth:`setBonds` for setting\n bonds.\"\"\"\n \n ag = self._ag\n acsi = self.getACSIndex()\n for bond in self._iterBonds():\n yield Bond(ag, bond, acsi) \n\n def _iterBonds(self):\n \"\"\"Yield pairs of bonded atom indices.\"\"\"\n\n ag = self._ag\n if ag._bmap is None:\n raise ValueError('bonds are not set, use `AtomGroup.setBonds`')\n \n this = self._index\n for other in ag._bmap[this]:\n if other == -1:\n break\n yield this, other \n \n def iterBonded(self):\n \"\"\"Yield bonded atoms. 
Use :meth:`setBonds` for setting bonds.\"\"\"\n \n ag = self._ag\n if ag._bmap is None:\n raise ValueError('bonds are not set, use `AtomGroup.setBonds`')\n \n acsi = self.getACSIndex()\n this = self._index\n for other in self._ag._bmap[this]:\n if other == -1:\n break\n yield Atom(ag, other, acsi)\n\n\nfor fname, field in ATOMIC_FIELDS.items():\n \n if field.private:\n continue\n \n meth = field.meth\n getMeth = 'get' + meth\n setMeth = 'set' + meth\n # Define public method for retrieving a copy of data array\n def getData(self, meth=field.meth_pl, call=field.call):\n data = getattr(self._ag, '_get' + meth)()\n if data is not None:\n return data[self._index] \n getData = wrapGetMethod(getData)\n getData.__name__ = getMeth\n getData.__doc__ = field.getDocstr('get', False)\n setattr(Atom, getMeth, getData)\n setattr(Atom, '_' + getMeth, getData)\n \n if field.readonly:\n continue\n \n # Define public method for setting values in data array\n def setData(self, value, var=fname, none=field.none):\n array = self._ag._data[var]\n if array is None:\n raise AttributeError('attribute of the AtomGroup is '\n 'not set')\n array[self._index] = value\n if none: self._ag._none(none)\n setData = wrapSetMethod(setData)\n setData.__name__ = setMeth \n setData.__doc__ = field.getDocstr('set', False)\n setattr(Atom, setMeth, setData)\n\ndel getData\ndel setData\n" ]
[ [ "numpy.array" ] ]
em3ndez/gretel-synthetics
[ "7d9f433a741469860c6ec3aadf76da02036671c4" ]
[ "src/gretel_synthetics/batch.py" ]
[ "\"\"\"\nThis module allows automatic splitting of a DataFrame\ninto smaller DataFrames (by clusters of columns) and doing\nmodel training and text generation on each sub-DF independently.\n\nThen we can concat each sub-DF back into one final synthetic dataset.\n\nFor example usage, please see our Jupyter Notebook.\n\"\"\"\nimport abc\nimport glob\nimport gzip\nimport io\nimport json\nimport logging\nimport shutil\nimport tempfile\nimport threading\nimport time\n\nfrom copy import deepcopy\nfrom dataclasses import dataclass, field\nfrom itertools import zip_longest\nfrom math import ceil\nfrom pathlib import Path\nfrom typing import Callable, Dict\nfrom typing import Iterator as IteratorType\nfrom typing import List, Optional, Type, Union\n\nimport cloudpickle\nimport gretel_synthetics.const as const\nimport numpy as np\nimport pandas as pd\n\nfrom gretel_synthetics.config import (\n BaseConfig,\n config_from_model_dir,\n CONFIG_MAP,\n LocalConfig,\n)\nfrom gretel_synthetics.errors import TooManyInvalidError\nfrom gretel_synthetics.generate import generate_text, GenText, SeedingGenerator\nfrom gretel_synthetics.tokenizers import BaseTokenizerTrainer\nfrom gretel_synthetics.train import train\nfrom tqdm.auto import tqdm\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nMAX_INVALID = 1000\nBATCH_SIZE = 15\nFIELD_DELIM = \"field_delimiter\"\nGEN_LINES = \"gen_lines\"\nREAD = \"read\"\nWRITE = \"write\"\nHEADER_FILE = \"headers.json\"\nORIG_HEADERS = \"original_headers.json\"\nCHECKPOINT_DIR = \"checkpoint_dir\"\nCONFIG_FILE = \"model_params.json\"\nTRAIN_FILE = \"train.csv\"\nPATH_HOLDER = \"___path_holder___\"\nFILE = \"file\"\nMEMORY = \"memory\"\n\n\n@dataclass\nclass GenerationSummary:\n \"\"\"A class to capture the summary data after synthetic data is generated.\"\"\"\n\n valid_lines: int = 0\n invalid_lines: int = 0\n is_valid: bool = False\n\n\nclass _BatchEpochCallback:\n \"\"\"\n Wrapper class to take a user supplied callback and inject the batch number. The batch number\n is then available in the EpochState object when it is supplied to the callback.\n \"\"\"\n\n def __init__(self, user_callback: callable, batch_number: int):\n self._batch_number = batch_number\n self._user_callback = user_callback\n\n def callback(self, epoch_state):\n epoch_state.batch = self._batch_number\n self._user_callback(epoch_state)\n\n\n@dataclass\nclass Batch:\n \"\"\"A representation of a synthetic data workflow. It should not be used\n directly. This object is created automatically by the primary batch handler,\n such as ``DataFrameBatch``. 
This class holds all of the necessary information\n for training, data generation and DataFrame re-assembly.\n \"\"\"\n\n checkpoint_dir: str\n input_data_path: str\n headers: List[str]\n config: LocalConfig\n gen_data_count: int = 0\n\n training_df: Type[pd.DataFrame] = field(default_factory=lambda: None, init=False)\n gen_data_stream: io.StringIO = field(default_factory=io.StringIO, init=False)\n gen_data_invalid: List[GenText] = field(default_factory=list, init=False)\n validator: Callable = field(default_factory=lambda: None, init=False)\n\n def __post_init__(self):\n self.reset_gen_data()\n\n @property\n def synthetic_df(self) -> pd.DataFrame:\n \"\"\"Get a DataFrame constructed from the generated lines\"\"\"\n if not self.gen_data_stream.getvalue(): # pragma: no cover\n return pd.DataFrame()\n self.gen_data_stream.seek(0)\n return pd.read_csv(self.gen_data_stream, sep=self.config.field_delimiter)\n\n def set_validator(self, fn: Callable, save=True):\n \"\"\"Assign a validation callable to this batch. Optionally\n pickling and saving the validator for loading later\n \"\"\"\n self.validator = fn\n if save:\n p = Path(self.checkpoint_dir) / \"validator.p.gz\"\n with gzip.open(p, \"w\") as fout:\n fout.write(cloudpickle.dumps(fn))\n\n def load_validator_from_file(self):\n \"\"\"Load a saved validation object if it exists\"\"\"\n p = Path(self.checkpoint_dir) / \"validator.p.gz\"\n if p.exists():\n with gzip.open(p, \"r\") as fin:\n self.validator = cloudpickle.loads(fin.read())\n\n def reset_gen_data(self):\n \"\"\"Reset all objects that accumulate or track synthetic\n data generation\n \"\"\"\n self.gen_data_invalid = []\n self.gen_data_stream = io.StringIO()\n self.gen_data_stream.write(\n self.config.field_delimiter.join(self.headers) + \"\\n\"\n )\n self.gen_data_count = 0\n\n def add_valid_data(self, data: GenText):\n \"\"\"Take a ``gen_text`` object and add the generated\n line to the generated data stream\n \"\"\"\n self.gen_data_stream.write(data.text + \"\\n\")\n self.gen_data_count += 1\n\n def _basic_validator(self, raw_line: str): # pragma: no cover\n return len(raw_line.split(self.config.field_delimiter)) == len(self.headers)\n\n def get_validator(self):\n \"\"\"If a custom validator is set, we return that. 
Otherwise,\n we return the built-in validator, which simply checks if a generated\n line has the right number of values based on the number of headers\n for this batch.\n\n This at least makes sure the resulting DataFrame will be the right\n shape\n \"\"\"\n if self.validator is not None:\n return self.validator\n\n return self._basic_validator\n\n\ndef _create_batch_from_dir(batch_dir: str):\n path = Path(batch_dir)\n if not path.is_dir(): # pragma: no cover\n raise ValueError(\"%s is not a directory\" % batch_dir)\n\n if not (path / HEADER_FILE).is_file(): # pragma: no cover\n raise ValueError(\"missing headers\")\n headers = json.loads(open(path / HEADER_FILE).read())\n\n if not (path / CONFIG_FILE).is_file(): # pragma: no cover\n raise ValueError(\"missing model param file\")\n\n config = config_from_model_dir(batch_dir)\n\n # training path can be empty, since we will not need access\n # to training data simply for read-only data generation\n train_path = \"\"\n\n # Wrap the user supplied callback with a _BatchEpochCallback so we have the batch number too.\n if config.epoch_callback is not None:\n batch_count = int(Path(batch_dir).name.split(\"_\")[-1])\n config.epoch_callback = _BatchEpochCallback(\n config.epoch_callback, batch_count\n ).callback\n\n batch = Batch(\n checkpoint_dir=batch_dir,\n input_data_path=train_path,\n headers=headers,\n config=config,\n )\n\n batch.load_validator_from_file()\n\n return batch\n\n\ndef _crawl_checkpoint_for_batches(checkpoint_dir: str):\n logger.info(\"Looking for and loading batch data...\")\n matching_dirs = glob.glob(str(Path(checkpoint_dir) / \"batch_*\"))\n if not matching_dirs:\n raise ValueError(\n \"checkpoint directory does not exist or does not contain batch data\"\n )\n\n batches = []\n for batch_dir in matching_dirs:\n idx = int(Path(batch_dir).name.split(\"_\")[-1])\n batches.append((idx, _create_batch_from_dir(batch_dir)))\n\n logger.info(\"Found and loaded %d batches\", len(batches))\n return dict(sorted(batches, key=lambda b: b[0]))\n\n\ndef _build_batch_dirs(\n base_ckpoint: str, headers: List[List[str]], config: dict\n) -> dict:\n \"\"\"Return a mapping of batch number => ``Batch`` object\"\"\"\n out = {}\n logger.info(\"Creating directory structure for batch jobs...\")\n base_path = Path(config[\"checkpoint_dir\"])\n if not base_path.is_dir():\n base_path.mkdir()\n for i, headers in enumerate(headers):\n ckpoint = Path(base_ckpoint) / f\"batch_{i}\"\n if not ckpoint.is_dir():\n ckpoint.mkdir()\n checkpoint_dir = str(ckpoint)\n input_data_path = str(ckpoint / \"train.csv\")\n new_config = deepcopy(config)\n new_config.update(\n {\"checkpoint_dir\": checkpoint_dir, \"input_data_path\": input_data_path}\n )\n\n # Determine what BaseConfig subclass to use, if the config template does\n # not have a model type then we'll default to using a LocalConfig which gives\n # us backwards compat to 0.14.0\n config_class_str = new_config.get(const.MODEL_TYPE, None)\n if config_class_str is None:\n config_class = LocalConfig\n else:\n config_class = CONFIG_MAP[config_class_str]\n\n # Wrap the user supplied callback with a _BatchEpochCallback so we have the batch number too.\n if new_config.get(\"epoch_callback\") is not None:\n new_config[\"epoch_callback\"] = _BatchEpochCallback(\n new_config.get(\"epoch_callback\"), i\n ).callback\n\n out[i] = Batch(\n checkpoint_dir=checkpoint_dir,\n input_data_path=input_data_path,\n headers=headers,\n config=config_class(**new_config),\n )\n # try and load any previously saved validators\n 
out[i].load_validator_from_file()\n\n # we write the headers out as well incase we load these\n # batches back in via \"read\" mode only later...it's the only\n # way to get the header names back\n with open(ckpoint / HEADER_FILE, \"w\") as fout:\n fout.write(json.dumps(headers))\n\n return out\n\n\ndef _validate_batch_seed_values(\n batch: Batch, seed_values: Union[dict, List[dict]]\n) -> Union[str, List[str]]:\n \"\"\"Validate that seed values line up with the first N columns in a batch. Also construct\n an appropiate seed string based on the values in the batch\n \"\"\"\n ret_str = True\n if isinstance(seed_values, dict):\n seed_values = [seed_values]\n elif isinstance(seed_values, list):\n ret_str = False\n else:\n raise TypeError(\"seed_values should be a dict or list of dicts\")\n\n seed_strings = []\n\n for seed in seed_values:\n if len(seed) > len(batch.headers):\n raise RuntimeError(\n \"The number of seed fields is greater than the number of columns in the first batch\"\n )\n\n headers_to_seed = batch.headers[: len(seed)]\n tmp = []\n for header in headers_to_seed:\n value = seed.get(header)\n if value is None:\n raise RuntimeError(\n f\"The header: {header} is not in the seed values mapping\"\n ) # noqa\n tmp.append(str(value))\n\n seed_strings.append(\n batch.config.field_delimiter.join(tmp) + batch.config.field_delimiter\n )\n\n if ret_str:\n return seed_strings[0]\n else:\n return seed_strings\n\n\nclass _BufferedRecords(abc.ABC):\n \"\"\"Base class for all buffers used when\n generating records\n \"\"\"\n\n @abc.abstractmethod\n def add(self, record: dict):\n ...\n\n @abc.abstractmethod\n def get_records(self):\n ...\n\n def cleanup(self):\n pass\n\n\nclass _BufferedDicts(_BufferedRecords):\n\n _records: List[dict]\n\n def __init__(self):\n self._records = []\n\n def add(self, record: dict):\n self._records.append(record)\n\n def get_records(self):\n return self._records\n\n\nclass _BufferedDataFrame(_BufferedRecords):\n \"\"\"Buffer dictionaries into a memory or file, then\n load it as a DataFrame and set the column order\n based on the provided list. 
This allows\n datatypes to be inferred as if the values were\n being read from a CSV on disk.\n\n NOTE: The cleanup() method must be called when done\n with this class.\n \"\"\"\n\n def __init__(self, delim: str, columns: List[str], method: str = FILE):\n self.delim = delim\n self.columns = columns\n self.headers_set = False\n self.method = method\n\n # Create our actual buffer file-like object\n if self.method == FILE:\n self.buffer = tempfile.TemporaryFile(mode=\"w+\")\n elif self.method == MEMORY:\n self.buffer = io.StringIO()\n else:\n raise ValueError(\"Invalid method\")\n\n def add(self, record: dict):\n # write the columns names into the buffer, we\n # use the first dict to specify the order and\n # assume subsequent dicts have the same order\n if not self.headers_set:\n _columns = self.delim.join(record.keys())\n self.buffer.write(_columns + \"\\n\")\n self.headers_set = True\n _row = self.delim.join(record.values())\n self.buffer.write(_row + \"\\n\")\n\n @property\n def df(self) -> pd.DataFrame:\n self.buffer.seek(0)\n return pd.read_csv(self.buffer, sep=self.delim)[self.columns]\n\n def get_records(self) -> pd.DataFrame:\n return self.df\n\n def cleanup(self):\n if self.method == FILE:\n self.buffer.close()\n\n\n@dataclass\nclass GenerationProgress:\n \"\"\"\n This class should not have to be used directly.\n\n It is used to communicate the current progress of record generation.\n\n When a callback function is passed to the ``RecordFactory.generate_all()`` method,\n each time the callback is called an instance of this class will be passed\n as the single argument::\n\n def my_callback(data: GenerationProgress):\n ...\n\n factory: RecordFactory\n df = factory.generate_all(output=\"df\", callback=my_callback)\n\n This class is used to periodically communicate progress of generation to the user,\n through a callback that can be passed to ``RecordFactory.generate_all()`` method.\n \"\"\"\n\n current_valid_count: int = 0\n \"\"\"The number of valid lines/records that\n were generated so far.\n \"\"\"\n\n current_invalid_count: int = 0\n \"\"\"The number of invalid lines/records that\n were generated so far.\n \"\"\"\n\n new_valid_count: int = 0\n \"\"\"The number of new valid lines/records that\n were generated since the last progress callback.\n \"\"\"\n\n new_invalid_count: int = 0\n \"\"\"The number of new valid lines/records that\n were generated since the last progress callback.\n \"\"\"\n\n completion_percent: float = 0.0\n \"\"\"The percentage of valid lines/records that have been generated.\"\"\"\n\n timestamp: float = field(default_factory=time.time)\n \"\"\"The timestamp from when the information in this object has been captured.\"\"\"\n\n\nclass _GenerationCallback:\n \"\"\"\n Wrapper around a callback function that is sending progress updates only once\n per configured time period (``update_interval``).\n\n Args:\n callback_fn: Callback function to be invoked with current progress.\n update_interval: Number of seconds to wait between sending progress update.\n \"\"\"\n\n def __init__(self, callback_fn: callable, update_interval: int = 30):\n self._callback_fn = callback_fn\n self._update_interval = update_interval\n\n self._last_update_time = int(time.monotonic())\n self._last_progress = GenerationProgress()\n\n def update_progress(\n self,\n num_lines: int,\n valid_count: int,\n invalid_count: int,\n *,\n force_update=False,\n ):\n\n \"\"\"\n Method that's being called from the generator with a progress update.\n\n Args:\n num_lines: Total number of lines to be 
generated.\n valid_count: Number of valid lines that were generated so far.\n invalid_count: Number of invalid lines that were generated so far.\n final_update:\n Is this the final update? It is ``True`` when sending last update, after the\n whole generation was complete.\n \"\"\"\n now = int(time.monotonic())\n\n if now - self._last_update_time >= self._update_interval or force_update:\n current_progress = GenerationProgress(\n current_valid_count=valid_count,\n current_invalid_count=invalid_count,\n new_valid_count=valid_count - self._last_progress.current_valid_count,\n new_invalid_count=invalid_count\n - self._last_progress.current_invalid_count,\n completion_percent=0\n if num_lines == 0\n else round(valid_count / num_lines * 100, 2),\n )\n\n self._callback_fn(current_progress)\n self._last_update_time = now\n self._last_progress = current_progress\n\n\n@dataclass\nclass _FactoryCounter:\n num_lines: int = 0\n \"\"\"The target number of lines to generate when\n iterating or generating all records.\n \"\"\"\n\n max_invalid: int = MAX_INVALID\n \"\"\"The number of max invalid lines to tolerate before\n stopping generation and raising a ``RunTimeError.``\n \"\"\"\n\n valid_count: int = 0\n \"\"\"The number of valid records / lines that have been generated\n \"\"\"\n\n invalid_count: int = 0\n \"\"\"The number of invalid records / lines that were generated\n \"\"\"\n\n\ndef _threading_generation_callback(\n counter: _FactoryCounter, callback: _GenerationCallback, event: threading.Event\n):\n while not event.is_set():\n try:\n callback.update_progress(\n counter.num_lines, counter.valid_count, counter.invalid_count\n )\n except Exception:\n event.set()\n break\n time.sleep(1)\n\n\nclass RecordFactory:\n \"\"\"A stateful factory that can be used to generate and validate entire\n records, regardless of the number of underlying header clusters that were\n used to build multiple sub-models.\n\n Instances of this class should be created by calling the appropiate method\n of the ``DataFrameBatch`` instance. This class should not have to\n be used directly. You should be able to create an instance like so::\n\n factory = batcher.create_record_factory(num_lines=50)\n\n The class is init'd with default capacity and limits as specified\n by the ``num_lines`` and ``max_invalid`` attributes. At any time,\n you can inspect the state of the instance by doing::\n\n factory.summary\n\n The factory instance can be used one of two ways: buffered or unbuffered.\n\n For unbuffered mode, the entire instance can be used as an iterator to\n create synthetic records. Each record will be a dictionary.\n\n NOTE:\n All values in the generated dictionaries will be strings.\n\n The ``valid_count`` and ``invalid_count`` counters will update as\n records are generated.\n\n When creating the record factory, you may also provide an entire\n record validator::\n\n def validator(rec: dict):\n ...\n\n factory = batcher.create_record_factory(num_lines=50, validator=validator)\n\n Each generated record dict will be passed to the validator. This validator may either\n return False or raise an exception to mark a record as invalid.\n\n At any point, you may reset the state of the factory by calling::\n\n factory.reset()\n\n This will reset all counters and allow you to keep generating records.\n\n Finally, you can generate records in buffered mode, where generated records\n will be buffered in memory and returned as one collection. 
By default, a list\n of dicts will be returned::\n\n factory.generate_all()\n\n You may request the records to be returned as a DataFrame. The dtypes will\n be inferred as if you were reading the data from a CSV::\n\n factory.generate_all(output=\"df\")\n\n NOTE:\n When using ``generate_all``, the factory states will be reset automatically.\n \"\"\"\n\n validator: Callable\n \"\"\"An optional callable that will receive a fully constructed record for one\n final validation before returning or yielding a single record. Records that\n do not pass this validation will also increment the ``invalid_count.``\n \"\"\"\n\n _batches: Dict[int, Batch]\n _header_list: List[str]\n _seed_fields: Union[str, List[str]]\n _record_generator: IteratorType[dict]\n _delimiter: str\n _parallelism: int\n _counter = _FactoryCounter\n _invalid_cache_size: int\n _thread_event: threading.Event = None\n\n invalid_cache: List[dict]\n\n def __init__(\n self,\n *,\n num_lines: int,\n batches: dict,\n header_list: list,\n delimiter: str,\n seed_fields: Union[dict, list] = None,\n max_invalid=MAX_INVALID,\n validator: Optional[Callable] = None,\n parallelism: int = 4,\n invalid_cache_size: int = 100,\n ):\n self._counter = _FactoryCounter()\n self._counter.num_lines = num_lines\n self.max_invalid = max_invalid\n self._batches = batches\n self._header_list = header_list\n self._seed_fields = seed_fields\n self._delimiter = delimiter\n self._parallelism = parallelism\n self.validator = validator\n self._invalid_cache_size = invalid_cache_size\n self.reset()\n\n if self._seed_fields is not None:\n self._seed_fields = _validate_batch_seed_values(\n self._batches[0], self._seed_fields\n )\n\n if isinstance(self._seed_fields, list):\n logger.info(\n \"Adjusting num_lines and parallelism because seed_fields is a list, will only target %d lines\",\n len(self._seed_fields),\n ) # noqa\n self._parallelism = 1\n self._counter.num_lines = len(self._seed_fields)\n\n def _cache_invalid(self, line: GenText):\n self.invalid_cache.append(line.as_dict())\n self.invalid_cache = self.invalid_cache[: self._invalid_cache_size]\n\n def _get_record(self) -> IteratorType[dict]:\n # our actual batch line generators\n generators = []\n\n # if we have a list of seed fields, we do special\n # handling to create the proper generator\n seed_generator = None # assume no seeds to start\n if isinstance(self._seed_fields, list):\n seed_generator = SeedingGenerator(\n self._batches[0].config,\n seed_list=self._seed_fields,\n line_validator=self._batches[0].get_validator(),\n max_invalid=self.max_invalid * 10000,\n )\n generators.append((self._batches[0], seed_generator))\n\n for idx, batch in self._batches.items():\n start_string = None\n if idx == 0 and seed_generator:\n # We've already added the first batch's generator to the list\n # so we just continue on to the next one\n continue\n if idx == 0:\n # In the event we have seeds that aren't a list, (i.e. static seeds)\n start_string = self._seed_fields\n generators.append(\n (\n batch,\n # We seed the low level API with much higher limits on\n # valid / invalid generation because we will enforce\n # those limits in this high level instance.\n generate_text(\n batch.config,\n line_validator=batch.get_validator(),\n max_invalid=self.max_invalid * 10000,\n num_lines=self._counter.num_lines * 10000,\n start_string=start_string,\n parallelism=self._parallelism,\n ),\n )\n )\n\n # At this point, we've created our list of generators. 
Below here\n # is what gets run on every next() call, which tries to construct\n # a full record from all the underlying batches.\n\n # keep looping as long as our target line count is less than\n # our total line count\n while self._counter.valid_count < self._counter.num_lines:\n # loop over each batch line generater and attempt\n # to construct a full line, we'll only count a\n # full line once we get through each generator\n\n # if we are using a watchdog thread to monitor generation\n # and it throws an exception, a threading event will be set\n # that signals generation should stop\n if self._thread_event and self._thread_event.is_set():\n break\n\n if self._counter.invalid_count >= self.max_invalid:\n raise RuntimeError(\"Invalid record count exceeded during generation\")\n\n seed_cache = None\n if seed_generator:\n # If we're using a seeding generator (from a list of seeds)\n # we cache the next seed we are about to use to generate\n # the next record.\n seed_cache = seed_generator.settings.start_string[0]\n\n record = {}\n batch: Batch\n for batch, gen in generators:\n while True:\n\n # see above usage for watchdog thread exception handling\n if self._thread_event and self._thread_event.is_set():\n break\n\n line = next(gen) # type: GenText\n if line.valid is False:\n self._cache_invalid(line)\n self._counter.invalid_count += 1\n if self._counter.invalid_count > self.max_invalid:\n raise RuntimeError(\n \"Invalid record count exceeded during generation\"\n )\n continue\n partial_rec = dict(\n zip_longest(batch.headers, line.values_as_list(), fillvalue=\"\")\n )\n record.update(partial_rec)\n break\n\n # Do a final validation, if configured, on the fully constructed\n # record, if this validation fails, we'll still increment our\n # invalid count.\n\n valid = True # assume we have a valid record\n\n if self.validator is not None:\n try:\n _valid = self.validator(record)\n if _valid is False:\n valid = False\n except Exception:\n valid = False\n\n if not valid:\n self._counter.invalid_count += 1\n if seed_cache:\n seed_generator.settings.start_string.insert(0, seed_cache)\n continue # back to the while start\n\n self._counter.valid_count += 1\n yield record\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self._record_generator)\n\n def reset(self):\n self._counter.valid_count = 0\n self._counter.invalid_count = 0\n self._record_generator = self._get_record()\n self._thread_event = None\n self.invalid_cache = []\n\n def generate_all(\n self,\n output: Optional[str] = None,\n callback: Optional[callable] = None,\n callback_interval: int = 30,\n callback_threading: bool = False,\n ):\n \"\"\"Attempt to generate the full number of records that was set when\n creating the ``RecordFactory.`` This method will create a buffer\n that holds all records and then returns the the buffer once\n generation is complete.\n\n Args:\n output: How the records should be returned. If ``None``, which is the\n default, then a list of record dicts will be returned. Other options\n that are supported are: 'df' for a DataFrame.\n callback: An optional callable that will periodically be called with\n a ``GenerationProgress`` instance as the single argument while\n records are being generated.\n callback_interval: If using a callback, the minimum number of seconds that\n should occur between callbacks.\n callback_threading: If enabled, a watchdog thread will be used to execute\n the callback. This will ensure that the callback is called regardless\n of invalid or valid counts. 
If callback threading is disabled, the callback\n will only be called after valid records are generated. If the callback\n raises and exception, then a threading event will be set which will trigger\n the stopping of generation.\n\n Returns:\n Generated records in an object that is dependent on the ``output`` param. By default\n this will be a list of dicts.\n \"\"\"\n progress_callback = None\n if callback:\n progress_callback = _GenerationCallback(callback, callback_interval)\n\n self.reset()\n if output is not None and output not in (\"df\",):\n raise ValueError(\"invalid output type\")\n\n _iter = tqdm(self._record_generator, total=self._counter.num_lines)\n\n buffer = None # type: _BufferedRecords\n\n if output == \"df\":\n buffer = _BufferedDataFrame(self._delimiter, self._header_list)\n\n if not buffer:\n buffer = _BufferedDicts()\n\n callback_thread = None\n if callback_threading:\n if not progress_callback:\n raise ValueError(\n \"Cannot use callback_threading without a progress callback\"\n )\n self._thread_event = threading.Event()\n callback_thread = threading.Thread(\n target=_threading_generation_callback,\n args=(self._counter, progress_callback, self._thread_event),\n )\n callback_thread.start()\n\n try:\n for rec in _iter:\n # NOTE: This iterator will block while no records are being\n # succesfully generated. If callbacks need to occur in this\n # situation, ensure the callback threading option is enabled\n #\n # If threading is enabled, and the callback encounters an exception,\n # a threading event will be set and the generator will break out of its\n # loop and generation will cease.\n buffer.add(rec)\n\n if progress_callback and not callback_threading:\n progress_callback.update_progress(\n self._counter.num_lines,\n self._counter.valid_count,\n self._counter.invalid_count,\n )\n\n except (RuntimeError, StopIteration) as err:\n logger.warning(\n f\"Runtime error on iteration, returning current buffer, {str(err)}\"\n )\n finally:\n if callback_threading:\n self._thread_event.set()\n callback_thread.join()\n\n # send final progress update\n if progress_callback:\n progress_callback.update_progress(\n self._counter.num_lines,\n self._counter.valid_count,\n self._counter.invalid_count,\n force_update=True,\n )\n\n out_records = buffer.get_records()\n buffer.cleanup()\n return out_records\n\n @property\n def summary(self):\n return {\n \"num_lines\": self._counter.num_lines,\n \"max_invalid\": self._counter.max_invalid,\n \"valid_count\": self._counter.valid_count,\n \"invalid_count\": self._counter.invalid_count,\n }\n\n\nclass DataFrameBatch:\n \"\"\"Create a multi-batch trainer / generator. When created, the directory\n structure to store models and training data will automatically be created.\n The directory structure will be created under the \"checkpoint_dir\" location\n provided in the ``config`` template. 
There will be one directory per batch,\n where each directory will be called \"batch_N\" where N is the batch number, starting\n from 0.\n\n Training and generating can happen per-batch or we can loop over all batches to\n do both train / generation functions.\n\n Example:\n When creating this object, you must explicitly create the training data\n from the input DataFrame before training models::\n\n my_batch = DataFrameBatch(df=my_df, config=my_config)\n my_batch.create_training_data()\n my_batch.train_all_batches()\n\n Args:\n df: The input, source DataFrame\n batch_size: If ``batch_headers`` is not provided we automatically break up\n the number of columns in the source DataFrame into batches of N columns.\n batch_headers: A list of lists of strings can be provided which will control\n the number of batches. The number of inner lists is the number of batches, and each\n inner list represents the columns that belong to that batch\n config: A template training config to use, this will be used as kwargs for each Batch's\n synthetic configuration. This may also be a sucblass of ``BaseConfig``. If this is used,\n you can set the ``input_data_path`` param to the constant ``PATH_HOLDER`` as it does not\n really matter\n tokenizer_class: An optional ``BaseTokenizerTrainer`` subclass. If not provided the default\n tokenizer will be used for the underlying ML engine.\n\n NOTE:\n When providing a config, the source of training data is not necessary, only the\n ``checkpoint_dir`` is needed. Each batch will control its input training data path\n after it creates the training dataset.\n \"\"\"\n\n batches: Dict[int, Batch]\n \"\"\"A mapping of ``Batch`` objects to a batch number. The batch number (key)\n increments from 0..N where N is the number of batches being used.\n \"\"\"\n\n batch_size: int\n \"\"\"The max number of columns allowed for a single DF batch\n \"\"\"\n\n # NOTE: Allowing a dict is for backwards compat\n config: Union[dict, BaseConfig]\n \"\"\"The template config that will be used for all batches. If a dict\n is provided we default to a TensorFlowConfig.\n \"\"\"\n\n mode: Union[WRITE, READ]\n\n master_header_list: List[str]\n \"\"\"During training, this is the original column order. When reading from\n disk, we concatenate all headers from all batches together. This list is not\n guaranteed to preserve the original header order.\n \"\"\"\n\n original_headers: List[str]\n \"\"\"Stores the original header list / order from the original training data that was used.\n This is written out to the model directory during training and loaded back in when\n using read-only mode.\n \"\"\"\n\n def __init__(\n self,\n *,\n df: pd.DataFrame = None,\n batch_size: int = BATCH_SIZE,\n batch_headers: List[List[str]] = None,\n config: Union[dict, BaseConfig] = None,\n tokenizer: BaseTokenizerTrainer = None,\n mode: str = WRITE,\n checkpoint_dir: str = None,\n ):\n\n if mode not in (WRITE, READ): # pragma: no cover\n raise ValueError(\"mode must be read or write\")\n\n self.mode = mode\n\n # If the config was a subclass of BaseConfig, then we convert\n # it to a dict and utilize that dict as our template. 
We do this\n # because when we re-create the batches we want to utilize the\n # Config constructors to set some attrs for us\n if isinstance(config, BaseConfig):\n config = config.as_dict()\n\n self.tokenizer = tokenizer\n\n self.original_headers = None\n\n if self.mode == READ:\n if isinstance(config, dict):\n _ckpoint_dir = config.get(\"checkpoint_dir\")\n else:\n _ckpoint_dir = checkpoint_dir\n\n if _ckpoint_dir is None:\n raise ValueError(\"checkpoint_dir required for read mode\")\n else:\n self._read_checkpoint_dir = _ckpoint_dir\n\n if self.mode == WRITE:\n if not config:\n raise ValueError(\"config is required!\")\n\n checkpoint_path = Path(config[CHECKPOINT_DIR])\n overwrite = config.get(\"overwrite\", False)\n if (\n not overwrite\n and checkpoint_path.is_dir()\n and any(checkpoint_path.iterdir())\n ):\n raise RuntimeError(\n \"checkpoint_dir already exists and is non-empty, set overwrite on config or remove model directory!\"\n ) # noqa\n\n if overwrite and checkpoint_path.is_dir():\n shutil.rmtree(checkpoint_path)\n\n if not isinstance(df, pd.DataFrame):\n raise ValueError(\"df must be a DataFrame in write mode\")\n\n if FIELD_DELIM not in config:\n raise ValueError(\"field_delimiter must be in config\")\n\n if GEN_LINES not in config:\n config[GEN_LINES] = df.shape[0]\n\n self._source_df = df\n self.batch_size = batch_size\n self.config = config\n self._source_df.fillna(\"\", inplace=True)\n self.master_header_list = list(self._source_df.columns)\n\n if not batch_headers:\n self.batch_headers = self._create_header_batches()\n else: # pragma: no cover\n self.batch_headers = batch_headers\n\n self.batches = _build_batch_dirs(\n self.config[\"checkpoint_dir\"], self.batch_headers, self.config\n )\n\n # Preserve the original order of the DF headers\n self.original_headers = list(self._source_df)\n with open(Path(self.config[CHECKPOINT_DIR]) / ORIG_HEADERS, \"w\") as fout:\n fout.write(json.dumps(list(self.original_headers)))\n else:\n self.batches = _crawl_checkpoint_for_batches(self._read_checkpoint_dir)\n self.master_header_list = []\n for batch in self.batches.values():\n self.master_header_list.extend(batch.headers)\n\n try:\n self.original_headers = json.loads(\n open(Path(self._read_checkpoint_dir) / ORIG_HEADERS).read()\n )\n except FileNotFoundError:\n self.original_headers = None\n\n logger.info(\"Validating underlying models exist via generation test...\")\n try:\n self.generate_all_batch_lines(parallelism=1, num_lines=1)\n except Exception as err:\n raise RuntimeError(\n \"Error testing generation during model load\"\n ) from err\n\n def _create_header_batches(self):\n num_batches = ceil(len(self._source_df.columns) / self.batch_size)\n tmp = np.array_split(list(self._source_df.columns), num_batches)\n return [list(row) for row in tmp]\n\n def create_training_data(self):\n \"\"\"Split the original DataFrame into N smaller DataFrames. 
Each\n smaller DataFrame will have the same number of rows, but a subset\n of the columns from the original DataFrame.\n\n This method iterates over each ``Batch`` object and assigns\n a smaller training DataFrame to the ``training_df`` attribute\n of the object.\n\n Finally, a training CSV is written to disk in the specific\n batch directory\n \"\"\"\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n for i, batch in self.batches.items():\n logger.info(f\"Generating training DF and CSV for batch {i}\")\n out_df = self._source_df[batch.headers]\n batch.training_df = out_df.copy(deep=True)\n out_df.to_csv(\n batch.input_data_path,\n header=False,\n index=False,\n sep=self.config[FIELD_DELIM],\n )\n\n def train_batch(self, batch_idx: int):\n \"\"\"Train a model for a single batch. All model information will\n be written into that batch's directory.\n\n Args:\n batch_idx: The index of the batch, from the ``batches`` dictionary\n \"\"\"\n if self.tokenizer is not None:\n _tokenizer = deepcopy(self.tokenizer)\n _tokenizer.config = self.batches[batch_idx].config\n else:\n _tokenizer = None\n\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n try:\n train(self.batches[batch_idx].config, _tokenizer)\n except KeyError:\n raise ValueError(\"batch_idx is invalid\")\n\n def train_all_batches(self):\n \"\"\"Train a model for each batch.\"\"\"\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n for idx in self.batches.keys():\n self.train_batch(idx)\n\n def set_batch_validator(self, batch_idx: int, validator: Callable):\n \"\"\"Set a validator for a specific batch. If a validator is configured\n for a batch, each generated record from that batch will be sent\n to the validator.\n\n Args:\n batch_idx: The batch number .\n validator: A callable that should take exactly one argument,\n which will be the raw line generated from the ``generate_text``\n function.\n \"\"\"\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n if not callable(validator):\n raise ValueError(\"validator must be callable!\")\n try:\n self.batches[batch_idx].set_validator(validator)\n except KeyError:\n raise ValueError(\"invalid batch number!\")\n\n def generate_batch_lines(\n self,\n batch_idx: int,\n max_invalid=MAX_INVALID,\n raise_on_exceed_invalid: bool = False,\n num_lines: int = None,\n seed_fields: Union[dict, List[dict]] = None,\n parallelism: int = 0,\n ) -> GenerationSummary:\n \"\"\"Generate lines for a single batch. Lines generated are added\n to the underlying ``Batch`` object for each batch. The lines\n can be accessed after generation and re-assembled into a DataFrame.\n\n Args:\n batch_idx: The batch number\n max_invalid: The max number of invalid lines that can be generated, if\n this is exceeded, generation will stop\n raise_on_exceed_invalid: If true and if the number of lines generated exceeds the ``max_invalid``\n amount, we will re-raise the error thrown by the generation module which will interrupt\n the running process. Otherwise, we will not raise the caught exception and just return ``False``\n indicating that the batch failed to generate all lines.\n num_lines: The number of lines to generate, if ``None``, then we use the number from the\n batch's config\n seed_fields: A dictionary that maps field/column names to initial seed values for those columns. 
This seed\n will only apply to the first batch that gets trained and generated. Additionally, the fields provided\n in the mapping MUST exist at the front of the first batch.\n\n NOTE:\n This param may also be a list of dicts. If this is the case, then ``num_lines`` will automatically\n be set to the list length downstream, and a 1:1 ratio will be used for generating valid lines for\n each prefix.\n parallelism: The number of concurrent workers to use. ``1`` (the default) disables parallelization,\n while a non-positive value means \"number of CPUs + x\" (i.e., use ``0`` for using as many workers\n as there are CPUs). A floating-point value is interpreted as a fraction of the available CPUs,\n rounded down.\n \"\"\"\n try:\n batch = self.batches[batch_idx]\n except KeyError: # pragma: no cover\n raise ValueError(\"invalid batch index\")\n\n seed_string = None\n\n # If we are on batch 0 and we have seed values, we want to validate that\n # the seed values line up properly with the first N columns.\n if batch_idx == 0 and seed_fields is not None:\n seed_string = _validate_batch_seed_values(batch, seed_fields)\n\n batch: Batch\n batch.reset_gen_data()\n validator = batch.get_validator()\n if num_lines is None:\n num_lines = batch.config.gen_lines\n\n if isinstance(seed_fields, list):\n num_lines = len(seed_fields)\n\n t = tqdm(total=num_lines, desc=\"Valid record count \")\n t2 = tqdm(total=max_invalid, desc=\"Invalid record count \")\n line: GenText\n summary = GenerationSummary()\n try:\n for line in generate_text(\n batch.config,\n line_validator=validator,\n max_invalid=max_invalid,\n num_lines=num_lines,\n start_string=seed_string,\n parallelism=parallelism,\n ):\n if line.valid is None or line.valid is True:\n batch.add_valid_data(line)\n t.update(1)\n summary.valid_lines += 1\n else:\n t2.update(1)\n batch.gen_data_invalid.append(line)\n summary.invalid_lines += 1\n except TooManyInvalidError:\n if raise_on_exceed_invalid:\n raise\n else:\n return summary\n t.close()\n t2.close()\n summary.is_valid = batch.gen_data_count >= num_lines\n return summary\n\n def create_record_factory(\n self,\n *,\n num_lines: int,\n max_invalid: int = MAX_INVALID,\n validator: Callable = None,\n seed_fields: Union[dict, List[dict]] = None,\n parallellism: int = 4,\n **kwargs,\n ) -> RecordFactory:\n if validator is not None:\n if not callable(validator):\n raise ValueError(\"validator must be callable\")\n return RecordFactory(\n num_lines=num_lines,\n batches=self.batches,\n delimiter=self.batches[0].config.field_delimiter,\n header_list=self.original_headers or self.master_header_list,\n seed_fields=seed_fields,\n max_invalid=max_invalid,\n validator=validator,\n parallelism=parallellism,\n **kwargs,\n )\n\n def generate_all_batch_lines(\n self,\n max_invalid=MAX_INVALID,\n raise_on_failed_batch: bool = False,\n num_lines: int = None,\n seed_fields: Union[dict, List[dict]] = None,\n parallelism: int = 0,\n ) -> Dict[int, GenerationSummary]:\n \"\"\"Generate synthetic lines for all batches. Lines for each batch\n are added to the individual ``Batch`` objects. Once generateion is\n done, you may re-assemble the dataset into a DataFrame.\n\n Example::\n\n my_batch.generate_all_batch_lines()\n # Wait for all generation to complete\n synthetic_df = my_batch.batches_to_df()\n\n Args:\n max_invalid: The number of invalid lines, per batch. 
If this number\n is exceeded for any batch, generation will stop.\n raise_on_failed_batch: If True, then an exception will be raised if any single batch\n fails to generate the requested number of lines. If False, then the failed batch\n will be set to ``False`` in the result dictionary from this method.\n num_lines: The number of lines to create from each batch. If ``None`` then the value\n from the config template will be used.\n\n NOTE:\n Will be overridden / ignored if ``seed_fields`` is a list. Will be set to the len of the list.\n seed_fields: A dictionary that maps field/column names to initial seed values for those columns. This seed\n will only apply to the first batch that gets trained and generated. Additionally, the fields provided\n in the mapping MUST exist at the front of the first batch.\n\n NOTE:\n This param may also be a list of dicts. If this is the case, then ``num_lines`` will automatically\n be set to the list length downstream, and a 1:1 ratio will be used for generating valid lines for\n each prefix.\n parallelism: The number of concurrent workers to use. ``1`` (the default) disables parallelization,\n while a non-positive value means \"number of CPUs + x\" (i.e., use ``0`` for using as many workers\n as there are CPUs). A floating-point value is interpreted as a fraction of the available CPUs,\n rounded down.\n\n Returns:\n A dictionary of batch number to a dictionary that reports the number of valid, invalid lines and bool value\n that shows if each batch was able to generate the full number of requested lines::\n\n {\n 0: GenerationSummary(valid_lines=1000, invalid_lines=10, is_valid=True),\n 1: GenerationSummary(valid_lines=500, invalid_lines=5, is_valid=True)\n }\n \"\"\"\n batch_status = {}\n for idx in self.batches.keys():\n batch_status[idx] = self.generate_batch_lines(\n idx,\n max_invalid=max_invalid,\n raise_on_exceed_invalid=raise_on_failed_batch,\n num_lines=num_lines,\n seed_fields=seed_fields,\n parallelism=parallelism,\n )\n return batch_status\n\n def batch_to_df(self, batch_idx: int) -> pd.DataFrame: # pragma: no cover\n \"\"\"Extract a synthetic data DataFrame from a single batch.\n\n Args:\n batch_idx: The batch number\n\n Returns:\n A DataFrame with synthetic data\n \"\"\"\n try:\n return self.batches[batch_idx].synthetic_df\n except KeyError:\n raise ValueError(\"batch_idx is invalid!\")\n\n def batches_to_df(self) -> pd.DataFrame:\n \"\"\"Convert all batches to a single synthetic data DataFrame.\n\n Returns:\n A single DataFrame that is the concatenation of all the\n batch DataFrames.\n \"\"\"\n batch_iter = iter(self.batches.values())\n base_batch = next(batch_iter)\n accum_df = base_batch.synthetic_df\n\n for batch in batch_iter:\n accum_df = pd.concat([accum_df, batch.synthetic_df], axis=1)\n\n return accum_df[self.original_headers or self.master_header_list]\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.concat" ] ]
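A minimal end-to-end sketch of the DataFrameBatch / RecordFactory workflow defined in the record above. `my_df` and `my_config` are placeholders (the config is assumed to carry at least a `checkpoint_dir` and `field_delimiter`); every call used here appears in the class definitions in the record, but this driver script itself is not part of it.

    # Hypothetical driver, assuming the classes above are importable.
    batcher = DataFrameBatch(df=my_df, config=my_config)
    batcher.create_training_data()                     # one training CSV per batch directory
    batcher.train_all_batches()                        # one model per batch
    batcher.generate_all_batch_lines(max_invalid=1000)
    synthetic_df = batcher.batches_to_df()             # re-assemble columns into one DataFrame

    # Record-oriented generation through the RecordFactory helper:
    factory = batcher.create_record_factory(num_lines=100)
    records_df = factory.generate_all(output="df")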
weiwei1115/models
[ "14c3209118b2cadcce9a8f66b760c9cddb3a02ad" ]
[ "PaddleNLP/examples/text_generation/vae-seq2seq/model.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nimport paddle.nn.initializer as I\n\n\nclass CrossEntropyWithKL(nn.Layer):\n \"\"\"\n backward_loss = kl_loss * kl_weight + cross_entropy_loss\n \"\"\"\n\n def __init__(self, base_kl_weight, anneal_r):\n super(CrossEntropyWithKL, self).__init__()\n self.kl_weight = base_kl_weight\n self.anneal_r = anneal_r\n self.loss = 0.0\n self.kl_loss = 0.0\n self.rec_loss = 0.0\n\n def update_kl_weight(self):\n self.kl_weight = min(1.0, self.kl_weight + self.anneal_r)\n\n def forward(self, kl_loss, dec_output, trg_mask, label):\n self.update_kl_weight()\n self.kl_loss = kl_loss\n\n rec_loss = F.softmax_with_cross_entropy(\n logits=dec_output, label=label, soft_label=False)\n\n rec_loss = paddle.squeeze(rec_loss, axis=[2])\n rec_loss = rec_loss * trg_mask\n rec_loss = paddle.mean(rec_loss, axis=[0])\n rec_loss = paddle.sum(rec_loss)\n self.rec_loss = rec_loss\n\n self.loss = self.kl_loss * self.kl_weight + self.rec_loss\n return self.loss\n\n\nclass Perplexity(paddle.metric.Metric):\n def __init__(self, name='ppl', reset_freq=100, *args, **kwargs):\n self.cross_entropy = kwargs.pop('loss')\n super(Perplexity, self).__init__(*args, **kwargs)\n self._name = name\n self.total_ce = 0\n self.word_count = 0\n self.reset_freq = reset_freq\n self.batch_size = 0\n\n def update(self, kl_loss, dec_output, trg_mask, label, *args):\n # Perplexity is calculated using cross entropy\n self.batch_size = dec_output.shape[0]\n loss = self.cross_entropy.loss.numpy()\n self.total_ce += loss[0] * self.batch_size\n self.word_count += np.sum(trg_mask)\n\n def reset(self):\n self.total_ce = 0\n self.word_count = 0\n\n def accumulate(self):\n return np.exp(self.total_ce / self.word_count)\n\n def name(self):\n return self._name\n\n\nclass NegativeLogLoss(paddle.metric.Metric):\n def __init__(self, name='nll', reset_freq=100, *args, **kwargs):\n self.cross_entropy = kwargs.pop('loss')\n super(NegativeLogLoss, self).__init__(*args, **kwargs)\n self._name = name\n self.total_ce = 0\n self.batch_count = 0\n self.reset_freq = reset_freq\n self.batch_size = 0\n self.sample_count = 0\n\n def update(self, kl_loss, dec_output, trg_mask, label, *args):\n self.batch_size = dec_output.shape[0]\n loss = self.cross_entropy.loss.numpy()\n self.total_ce += loss[0] * self.batch_size\n self.sample_count += self.batch_size\n\n def reset(self):\n self.total_ce = 0\n self.sample_count = 0\n\n def accumulate(self):\n return (self.total_ce / self.sample_count)\n\n def name(self):\n return self._name\n\n\nclass TrainCallback(paddle.callbacks.ProgBarLogger):\n def __init__(self, ppl, nll, log_freq=200, verbose=2):\n super(TrainCallback, self).__init__(log_freq, verbose)\n self.ppl = ppl\n self.nll = nll\n\n def on_train_begin(self, logs=None):\n super(TrainCallback, self).on_train_begin(logs)\n self.train_metrics = [\"loss\", \"ppl\", \"nll\", \"kl 
weight\", \"kl loss\", \"rec loss\"]\n\n def on_epoch_begin(self, epoch=None, logs=None):\n super(TrainCallback, self).on_epoch_begin(epoch, logs)\n self.ppl.reset()\n self.nll.reset()\n\n def on_train_batch_end(self, step, logs=None):\n # loss and kl weight are not accumulated\n logs[\"kl weight\"] = self.ppl.cross_entropy.kl_weight\n logs[\"kl loss\"] = self.ppl.cross_entropy.kl_loss.numpy()[0]\n logs[\"rec loss\"] = self.ppl.cross_entropy.rec_loss.numpy()[0]\n super(TrainCallback, self).on_train_batch_end(step, logs)\n\n def on_eval_begin(self, logs=None):\n super(TrainCallback, self).on_eval_begin(logs)\n self.eval_metrics = [\"loss\", \"ppl\", \"nll\"]\n\n def on_eval_batch_end(self, step, logs=None):\n super(TrainCallback, self).on_eval_batch_end(step, logs)\n\n\nclass LSTMEncoder(nn.Layer):\n def __init__(self,\n vocab_size,\n embed_dim,\n hidden_size,\n num_layers,\n init_scale=0.1,\n enc_dropout=0.):\n super(LSTMEncoder, self).__init__()\n self.src_embedder = nn.Embedding(\n vocab_size,\n embed_dim,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n self.lstm = nn.LSTM(\n input_size=embed_dim,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=enc_dropout)\n if enc_dropout > 0.0:\n self.dropout = nn.Dropout(enc_dropout)\n else:\n self.dropout = None\n\n def forward(self, src, src_length):\n src_emb = self.src_embedder(src)\n\n if self.dropout:\n src_emb = self.dropout(src_emb)\n enc_output, enc_final_state = self.lstm(\n src_emb, sequence_length=src_length)\n if self.dropout:\n enc_output = self.dropout(enc_output)\n\n enc_final_state = [\n [h, c] for h, c in zip(enc_final_state[0], enc_final_state[1])\n ]\n return enc_output, enc_final_state\n\n\nclass LSTMDecoderCell(nn.Layer):\n def __init__(self,\n num_layers,\n embed_dim,\n hidden_size,\n latent_size,\n dropout=None):\n super(LSTMDecoderCell, self).__init__()\n self.dropout = dropout\n self.lstm_cells = nn.LayerList([\n nn.LSTMCell(\n input_size=embed_dim + latent_size, hidden_size=hidden_size)\n for i in range(num_layers)\n ])\n\n def forward(self, step_input, lstm_states, latent_z):\n new_lstm_states = []\n step_input = paddle.concat([step_input, latent_z], 1)\n for i, lstm_cell in enumerate(self.lstm_cells):\n out, new_lstm_state = lstm_cell(step_input, lstm_states[i])\n if self.dropout:\n step_input = self.dropout(out)\n else:\n step_input = out\n new_lstm_states.append(new_lstm_state)\n if self.dropout:\n step_input = self.dropout(step_input)\n out = step_input\n return out, new_lstm_states\n\n\nclass LSTMDecoder(nn.Layer):\n def __init__(self,\n vocab_size,\n embed_dim,\n hidden_size,\n latent_size,\n num_layers,\n init_scale=0.1,\n dec_dropout=0.):\n super(LSTMDecoder, self).__init__()\n self.num_layers = num_layers\n self.embed_dim = embed_dim\n self.hidden_size = hidden_size\n self.latent_size = latent_size\n self.trg_embedder = nn.Embedding(\n vocab_size,\n embed_dim,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n\n self.output_fc = nn.Linear(\n hidden_size,\n vocab_size,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n\n if dec_dropout > 0.0:\n self.dropout = nn.Dropout(dec_dropout)\n else:\n self.dropout = None\n\n self.lstm = nn.RNN(\n LSTMDecoderCell(self.num_layers, self.embed_dim, self.hidden_size,\n self.latent_size, self.dropout))\n\n def forward(self, trg, dec_initial_states, latent_z):\n trg_emb = self.trg_embedder(trg)\n if self.dropout:\n trg_emb = 
self.dropout(trg_emb)\n lstm_output, _ = self.lstm(\n inputs=trg_emb,\n initial_states=dec_initial_states,\n latent_z=latent_z)\n dec_output = self.output_fc(lstm_output)\n return dec_output\n\n\nclass VAESeq2SeqModel(nn.Layer):\n def __init__(self,\n embed_dim,\n hidden_size,\n latent_size,\n vocab_size,\n num_layers=1,\n init_scale=0.1,\n PAD_ID=0,\n enc_dropout=0.,\n dec_dropout=0.):\n super(VAESeq2SeqModel, self).__init__()\n self.PAD_ID = PAD_ID\n self.latent_size = latent_size\n self.vocab_size = vocab_size\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.encoder = LSTMEncoder(vocab_size, embed_dim, hidden_size,\n num_layers, init_scale, enc_dropout)\n self.decoder = LSTMDecoder(vocab_size, embed_dim, hidden_size,\n latent_size, num_layers, init_scale,\n dec_dropout)\n self.distributed_fc = nn.Linear(\n hidden_size * 2,\n latent_size * 2,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n self.fc = nn.Linear(\n latent_size,\n 2 * hidden_size * num_layers,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n\n def sampling(self, z_mean, z_log_var):\n \"\"\"\n Reparameterization trick \n \"\"\"\n # By default, random_normal has mean=0 and std=1.0\n epsilon = paddle.normal(shape=(z_mean.shape[0], self.latent_size))\n epsilon.stop_gradient = True\n return z_mean + paddle.exp(0.5 * z_log_var) * epsilon\n\n def build_distribution(self, enc_final_state=None):\n enc_hidden = [\n paddle.concat(\n state, axis=-1) for state in enc_final_state\n ]\n\n enc_hidden = paddle.concat(enc_hidden, axis=-1)\n z_mean_log_var = self.distributed_fc(enc_hidden)\n z_mean, z_log_var = paddle.split(z_mean_log_var, 2, -1)\n return z_mean, z_log_var\n\n def calc_kl_dvg(self, means, logvars):\n \"\"\"\n Compute the KL divergence between Gaussian distribution\n \"\"\"\n kl_cost = -0.5 * (\n logvars - paddle.square(means) - paddle.exp(logvars) + 1.0)\n kl_cost = paddle.mean(kl_cost, 0)\n\n return paddle.sum(kl_cost)\n\n def forward(self, src, src_length, trg, trg_length):\n # Encoder\n _, enc_final_state = self.encoder(src, src_length)\n\n # Build distribution\n z_mean, z_log_var = self.build_distribution(enc_final_state)\n\n # Decoder\n latent_z = self.sampling(z_mean, z_log_var)\n\n dec_first_hidden_cell = self.fc(latent_z)\n dec_first_hidden, dec_first_cell = paddle.split(\n dec_first_hidden_cell, 2, axis=-1)\n if self.num_layers > 1:\n dec_first_hidden = paddle.split(dec_first_hidden, self.num_layers)\n dec_first_cell = paddle.split(dec_first_cell, self.num_layers)\n else:\n dec_first_hidden = [dec_first_hidden]\n dec_first_cell = [dec_first_cell]\n dec_initial_states = [[h, c]\n for h, c in zip(dec_first_hidden, dec_first_cell)]\n\n dec_output = self.decoder(trg, dec_initial_states, latent_z)\n\n kl_loss = self.calc_kl_dvg(z_mean, z_log_var)\n trg_mask = (self.PAD_ID != trg).astype(paddle.get_default_dtype())\n return kl_loss, dec_output, trg_mask\n\n\nclass VAESeq2SeqInferModel(VAESeq2SeqModel):\n def __init__(self,\n embed_dim,\n hidden_size,\n latent_size,\n vocab_size,\n start_token=1,\n end_token=2,\n beam_size=1,\n max_out_len=100):\n self.start_token = start_token\n self.end_token = end_token\n self.beam_size = beam_size\n self.max_out_len = max_out_len\n super(VAESeq2SeqInferModel, self).__init__(embed_dim, hidden_size,\n latent_size, vocab_size)\n\n def forward(self, trg):\n # Encoder\n latent_z = paddle.normal(shape=(trg.shape[0], self.latent_size))\n dec_first_hidden_cell = self.fc(latent_z)\n 
dec_first_hidden, dec_first_cell = paddle.split(\n dec_first_hidden_cell, 2, axis=-1)\n if self.num_layers > 1:\n dec_first_hidden = paddle.split(dec_first_hidden, self.num_layers)\n dec_first_cell = paddle.split(dec_first_cell, self.num_layers)\n else:\n dec_first_hidden = [dec_first_hidden]\n dec_first_cell = [dec_first_cell]\n dec_initial_states = [[h, c]\n for h, c in zip(dec_first_hidden, dec_first_cell)]\n\n output_fc = lambda x: F.one_hot(\n paddle.multinomial(\n F.softmax(paddle.squeeze(\n self.decoder.output_fc(x),[1]))),num_classes=self.vocab_size)\n\n latent_z = nn.BeamSearchDecoder.tile_beam_merge_with_batch(\n latent_z, self.beam_size)\n\n decoder = nn.BeamSearchDecoder(\n cell=self.decoder.lstm.cell,\n start_token=self.start_token,\n end_token=self.end_token,\n beam_size=self.beam_size,\n embedding_fn=self.decoder.trg_embedder,\n output_fn=output_fc)\n\n outputs, _ = nn.dynamic_decode(\n decoder,\n inits=dec_initial_states,\n max_step_num=self.max_out_len,\n latent_z=latent_z)\n return outputs\n" ]
[ [ "numpy.sum", "numpy.exp" ] ]
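The `sampling` and `calc_kl_dvg` methods in the VAE model above implement the usual reparameterization trick and Gaussian KL term; a small NumPy sketch of the same arithmetic, for illustration only (toy shapes, not part of the record):

    import numpy as np

    rng = np.random.default_rng(0)
    z_mean = rng.normal(size=(4, 32))        # toy posterior means
    z_log_var = rng.normal(size=(4, 32))     # toy posterior log-variances

    # Reparameterization: z = mu + sigma * eps, with eps ~ N(0, I)
    eps = rng.normal(size=z_mean.shape)
    z = z_mean + np.exp(0.5 * z_log_var) * eps

    # KL(N(mu, sigma^2) || N(0, I)), matching calc_kl_dvg above:
    # per-dimension KL, averaged over the batch axis, summed over latent dims
    kl = -0.5 * (z_log_var - np.square(z_mean) - np.exp(z_log_var) + 1.0)
    kl_loss = float(np.sum(np.mean(kl, axis=0)))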
Kathryn-Downey/DeepMergeDomainAdaptation
[ "334331ce8871cda80590cd9ec671941a82fa859c" ]
[ "galaxy_merge_edits/grad_cam.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n#\n# Author: Kazuto Nakashima\n# URL: http://kazuto1011.github.io\n# Created: 2017-05-26\n\nfrom collections import Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\n\n\nclass _BaseWrapper(object):\n def __init__(self, model):\n super(_BaseWrapper, self).__init__()\n self.device = next(model.parameters()).device\n self.model = model\n self.handlers = [] # a set of hook function handlers\n\n def _encode_one_hot(self, ids):\n one_hot = torch.zeros_like(self.logits).to(self.device)\n one_hot.scatter_(1, ids, 1.0)\n return one_hot\n\n def forward(self, image):\n self.image_shape = image.shape[2:]\n self.features, self.logits = self.model(image)\n self.probs = F.softmax(self.logits, dim=1)\n return self.probs.sort(dim=1, descending=True) # ordered results\n\n def backward(self, ids):\n \"\"\"\n Class-specific backpropagation\n \"\"\"\n one_hot = self._encode_one_hot(ids)\n self.model.zero_grad()\n self.logits.backward(gradient=one_hot, retain_graph=True)\n\n def generate(self):\n raise NotImplementedError\n\n def remove_hook(self):\n \"\"\"\n Remove all the forward/backward hook functions\n \"\"\"\n for handle in self.handlers:\n handle.remove()\n\n\nclass BackPropagation(_BaseWrapper):\n def forward(self, image):\n self.image = image.requires_grad_()\n return super(BackPropagation, self).forward(self.image)\n\n def generate(self):\n gradient = self.image.grad.clone()\n self.image.grad.zero_()\n return gradient\n\n\nclass GuidedBackPropagation(BackPropagation):\n \"\"\"\n \"Striving for Simplicity: the All Convolutional Net\"\n https://arxiv.org/pdf/1412.6806.pdf\n Look at Figure 1 on page 8.\n \"\"\"\n\n def __init__(self, model):\n super(GuidedBackPropagation, self).__init__(model)\n\n def backward_hook(module, grad_in, grad_out):\n # Cut off negative gradients\n if isinstance(module, nn.ReLU):\n return (F.relu(grad_in[0]),)\n\n for module in self.model.named_modules():\n self.handlers.append(module[1].register_backward_hook(backward_hook))\n\n\nclass Deconvnet(BackPropagation):\n \"\"\"\n \"Striving for Simplicity: the All Convolutional Net\"\n https://arxiv.org/pdf/1412.6806.pdf\n Look at Figure 1 on page 8.\n \"\"\"\n\n def __init__(self, model):\n super(Deconvnet, self).__init__(model)\n\n def backward_hook(module, grad_in, grad_out):\n # Cut off negative gradients and ignore ReLU\n if isinstance(module, nn.ReLU):\n return (F.relu(grad_out[0]),)\n\n for module in self.model.named_modules():\n self.handlers.append(module[1].register_backward_hook(backward_hook))\n\n\nclass GradCAM(_BaseWrapper):\n \"\"\"\n \"Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization\"\n https://arxiv.org/pdf/1610.02391.pdf\n Look at Figure 2 on page 4\n \"\"\"\n\n def __init__(self, model, candidate_layers=None):\n super(GradCAM, self).__init__(model)\n self.fmap_pool = {}\n self.grad_pool = {}\n self.candidate_layers = candidate_layers # list\n\n def save_fmaps(key):\n def forward_hook(module, input, output):\n\n #print(output) #try 0 or 1?\n self.fmap_pool[key] = output[0].detach() #since it outputs a tuple, what do we do\n\n return forward_hook\n\n def save_grads(key):\n def backward_hook(module, grad_in, grad_out):\n self.grad_pool[key] = grad_out[0].detach()\n\n return backward_hook\n\n # If any candidates are not specified, the hook is registered to all the layers.\n for name, module in self.model.named_modules():\n if self.candidate_layers is None or name 
in self.candidate_layers:\n self.handlers.append(module.register_forward_hook(save_fmaps(name)))\n self.handlers.append(module.register_backward_hook(save_grads(name)))\n\n def _find(self, pool, target_layer):\n if target_layer in pool.keys():\n return pool[target_layer]\n else:\n raise ValueError(\"Invalid layer name: {}\".format(target_layer))\n\n def generate(self, target_layer):\n fmaps = self._find(self.fmap_pool, target_layer)\n grads = self._find(self.grad_pool, target_layer)\n weights = F.adaptive_avg_pool2d(grads, 1)\n\n gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)\n gcam = F.relu(gcam)\n gcam = F.interpolate(\n gcam, self.image_shape, mode=\"bilinear\", align_corners=False\n )\n\n B, C, H, W = gcam.shape\n gcam = gcam.view(B, -1)\n gcam -= gcam.min(dim=1, keepdim=True)[0]\n gcam /= gcam.max(dim=1, keepdim=True)[0]\n gcam = gcam.view(B, C, H, W)\n\n return gcam\n\n\ndef occlusion_sensitivity(\n model, images, ids, mean=None, patch=35, stride=1, n_batches=128\n):\n \"\"\"\n \"Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization\"\n https://arxiv.org/pdf/1610.02391.pdf\n Look at Figure A5 on page 17\n\n Originally proposed in:\n \"Visualizing and Understanding Convolutional Networks\"\n https://arxiv.org/abs/1311.2901\n \"\"\"\n\n torch.set_grad_enabled(False)\n model.eval()\n mean = mean if mean else 0\n patch_H, patch_W = patch if isinstance(patch, Sequence) else (patch, patch)\n pad_H, pad_W = patch_H // 2, patch_W // 2\n\n # Padded image\n images = F.pad(images, (pad_W, pad_W, pad_H, pad_H), value=mean)\n B, _, H, W = images.shape\n new_H = (H - patch_H) // stride + 1\n new_W = (W - patch_W) // stride + 1\n\n # Prepare sampling grids\n anchors = []\n grid_h = 0\n while grid_h <= H - patch_H:\n grid_w = 0\n while grid_w <= W - patch_W:\n grid_w += stride\n anchors.append((grid_h, grid_w))\n grid_h += stride\n\n # Baseline score without occlusion\n baseline = model(images).detach().gather(1, ids)\n\n # Compute per-pixel logits\n scoremaps = []\n for i in tqdm(range(0, len(anchors), n_batches), leave=False):\n batch_images = []\n batch_ids = []\n for grid_h, grid_w in anchors[i : i + n_batches]:\n images_ = images.clone()\n images_[..., grid_h : grid_h + patch_H, grid_w : grid_w + patch_W] = mean\n batch_images.append(images_)\n batch_ids.append(ids)\n batch_images = torch.cat(batch_images, dim=0)\n batch_ids = torch.cat(batch_ids, dim=0)\n scores = model(batch_images).detach().gather(1, batch_ids)\n scoremaps += list(torch.split(scores, B))\n\n diffmaps = torch.cat(scoremaps, dim=1) - baseline\n diffmaps = diffmaps.view(B, new_H, new_W)\n\n return diffmaps\n" ]
[ [ "torch.split", "torch.set_grad_enabled", "torch.nn.functional.softmax", "torch.nn.functional.pad", "torch.zeros_like", "torch.nn.functional.relu", "torch.mul", "torch.nn.functional.adaptive_avg_pool2d", "torch.cat", "torch.nn.functional.interpolate" ] ]
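A hedged usage sketch for the GradCAM wrapper above. `model`, `images`, and the layer name "layer4" are assumptions; the wrapped model must return a `(features, logits)` tuple, as `_BaseWrapper.forward` in the record expects.

    # model: torch.nn.Module returning (features, logits); images: (B, C, H, W) tensor
    gcam = GradCAM(model, candidate_layers=["layer4"])   # layer name is a placeholder
    probs, ids = gcam.forward(images)                    # class probs sorted per sample
    gcam.backward(ids=ids[:, [0]])                       # backprop w.r.t. the top-1 class
    heatmaps = gcam.generate(target_layer="layer4")      # (B, 1, H, W), normalized to [0, 1]
    gcam.remove_hook()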
enthought/numpy-refactor
[ "209866bc55eee56e92692307c4437af024bae87d" ]
[ "numpy/fft/tests/test_helper.py" ]
[ "#!/usr/bin/env python\n# Copied from fftpack.helper by Pearu Peterson, October 2005\n\"\"\" Test functions for fftpack.helper module\n\"\"\"\n\nfrom numpy.testing import *\nfrom numpy.fft import fftshift,ifftshift,fftfreq\n\nfrom numpy import pi\n\ndef random(size):\n return rand(*size)\n\nclass TestFFTShift(TestCase):\n def test_definition(self):\n x = [0,1,2,3,4,-4,-3,-2,-1]\n y = [-4,-3,-2,-1,0,1,2,3,4]\n assert_array_almost_equal(fftshift(x),y)\n assert_array_almost_equal(ifftshift(y),x)\n x = [0,1,2,3,4,-5,-4,-3,-2,-1]\n y = [-5,-4,-3,-2,-1,0,1,2,3,4]\n assert_array_almost_equal(fftshift(x),y)\n assert_array_almost_equal(ifftshift(y),x)\n\n def test_inverse(self):\n for n in [1,4,9,100,211]:\n x = random((n,))\n assert_array_almost_equal(ifftshift(fftshift(x)),x)\n\n\nclass TestFFTFreq(TestCase):\n def test_definition(self):\n x = [0,1,2,3,4,-4,-3,-2,-1]\n assert_array_almost_equal(9*fftfreq(9),x)\n assert_array_almost_equal(9*pi*fftfreq(9,pi),x)\n x = [0,1,2,3,4,-5,-4,-3,-2,-1]\n assert_array_almost_equal(10*fftfreq(10),x)\n assert_array_almost_equal(10*pi*fftfreq(10,pi),x)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
[ [ "numpy.fft.fftshift", "numpy.fft.fftfreq", "numpy.fft.ifftshift" ] ]
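The identities exercised by these FFT helper tests can be checked directly; a short sketch:

    import numpy as np

    x = np.arange(9)
    # For odd n, fftfreq(n) * n is [0, 1, ..., n//2, -(n//2), ..., -1]
    assert np.allclose(np.fft.fftfreq(9) * 9, [0, 1, 2, 3, 4, -4, -3, -2, -1])
    # ifftshift is the exact inverse of fftshift for any length
    assert np.allclose(np.fft.ifftshift(np.fft.fftshift(x)), x)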
chienerh/EPN_PointCloud
[ "d1488cf1ff82a5bc7ac89c28df30fa2f3f2e0e30" ]
[ "SPConvNets/models/pointnet_epn_netvlad.py" ]
[ "\"\"\"\nCode taken from https://github.com/cattaneod/PointNetVlad-Pytorch/blob/master/models/PointNetVlad.py\n\"\"\"\n\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn.functional as F\nimport math\nimport SPConvNets.models.pr_so3net_pn as pr_so3net_pn\nimport SPConvNets.utils as M\n\n\nclass STN3d(nn.Module):\n def __init__(self, num_points=2500, k=3, use_bn=True):\n super(STN3d, self).__init__()\n self.k = k\n self.kernel_size = 3 if k == 3 else 1\n self.channels = 1 if k == 3 else k\n self.num_points = num_points\n self.use_bn = use_bn\n self.conv1 = torch.nn.Conv2d(self.channels, 64, (1, self.kernel_size))\n self.conv2 = torch.nn.Conv2d(64, 128, (1,1))\n self.conv3 = torch.nn.Conv2d(128, 1024, (1,1))\n self.mp1 = torch.nn.MaxPool2d((num_points, 1), 1)\n self.fc1 = nn.Linear(1024, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, k*k)\n self.fc3.weight.data.zero_()\n self.fc3.bias.data.zero_()\n self.relu = nn.ReLU()\n\n if use_bn:\n self.bn1 = nn.BatchNorm2d(64)\n self.bn2 = nn.BatchNorm2d(128)\n self.bn3 = nn.BatchNorm2d(1024)\n self.bn4 = nn.BatchNorm1d(512)\n self.bn5 = nn.BatchNorm1d(256)\n\n def forward(self, x):\n batchsize = x.size()[0]\n if self.use_bn:\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n else:\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = self.mp1(x)\n x = x.view(-1, 1024)\n\n if self.use_bn:\n x = F.relu(self.bn4(self.fc1(x)))\n x = F.relu(self.bn5(self.fc2(x)))\n else:\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n iden = Variable(torch.from_numpy(np.eye(self.k).astype(np.float32))).view(\n 1, self.k*self.k).repeat(batchsize, 1)\n if x.is_cuda:\n iden = iden.cuda()\n x = x + iden\n x = x.view(-1, self.k, self.k)\n return x\n\n\nclass PointNetfeat(nn.Module):\n def __init__(self, num_points=2500, global_feat=True, feature_transform=False, max_pool=True):\n super(PointNetfeat, self).__init__()\n self.stn = STN3d(num_points=num_points, k=3, use_bn=False)\n self.feature_trans = STN3d(num_points=num_points, k=64, use_bn=False)\n self.apply_feature_trans = feature_transform\n self.conv1 = torch.nn.Conv2d(1, 64, (1, 3), stride=2)\n self.conv2 = torch.nn.Conv2d(64, 64, (1, 1), stride=2)\n self.conv3 = torch.nn.Conv2d(64, 64, (1, 1), stride=2)\n self.conv4 = torch.nn.Conv2d(64, 128, (1, 1), stride=2)\n self.conv5 = torch.nn.Conv2d(128, 1024, (1, 1), stride=1)\n self.bn1 = nn.BatchNorm2d(64)\n self.bn2 = nn.BatchNorm2d(64)\n self.bn3 = nn.BatchNorm2d(64)\n self.bn4 = nn.BatchNorm2d(128)\n self.bn5 = nn.BatchNorm2d(1024)\n self.mp1 = torch.nn.MaxPool2d((num_points, 1), 1)\n self.num_points = num_points\n self.global_feat = global_feat\n self.max_pool = max_pool\n\n def forward(self, x):\n '''\n INPUT: (22, 1, 4096, 3) [Bx(1+P+N+1), 1, N, D]\n OUTPUT: (22, 1024, 2096, 1) if not max pool\n '''\n batchsize = x.size()[0]\n trans = self.stn(x) # 22, 3, 3\n x = torch.matmul(torch.squeeze(x), trans) # 22, 4096, 3\n x = x.view(batchsize, 1, -1, 3) # 22, 1, 4096, 3\n x = F.relu(self.bn1(self.conv1(x))) # 22, 64, 4096, 1\n x = F.relu(self.bn2(self.conv2(x))) # 22, 64, 4096, 1\n pointfeat = x\n if self.apply_feature_trans:\n f_trans = self.feature_trans(x)\n x = torch.squeeze(x)\n if batchsize == 1:\n x = torch.unsqueeze(x, 0)\n x = torch.matmul(x.transpose(1, 2), f_trans)\n x = 
x.transpose(1, 2).contiguous()\n x = x.view(batchsize, 64, -1, 1)\n x = F.relu(self.bn3(self.conv3(x)))\n x = F.relu(self.bn4(self.conv4(x)))\n x = self.bn5(self.conv5(x))\n if not self.max_pool:\n return x\n else:\n x = self.mp1(x)\n x = x.view(-1, 1024)\n if self.global_feat:\n return x, trans\n else:\n x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)\n return torch.cat([x, pointfeat], 1), trans\n\n\nclass PointNetEPN_NetVLAD(nn.Module):\n def __init__(self, opt):\n super(PointNetEPN_NetVLAD, self).__init__()\n self.opt = opt\n self.point_net = PointNetfeat(num_points=4096, global_feat=True,\n feature_transform=False, max_pool=False)\n mlps=[[64,64], [128,128]]\n out_mlps=[128, 1024]\n self.epn = pr_so3net_pn.build_model(self.opt, mlps=mlps, out_mlps=out_mlps)\n self.net_vlad = M.NetVLADLoupe(feature_size=1024, max_samples=2*self.opt.num_selected_points, cluster_size=64,\n output_dim=self.opt.global_feature_dim, gating=True, add_batch_norm=True,\n is_training=True)\n\n def forward(self, x):\n # print('x', x.shape)\n x_unsqueeze = x.unsqueeze(1)\n x_pointnet = self.point_net(x_unsqueeze) # Bx(1+P+N+1), LOCAL_DIM, N, 1\n # print('x_pointnet', x_pointnet.shape)\n x_pointnet = x_pointnet.transpose(1, 3).contiguous()\n x_pointnet = x_pointnet.view((-1, self.opt.num_selected_points, 1024))\n # print('x_pointnet', x_pointnet.shape)\n x_epn, _ = self.epn(x)\n # print('x_epn', x_epn.shape)\n x_frontend = torch.cat((x_pointnet, x_epn), 1) # Where to concatenate?\n # print('x_frontend', x_frontend.shape)\n x = self.net_vlad(x_frontend)\n return x, x_frontend\n\n\nclass PointNetVLAD_EPNNetVLAD(nn.Module):\n def __init__(self, opt):\n super(PointNetVLAD_EPNNetVLAD, self).__init__()\n self.opt = opt\n self.point_net = PointNetfeat(num_points=4096, global_feat=True,\n feature_transform=False, max_pool=False)\n self.net_vlad1 = M.NetVLADLoupe(feature_size=1024, max_samples=4096, cluster_size=64,\n output_dim=self.opt.global_feature_dim//2, gating=True, add_batch_norm=True,\n is_training=True)\n mlps=[[64,64], [128,128]]\n out_mlps=[128, 1024]\n self.epn = pr_so3net_pn.build_model(self.opt, mlps=mlps, out_mlps=out_mlps)\n self.net_vlad2 = M.NetVLADLoupe(feature_size=self.opt.model.output_num, max_samples=self.opt.num_selected_points, cluster_size=64,\n output_dim=self.opt.global_feature_dim//2, gating=True, add_batch_norm=True,\n is_training=True)\n\n def forward(self, x):\n # print('x input', x.shape)\n # PointNetVLAD\n x_unsqueeze = x.unsqueeze(1)\n x_pointnet = self.point_net(x_unsqueeze) # Bx(1+P+N+1), LOCAL_DIM, N, 1\n # print('x_pointnet', x_pointnet.shape)\n x_pointnet = x_pointnet.transpose(1, 3).contiguous()\n x_pointnet = x_pointnet.view((-1, 4096, 1024))\n # print('x_pointnet reshaped', x_pointnet.shape)\n x_pointnetvlad = self.net_vlad1(x_pointnet)\n # print('x_pointnetvlad', x_pointnetvlad.shape)\n # EPNNetVLAD\n x_epn, _ = self.epn(x)\n # print('x_epn', x_epn.shape)\n x_epnnetvlad = self.net_vlad2(x_epn)\n # print('x_epnnetvlad', x_epnnetvlad.shape)\n\n x_output = torch.cat((x_pointnetvlad, x_epnnetvlad), 1)\n x_frontend = torch.cat((x_pointnet, x_epn), 1)\n # print('x_output', x_output.shape)\n return x_output, x_frontend" ]
[ [ "torch.unsqueeze", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "numpy.eye", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.nn.Conv2d", "torch.nn.ReLU", "torch.cat", "torch.squeeze" ] ]
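A shape walk-through for the PointNet front-end defined above; the EPN and NetVLAD parts need the SPConvNets package and an `opt` object, so only `PointNetfeat` is exercised here. Batch size and point count are assumed values.

    import torch

    feat = PointNetfeat(num_points=4096, global_feat=True, max_pool=False)
    points = torch.rand(2, 1, 4096, 3)      # (batch, 1, num_points, xyz)
    local_feat = feat(points)               # strides of 2 in conv1-conv4 give num_points / 16
    print(local_feat.shape)                 # torch.Size([2, 1024, 256, 1])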
XLPRUtils/pyUtils
[ "3a62c14b0658ad3c24d83f953ee0d88530b02b23" ]
[ "pyxlpr/ppocr/postprocess/pse_postprocess/pse_postprocess.py" ]
[ "# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis code is refer from:\nhttps://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport paddle\nfrom paddle.nn import functional as F\n\nfrom pyxlpr.ppocr.postprocess.pse_postprocess.pse import pse\n\n\nclass PSEPostProcess(object):\n \"\"\"\n The post process for PSE.\n \"\"\"\n\n def __init__(self,\n thresh=0.5,\n box_thresh=0.85,\n min_area=16,\n box_type='box',\n scale=4,\n **kwargs):\n assert box_type in ['box', 'poly'], 'Only box and poly is supported'\n self.thresh = thresh\n self.box_thresh = box_thresh\n self.min_area = min_area\n self.box_type = box_type\n self.scale = scale\n\n def __call__(self, outs_dict, shape_list):\n pred = outs_dict['maps']\n if not isinstance(pred, paddle.Tensor):\n pred = paddle.to_tensor(pred)\n pred = F.interpolate(\n pred, scale_factor=4 // self.scale, mode='bilinear')\n\n score = F.sigmoid(pred[:, 0, :, :])\n\n kernels = (pred > self.thresh).astype('float32')\n text_mask = kernels[:, 0, :, :]\n kernels[:, 0:, :, :] = kernels[:, 0:, :, :] * text_mask\n\n score = score.numpy()\n kernels = kernels.numpy().astype(np.uint8)\n\n boxes_batch = []\n for batch_index in range(pred.shape[0]):\n boxes, scores = self.boxes_from_bitmap(score[batch_index],\n kernels[batch_index],\n shape_list[batch_index])\n\n boxes_batch.append({'points': boxes, 'scores': scores})\n return boxes_batch\n\n def boxes_from_bitmap(self, score, kernels, shape):\n label = pse(kernels, self.min_area)\n return self.generate_box(score, label, shape)\n\n def generate_box(self, score, label, shape):\n src_h, src_w, ratio_h, ratio_w = shape\n label_num = np.max(label) + 1\n\n boxes = []\n scores = []\n for i in range(1, label_num):\n ind = label == i\n points = np.array(np.where(ind)).transpose((1, 0))[:, ::-1]\n\n if points.shape[0] < self.min_area:\n label[ind] = 0\n continue\n\n score_i = np.mean(score[ind])\n if score_i < self.box_thresh:\n label[ind] = 0\n continue\n\n if self.box_type == 'box':\n rect = cv2.minAreaRect(points)\n bbox = cv2.boxPoints(rect)\n elif self.box_type == 'poly':\n box_height = np.max(points[:, 1]) + 10\n box_width = np.max(points[:, 0]) + 10\n\n mask = np.zeros((box_height, box_width), np.uint8)\n mask[points[:, 1], points[:, 0]] = 255\n\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n bbox = np.squeeze(contours[0], 1)\n else:\n raise NotImplementedError\n\n bbox[:, 0] = np.clip(np.round(bbox[:, 0] / ratio_w), 0, src_w)\n bbox[:, 1] = np.clip(np.round(bbox[:, 1] / ratio_h), 0, src_h)\n boxes.append(bbox)\n scores.append(score_i)\n return boxes, scores\n" ]
[ [ "numpy.zeros", "numpy.squeeze", "numpy.max", "numpy.round", "numpy.where", "numpy.mean" ] ]
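The `generate_box` routine above turns each connected component of the kernel label map into a rotated rectangle; a toy sketch of the box branch (`box_type='box'`) on a synthetic label map, for illustration only:

    import cv2
    import numpy as np

    label = np.zeros((60, 100), dtype=np.uint8)
    label[20:40, 10:70] = 1                  # one fake text component

    points = np.array(np.where(label == 1)).transpose((1, 0))[:, ::-1]  # (x, y) pairs
    points = points.astype(np.float32)       # minAreaRect expects int32/float32 point sets
    rect = cv2.minAreaRect(points)           # ((cx, cy), (w, h), angle)
    bbox = cv2.boxPoints(rect)               # 4 corners of the rotated box
    print(bbox)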
code-review-doctor/keras
[ "96130040540e1405ffe746ddf2b2cceb9b8b8f65" ]
[ "keras/api/tests/api_compatibility_test.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ==============================================================================\n\"\"\"Keras API compatibility tests.\n\nThis test ensures all changes to the public API of Keras are intended.\n\nIf this test fails, it means a change has been made to the public API. Backwards\nincompatible changes are not allowed. You can run the test with\n\"--update_goldens\" flag set to \"True\" to update goldens when making changes to\nthe public Keras python API.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport argparse\nimport os\nimport re\nimport sys\n\nimport six\n\nfrom google.protobuf import message\nfrom google.protobuf import text_format\n\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.tools.api.lib import api_objects_pb2\nfrom tensorflow.tools.api.lib import python_object_to_proto_visitor\nfrom tensorflow.tools.common import public_api\nfrom tensorflow.tools.common import traverse\n\n\n# FLAGS defined at the bottom:\nFLAGS = None\n# DEFINE_boolean, update_goldens, default False:\n_UPDATE_GOLDENS_HELP = \"\"\"\n Update stored golden files if API is updated. WARNING: All API changes\n have to be authorized by TensorFlow leads.\n\"\"\"\n\n# DEFINE_boolean, verbose_diffs, default True:\n_VERBOSE_DIFFS_HELP = \"\"\"\n If set to true, print line by line diffs on all libraries. If set to\n false, only print which libraries have differences.\n\"\"\"\n\n# Initialized with _InitPathConstants function below.\n_API_GOLDEN_FOLDER_V1 = None\n_API_GOLDEN_FOLDER_V2 = None\n\n\ndef _InitPathConstants():\n global _API_GOLDEN_FOLDER_V1\n global _API_GOLDEN_FOLDER_V2\n root_golden_path_v2 = os.path.join(\n tf.compat.v1.resource_loader.get_data_files_path(),\n '..', 'golden', 'v2', 'tensorflow.keras.pbtxt')\n\n if FLAGS.update_goldens:\n root_golden_path_v2 = os.path.realpath(root_golden_path_v2)\n # Get API directories based on the root golden file. This way\n # we make sure to resolve symbolic links before creating new files.\n _API_GOLDEN_FOLDER_V2 = os.path.dirname(root_golden_path_v2)\n _API_GOLDEN_FOLDER_V1 = os.path.normpath(\n os.path.join(_API_GOLDEN_FOLDER_V2, '..', 'v1'))\n\n\n_TEST_README_FILE = os.path.join(\n tf.compat.v1.resource_loader.get_data_files_path(), 'README.txt')\n_UPDATE_WARNING_FILE = os.path.join(\n tf.compat.v1.resource_loader.get_data_files_path(),\n 'API_UPDATE_WARNING.txt')\n\n\ndef _KeyToFilePath(key, api_version):\n \"\"\"From a given key, construct a filepath.\n\n Filepath will be inside golden folder for api_version.\n\n Args:\n key: a string used to determine the file path\n api_version: a number indicating the tensorflow API version, e.g. 
1 or 2.\n\n Returns:\n A string of file path to the pbtxt file which describes the public API\n \"\"\"\n\n def _ReplaceCapsWithDash(matchobj):\n match = matchobj.group(0)\n return '-%s' % (match.lower())\n\n case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash,\n six.ensure_str(key))\n api_folder = (\n _API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1)\n return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)\n\n\ndef _FileNameToKey(filename):\n \"\"\"From a given filename, construct a key we use for api objects.\"\"\"\n\n def _ReplaceDashWithCaps(matchobj):\n match = matchobj.group(0)\n return match[1].upper()\n\n base_filename = os.path.basename(filename)\n base_filename_without_ext = os.path.splitext(base_filename)[0]\n api_object_key = re.sub('((-[a-z]){1})', _ReplaceDashWithCaps,\n six.ensure_str(base_filename_without_ext))\n return api_object_key\n\n\ndef _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):\n \"\"\"A Visitor that crashes on subclasses of generated proto classes.\"\"\"\n # If the traversed object is a proto Message class\n if not (isinstance(parent, type) and issubclass(parent, message.Message)):\n return\n if parent is message.Message:\n return\n # Check that it is a direct subclass of Message.\n if message.Message not in parent.__bases__:\n raise NotImplementedError(\n 'Object tf.%s is a subclass of a generated proto Message. '\n 'They are not yet supported by the API tools.' % path)\n\n\ndef _FilterGoldenProtoDict(golden_proto_dict, omit_golden_symbols_map):\n \"\"\"Filter out golden proto dict symbols that should be omitted.\"\"\"\n if not omit_golden_symbols_map:\n return golden_proto_dict\n filtered_proto_dict = dict(golden_proto_dict)\n for key, symbol_list in six.iteritems(omit_golden_symbols_map):\n api_object = api_objects_pb2.TFAPIObject()\n api_object.CopyFrom(filtered_proto_dict[key])\n filtered_proto_dict[key] = api_object\n module_or_class = None\n if api_object.HasField('tf_module'):\n module_or_class = api_object.tf_module\n elif api_object.HasField('tf_class'):\n module_or_class = api_object.tf_class\n if module_or_class is not None:\n for members in (module_or_class.member, module_or_class.member_method):\n filtered_members = [m for m in members if m.name not in symbol_list]\n # Two steps because protobuf repeated fields disallow slice assignment.\n del members[:]\n members.extend(filtered_members)\n return filtered_proto_dict\n\n\nclass ApiCompatibilityTest(tf.test.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(ApiCompatibilityTest, self).__init__(*args, **kwargs)\n\n self._update_golden_warning = file_io.read_file_to_string(\n _UPDATE_WARNING_FILE)\n\n self._test_readme_message = file_io.read_file_to_string(_TEST_README_FILE)\n\n def _AssertProtoDictEquals(self,\n expected_dict,\n actual_dict,\n verbose=False,\n update_goldens=False,\n additional_missing_object_message='',\n api_version=2):\n \"\"\"Diff given dicts of protobufs and report differences a readable way.\n\n Args:\n expected_dict: a dict of TFAPIObject protos constructed from golden files.\n actual_dict: a ict of TFAPIObject protos constructed by reading from the\n TF package linked to the test.\n verbose: Whether to log the full diffs, or simply report which files were\n different.\n update_goldens: Whether to update goldens when there are diffs found.\n additional_missing_object_message: Message to print when a symbol is\n missing.\n api_version: TensorFlow API version to test.\n \"\"\"\n diffs = []\n verbose_diffs = 
[]\n\n expected_keys = set(expected_dict.keys())\n actual_keys = set(actual_dict.keys())\n only_in_expected = expected_keys - actual_keys\n only_in_actual = actual_keys - expected_keys\n all_keys = expected_keys | actual_keys\n\n # This will be populated below.\n updated_keys = []\n\n for key in all_keys:\n diff_message = ''\n verbose_diff_message = ''\n # First check if the key is not found in one or the other.\n if key in only_in_expected:\n diff_message = 'Object %s expected but not found (removed). %s' % (\n key, additional_missing_object_message)\n verbose_diff_message = diff_message\n elif key in only_in_actual:\n diff_message = 'New object %s found (added).' % key\n verbose_diff_message = diff_message\n else:\n # Do not truncate diff\n self.maxDiff = None # pylint: disable=invalid-name\n # Now we can run an actual proto diff.\n try:\n self.assertProtoEquals(expected_dict[key], actual_dict[key])\n except AssertionError as e:\n updated_keys.append(key)\n diff_message = 'Change detected in python object: %s.' % key\n verbose_diff_message = str(e)\n\n # All difference cases covered above. If any difference found, add to the\n # list.\n if diff_message:\n diffs.append(diff_message)\n verbose_diffs.append(verbose_diff_message)\n\n # If diffs are found, handle them based on flags.\n if diffs:\n diff_count = len(diffs)\n logging.error(self._test_readme_message)\n logging.error('%d differences found between API and golden.', diff_count)\n\n if update_goldens:\n # Write files if requested.\n logging.warning(self._update_golden_warning)\n\n # If the keys are only in expected, some objects are deleted.\n # Remove files.\n for key in only_in_expected:\n filepath = _KeyToFilePath(key, api_version)\n tf.io.gfile.remove(filepath)\n\n # If the files are only in actual (current library), these are new\n # modules. Write them to files. Also record all updates in files.\n for key in only_in_actual | set(updated_keys):\n filepath = _KeyToFilePath(key, api_version)\n file_io.write_string_to_file(\n filepath, text_format.MessageToString(actual_dict[key]))\n else:\n # Include the actual differences to help debugging.\n for d, verbose_d in zip(diffs, verbose_diffs):\n logging.error(' %s', d)\n logging.error(' %s', verbose_d)\n # Fail if we cannot fix the test by updating goldens.\n self.fail('%d differences found between API and golden.' 
% diff_count)\n\n else:\n logging.info('No differences found between API and golden.')\n\n def _checkBackwardsCompatibility(self,\n root,\n golden_file_patterns,\n api_version,\n additional_private_map=None,\n omit_golden_symbols_map=None):\n # Extract all API stuff.\n visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor(\n default_path='tensorflow.keras')\n\n public_api_visitor = public_api.PublicAPIVisitor(visitor)\n if additional_private_map:\n public_api_visitor.private_map.update(additional_private_map)\n public_api_visitor.set_root_name('tf.keras')\n\n traverse.traverse(root, public_api_visitor)\n proto_dict = visitor.GetProtos()\n\n # Read all golden files.\n golden_file_list = tf.compat.v1.gfile.Glob(golden_file_patterns)\n\n def _ReadFileToProto(filename):\n \"\"\"Read a filename, create a protobuf from its contents.\"\"\"\n ret_val = api_objects_pb2.TFAPIObject()\n text_format.Merge(file_io.read_file_to_string(filename), ret_val)\n return ret_val\n\n golden_proto_dict = {\n _FileNameToKey(filename): _ReadFileToProto(filename)\n for filename in golden_file_list\n }\n golden_proto_dict = _FilterGoldenProtoDict(golden_proto_dict,\n omit_golden_symbols_map)\n\n # Diff them. Do not fail if called with update.\n # If the test is run to update goldens, only report diffs but do not fail.\n self._AssertProtoDictEquals(\n golden_proto_dict,\n proto_dict,\n verbose=FLAGS.verbose_diffs,\n update_goldens=FLAGS.update_goldens,\n api_version=api_version)\n\n def testAPIBackwardsCompatibility(self):\n api_version = 1\n if hasattr(tf, '_major_api_version') and tf._major_api_version == 2:\n api_version = 2\n golden_file_patterns = [\n os.path.join(\n tf.compat.v1.resource_loader.get_root_dir_with_all_resources(),\n _KeyToFilePath('*', api_version))]\n\n self._checkBackwardsCompatibility(\n tf.keras,\n golden_file_patterns,\n api_version,\n # Skip compat.v1 and compat.v2 since they are validated\n # in separate tests.\n additional_private_map={'tf.compat': ['v1', 'v2']},\n omit_golden_symbols_map={})\n\n def testAPIBackwardsCompatibilityV1(self):\n api_version = 1\n golden_file_patterns = os.path.join(\n tf.compat.v1.resource_loader.get_root_dir_with_all_resources(),\n _KeyToFilePath('*', api_version))\n self._checkBackwardsCompatibility(\n tf.compat.v1.keras,\n golden_file_patterns,\n api_version,\n additional_private_map={\n 'tf': ['pywrap_tensorflow'],\n 'tf.compat': ['v1', 'v2'],\n },\n omit_golden_symbols_map={})\n\n def testAPIBackwardsCompatibilityV2(self):\n api_version = 2\n golden_file_patterns = [os.path.join(\n tf.compat.v1.resource_loader.get_root_dir_with_all_resources(),\n _KeyToFilePath('*', api_version))]\n self._checkBackwardsCompatibility(\n tf.compat.v2.keras,\n golden_file_patterns,\n api_version,\n additional_private_map={'tf.compat': ['v1', 'v2']},\n omit_golden_symbols_map={})\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)\n parser.add_argument(\n '--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)\n FLAGS, unparsed = parser.parse_known_args()\n _InitPathConstants()\n\n # Now update argv, so that unittest library does not get confused.\n sys.argv = [sys.argv[0]] + unparsed\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.resource_loader.get_data_files_path", "tensorflow.tools.api.lib.python_object_to_proto_visitor.PythonObjectToProtoVisitor", "tensorflow.tools.common.traverse.traverse", "tensorflow.python.platform.tf_logging.error", "tensorflow.python.platform.tf_logging.info", "tensorflow.compat.v1.resource_loader.get_root_dir_with_all_resources", "tensorflow.io.gfile.remove", "tensorflow.tools.common.public_api.PublicAPIVisitor", "tensorflow.compat.v1.gfile.Glob", "tensorflow.tools.api.lib.api_objects_pb2.TFAPIObject", "tensorflow.python.lib.io.file_io.read_file_to_string", "tensorflow.python.platform.tf_logging.warning", "tensorflow.test.main" ] ]
belldandyxtq/chainer
[ "abffa9a7def07c2e6bcd79d8ddcebeed1e762161" ]
[ "examples/vae/train_vae.py" ]
[ "#!/usr/bin/env python\n\"\"\"Chainer example: train a VAE on MNIST\n\"\"\"\nimport argparse\nimport os\nimport warnings\n\nimport numpy as np\n\nimport chainer\nfrom chainer import training\nfrom chainer.training import extensions\nimport chainerx\n\nimport net\n\nimport matplotlib\nmatplotlib.use('Agg')\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Chainer example: VAE')\n parser.add_argument('--initmodel', '-m', type=str,\n help='Initialize the model from given file')\n parser.add_argument('--resume', '-r', type=str,\n help='Resume the optimization from snapshot')\n parser.add_argument('--device', '-d', type=str, default='-1',\n help='Device specifier. Either ChainerX device '\n 'specifier or an integer. If non-negative integer, '\n 'CuPy arrays with specified device id are used. If '\n 'negative integer, NumPy arrays are used')\n parser.add_argument('--out', '-o', default='results',\n help='Directory to output the result')\n parser.add_argument('--epoch', '-e', default=100, type=int,\n help='number of epochs to learn')\n parser.add_argument('--dim-z', '-z', default=20, type=int,\n help='dimention of encoded vector')\n parser.add_argument('--dim-h', default=500, type=int,\n help='dimention of hidden layer')\n parser.add_argument('--beta', default=1.0, type=float,\n help='Regularization coefficient for '\n 'the second term of ELBO bound')\n parser.add_argument('--k', '-k', default=1, type=int,\n help='Number of Monte Carlo samples used in '\n 'encoded vector')\n parser.add_argument('--binary', action='store_true',\n help='Use binarized MNIST')\n parser.add_argument('--batch-size', '-b', type=int, default=100,\n help='learning minibatch size')\n parser.add_argument('--test', action='store_true',\n help='Use tiny datasets for quick tests')\n group = parser.add_argument_group('deprecated arguments')\n group.add_argument('--gpu', '-g', dest='device',\n type=int, nargs='?', const=0,\n help='GPU ID (negative value indicates CPU)')\n args = parser.parse_args()\n\n if chainer.get_dtype() == np.float16:\n warnings.warn(\n 'This example may cause NaN in FP16 mode.', RuntimeWarning)\n\n device = chainer.get_device(args.device)\n device.use()\n\n print('Device: {}'.format(device))\n print('# dim z: {}'.format(args.dim_z))\n print('# Minibatch-size: {}'.format(args.batch_size))\n print('# epoch: {}'.format(args.epoch))\n print('')\n\n # Prepare VAE model, defined in net.py\n encoder = net.make_encoder(784, args.dim_z, args.dim_h)\n decoder = net.make_decoder(784, args.dim_z, args.dim_h,\n binary_check=args.binary)\n prior = net.make_prior(args.dim_z)\n avg_elbo_loss = net.AvgELBOLoss(encoder, decoder, prior,\n beta=args.beta, k=args.k)\n avg_elbo_loss.to_device(device)\n\n # Setup an optimizer\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(avg_elbo_loss)\n\n # Initialize\n if args.initmodel is not None:\n chainer.serializers.load_npz(args.initmodel, avg_elbo_loss)\n\n # Load the MNIST dataset\n train, test = chainer.datasets.get_mnist(withlabel=False)\n\n if args.binary:\n # Binarize dataset\n train = (train >= 0.5).astype(np.float32)\n test = (test >= 0.5).astype(np.float32)\n\n if args.test:\n train, _ = chainer.datasets.split_dataset(train, 100)\n test, _ = chainer.datasets.split_dataset(test, 100)\n\n train_iter = chainer.iterators.SerialIterator(train, args.batch_size)\n test_iter = chainer.iterators.SerialIterator(test, args.batch_size,\n repeat=False, shuffle=False)\n\n # Set up an updater. 
StandardUpdater can explicitly specify a loss function\n # used in the training with 'loss_func' option\n updater = training.updaters.StandardUpdater(\n train_iter, optimizer, device=device, loss_func=avg_elbo_loss)\n\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\n trainer.extend(extensions.Evaluator(\n test_iter, avg_elbo_loss, device=device))\n # TODO(niboshi): Temporarily disabled for chainerx. Fix it.\n if device.xp is not chainerx:\n trainer.extend(extensions.DumpGraph('main/loss'))\n trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.PrintReport(\n ['epoch', 'main/loss', 'validation/main/loss',\n 'main/reconstr', 'main/kl_penalty', 'elapsed_time']))\n trainer.extend(extensions.ProgressBar())\n\n if args.resume is not None:\n chainer.serializers.load_npz(args.resume, trainer)\n\n # Run the training\n trainer.run()\n\n # Visualize the results\n def save_images(x, filename):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(3, 3, figsize=(9, 9), dpi=100)\n for ai, xi in zip(ax.flatten(), x):\n ai.imshow(xi.reshape(28, 28))\n fig.savefig(filename)\n\n avg_elbo_loss.to_cpu()\n train_ind = [1, 3, 5, 10, 2, 0, 13, 15, 17]\n x = chainer.Variable(np.asarray(train[train_ind]))\n with chainer.using_config('train', False), chainer.no_backprop_mode():\n x1 = decoder(encoder(x).mean, inference=True).mean\n save_images(x.array, os.path.join(args.out, 'train'))\n save_images(x1.array, os.path.join(args.out, 'train_reconstructed'))\n\n test_ind = [3, 2, 1, 18, 4, 8, 11, 17, 61]\n x = chainer.Variable(np.asarray(test[test_ind]))\n with chainer.using_config('train', False), chainer.no_backprop_mode():\n x1 = decoder(encoder(x).mean, inference=True).mean\n save_images(x.array, os.path.join(args.out, 'test'))\n save_images(x1.array, os.path.join(args.out, 'test_reconstructed'))\n\n # draw images from randomly sampled z\n z = prior().sample(9)\n x = decoder(z, inference=True).mean\n save_images(x.array, os.path.join(args.out, 'sampled'))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.use", "numpy.asarray", "matplotlib.pyplot.subplots" ] ]