Dataset schema (one record per repository):

repo_name: string (length 6–130)
hexsha: sequence of commit SHAs
file_path: sequence of file paths
code: sequence of source-code strings
apis: sequence of per-file lists of extracted API calls
possible_versions: list of per-file maps from library name to candidate versions
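The records below follow this schema. For orientation, here is a minimal sketch of how such records could be loaded and inspected, assuming they are serialized as JSON Lines; the file name records.jsonl and the access pattern are illustrative and not part of any documented tooling for this dataset.

import json

with open("records.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # One record bundles a repository with parallel sequences of
        # commit hashes, file paths, code strings, and extracted API calls.
        print(record["repo_name"], record["hexsha"][0][:8])
        for path, apis in zip(record["file_path"], record["apis"]):
            print("  ", path, "->", ", ".join(apis))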
LukasK13/ESBO-ETC
[ "d1db999f1670f2777c5227d79629d421f03e5393" ]
[ "tests/sensor/test_PixelMask.py" ]
[ "from unittest import TestCase\nfrom esbo_etc.classes.sensor.PixelMask import PixelMask\nimport numpy as np\nimport astropy.units as u\n\n\nclass TestPixelMask(TestCase):\n def setUp(self):\n self.mask = PixelMask(np.array([10, 8]) << u.pix, 6.5 * u.um, center_offset=np.array([0.2, 0.5]) << u.pix)\n\n def test___new__(self):\n self.assertTrue((self.mask.view(np.ndarray) == np.zeros((8, 10))).all())\n self.assertEqual(self.mask.center_ind, [3.5, 4.5])\n self.assertEqual(self.mask.psf_center_ind, [4.0, 4.7])\n self.assertEqual(self.mask.pixel_geometry, [8 * u.pix, 10 * u.pix])\n\n def test_createPhotometricAperture(self):\n # circle\n self.mask.createPhotometricAperture(\"circle\", 2.3 * u.pix)\n res = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 0., 1., 1., 1., 1., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n self.assertTrue((self.mask.view(np.ndarray) == res).all())\n\n self.setUp()\n self.mask.createPhotometricAperture(\"circle\", 2.6 * u.pix, np.array([-0.5, 0.8]) << u.pix)\n res = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 0., 0., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 0., 0., 0.],\n [0., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 0., 0., 0.],\n [0., 0., 0., 1., 1., 1., 0., 0., 0., 0.]])\n self.assertTrue((self.mask.view(np.ndarray) == res).all())\n\n # square\n self.setUp()\n self.mask.createPhotometricAperture(\"square\", 2.3 * u.pix)\n res = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n self.assertTrue((self.mask.view(np.ndarray) == res).all())\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lori94/DSCI_522_Group_404
[ "e8177bd7fa388dcada94bcb9c2f6e69dc0227591" ]
[ "src/model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n'''This script finds the best parameters for SVC and LGR models and fits the data to these two models and outputs the classification images and the classification reports as the csv documents.\n\nUsage: src/model.py --data_input=<data_input> --result_output=<result_output> \n\nArguments:\n--data_input=<data_input> The path for all the clean data\n--result_output=<result_output> The path where to store the csv data\n'''\nimport numpy as np\nimport pandas as pd\nfrom docopt import docopt\nfrom sklearn.model_selection import RandomizedSearchCV\n#from sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\n#from plot_classifier import plot_classifier\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report\nimport lightgbm as lgb\n\nopt = docopt(__doc__)\n\ndef get_model_results(X, y, X_train, y_train, X_test, y_test, result_output):\n \n parameters_svc = {'C':np.logspace(-3,3,7), 'gamma':np.logspace(-4,2,7)}\n pd.DataFrame(parameters_svc).to_csv(result_output + '/hyper_parameters.csv')\n svc = SVC()\n svc_opt = RandomizedSearchCV(svc, parameters_svc, cv=5, iid=False, n_iter = 25)\n # svc_opt.fit(X_train, y_train)\n # train_score_svc = svc_opt.score(X_train,y_train)\n # test_score_svc= svc_opt.score(X_test,y_test)\n #svc_opt = GridSearchCV(svc, parameters_svc, cv=5, iid=False)\n \n svc_opt.fit(X_train.to_numpy(), y_train.to_numpy().ravel())\n train_score_svc = svc_opt.score(X_train.to_numpy(),y_train.to_numpy().ravel())\n test_score_svc = svc_opt.score(X_test.to_numpy(),y_test.to_numpy().ravel())\n parameters_lgr = {'C':np.logspace(-3,3,7)}\n \n lgr = LogisticRegression()\n\n #lgr_opt = GridSearchCV(lgr, parameters_lgr, cv=5, iid=False)\n lgr_opt = RandomizedSearchCV(lgr, parameters_lgr, cv=5, iid=False, n_iter = 25)\n\n lgr_opt.fit(X_train.to_numpy(), y_train.to_numpy().ravel())\n train_score_lgr = lgr_opt.score(X_train.to_numpy(),y_train.to_numpy().ravel())\n test_score_lgr = lgr_opt.score(X_test.to_numpy(),y_test.to_numpy().ravel())\n \n lgbm = lgb.LGBMClassifier()\n lgbm.fit(X_train.to_numpy(),y_train.to_numpy().ravel())\n train_score_lgbm = lgbm.score(X_train.to_numpy(),y_train.to_numpy().ravel())\n test_score_lgbm = lgbm.score(X_test.to_numpy(),y_test.to_numpy().ravel())\n \n data = {'Train accuracy':[train_score_svc, train_score_lgr, train_score_lgbm], 'Validation accuracy':[test_score_svc, test_score_lgr,test_score_lgbm], 'Best parameters':[svc_opt.best_params_,lgr_opt.best_params_, 'NA']}\n accuracy_df = pd.DataFrame(data, index = ['SVC','LGR','LGBM'])\n accuracy_df.to_csv(result_output+'/accuracy.csv')\n \n predictions_svc = svc_opt.predict(X_test)\n predictions_lgr = lgr_opt.predict(X_test)\n predictions_lgbm = lgbm.predict(X_test)\n svc_report = pd.DataFrame(classification_report(y_test, predictions_svc, output_dict=True))\n lgr_report = pd.DataFrame(classification_report(y_test, predictions_lgr, output_dict=True))\n lgbm_report = pd.DataFrame(classification_report(y_test, predictions_lgbm, output_dict=True))\n svc_report.to_csv(result_output+'/svc_classification_report.csv')\n lgr_report.to_csv(result_output+'/lgr_classification_report.csv')\n lgbm_report.to_csv(result_output+'/lgbm_classification_report.csv')\n \n try:\n pd.read_csv(result_output+'/svc_classification_report.csv')\n pd.read_csv(result_output+'/lgr_classification_report.csv')\n pd.read_csv(result_output+'/lgbm_classification_report.csv')\n \n except: \n raise 
Exception(\"result doesn't save successfully\")\n \n return svc_opt, lgr_opt, lgbm\n\ndef main(data_input, result_output):\n X_train = pd.read_csv(data_input+'/X_train_clean.csv')\n y_train = pd.read_csv(data_input+'/y_train.csv',usecols = [\"Target\"])\n X_test = pd.read_csv(data_input+'/X_test_clean.csv')\n y_test = pd.read_csv(data_input+'/y_test.csv',usecols = [\"Target\"])\n X = pd.read_csv(data_input+'/X_original.csv')\n y = pd.read_csv(data_input+'/y_original.csv')\n svc_opt, lgr_opt, lgbm = get_model_results(X, y, X_train, y_train, X_test, y_test, result_output)\n plt.figure(figsize=(18,3))\n # model = [svc_opt, lgr_opt]\n # for i in range(2):\n # plt.subplot(1,4,i+1)\n # classifier = model[i]\n # plot_classifier(X,y,classifier,ax=plt.gca())\n # plt.savefig(result_output+'classifier_plot.png')\nif __name__ == \"__main__\":\n main(opt[\"--data_input\"], opt[\"--result_output\"])\n\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.RandomizedSearchCV", "sklearn.linear_model.LogisticRegression", "numpy.logspace", "pandas.DataFrame", "sklearn.svm.SVC", "sklearn.metrics.classification_report", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
naegawa/Aup2wav_dataset
[ "bc22ce50704497f2d496da13b00cebae0083bbef" ]
[ "audacity.py" ]
[ "#!/usr/bin/env python\n# 2017 kojima changed https://github.com/davidavdav/audacity.py (c) 2016 David A. van Leeuwen\n# \n#\n# \nimport xml.etree.ElementTree as ET\nimport wave, os, numpy, struct\n\nclass Aup:\n\tdef __init__(self, aupfile):\n\t\tfqpath = os.path.join(os.path.curdir, aupfile)\n\t\tdir = os.path.dirname(fqpath)\n\t\txml = open(aupfile)\n\t\tself.tree = ET.parse(xml)\n\t\tself.root = self.tree.getroot()\n\t\tself.rate = float(self.root.attrib[\"rate\"])\n\t\tns = {\"ns\":\"http://audacity.sourceforge.net/xml/\"}\n\t\tself.project = self.root.attrib[\"projname\"]\n\t\tself.files = []\n\t\tself.labels= []\n\t\tfor channel, wavetrack in enumerate(self.root.findall(\"ns:wavetrack\", ns)):\n\t\t\ttrack_channel=wavetrack.attrib[\"channel\"]\n\t\t\taufiles = []\n\t\t\tfor b in wavetrack.iter(\"{%s}simpleblockfile\" % ns[\"ns\"]):\n\t\t\t\tfilename = b.attrib[\"filename\"]\n\t\t\t\td1 = filename[0:3]\n\t\t\t\td2 = \"d\" + filename[3:5]\n\t\t\t\tfile = os.path.join(dir, self.project, d1, d2, filename)\n\t\t\t\tif not os.path.exists(file):\n\t\t\t\t\traise IOError(\"File missing in %s: %s\" % (self.project, file))\n\t\t\t\telse:\n\t\t\t\t\taufiles.append((file, int(b.attrib[\"len\"]),track_channel))\n\t\t\tself.files.append(aufiles)\n\t\tfor channel, labeltrack in enumerate(self.root.findall(\"ns:labeltrack\", ns)):\n\t\t\tlabelset=[]\n\t\t\tfor b in labeltrack.iter(\"{%s}label\" % ns[\"ns\"]):\n\t\t\t\tl=(float(b.attrib[\"t\"]), float(b.attrib[\"t1\"]), b.attrib[\"title\"])\n\t\t\t\tlabelset.append(l)\n\t\t\tself.labels.append(labelset)\n\n\t\tself.nchannels = len(self.files)\n\t\tself.aunr = -1\n\n\tdef open(self, channel):\n\t\tif not (0 <= channel < self.nchannels):\n\t\t\traise ValueError(\"Channel number out of bounds\")\n\t\tself.channel = channel\n\t\tself.aunr = 0\n\t\tself.offset = 0\n\t\treturn self\n\n\tdef close(self):\n\t\tself.aunr = -1\n\n\t## a linear search (not great)\n\tdef seek(self, pos):\n\t\tif self.aunr < 0:\n\t\t\traise IOError(\"File not opened\")\n\t\ts = 0\n\t\ti = 0\n\t\tlength = 0\n\t\tfor i, f in enumerate(self.files[self.channel]):\n\t\t\ts += f[1]\n\t\t\tif s > pos:\n\t\t\t\tlength = f[1]\n\t\t\t\tbreak\n\t\tif pos >= s:\n\t\t\traise EOFError(\"Seek past end of file\")\n\t\tself.aunr = i\n\t\tself.offset = pos - s + length\n\n\tdef read(self):\n\t\tif self.aunr < 0:\n\t\t\traise IOError(\"File not opened\")\n\t\twhile self.aunr < len(self.files[self.channel]):\n\t\t\twith open(self.files[self.channel][self.aunr][0],\"rb\") as fd:\n\t\t\t\ttrack_ch=self.files[self.channel][self.aunr][2]\n\t\t\t\tif track_ch==\"0\":\n\t\t\t\t\tfd.seek((self.offset - self.files[self.channel][self.aunr][1]) * 2, 2)\n\t\t\t\telse:\n\t\t\t\t\tfd.seek((self.offset - self.files[self.channel][self.aunr][1]) * 4, 2)\n\t\t\t\tdata = fd.read()\n\t\t\t\tyield data,track_ch\n\t\t\tself.aunr += 1\n\t\t\tself.offset = 0\n\n\tdef __enter__(self):\n\t\treturn self\n\n\tdef __exit__(self, exc_type, exc_val, exc_tb):\n\t\tself.close()\n\n\tdef get_labels(self, channel):\n\t\tif not (0 <= channel <len(self.labels)):\n\t\t\traise ValueError(\"Channel number out of bounds\")\n\t\treturn self.labels[channel]\n\n\tdef towav(self, filename, channel, start=0, stop=None):\n\t\twav = wave.open(filename, \"w\")\n\t\twav.setnchannels(1)\n\t\twav.setsampwidth(2)\n\t\twav.setframerate(self.rate)\n\t\tscale = 1 << 15\n\t\tif stop:\n\t\t\tlength = int(self.rate * (stop - start)) ## number of samples to extract\n\t\twith self.open(channel) as fd: #fd=self\n\t\t\tfd.seek(int(self.rate * 
start))\n\t\t\tfor data,track_ch in fd.read():\n\t\t\t\tif track_ch==\"0\":\n\t\t\t\t\tshorts = numpy.frombuffer(data, numpy.short)\n\t\t\t\telse:\n\t\t\t\t\tshorts = numpy.short(numpy.clip(numpy.frombuffer(data, numpy.float32) * scale, -scale, scale-1))\n\t\t\t\tif stop and len(shorts) > length:\n\t\t\t\t\tshorts = shorts[range(length)]\n\t\t\t\tformat = \"<\" + str(len(shorts)) + \"h\"\n\t\t\t\twav.writeframesraw(struct.pack(format, *shorts))\n\t\t\t\tif stop:\n\t\t\t\t\tlength -= len(shorts)\n\t\t\t\t\tif length <= 0:\n\t\t\t\t\t\tbreak\n\t\t\twav.writeframes(bytes(b'')) ## sets length in wavfile\n\t\twav.close()\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ClaireDelplancke/SIRF-Contribs
[ "130223d9bc11991eadcd11f9b715aea34c4842fd" ]
[ "src/Python/sirf/contrib/kcl/user_dePierroMap_real_data.py" ]
[ "'''User implemented De Pierro MAPEM reconstruction\nReal data implementation of De Pierro MAPEM, using a Bowsher weighted quadratic\npenalty. The guidance image (here a T1-weighted MR image) must be pre-aligned \nto the PET image and sampled on the same image grid.\nImplemented by Sam Ellis (13th Feb 2019)\n\nUsage:\n dePierroMap_eg [--help | options]\n\nOptions:\n -f <file>, --file=<file> raw data file\n [default: my_forward_projection.hs]\n -p <path>, --path=<path> path to data files, defaults to data/examples/PET\n subfolder of SIRF root folder\n -s <subs>, --subs=<subs> number of subsets [default: 12]\n -i <siter>, --subiter=<siter> number of sub-iterations [default: 24]\n -e <engn>, --engine=<engn> reconstruction engine [default: STIR]\n'''\n\n## CCP PETMR Synergistic Image Reconstruction Framework (SIRF)\n## Copyright 2015 - 2017 Rutherford Appleton Laboratory STFC\n## Copyright 2015 - 2017 University College London.\n##\n## This is software developed for the Collaborative Computational\n## Project in Positron Emission Tomography and Magnetic Resonance imaging\n## (http://www.ccppetmr.ac.uk/).\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n## http://www.apache.org/licenses/LICENSE-2.0\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n\n__version__ = '0.1.0'\n\ndef my_dePierroMap(image, obj_fun, beta, filter, weights, sensitivity_image):\n \n # Check that weights are normalised\n if (np.abs(np.sum(weights,axis=1)-1)>1.0e-6).any():\n raise ValueError(\"Weights should sum to 1 for each voxel\")\n \n # Create OSEM reconstructor\n print('Setting up reconstruction object')\n OSEM_reconstructor = OSMAPOSLReconstructor()\n OSEM_reconstructor.set_objective_function(obj_fun) \n OSEM_reconstructor.set_num_subsets(21)\n OSEM_reconstructor.set_num_subiterations(21*10)\n OSEM_reconstructor.set_up(image)\n num_subiterations = OSEM_reconstructor.get_num_subiterations()\n \n current_image = image.clone()\n\n for iter in range(1,num_subiterations + 1):\n print('\\n------------- Subiteration %d' % iter) \n\n # clear the temp files from the current working directory (vital when\n # reconstructing real datasets with many iterations)\n if np.mod(iter,5) == 0:\n os.system('rm *.hv *.hs *.v *.s *.ahv')\n \n # Calculate imageReg and return as an array\n imageReg_array = dePierroReg(current_image.as_array(),weights)\n \n # OSEM image update\n OSEM_reconstructor.update(current_image)\n imageEM_array = current_image.as_array()\n \n # Final image update\n imageUpdated_array = dePierroUpdate \\\n (imageEM_array, imageReg_array, beta, sensitivity_image.as_array())\n \n # Fill image and truncate to cylindrical field of view \n current_image.fill(imageUpdated_array)\n filter.apply(current_image)\n \n image_out = current_image.clone()\n return image_out\n\n\ndef dePierroUpdate(imageEM, imageReg, beta, sensImg):\n \n delta = 1e-6*abs(sensImg).max()\n sensImg[sensImg < delta] = delta # avoid division by zero\n beta_j = beta/sensImg\n \n b_j = 1 - beta_j*imageReg\n \n numer = (2*imageEM)\n denom = ((b_j**2 + 4*beta_j*imageEM)**0.5 + b_j)\n \n delta = 1e-6*abs(denom).max()\n denom[denom < delta] = delta # 
avoid division by zero\n \n imageUpdated = numer/denom\n \n return imageUpdated\n\ndef dePierroReg(image,weights):\n \n # get size and vectorise image for indexing \n imSize = image.shape\n imageVec = image.reshape(-1,1,order='F').flatten('F')\n \n # get the neigbourhoods of each voxel\n weightsSize = weights.shape\n w = int(round(weightsSize[1]**(1.0/3))) # side length of neighbourhood\n nhoodInd = neighbourExtract(imSize,w)\n nhoodIndVec = nhoodInd.reshape(-1,1,order='F').flatten('F')\n \n # retrieve voxel intensities for neighbourhoods \n resultVec = np.float32(imageVec[nhoodIndVec])\n result = resultVec.reshape(nhoodInd.shape,order='F')\n \n # compute xreg\n try:\n imageReg = 0.5*np.sum(weights*(result + np.float32(image).reshape(-1,1,order='F')),axis=1)\n except:\n tmpVar = 1; \n imageReg = imageReg.reshape(imSize,order='F')\n \n return imageReg\n\ndef neighbourExtract(imageSize,w):\n # Adapted from Prior class \n n = imageSize[0]\n m = imageSize[1]\n h = imageSize[2]\n wlen = 2*np.floor(w/2)\n widx = xidx = yidx = np.arange(-wlen/2,wlen/2+1)\n\n if h==1:\n zidx = [0]\n nN = w*w\n else:\n zidx = widx\n nN = w*w*w\n \n Y,X,Z = np.meshgrid(np.arange(0,m), np.arange(0,n), np.arange(0,h)) \n N = np.zeros([n*m*h, nN],dtype='int32')\n l = 0\n for x in xidx:\n Xnew = setBoundary(X + x,n)\n for y in yidx:\n Ynew = setBoundary(Y + y,m)\n for z in zidx:\n Znew = setBoundary(Z + z,h)\n N[:,l] = ((Xnew + (Ynew)*n + (Znew)*n*m)).reshape(-1,1).flatten('F')\n l += 1\n return N\n \ndef setBoundary(X,n):\n # Boundary conditions for neighbourExtract\n # Adapted from Prior class\n idx = X<0\n X[idx] = X[idx] + n\n idx = X>n-1\n X[idx] = X[idx] - n\n return X.flatten('F')\n\n# %%\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nfrom pUtilities import show_2D_array\nfrom pSTIR import *\nimport numpy as np\nimport Prior as pr\n\ndata_path = '/media/sf_SIRF_data/sino_rawdata_100/'\n#data_path='/home/sirfuser/data/NEMA'\nprint('Finding files in %s' % data_path)\n\nnum_subsets = 12\n \n# set filenames \n# input files\nsino_file = 'my_data_sino.hs'\nnorm_file = 'my_data_norm.hs'\nattn_file = 'my_data_mumap.hv'\nrand_file = 'my_data_rand.hs'\nmr_file = 'my_data_MR_SIRF.hv'\n\n# output goes to files\nmsg_red = MessageRedirector('info.txt', 'warn.txt', 'error.txt')\n\nacq_data = AcquisitionData(data_path + sino_file)\n\n#%%\n\n# copy the acquisition data into a Python array\nacq_array = acq_data.as_array()\nprint('acquisition data dimensions: %dx%dx%d' % acq_array.shape)\n# use a slice number for display that is appropriate for the NEMA phantom\nz = 71\nshow_2D_array('Acquisition data', acq_array[z,:,:])\n\n# create acquisition model\nacq_model = AcquisitionModelUsingRayTracingMatrix()\nacq_model.set_num_tangential_LORs(10);\n\n#%% Correction sinograms\nnorm_file = 'data-norm.n.hdr'\nasm_norm = AcquisitionSensitivityModel(data_path + norm_file)\nacq_model.set_acquisition_sensitivity(asm_norm)\n\n# ---------------- taken from the example-----------------------------------\nattn_image = ImageData(data_path + attn_file)\nattn_acq_model = AcquisitionModelUsingRayTracingMatrix()\nattn_acq_model.set_num_tangential_LORs(10)\nasm_attn = AcquisitionSensitivityModel(attn_image, attn_acq_model)\n\n# temporary fix pending attenuation offset fix in STIR:\n# converting attenuation into 'bin efficiency'\nasm_attn.set_up(acq_data)\nattn_factors = AcquisitionData(acq_data)\nattn_factors.fill(1.0)\nprint('applying attenuation (please wait, may take a while)...')\nasm_attn.unnormalise(attn_factors)\nasm_attn = 
AcquisitionSensitivityModel(attn_factors)\nasm = AcquisitionSensitivityModel(asm_norm, asm_attn)\nacq_model.set_acquisition_sensitivity(asm)\n# --------------------------------------------------------------------------\n\n# randoms\nrandoms = AcquisitionData(data_path + rand_file)\nrandoms_array=randoms.as_array()\nshow_2D_array('randoms',randoms_array[z,:,:])\nacq_model.set_background_term(randoms)\n\n# MR guidance\nmr_image = ImageData(data_path + mr_file)\nmr_array = mr_image.as_array()\nshow_2D_array('MR image',mr_array[45,110:220,115:225])\n\n\n#%%\n# define objective function to be maximized as\n# Poisson logarithmic likelihood (with linear model for mean)\nobj_fun = make_Poisson_loglikelihood(acq_data)\nobj_fun.set_acquisition_model(acq_model)\n\n#%%\n\n# create initial image estimate from one iteration of MLEM\nrecon_init = OSMAPOSLReconstructor()\nrecon_init.set_objective_function(obj_fun)\n \nrecon_init.set_num_subsets(1)\nrecon_init.set_num_subiterations(1)\nnxny = (344, 344, 127)\ninitial_image = acq_data.create_uniform_image(1.0, nxny)\n\nimage=initial_image\n\nrecon_init.set_up(image)\n\nrecon_init.set_current_estimate(image)\n\nrecon_init.process()\n\nimage = recon_init.get_current_estimate()\n\n\n# %% bit more prep\n\n# create filter that zeroes the image outside a cylinder of the same\n# diameter as the image xy-section size\nfilter = TruncateToCylinderProcessor()\n\n# filter image estimate to FOV\nfilter.apply(image)\n\n# get the full sensitivity image\nobj_fun2 = make_Poisson_loglikelihood(acq_data)\nobj_fun2.set_acquisition_model(acq_model)\nobj_fun2.set_num_subsets(1)\nobj_fun2.set_up(image)\nsensitivity_image = obj_fun2.get_subset_sensitivity(0)\n\n\n# %% guided reconstruction\n\n# create a Prior for computing Bowsher weights\nmyPrior = pr.Prior(sensitivity_image.as_array().shape)\nweights = myPrior.BowshserWeights(mr_array,7)\nweights = np.float32(weights/7.0)\n\nimage_guided = my_dePierroMap(image, obj_fun, 50000, filter, weights, sensitivity_image)\nimage_array_guided = image_guided.as_array()\nshow_2D_array('Reconstructed guided', image_array_guided[45,110:220,115:225])\n\nimage_guided.write('output_images/image_guided.v')\n\n## %% OSEM reconstruction (beta = 0)\n#\n#image_OSEM = my_dePierroMap(image, obj_fun, 0, filter, weights, sensitivity_image)\n#image_array_OSEM = image_OSEM.as_array()\n#show_2D_array('Reconstructed OSEM image', image_array_OSEM[45,110:220,115:225])\n#\n#image_OSEM.write('output_images/image_OSEM.v')\n#\n## %% unguided reconstruction\n#\n## uniform weights\n#weights = np.ones([image.as_array().size,27],dtype='float')\n#weights = np.float32(weights/27.0)\n#\n#image_unguided = my_dePierroMap(image, obj_fun, 50000, filter, weights, sensitivity_image)\n#image_array_unguided = image_unguided.as_array()\n#show_2D_array('Reconstructed unguided', image_array_unguided[45,110:220,115:225])\n#\n#image_unguided.write('output_images/image_unguided.v')\n\n" ]
[ [ "numpy.arange", "numpy.floor", "numpy.float32", "numpy.mod", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hojeong3709/RL
[ "a1c6eab8c3e7f2487e527fe68658d13eea5334af", "a1c6eab8c3e7f2487e527fe68658d13eea5334af" ]
[ "lecture/1. policy-iteration/environment.py", "tensorflow-keras-lab/1. tensorflow/ex01/8.py" ]
[ "import tkinter as tk\nfrom tkinter import Button\nimport time\nimport numpy as np\nfrom PIL import ImageTk, Image\n\nPhotoImage = ImageTk.PhotoImage\nUNIT = 100 # 픽셀 수\nHEIGHT = 5 # 그리드월드 세로\nWIDTH = 5 # 그리드월드 가로\nTRANSITION_PROB = 1 #상태 변환 확률 값\nPOSSIBLE_ACTIONS = [0, 1, 2, 3] # 상, 하, 좌, 우\nACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)] # 좌표로 나타낸 행동\nREWARDS = []\n\n\nclass GraphicDisplay(tk.Tk):\n def __init__(self, agent):\n super(GraphicDisplay, self).__init__()\n self.title('Policy Iteration')\n self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT + 50))\n self.texts = [] #v값 표시\n self.arrows = []#진행 방향 표시\n self.env = Env()\n self.agent = agent #policyIteration\n self.evaluation_count = 0 #정책 평가 버튼 누른 횟 수\n self.improvement_count = 0#정책 발전 버튼 누른 횟 수\n self.is_moving = 0 #현재 move 중 인지를 나타내는 값\n (self.up, self.down, self.left, self.right), self.shapes = self.load_images()\n self.canvas = self._build_canvas()\n self.text_reward(2, 2, \"R : 1.0\")\n self.text_reward(1, 2, \"R : -1.0\")\n self.text_reward(2, 1, \"R : -1.0\")\n\n def _build_canvas(self):\n canvas = tk.Canvas(self, bg='white',\n height=HEIGHT * UNIT,\n width=WIDTH * UNIT)\n # 버튼 초기화\n iteration_button = Button(self, text=\"Evaluate\",\n command=self.evaluate_policy)\n iteration_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.13, HEIGHT * UNIT + 10,\n window=iteration_button)\n policy_button = Button(self, text=\"Improve\",\n command=self.improve_policy)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.37, HEIGHT * UNIT + 10,\n window=policy_button)\n policy_button = Button(self, text=\"move\", command=self.move_by_policy)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.62, HEIGHT * UNIT + 10,\n window=policy_button)\n policy_button = Button(self, text=\"reset\", command=self.reset)\n policy_button.configure(width=10, activebackground=\"#33B5E5\")\n canvas.create_window(WIDTH * UNIT * 0.87, HEIGHT * UNIT + 10,\n window=policy_button)\n\n # 그리드 생성\n for col in range(0, WIDTH * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = col, 0, col, HEIGHT * UNIT\n canvas.create_line(x0, y0, x1, y1)\n for row in range(0, HEIGHT * UNIT, UNIT): # 0~400 by 80\n x0, y0, x1, y1 = 0, row, HEIGHT * UNIT, row\n canvas.create_line(x0, y0, x1, y1)\n\n # 캔버스에 이미지 추가\n self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])\n canvas.create_image(250, 150, image=self.shapes[1])\n canvas.create_image(150, 250, image=self.shapes[1])\n canvas.create_image(250, 250, image=self.shapes[2])\n\n canvas.pack()\n\n return canvas\n\n def load_images(self):\n up = PhotoImage(Image.open(\"../img/up.png\").resize((13, 13)))\n right = PhotoImage(Image.open(\"../img/right.png\").resize((13, 13)))\n left = PhotoImage(Image.open(\"../img/left.png\").resize((13, 13)))\n down = PhotoImage(Image.open(\"../img/down.png\").resize((13, 13)))\n rectangle = PhotoImage(Image.open(\"../img/rectangle.png\").resize((65, 65)))\n triangle = PhotoImage(Image.open(\"../img/triangle.png\").resize((65, 65)))\n circle = PhotoImage(Image.open(\"../img/circle.png\").resize((65, 65)))\n return (up, down, left, right), (rectangle, triangle, circle)\n\n def reset(self):\n if self.is_moving == 0:\n self.evaluation_count = 0\n self.improvement_count = 0\n for i in self.texts:\n self.canvas.delete(i)\n\n for i in self.arrows:\n self.canvas.delete(i)\n self.agent.value_table = [[0.0] * WIDTH for _ in range(HEIGHT)]\n 
self.agent.policy_table = ([[[0.25, 0.25, 0.25, 0.25]] * WIDTH\n for _ in range(HEIGHT)])\n self.agent.policy_table[2][2] = []\n x, y = self.canvas.coords(self.rectangle)\n self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)\n\n def text_value(self, row, col, contents, font='Helvetica', size=10,\n style='normal', anchor=\"nw\"):\n origin_x, origin_y = 85, 70\n x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\n font = (font, str(size), style)\n text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\n font=font, anchor=anchor)\n return self.texts.append(text)\n\n def text_reward(self, row, col, contents, font='Helvetica', size=10,\n style='normal', anchor=\"nw\"):\n origin_x, origin_y = 5, 5\n x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)\n font = (font, str(size), style)\n text = self.canvas.create_text(x, y, fill=\"black\", text=contents,\n font=font, anchor=anchor)\n return self.texts.append(text)\n\n def rectangle_move(self, action):\n base_action = np.array([0, 0])\n location = self.find_rectangle()\n self.render()\n if action == 0 and location[0] > 0: # 상\n base_action[1] -= UNIT\n elif action == 1 and location[0] < HEIGHT - 1: # 하\n base_action[1] += UNIT\n elif action == 2 and location[1] > 0: # 좌\n base_action[0] -= UNIT\n elif action == 3 and location[1] < WIDTH - 1: # 우\n base_action[0] += UNIT\n # move agent\n self.canvas.move(self.rectangle, base_action[0], base_action[1])\n\n #진행 중인 사각형 위치를 반환 ex) 2,3\n def find_rectangle(self):\n temp = self.canvas.coords(self.rectangle)\n x = (temp[0] / 100) - 0.5\n y = (temp[1] / 100) - 0.5\n return int(y), int(x)\n\n def move_by_policy(self):\n if self.improvement_count != 0 and self.is_moving != 1:\n self.is_moving = 1\n\n x, y = self.canvas.coords(self.rectangle) #좌상단 우하단 좌표\n self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)#사각형은 x,y 좌표 만큼 이동\n\n x, y = self.find_rectangle()\n while len(self.agent.policy_table[x][y]) != 0:\n self.after(100,\n self.rectangle_move(self.agent.get_action([x, y])))\n x, y = self.find_rectangle()\n self.is_moving = 0\n\n def draw_one_arrow(self, col, row, policy):\n if col == 2 and row == 2:\n return\n\n if policy[0] > 0: # up\n origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.up))\n if policy[1] > 0: # down\n origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.down))\n if policy[2] > 0: # left\n origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.left))\n if policy[3] > 0: # right\n origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)\n self.arrows.append(self.canvas.create_image(origin_x, origin_y,\n image=self.right))\n\n def draw_from_policy(self, policy_table):\n for i in range(HEIGHT):\n for j in range(WIDTH):\n self.draw_one_arrow(i, j, policy_table[i][j])\n\n def print_value_table(self, value_table):\n for i in range(WIDTH):\n for j in range(HEIGHT):\n self.text_value(i, j, value_table[i][j])\n\n def render(self):\n time.sleep(0.1)\n self.canvas.tag_raise(self.rectangle)\n self.update()\n\n def evaluate_policy(self):\n self.evaluation_count += 1\n for i in self.texts:\n self.canvas.delete(i)\n self.agent.policy_evaluation()\n self.print_value_table(self.agent.value_table)\n\n def improve_policy(self):\n self.improvement_count += 1\n for i in self.arrows:\n self.canvas.delete(i)\n 
self.agent.policy_improvement()\n self.draw_from_policy(self.agent.policy_table)\n\n\nclass Env:\n def __init__(self):\n self.transition_probability = TRANSITION_PROB\n self.width = WIDTH\n self.height = HEIGHT\n self.reward = [[0] * WIDTH for _ in range(HEIGHT)] #보상은 0으로 초기화\n self.possible_actions = POSSIBLE_ACTIONS # 4방향으로 초기화\n self.reward[2][2] = 1 # (2,2) 좌표 동그라미 위치에 보상 1\n self.reward[1][2] = -1 # (1,2) 좌표 세모 위치에 보상 -1\n self.reward[2][1] = -1 # (2,1) 좌표 세모 위치에 보상 -1\n self.all_state = [] #모든 상태값 저장\n\n #모든 상태값을 저장한다.\n for x in range(WIDTH):\n for y in range(HEIGHT):\n state = [x, y]\n self.all_state.append(state)\n\n def get_reward(self, state, action):\n next_state = self.state_after_action(state, action)\n return self.reward[next_state[0]][next_state[1]]#[상태][행동]\n\n def state_after_action(self, state, action_index):\n action = ACTIONS[action_index]\n return self.check_boundary([state[0] + action[0], state[1] + action[1]])\n\n #범위를 벗어나는 지 확인\n @staticmethod\n def check_boundary(state):\n state[0] = (0 if state[0] < 0 else WIDTH - 1\n if state[0] > WIDTH - 1 else state[0])\n state[1] = (0 if state[1] < 0 else HEIGHT - 1\n if state[1] > HEIGHT - 1 else state[1])\n return state\n\n def get_transition_prob(self, state, action):\n return self.transition_probability\n\n def get_all_states(self):\n return self.all_state\n", "import tensorflow as tf\n\nif __name__ == \"__main__\":\n\n g = tf.Graph()\n with g.as_default():\n in1 = tf.placeholder(dtype=tf.float32, shape=[None, 2], name=\"input1\")\n in2 = tf.placeholder(dtype=tf.float32, shape=[None, 2], name=\"input2\")\n const = tf.constant(2, dtype=tf.float32, name=\"static_value\")\n\n with tf.name_scope(\"Main\"):\n with tf.name_scope(\"A_Part\"):\n a_mul = tf.multiply(in1, const)\n a_out = tf.subtract(a_mul, in1)\n\n with tf.name_scope(\"B_Part\"):\n b_mul = tf.multiply(in2, const)\n b_out = tf.subtract(b_mul, in2)\n\n with tf.name_scope(\"C_Part\"):\n c_div = tf.div(a_out, b_out)\n c_out = tf.add(c_div, const)\n\n with tf.name_scope(\"D_Part\"):\n d_div = tf.div(b_out, a_out)\n d_out = tf.add(d_div, const)\n\n out = tf.maximum(c_out, d_out)\n sess = tf.Session(graph=g)\n _result, _c_out, _d_out = sess.run([out, c_out, d_out], feed_dict={in1: [[7, 3], [5, 2]],\n in2: [[5, 6], [8, 1]]})\n\n print(_c_out)\n print(_d_out)\n print(_result)\n tf.summary.FileWriter(\"./g_graph\", graph=g)" ]
[ [ "numpy.array" ], [ "tensorflow.Graph", "tensorflow.multiply", "tensorflow.summary.FileWriter", "tensorflow.constant", "tensorflow.maximum", "tensorflow.placeholder", "tensorflow.subtract", "tensorflow.div", "tensorflow.add", "tensorflow.name_scope", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ZhuokunYao/smoke
[ "d524fbe43b1aba6078c25d9aca7924b71a635e1d" ]
[ "smoke/layers/utils.py" ]
[ "import torch\nfrom torch.nn import functional as F\n\n\ndef sigmoid_hm(hm_features, training=False):\n x = hm_features.sigmoid_()\n if training:\n x = x.clamp(min=1e-4, max=1 - 1e-4)\n return x\n\n\ndef nms_hm(heat_map, kernel=3):\n pad = (kernel - 1) // 2\n hmax = F.max_pool2d(heat_map,\n kernel_size=(kernel, kernel),\n stride=1,\n padding=pad)\n eq_index = torch.floor(heat_map - hmax) + 1.0\n\n return heat_map * eq_index\n\n\ndef select_topk(heat_map, K=100):\n '''\n Args:\n heat_map: heat_map in [N, C, H, W]\n K: top k samples to be selected\n score: detection threshold\n\n Returns:\n\n '''\n batch, cls, height, width = heat_map.size()\n\n # First select topk scores in all classes and batchs\n # [N, C, H, W] -----> [N, C, H*W]\n heat_map = heat_map.view(batch, cls, -1)\n # Both in [N, C, K] top K of each class, K each class\n topk_scores_all, topk_inds_all = torch.topk(heat_map, K)\n\n # topk_inds_all = topk_inds_all % (height * width) # todo: this seems redudant\n # [N, C, K]\n topk_ys = (topk_inds_all / width).float()\n topk_xs = (topk_inds_all % width).float()\n\n assert isinstance(topk_xs, torch.cuda.FloatTensor)\n assert isinstance(topk_ys, torch.cuda.FloatTensor)\n\n # Select topK examples across channel\n # [N, C, K] -----> [N, C*K]\n topk_scores_all = topk_scores_all.view(batch, -1)\n # Both in [N, K]\n topk_scores, topk_inds = torch.topk(topk_scores_all, K)\n topk_clses = (topk_inds / K).float()\n\n assert isinstance(topk_clses, torch.cuda.FloatTensor)\n\n # First expand it as 3 dimension\n topk_inds_all = _gather_feat(topk_inds_all.view(batch, -1, 1), topk_inds).view(batch, K)\n topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_inds).view(batch, K)\n topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_inds).view(batch, K)\n # bs,50 bs,50 bs,50 bs,50 bs,50\n return topk_scores, topk_inds_all, topk_clses, topk_ys, topk_xs\n\n\ndef _gather_feat(feat, ind):\n '''\n Select specific indexs on featuremap\n Args:\n feat: all results in 3 dimensions\n ind: positive index\n\n Returns:\n\n '''\n channel = feat.size(-1)\n ind = ind.unsqueeze(-1).expand(ind.size(0), ind.size(1), channel)\n feat = feat.gather(1, ind)\n\n return feat\n# batch: 100\n# index: bs, 100, 2\n# feature_maps: bs, 9 * 6, h, w\n# target_cls: bs, 100\n# cls_num: 6\ndef select_point_of_interest(batch, index, feature_maps, target_cls, cls_num):\n '''\n Select POI(point of interest) on feature map\n Args:\n batch: batch size\n index: in point format or index format\n feature_maps: regression feature map in [N, C, H, W]\n\n Returns:\n\n '''\n w = feature_maps.shape[3]\n # bs, 100, 2\n if len(index.shape) == 3:\n index = index[:, :, 1] * w + index[:, :, 0]\n # bs, 100\n index = index.view(batch, -1)\n \n # [N, 9 * 6, H, W] -----> [N, H, W, 9 * 6]\n feature_maps = feature_maps.permute(0, 2, 3, 1).contiguous()\n channel = feature_maps.shape[-1]\n # [N, H, W, C] -----> [N, H*W, C]\n feature_maps = feature_maps.view(batch, -1, channel)\n # expand index in channels\n # bs, 100, C\n index = index.unsqueeze(-1).repeat(1, 1, channel)\n # select specific features bases on POIs\n feature_maps = feature_maps.gather(1, index.long()) # bs, 100, 9 * 6\n\n feature_maps = feature_maps.view(batch, feature_maps.shape[1], cls_num, -1) # bs, 100, 6, 9\n cls_index = target_cls.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,feature_maps.shape[-1]) # bs, 100, 1, 9\n\n feature_maps = feature_maps.gather(2, cls_index.long()).squeeze(2) # bs, 100, 9\n\n return feature_maps\n" ]
[ [ "torch.topk", "torch.nn.functional.max_pool2d", "torch.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aviolinist/EEE
[ "032e2029815229875048cc92dd7da24ff3f71e93", "032e2029815229875048cc92dd7da24ff3f71e93" ]
[ "codes/lib/position.py", "codes/2_run_model_raven-hmets.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\nimport numpy as np\n\n__all__ = ['position']\n\ndef position(row=1, col=1, num=1,\n left=0.125, right=0.9, bottom=0.1, top=0.9,\n hspace=0.1, vspace=None, wspace=None,\n width=None, height=None,\n sortcol=False, golden=False, inversegolden=False,\n figsize=(1.,1.)):\n \"\"\"\n Gives positions of subplots.\n To be used with add_axes instead of subplot.\n\n All dimensions are fractions of the figure width or height.\n Figure and subplot spaces are the same as for figure.subplotparams\n except for hspace and vspace, which are halved.\n\n If the figsize keyword is given, a rectangular section of the figure\n will be used.\n\n\n Definition\n ----------\n def position(row=1, col=1, num=1,\n left=0.125, right=0.9, bottom=0.1, top=0.9,\n hspace=0.1, vspace=None, wspace=None,\n width=None, height=None,\n sortcol=False, golden=False, inversegolden=False,\n figsize=(1.,1.)):\n\n\n Optional Input\n --------------\n row number of subplot rows (default 1)\n col number of subplot columns (default 1)\n num subplot number (default 1)\n left left border of plot (default 0.125)\n right right border of plot (default 0.9)\n bottom bottom border of plot (default 0.1)\n top top border of plot (default 0.9)\n hspace space between columns (default 0.1)\n vspace space between rows (default 0.1)\n wspace historical, same as vspace; will be overwritten by vspace\n width prescribe width of plots (default None)\n height prescribe height of plots (default None)\n sortcol fill columns then rows (default False)\n golden golden ratio of width/height = (1+sqrt(5))/2\n (default False)\n inversegolden golden ratio of height/width\n (overwritten by golden) (default False)\n figsize (width, height) of figure as given by e.g.\n matplotlib.rcParams['figure.figsize'].\n Scales everything to rectangular section\n (default (1,1))\n\n\n Output\n ------\n position array with [left, bottom, width, height)\n to be used with fig.add_axes.\n\n\n Examples\n --------\n # Use, for example, as follows\n # fig1 = figure(1)\n # sub1 = fig1.add_axes(position(2,2,1))\n # sub2 = fig1.add_axes(position(2,2,2))\n\n # if you want to have a true rectangle\n # figsize = matplotlib.rcParams['figure.figsize']\n # sub = fig1.add_axes(position(1,1,1,figsize=figsize,left=0.1))\n\n # if you want to have a true golden ratio\n # sub = fig1.add_axes(position(1,1,1,figsize=figsize,golden=True))\n\n # Doctest examples\n >>> from autostring import astr\n >>> print(astr(position(2,2,1),3,pp=True))\n ['0.125' '0.550' '0.338' '0.350']\n >>> print(astr(position(2,2,1,sortcol=True),3,pp=True))\n ['0.125' '0.550' '0.338' '0.350']\n >>> print(astr(position(2,2,1,golden=True),3,pp=True))\n ['0.125' '0.409' '0.338' '0.209']\n >>> print(astr(position(2,2,1,inversegolden=True),3,pp=True))\n ['0.125' '0.550' '0.216' '0.350']\n >>> print(astr(position(2,2,1,golden=True,sortcol=True),3,pp=True))\n ['0.125' '0.409' '0.338' '0.209']\n >>> print(astr(position(2,2,1,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.000' '0.500' '0.500' '0.500']\n >>> print(astr(position(2,2,2,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.500' '0.500' '0.500' '0.500']\n >>> print(astr(position(2,2,3,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.000' '0.000' '0.500' '0.500']\n >>> print(astr(position(2,2,4,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.),3,pp=True))\n ['0.500' '0.000' '0.500' '0.500']\n >>> 
print(astr(position(2,2,1,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.000' '0.309' '0.500' '0.309']\n >>> print(astr(position(2,2,2,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.500' '0.309' '0.500' '0.309']\n >>> print(astr(position(2,2,3,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.000' '0.000' '0.500' '0.309']\n >>> print(astr(position(2,2,4,top=1.,bottom=0.,left=0.,right=1.,hspace=0.,vspace=0.,golden=True),3,pp=True))\n ['0.500' '0.000' '0.500' '0.309']\n >>> figsize=[8,11]\n >>> print(astr(position(2,2,1,golden=True,sortcol=True,figsize=figsize),3,pp=True))\n ['0.125' '0.324' '0.338' '0.152']\n >>> print(astr(position(2,2,1,figsize=figsize,left=0.1),3,pp=True))\n ['0.100' '0.427' '0.350' '0.255']\n >>> print(astr(position(2,2,1,figsize=figsize,left=0.1,golden=True),3,pp=True))\n ['0.100' '0.330' '0.350' '0.157']\n\n\n License\n -------\n This file is part of the JAMS Python package, distributed under the MIT License.\n\n Copyright (c) 2009-2016 Matthias Cuntz - mc (at) macu (dot) de\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n\n History\n -------\n Written, MC, Aug 2009\n Modified, MC, Feb 2013 - ported to Python 3\n MC, Jul 2013 - vspace, wspace obsolete\n MC, Apr 2014 - assert\n ST, Feb 2016 - added height and width\n \"\"\"\n #\n # Check\n nplots = row*col\n assert num <= nplots, 'num > number of plots: '+str(num)+' > '+str(nplots)\n assert right-left > 0., 'right > left: '+str(right)+' > '+str(left)\n assert top-bottom > 0., 'top < bottom: '+str(top)+' < '+str(bottom)\n if vspace != None:\n ivspace = vspace\n elif wspace != None:\n ivspace = wspace\n else:\n ivspace = 0.1\n #\n # Scaling to figsize\n scalex = figsize[1]/float(max(figsize))\n scaley = figsize[0]/float(max(figsize))\n #\n # width, height\n if width is None:\n dx = (right-left-(col-1)*hspace)/col\n else:\n dx = width\n if height is None:\n dy = (top-bottom-(row-1)*ivspace)/row\n else:\n dy = height\n #\n # golden ratio\n ratio = (1.+np.sqrt(5.))/2.\n if golden:\n width = dx\n height = dx / ratio\n checkheight = (top-bottom-row*height) - (row-1)*ivspace\n if checkheight < 0.:\n height = dy\n width = dy * ratio\n checkwidth = (right-left-col*width) - (col-1)*hspace\n if checkwidth < 0.:\n raise ValueError('golden ratio does not work. 
Have to recode.')\n else:\n if inversegolden:\n height = dy\n width = dy / ratio\n checkwidth = (right-left-col*width) - (col-1)*hspace\n if checkwidth < 0.:\n width = dx\n height = dx * ratio\n checkheight = (top-bottom-row*height) - (row-1)*ivspace\n if checkheight < 0.:\n raise ValueError('inverse golden ratio does not work. Have to recode.')\n else:\n width = dx\n height = dy\n #\n # order row/colmn, column/row\n if sortcol:\n irow = (num-1) % row\n icol = (num-1) // row\n else:\n irow = (num-1) // col\n icol = (num-1) % col\n #\n # position\n pos = np.empty(4)\n pos[0] = left + icol*(width+hspace) *scalex\n pos[1] = bottom + (row-1-irow)*(height+ivspace) *scaley\n pos[2] = width *scalex\n pos[3] = height *scaley\n #\n return pos\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)\n\n", "#!/usr/bin/env python\nfrom __future__ import print_function\n\n# Copyright 2019 Juliane Mai - juliane.mai(at)uwaterloo.ca\n#\n# License\n# This file is part of the EEE code library for \"Computationally inexpensive identification\n# of noninformative model parameters by sequential screening: Efficient Elementary Effects (EEE)\".\n#\n# The EEE code library is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# The MVA code library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with The EEE code library.\n# If not, see <https://github.com/julemai/EEE/blob/master/LICENSE>.\n#\n# If you use this method in a publication please cite:\n#\n# M Cuntz & J Mai et al. (2015).\n# Computationally inexpensive identification of noninformative model parameters by sequential screening.\n# Water Resources Research, 51, 6417-6441.\n# https://doi.org/10.1002/2015WR016907.\n#\n# An example calling sequence to derive model outputs for previously sampled parameter sets stored\n# in an ASCII file (option -i) where some lines might be skipped (option -s). The final model outputs\n# are stored in a pickle file (option -o). The model outputs are stored as dictionaries. 
Multiple\n# model outputs are possible.\n#\n# python 2_run_model_raven-hmets.py \\\n# -i parameter_sets_1_scaled_para21_M.dat \\\n# -s XXX\n# -o model_output.pkl\n\n\"\"\"\nRuns a model for a bunch of parameter sets and stores model outputs in a pickle file.\n\nHistory\n-------\nWritten, JM, Mar 2019\n\"\"\"\n\n# -------------------------------------------------------------------------\n# Command line arguments - if script\n#\n\n# Comment|Uncomment - Begin\n#if __name__ == '__main__':\n\n# -----------------------\n# add subolder scripts/lib to search path\n# -----------------------\nimport sys\nimport os\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.abspath(dir_path+'/lib'))\nsys.path.append(os.path.abspath(dir_path+'/../examples/raven-hmets/model'))\n\nimport argparse\nimport numpy as np\nimport scipy.stats as stats\nimport copy\nimport pickle\nfrom pathlib2 import Path\nimport subprocess\nimport shutil\n\nfrom raven_templates import RVI, RVT, RVP, RVH, RVC # in examples/raven-hmets/model/\nfrom raven_common import writeString, makeDirectories # in examples/raven-hmets/model/\nfrom fread import fread # in lib/\n\ninfile = 'example_raven-hmets/parameter_sets_1_scaled_para15_M.dat' # name of file containing sampled parameter sets to run the model\noutfile = 'example_raven-hmets/model_output.pkl' # name of file used to save (scalar) model outputs\nskip = None # number of lines to skip in input file\n\nparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''An example calling sequence to derive model outputs for previously sampled parameter sets stored in an ASCII file (option -i) where some lines might be skipped (option -s). The final model outputs are stored in a pickle file (option -o). The model outputs are stored as dictionaries. Multiple model outputs are possible..''')\nparser.add_argument('-i', '--infile', action='store',\n default=infile, dest='infile', metavar='infile',\n help=\"Name of file containing sampled SCALED parameter sets to run the model (default: 'parameter_sets.out').\")\nparser.add_argument('-s', '--skip', action='store',\n default=skip, dest='skip', metavar='skip',\n help=\"Number of lines to skip in input file (default: None).\")\nparser.add_argument('-o', '--outfile', action='store',\n default=outfile, dest='outfile', metavar='outfile',\n help=\"Name of file used to save (scalar) model outputs in a pickle file (default: 'model_output.pkl').\")\n\nargs = parser.parse_args()\ninfile = args.infile\noutfile = args.outfile\nskip = args.skip\n\ndel parser, args\n\ndef model_function(paras, run_id=None):\n # input:\n # paras ... list of model parameters scaled to their range;\n # values for all N model parameters have to be provided\n # example:\n # [ x1, x2, x3, x4, .... ]\n # run_id ... 
optional name of this run (to, e.g., print or store in a file)\n # example:\n # run_aset_001\n # output:\n # model output in dictionary\n # example:\n # model['out'] = 7.4\n\n if not(run_id is None):\n print(\"Run ID: \",run_id)\n\n # ---------------\n # derive some parameters\n # ---------------\n dict_dparas = {}\n\n dict_dparas['sum_x05_x06'] = paras[4]+paras[5] # MAX_MELT_FACTOR > MIN_MELT_FACTOR\n dict_dparas['sum_x09_x10'] = paras[8]+paras[9] # SNOW_SWI_MAX > SNOW_SWI_MIN\n dict_dparas['half_x20'] = paras[19] * 0.5 * 1000 # half the value but in [mm] not [m]\n dict_dparas['half_x21'] = paras[20] * 0.5 * 1000 # half the value but in [mm] not [m]\n\n # ---------------\n # paste all paras into template files\n # ---------------\n # ex.: string = \"parameter x01 = {par[x01]} and another parameter x02 = {par[x02]}\"\n # keys = ['x01','x02']\n # vals = [1.0,3.0]\n # string.format(par=dict(zip(keys,vals)))\n #\n # --> 'parameter x01 = 1.0 and another parameter x02 = 3.0'\n #\n # to replace patterns: {par[x01]} by parameter value paras[0]\n # {par[x02]} by parameter value paras[1]\n # ...\n if len(paras) > 9 and len(paras) < 100:\n keys_paras = [\"x{:02d}\".format(ii) for ii in range(1,len(paras)+1) ]\n elif len(paras) > 99 and len(paras) < 1000:\n keys_paras = [\"x{:03d}\".format(ii) for ii in range(1,len(paras)+1) ]\n elif len(paras) <= 9:\n keys_paras = [\"x{:01d}\".format(ii) for ii in range(1,len(paras)+1) ]\n else:\n raise ValueError(\"More than 999 parameters are not implemented yet!\")\n vals_paras = paras\n dict_paras = dict(zip(keys_paras,vals_paras))\n\n # fill in to templates\n # templates need to have patterns:\n # {par[x01]}, {par[x02]}, ... for parameters\n # {dpar[something]}, {dpar[somethingelse]}, ... for derived parameters\n\n # ---------------\n # create a run folder\n # ---------------\n tmp_folder = \"/tmp/eee-analysis/\"+str(run_id) # \"/tmp/juletest\" # TODO a generic folder name in /tmp\n raven_exe_name = os.path.abspath(dir_path+\"/../\"+\"examples/raven-hmets/model/Raven.exe\")\n raven_obs_folder = os.path.abspath(dir_path+\"/../\"+\"examples/raven-hmets/model/data_obs\")\n\n if os.path.exists(tmp_folder):\n shutil.rmtree(tmp_folder)\n\n # all RAVEN setup files\n writeString( Path(tmp_folder,\"raven_hmets.rvi\"), RVI.format(par=dict_paras,dpar=dict_dparas) )\n writeString( Path(tmp_folder,\"raven_hmets.rvp\"), RVP.format(par=dict_paras,dpar=dict_dparas) )\n writeString( Path(tmp_folder,\"raven_hmets.rvh\"), RVH.format(par=dict_paras,dpar=dict_dparas) )\n writeString( Path(tmp_folder,\"raven_hmets.rvt\"), RVT.format(par=dict_paras,dpar=dict_dparas) )\n writeString( Path(tmp_folder,\"raven_hmets.rvc\"), RVC.format(par=dict_paras,dpar=dict_dparas) )\n\n # link executable\n if not(os.path.exists(str(Path(tmp_folder,os.path.basename(raven_exe_name))))):\n print(\"from: \",os.path.realpath(raven_exe_name))\n print(\"to: \",str(Path(tmp_folder,os.path.basename(raven_exe_name))))\n os.symlink(os.path.realpath(raven_exe_name), str(Path(tmp_folder,os.path.basename(raven_exe_name))))\n\n # link observations folder\n if not(os.path.exists(str(Path(tmp_folder,os.path.basename(raven_obs_folder))))):\n os.symlink(os.path.realpath(raven_obs_folder), str(Path(tmp_folder,os.path.basename(raven_obs_folder))))\n\n # create ouput folder\n out_folder = str(Path(tmp_folder,\"output\"))\n os.makedirs(out_folder)\n\n # ---------------\n # run the model with these input rv* files\n # ---------------\n cmd = 
[str(Path(tmp_folder,os.path.basename(raven_exe_name))),str(Path(tmp_folder,\"raven_hmets\")),\"-o\",str(Path(tmp_folder,\"output\"))+'/']\n print(\"run cmd: \",' '.join(cmd))\n\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n\n print(\"\")\n print(\"Raven standard output:\")\n for line in process.stdout:\n print(\">>> \",line.rstrip()) # rstrip removes trailing \\n\n\n if not(os.path.exists(str(Path(tmp_folder,\"output\",\"Diagnostics.csv\")))):\n print(\"\")\n print(\"ERROR: No Diagnostics.csv produced\")\n print(\"\")\n print(\"Raven error file content:\")\n ff = open(str(Path(tmp_folder,\"output\",\"Raven_errors.txt\")), \"r\")\n lines = ff.readlines()\n ff.close()\n for line in lines:\n print(\">>> \",line.rstrip()) # rstrip removes trailing \\n\n\n raise ValueError(\"ERROR: No Diagnostics.csv produced (scroll up to see content of error file)\")\n\n model = {}\n\n # ---------------\n # extract model output: Diagnostics: NSE\n # ---------------\n model['nse'] = 0.0\n ff = open(str(Path(tmp_folder,\"output\",\"Diagnostics.csv\")), \"r\")\n lines = ff.readlines()\n ff.close()\n\n nse = np.float(lines[-1].strip().split(',')[2])\n print(\"NSE: \",nse)\n model['nse'] = nse\n print(\"\")\n\n # ---------------\n # extract model output: Hydrographs: simulated Q\n # ---------------\n model['Q'] = 0.0\n warmup = 2*365 # 1 # model timestep 1 day and want to skip 2 years # first day 1991-01-01 00:00:00.00 (checked)\n model['Q'] = np.transpose(fread(str(Path(tmp_folder,\"output\",\"Hydrographs.csv\")),skip=warmup+1,cskip=4,nc=1))[0]\n\n print(\"Q: \",model['Q'][0:4],\"...\",model['Q'][-4:])\n print(\"Q_range: [\",np.min(model['Q']),\",\",np.max(model['Q']),\"]\")\n print(\"shape Q: \",np.shape(model['Q']))\n print(\"\")\n\n # ---------------\n # extract model output: BETWEEN_PONDED_WATER_AND_SOIL[0]_Daily_Average_BySubbasin.csv: accumulated infiltration volume\n # ---------------\n model['infiltration'] = 0.0\n warmup = 2*365 # 1 # model timestep 1 day and want to skip 2 years # first day 1990-12-31 00:00:00.00 (checked) But all timesteps are shifted by 1 day...\n #\n # de-accumulated infiltration volume\n model['infiltration'] = np.transpose(fread(str(Path(tmp_folder,\"output\",\"BETWEEN_PONDED_WATER_AND_SOIL[0]_Daily_Average_BySubbasin.csv\")),skip=warmup,cskip=2,nc=1))[0]\n model['infiltration'] = np.diff(model['infiltration'])\n\n print(\"Infiltration I: \",model['infiltration'][0:4],\"...\",model['infiltration'][-4:])\n print(\"I_range: [\",np.min(model['infiltration']),\",\",np.max(model['infiltration']),\"]\")\n print(\"shape I: \",np.shape(model['infiltration']))\n print(\"\")\n\n # ---------------\n # cleanup\n # ---------------\n if os.path.exists(tmp_folder):\n shutil.rmtree(tmp_folder)\n\n return model\n\n# read parameter sets\nff = open(infile, \"r\")\nparasets = ff.readlines()\nff.close()\n\nif skip is None:\n skip = np.int(parasets[0].strip().split(':')[1])\nelse:\n skip = np.int(skip)\nparasets = parasets[skip:]\n\nmodel_output = {}\n\n# this loop could be easily parallized and modified such that it\n# actually submits multiple tasks to a HPC\nfor iparaset,paraset in enumerate(parasets):\n\n paraset = list(map(float,paraset.strip().split()))\n model = model_function(paraset,run_id='run_set_'+str(iparaset))\n\n if iparaset == 0:\n for ikey in model.keys():\n model_output[ikey] = []\n\n\n for ikey in model.keys():\n\n model_output[ikey].append(model[ikey])\n\n\npickle.dump( model_output, open( outfile, \"wb\" ) )\n\nprint(\"wrote: '\"+outfile+\"'\")\n" ]
[ [ "numpy.sqrt", "numpy.empty" ], [ "numpy.min", "numpy.int", "numpy.max", "numpy.shape", "numpy.diff" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ConeyLiu/oap-raydp
[ "3fe728f01dbb6494d94c4abd65bc9aacff771080" ]
[ "python/raydp/spark/torch/dataset.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom collections.abc import Iterable\nfrom typing import Any, List, Optional\n\nimport numpy as np\nimport pandas\nimport torch\nfrom torch.utils.data import Dataset, DistributedSampler\n\nfrom raydp.spark.context import save_to_ray\nfrom raydp.spark.resource_manager.exchanger import SharedDataset\nfrom raydp.spark.utils import BLOCK_SIZE_BIT, divide_blocks\n\n\nclass _Dataset(Dataset):\n def __init__(self,\n feature_columns: List[str] = None,\n feature_shapes: Optional[List[Any]] = None,\n feature_types: Optional[List[torch.dtype]] = None,\n label_column: str = None,\n label_type: Optional[torch.dtype] = None):\n \"\"\"\n :param feature_columns: the feature columns in df\n :param feature_shapes: the each feature shape that need to return when loading this\n dataset. If it is not None, it's size must match the size of feature_columns.\n If it is None, we guess all are scalar value and return all as a tensor when\n loading this dataset.\n :param feature_types: the feature types. All will be casted into torch.float by default\n :param label_column: the label column in df\n :param label_type: the label type. 
It will be casted into torch.float by default.\n \"\"\"\n super(_Dataset, self).__init__()\n self._feature_columns = feature_columns\n self._feature_shapes = feature_shapes\n self._feature_types = feature_types\n self._label_column = label_column\n self._label_type = label_type\n\n self._feature_tensor = None\n self._label_tensor = None\n\n def _check_and_convert(self):\n # convert to list for convenience\n if not isinstance(self._feature_columns, List):\n self._feature_columns = [self._feature_columns]\n\n if self._feature_shapes:\n if not isinstance(self._feature_shapes, list):\n self._feature_shapes = [self._feature_shapes]\n\n assert len(self._feature_columns) == len(self._feature_shapes), \\\n \"The feature_shapes size must match the feature_columns\"\n for i in range(len(self._feature_shapes)):\n if not isinstance(self._feature_shapes[i], Iterable):\n self._feature_shapes[i] = [self._feature_shapes[i]]\n\n if self._feature_types:\n if not isinstance(self._feature_types, list):\n self._feature_types = [self._feature_types]\n\n assert len(self._feature_columns) == len(self._feature_types), \\\n \"The feature_types size must match the feature_columns\"\n for i in range(len(self._feature_types)):\n assert all(isinstance(dtype, torch.dtype) for dtype in self._feature_types), \\\n \"All value in feature_types should be torch.dtype instance\"\n\n if not self._feature_shapes and self._feature_types:\n assert all(dtype == self._feature_types[0] for dtype in self._feature_types), \\\n \"All dtypes should be same when feature_shapes doesn't provide\"\n\n if not self._feature_types:\n self._feature_types = [torch.float] * len(self._feature_columns)\n\n if not self._label_type:\n self._label_type = torch.float\n\n def _convert_to_tensor(self, df):\n if self._feature_shapes:\n tensors = []\n for col, shape, dtype in zip(self._feature_columns, self._feature_shapes,\n self._feature_types):\n column = df[col].values\n if column.dtype == np.object:\n if isinstance(column[0], np.ndarray):\n column = np.stack(column)\n elif isinstance(column[0], (list, tuple)):\n column = list(column)\n else:\n raise Exception(\n f\"Column {col}'s type: {type(column[0])} is not supported. 
It must \"\n \"be numpy built in type or numpy object of (ndarray, list, tuple)\")\n\n t = torch.as_tensor(column, dtype=dtype)\n if shape != [0]:\n t = t.view(*(-1, *shape))\n tensors.append(t)\n self._feature_tensor = tensors\n else:\n feature_columns = (self._feature_columns if\n len(self._feature_columns) > 1 else self._feature_columns[0])\n feature_df = df[feature_columns].values\n t = torch.as_tensor(feature_df, dtype=self._feature_types[0])\n self._feature_tensor = [t]\n\n label_df = df[self._label_column].values\n self._label_tensor = torch.as_tensor(label_df, dtype=self._label_type)\n\n def _get_next(self, index):\n label = self._label_tensor[index]\n features = [tensor[index] for tensor in self._feature_tensor]\n return (*features, label)\n\n\nclass RayDataset(_Dataset):\n \"\"\"\n Store Spark DataFrame or koalas.DataFrame into ray object store and wrap into a torch\n Dataset which could be used by torch DataLoader.\n \"\"\"\n def __init__(self,\n df: Any = None,\n feature_columns: List[str] = None,\n feature_shapes: Optional[List[Any]] = None,\n feature_types: Optional[List[torch.dtype]] = None,\n label_column: str = None,\n label_type: Optional[torch.dtype] = None):\n \"\"\"\n :param df: Spark DataFrame or Koalas.DataFrame\n \"\"\"\n super(RayDataset, self).__init__(feature_columns, feature_shapes,\n feature_types, label_column, label_type)\n self._unresolved_shared_dataset: SharedDataset = None\n self._resolved_shared_dataset: SharedDataset = None\n self._previous_block_index = -1\n\n self._check_and_convert()\n\n if df is not None:\n self._unresolved_shared_dataset = save_to_ray(df)\n\n def _resolve_with_indices(self,\n indices: List[int],\n plasma_store_socket_name: Optional[str]):\n resolved_shared_dataset = self._unresolved_shared_dataset.subset(indices)\n resolved_shared_dataset.set_plasma_store_socket_name(plasma_store_socket_name)\n resolved_shared_dataset.resolve()\n self._resolved_shared_dataset = resolved_shared_dataset\n\n def __getitem__(self, index):\n block_index = index >> BLOCK_SIZE_BIT\n block_inner_index = (block_index << BLOCK_SIZE_BIT) ^ index\n if block_index != self._previous_block_index:\n self._previous_block_index = block_index\n df = self._resolved_shared_dataset[block_index]\n self._convert_to_tensor(df)\n return self._get_next(block_inner_index)\n\n def __len__(self):\n \"\"\"Get the total size\"\"\"\n return self._unresolved_shared_dataset.total_size()\n\n def block_sizes(self) -> List[int]:\n \"\"\"Get the block sizes\"\"\"\n return self._unresolved_shared_dataset.partition_sizes()\n\n @classmethod\n def _custom_deserialize(cls,\n data_set: SharedDataset,\n feature_columns: List[str],\n feature_shapes: List[Any],\n feature_types: List[torch.dtype],\n label_column: str,\n label_type: torch.dtype):\n instance = cls(\n None, feature_columns, feature_shapes, feature_types, label_column, label_type)\n instance._unresolved_shared_dataset = data_set\n return instance\n\n def __reduce__(self):\n return (RayDataset._custom_deserialize,\n (self._unresolved_shared_dataset, self._feature_columns, self._feature_shapes,\n self._feature_types, self._label_column, self._label_type))\n\n\nclass BlockSetSampler(DistributedSampler):\n \"\"\"\n A distributed sampler for BlockSet.\n\n We will shuffle the blocks order and then shuffle the block inner if shuffle is set to True.\n \"\"\"\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, init_lazy=True):\n assert isinstance(dataset, RayDataset)\n self._args = (dataset, num_replicas, rank, 
shuffle)\n self._inited = False\n\n self._block_indices = None\n self._selected_indices = None\n\n if not init_lazy:\n self._init_lazy()\n\n def _init_lazy(self):\n \"\"\"\n This is a workaround because of ray sgd call initialize the data creator before of\n setup distributed components.\n \"\"\"\n if not self._inited:\n super(BlockSetSampler, self).__init__(*self._args)\n self._split_blocks()\n self._inited = True\n\n def _split_blocks(self):\n block_indexes, packed_selected_indexes = divide_blocks(\n self.dataset.block_sizes(), self.num_replicas, self.rank, self.shuffle)\n self._block_indices = block_indexes\n self._selected_indices = packed_selected_indexes\n\n def resolve(self, plasma_store_socket_name: Optional[str] = None):\n \"\"\"Manually trigger the underlying object transfer.\"\"\"\n self._init_lazy()\n self.dataset._resolve_with_indices(self._block_indices,\n plasma_store_socket_name)\n\n @property\n def block_indices(self):\n return self._block_indices\n\n def __iter__(self):\n self.resolve()\n # deterministically shuffle based on epoch\n np.random.seed(self.epoch)\n block_indices = list(range(len(self._block_indices)))\n if self.shuffle:\n np.random.shuffle(block_indices)\n\n indices = []\n for index in block_indices:\n tmp = self._selected_indices[index]\n tmp = np.copy(tmp)\n if self.shuffle:\n np.random.shuffle(tmp)\n indices += tmp.tolist()\n\n return iter(indices)\n\n def __len__(self):\n # if we use `if sampler` to determine whether the sampler is None,\n # it will call this method. This can be happened when the BlockSetSampler\n # used in the evaluation in ray TorchTrainer.\n self._init_lazy()\n return self.num_samples\n\n\nclass PandasDataset(_Dataset):\n \"\"\"\n A pandas dataset which support feature columns with different shapes.\n \"\"\"\n def __init__(self,\n df: pandas.DataFrame = None,\n feature_columns: List[str] = None,\n feature_shapes: Optional[List[Any]] = None,\n feature_types: Optional[List[torch.dtype]] = None,\n label_column: str = None,\n label_type: Optional[torch.dtype] = None):\n \"\"\"\n :param df: pandas DataFrame\n \"\"\"\n super(PandasDataset, self).__init__(feature_columns, feature_shapes,\n feature_types, label_column, label_type)\n self._check_and_convert()\n\n self._size = len(df)\n self._convert_to_tensor(df)\n\n def __getitem__(self, index):\n return self._get_next(index)\n\n def __len__(self):\n return self._size\n" ]
[ [ "numpy.random.seed", "numpy.random.shuffle", "numpy.stack", "numpy.copy", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
2anchao/NTS_NET
[ "3b0a58616cb4b44699a11541eac2777556169812" ]
[ "core/model.py" ]
[ "from torch import nn\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nfrom core import resnet\r\nimport numpy as np\r\nfrom core.anchors import generate_default_anchor_maps, hard_nms\r\nfrom config import Config as cfg\r\n\r\n\r\nclass ProposalNet(nn.Module):\r\n \"\"\"\r\n Navigator Network\r\n \"\"\"\r\n def __init__(self, in_channel=2048, inner_channel=128, out_channels=[6, 6, 9]):\r\n super(ProposalNet, self).__init__()\r\n self.down1 = nn.Conv2d(in_channel, inner_channel, 3, 1, 1)\r\n self.down2 = nn.Conv2d(inner_channel, inner_channel, 3, 2, 1)\r\n self.down3 = nn.Conv2d(inner_channel, inner_channel, 3, 2, 1)\r\n self.ReLU = nn.ReLU()\r\n\r\n self.tidy1 = nn.Conv2d(inner_channel, out_channels[0], 1, 1, 0) #32倍, 6 Anchor Box\r\n self.tidy2 = nn.Conv2d(inner_channel, out_channels[1], 1, 1, 0) #64倍, 6 Anchor Box\r\n self.tidy3 = nn.Conv2d(inner_channel, out_channels[2], 1, 1, 0) ##128倍, 9 Anchor Box\r\n\r\n def forward(self, x):\r\n batch_size = x.size(0)\r\n d1 = self.ReLU(self.down1(x)) # 32倍下采样, 14x14\r\n d2 = self.ReLU(self.down2(d1)) # 64倍下采样, 7x7\r\n d3 = self.ReLU(self.down3(d2)) # 128倍下采样, 4x4\r\n\r\n t1 = self.tidy1(d1).view(batch_size, -1)\r\n t2 = self.tidy2(d2).view(batch_size, -1)\r\n t3 = self.tidy3(d3).view(batch_size, -1)\r\n #一个像素点对应一个Anchor Box的得分\r\n return torch.cat((t1, t2, t3), dim=1)\r\n\r\n\r\nclass attention_net(nn.Module):\r\n def __init__(self, topN=4, num_class=196, fc_channel=2048, pad_side=224):\r\n super(attention_net, self).__init__()\r\n self.pretrained_model = resnet.resnet50(pretrained=True)\r\n self.pretrained_model.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.pretrained_model.fc = nn.Linear(fc_channel, num_class)\r\n self.proposal_net = ProposalNet()\r\n self.topN = topN\r\n #concat_net就是Scrutinizer Network\r\n self.concat_net = nn.Linear(fc_channel * (cfg.CAT_NUM + 1), num_class)\r\n #partcls_net就是Teacher Network\r\n self.partcls_net = nn.Linear(fc_channel, num_class)\r\n _, edge_anchors, _ = generate_default_anchor_maps()\r\n self.pad_side = pad_side\r\n #有padding操作,因为要抠图,所以anchor box坐标要更新\r\n self.edge_anchors = (edge_anchors + pad_side).astype(np.int)\r\n\r\n def forward(self, x):\r\n resnet_out, rpn_feature, feature = self.pretrained_model(x)\r\n x_pad = F.pad(x, (self.pad_side, self.pad_side, self.pad_side, self.pad_side), mode='constant', value=0)\r\n batch = x.size(0)\r\n # we will reshape rpn to shape: batch * nb_anchor\r\n rpn_score = self.proposal_net(rpn_feature.detach())\r\n all_cdds = [\r\n np.concatenate((x.reshape(-1, 1), self.edge_anchors.copy(), np.arange(0, len(x)).reshape(-1, 1)), axis=1)\r\n for x in rpn_score.data.cpu().numpy()]\r\n top_n_cdds = [hard_nms(x, topn=self.topN, iou_thresh=0.25) for x in all_cdds]\r\n top_n_cdds = np.array(top_n_cdds)\r\n top_n_index = top_n_cdds[:, :, -1].astype(np.int)\r\n top_n_index = torch.from_numpy(top_n_index).cuda()\r\n top_n_prob = torch.gather(rpn_score, dim=1, index=top_n_index)\r\n part_imgs = torch.zeros([batch, self.topN, 3, 224, 224]).cuda()\r\n for i in range(batch):\r\n for j in range(self.topN):\r\n [y0, x0, y1, x1] = top_n_cdds[i][j, 1:5].astype(np.int)\r\n part_imgs[i:i + 1, j] = F.interpolate(x_pad[i:i + 1, :, y0:y1, x0:x1], size=(224, 224), mode='bilinear',\r\n align_corners=True)\r\n part_imgs = part_imgs.view(batch * self.topN, 3, 224, 224)\r\n _, _, part_features = self.pretrained_model(part_imgs.detach())\r\n part_feature = part_features.view(batch, self.topN, -1)\r\n part_feature = part_feature[:, :cfg.CAT_NUM, 
...].contiguous()\r\n part_feature = part_feature.view(batch, -1)\r\n # concat_logits have the shape: B*num_class\r\n concat_out = torch.cat([part_feature, feature], dim=1)\r\n concat_logits = self.concat_net(concat_out)\r\n raw_logits = resnet_out\r\n # part_logits have the shape: B*N*num_class\r\n part_logits = self.partcls_net(part_features).view(batch, self.topN, -1)\r\n return [raw_logits, concat_logits, part_logits, top_n_index, top_n_prob]\r\n\r\n\r\ndef list_loss(logits, targets):\r\n temp = F.log_softmax(logits, -1)\r\n loss = [-temp[i][targets[i].item()] for i in range(logits.size(0))]\r\n #置信度越靠近1,Loss越靠近0\r\n return torch.stack(loss)\r\n\r\n\r\ndef ranking_loss(score, targets, proposal_num=cfg.PROPOSAL_NUM):\r\n loss = Variable(torch.zeros(1).cuda())\r\n batch_size = score.size(0)\r\n for i in range(proposal_num):\r\n targets_p = (targets > targets[:, i].unsqueeze(1)).type(torch.cuda.FloatTensor)\r\n pivot = score[:, i].unsqueeze(1)\r\n loss_p = (1 - pivot + score) * targets_p\r\n loss_p = torch.sum(F.relu(loss_p))\r\n loss += loss_p\r\n return loss / batch_size\r\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.cat", "torch.zeros", "torch.nn.Conv2d", "torch.gather", "torch.from_numpy", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.interpolate", "torch.stack", "torch.nn.ReLU", "numpy.array", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ProskuraPD/catboost
[ "d4593d4fbc8b8da66ff2d8b838578eba819d9d0d" ]
[ "catboost/python-package/catboost/core.py" ]
[ "import sys\nfrom copy import deepcopy\nfrom six import iteritems, string_types, integer_types\nimport os\nimport imp\nfrom collections import Iterable, Sequence, Mapping, MutableMapping\nimport warnings\nimport numpy as np\nimport ctypes\nimport platform\nimport tempfile\nfrom enum import Enum\nfrom operator import itemgetter\n\nif platform.system() == 'Linux':\n try:\n ctypes.CDLL('librt.so')\n except Exception:\n pass\n\ntry:\n from pandas import DataFrame, Series\nexcept ImportError:\n class DataFrame(object):\n pass\n\n class Series(object):\n pass\n\n\ndef get_so_paths(dir_name):\n dir_name = os.path.join(os.path.dirname(__file__), dir_name)\n list_dir = os.listdir(dir_name) if os.path.isdir(dir_name) else []\n return [os.path.join(dir_name, so_name) for so_name in list_dir if so_name.split('.')[-1] in ['so', 'pyd']]\n\n\ndef get_catboost_bin_module():\n if '_catboost' in sys.modules:\n return sys.modules['_catboost']\n so_paths = get_so_paths('./')\n for so_path in so_paths:\n try:\n loaded_catboost = imp.load_dynamic('_catboost', so_path)\n sys.modules['catboost._catboost'] = loaded_catboost\n return loaded_catboost\n except ImportError:\n pass\n import _catboost\n return _catboost\n\n\n_catboost = get_catboost_bin_module()\n_PoolBase = _catboost._PoolBase\n_CatBoost = _catboost._CatBoost\n_MetricCalcerBase = _catboost._MetricCalcerBase\n_cv = _catboost._cv\n_set_logger = _catboost._set_logger\n_reset_logger = _catboost._reset_logger\n_configure_malloc = _catboost._configure_malloc\nCatboostError = _catboost.CatboostError\n_metric_description_or_str_to_str = _catboost._metric_description_or_str_to_str\ncompute_wx_test = _catboost.compute_wx_test\nis_classification_objective = _catboost.is_classification_objective\nis_regression_objective = _catboost.is_regression_objective\n_PreprocessParams = _catboost._PreprocessParams\n_check_train_params = _catboost._check_train_params\n_MetadataHashProxy = _catboost._MetadataHashProxy\n_NumpyAwareEncoder = _catboost._NumpyAwareEncoder\nFeaturesData = _catboost.FeaturesData\n\n\nfrom contextlib import contextmanager # noqa E402\n\n\n_configure_malloc()\n_catboost._library_init()\n\nINTEGER_TYPES = (integer_types, np.integer)\nFLOAT_TYPES = (float, np.floating)\nSTRING_TYPES = (string_types,)\nARRAY_TYPES = (list, np.ndarray, DataFrame, Series)\n\n\n@contextmanager\ndef log_fixup():\n _set_logger(sys.stdout, sys.stderr)\n yield\n _reset_logger()\n\n\ndef _cast_to_base_types(value):\n # NOTE: Special case, avoiding new list creation.\n if isinstance(value, list):\n for index, element in enumerate(value):\n value[index] = _cast_to_base_types(element)\n return value\n if isinstance(value, ARRAY_TYPES[1:]):\n new_value = []\n for element in value:\n new_value.append(_cast_to_base_types(element))\n return new_value\n if isinstance(value, (Mapping, MutableMapping)):\n for key in list(value):\n value[key] = _cast_to_base_types(value[key])\n return value\n if isinstance(value, bool):\n return value\n if isinstance(value, INTEGER_TYPES):\n return int(value)\n if isinstance(value, FLOAT_TYPES):\n return float(value)\n return value\n\n\ndef metric_description_or_str_to_str(description):\n return _metric_description_or_str_to_str(description)\n\n\ndef _check_param_type(value, name, types, or_none=True):\n if not isinstance(value, types + ((type(None),) if or_none else ())):\n raise CatboostError('Parameter {} should have a type of {}, got {}'.format(name, [t.__class__.__name__ for t in types], type(value).__class__.__name__))\n\n\ndef 
_process_verbose(metric_period=None, verbose=None, logging_level=None, verbose_eval=None, silent=None):\n _check_param_type(metric_period, 'metric_period', (int,))\n _check_param_type(verbose, 'verbose', (bool, int))\n _check_param_type(logging_level, 'logging_level', (str,))\n _check_param_type(verbose_eval, 'verbose_eval', (bool, int))\n _check_param_type(silent, 'silent', (bool,))\n\n params = locals()\n exclusive_params = ['verbose', 'logging_level', 'verbose_eval', 'silent']\n at_most_one = sum([params[exclusive] is not None for exclusive in exclusive_params])\n if at_most_one > 1:\n raise CatboostError('Only one of parameters {} should be set'.format(exclusive_params.keys()))\n\n if verbose is None:\n if silent is not None:\n verbose = not silent\n elif verbose_eval is not None:\n verbose = verbose_eval\n if verbose is not None:\n logging_level = 'Verbose' if verbose else 'Silent'\n verbose = int(verbose)\n\n if isinstance(metric_period, int):\n if metric_period <= 0:\n raise CatboostError('metric_period should be positive.')\n if verbose is not None:\n if verbose % metric_period != 0:\n raise CatboostError('verbose should be a multiple of metric_period')\n\n return (metric_period, verbose, logging_level)\n\n\ndef enum_from_enum_or_str(enum_type, arg):\n if isinstance(arg, enum_type):\n return arg\n elif isinstance(arg, str):\n return enum_type[arg]\n else:\n raise Exception(\"can't create enum \" + str(enum_type) + \" from type \" + str(type(arg)))\n\n\nclass EFstrType(Enum):\n \"\"\"Calculate score for every feature.\"\"\"\n FeatureImportance = 0\n \"\"\"Calculate pairwise score between every feature.\"\"\"\n Interaction = 1\n \"\"\"Calculate SHAP Values for every object.\"\"\"\n ShapValues = 2\n\n\nclass Pool(_PoolBase):\n \"\"\"\n Pool used in CatBoost as data structure to train model from.\n \"\"\"\n\n def __init__(self, data, label=None, cat_features=None, column_description=None, pairs=None, delimiter='\\t',\n has_header=False, weight=None, group_id=None, group_weight=None, subgroup_id=None, pairs_weight=None, baseline=None,\n feature_names=None, thread_count=-1):\n \"\"\"\n Pool is a internal data structure that used by CatBoost.\n You can construct Pool from list, numpy.array, pandas.DataFrame, pandas.Series.\n\n Parameters\n ----------\n data : list or numpy.array or pandas.DataFrame or pandas.Series or FeaturesData or string\n Data source of Pool.\n If list or numpy.arrays or pandas.DataFrame or pandas.Series, giving 2 dimensional array like data.\n If FeaturesData - see FeaturesData description for details, 'cat_features' and 'feature_names'\n parameters must be equal to None in this case\n If string, giving the path to the file with data in catboost format.\n\n label : list or numpy.arrays or pandas.DataFrame or pandas.Series, optional (default=None)\n Label of the training data.\n If not None, giving 1 dimensional array like data with floats.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Must be None if 'data' parameter has FeatureData type\n\n column_description : string, optional (default=None)\n ColumnsDescription parameter.\n There are several columns description types: Label, Categ, Num, Auxiliary, DocId, Weight, Baseline, GroupId, Timestamp.\n All columns are Num as default, it's not necessary to specify\n this type of columns. 
Default Label column index is 0 (zero).\n If None, Label column is 0 (zero) as default, all data columns are Num as default.\n If string, giving the path to the file with ColumnsDescription in column_description format.\n\n pairs : list or numpy.array or pandas.DataFrame or string\n The pairs description.\n If list or numpy.arrays or pandas.DataFrame, giving 2 dimensional.\n The shape should be Nx2, where N is the pairs' count. The first element of pair is\n the index of winner object in training set. The second element of pair is\n the index of loser object in training set.\n If string, giving the path to the file with pairs description.\n\n delimiter : string, optional (default='\\t')\n Delimiter to use for separate features in file.\n Should be only one symbol, otherwise would be taken only the first character of the string.\n\n has_header : boolm optional (default=False)\n If True, read column names from first line.\n\n weight : list or numpy.array, optional (default=None)\n Weight for each instance.\n If not None, giving 1 dimensional array like data.\n\n group_id : list or numpy.array, optional (default=None)\n group id for each instance.\n If not None, giving 1 dimensional array like data.\n\n group_weight : list or numpy.array, optional (default=None)\n Group weight for each instance.\n If not None, giving 1 dimensional array like data.\n\n subgroup_id : list or numpy.array, optional (default=None)\n subgroup id for each instance.\n If not None, giving 1 dimensional array like data.\n\n pairs_weight : list or numpy.array, optional (default=None)\n Weight for each pair.\n If not None, giving 1 dimensional array like pairs.\n\n baseline : list or numpy.array, optional (default=None)\n Baseline for each instance.\n If not None, giving 2 dimensional array like data.\n\n feature_names : list, optional (default=None)\n Names for each given data_feature.\n Must be None if 'data' parameter has FeatureData type\n\n thread_count : int, optional (default=-1)\n Thread count to read data from file.\n Use only with reading data from file.\n If -1, then the number of threads is set to the number of cores.\n\n \"\"\"\n if data is not None:\n self._check_data_type(data, cat_features)\n self._check_data_empty(data)\n if pairs is not None and isinstance(data, STRING_TYPES) != isinstance(pairs, STRING_TYPES):\n raise CatboostError(\"data and pairs parameters should be the same types.\")\n if column_description is not None and not isinstance(data, STRING_TYPES):\n raise CatboostError(\"data should be the string type if column_description parameter is specified.\")\n if isinstance(data, STRING_TYPES):\n if any(v is not None for v in [cat_features, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names]):\n raise CatboostError(\"cat_features, weight, group_id, group_weight, subgroup_id, pairs_weight, \\\n baseline, feature_names should have the None type when the pool is read from the file.\")\n self._read(data, column_description, pairs, delimiter, has_header, thread_count)\n else:\n if isinstance(data, FeaturesData):\n if any(v is not None for v in [cat_features, feature_names]):\n raise CatboostError(\n \"cat_features, feature_names should have the None type when 'data' parameter \"\n \" has FeaturesData type\"\n )\n elif isinstance(data, np.ndarray):\n if (data.dtype == np.float32) and (cat_features is not None) and (len(cat_features) > 0):\n raise CatboostError(\n \"'data' is numpy array of np.float32, it means no categorical features,\"\n \" but 'cat_features' parameter 
specifies nonzero number of categorical features\"\n )\n\n self._init(data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names)\n super(Pool, self).__init__()\n\n def _check_files(self, data, column_description, pairs):\n \"\"\"\n Check files existence.\n \"\"\"\n if not os.path.isfile(data):\n raise CatboostError(\"Invalid data path='{}': file does not exist.\".format(data))\n if column_description is not None and not os.path.isfile(column_description):\n raise CatboostError(\"Invalid column_description path='{}': file does not exist.\".format(column_description))\n if pairs is not None and not os.path.isfile(pairs):\n raise CatboostError(\"Invalid pairs path='{}': file does not exist.\".format(pairs))\n\n def _check_delimiter(self, delimiter):\n if not isinstance(delimiter, STRING_TYPES):\n raise CatboostError(\"Invalid delimiter type={} : must be str().\".format(type(delimiter)))\n if len(delimiter) < 1:\n raise CatboostError(\"Invalid delimiter length={} : must be > 0.\".format(len(delimiter)))\n\n def _check_column_description_type(self, column_description):\n \"\"\"\n Check type of column_description parameter.\n \"\"\"\n if not isinstance(column_description, STRING_TYPES):\n raise CatboostError(\"Invalid column_description type={}: must be str().\".format(type(column_description)))\n\n def _check_cf_type(self, cat_features):\n \"\"\"\n Check type of cat_feature parameter.\n \"\"\"\n if not isinstance(cat_features, (list, np.ndarray)):\n raise CatboostError(\"Invalid cat_features type={}: must be list() or np.ndarray().\".format(type(cat_features)))\n\n def _check_cf_value(self, cat_features, features_count):\n \"\"\"\n Check values in cat_feature parameter. Must be int indices.\n \"\"\"\n for indx, feature in enumerate(cat_features):\n if not isinstance(feature, INTEGER_TYPES):\n raise CatboostError(\"Invalid cat_features[{}] = {} value type={}: must be int().\".format(indx, feature, type(feature)))\n if feature >= features_count:\n raise CatboostError(\"Invalid cat_features[{}] = {} value: must be < {}.\".format(indx, feature, features_count))\n\n def _check_pairs_type(self, pairs):\n \"\"\"\n Check type of pairs parameter.\n \"\"\"\n if not isinstance(pairs, (list, np.ndarray, DataFrame)):\n raise CatboostError(\"Invalid pairs type={}: must be list(), np.ndarray() or pd.DataFrame.\".format(type(pairs)))\n\n def _check_pairs_value(self, pairs):\n \"\"\"\n Check values in pairs parameter. 
Must be int indices.\n \"\"\"\n for pair_id, pair in enumerate(pairs):\n if (len(pair) != 2):\n raise CatboostError(\"Length of pairs[{}] isn't equal to 2.\".format(pair_id))\n for i, index in enumerate(pair):\n if not isinstance(index, INTEGER_TYPES):\n raise CatboostError(\"Invalid pairs[{}][{}] = {} value type={}: must be int().\".format(pair_id, i, index, type(index)))\n\n def _check_data_type(self, data, cat_features):\n \"\"\"\n Check type of data.\n \"\"\"\n if not isinstance(data, (STRING_TYPES, ARRAY_TYPES, FeaturesData)):\n raise CatboostError(\"Invalid data type={}: data must be list(), np.ndarray(), DataFrame(), Series(), FeaturesData or filename str().\".format(type(data)))\n\n def _check_data_empty(self, data):\n \"\"\"\n Check data is not empty (0 objects is ok).\n note: already checked if data is FeatureType, so no need to check again\n \"\"\"\n\n if isinstance(data, STRING_TYPES):\n if not data:\n raise CatboostError(\"Features filename is empty.\")\n elif isinstance(data, ARRAY_TYPES):\n data_shape = np.shape(data)\n if len(data_shape) == 1 and data_shape[0] > 0:\n if isinstance(data[0], Iterable):\n data_shape = tuple(data_shape + tuple([len(data[0])]))\n else:\n data_shape = tuple(data_shape + tuple([1]))\n if not len(data_shape) == 2:\n raise CatboostError(\"Input data has invalid shape: {}. Must be 2 dimensional\".format(data_shape))\n if data_shape[1] == 0:\n raise CatboostError(\"Input data must have at least one feature\")\n\n def _check_label_type(self, label):\n \"\"\"\n Check type of label.\n \"\"\"\n if not isinstance(label, ARRAY_TYPES):\n raise CatboostError(\"Invalid label type={}: must be array like.\".format(type(label)))\n\n def _check_label_empty(self, label):\n \"\"\"\n Check label is not empty.\n \"\"\"\n if len(label) == 0:\n raise CatboostError(\"Labels variable is empty.\")\n\n def _check_label_shape(self, label, samples_count):\n \"\"\"\n Check label length and dimension.\n \"\"\"\n if len(label) != samples_count:\n raise CatboostError(\"Length of label={} and length of data={} is different.\".format(len(label), samples_count))\n if isinstance(label[0], Iterable) and not isinstance(label[0], STRING_TYPES):\n if len(label[0]) > 1:\n raise CatboostError(\"Input label cannot have multiple values per row.\")\n\n def _check_baseline_type(self, baseline):\n \"\"\"\n Check type of baseline parameter.\n \"\"\"\n if not isinstance(baseline, ARRAY_TYPES):\n raise CatboostError(\"Invalid baseline type={}: must be array like.\".format(type(baseline)))\n\n def _check_baseline_shape(self, baseline, samples_count):\n \"\"\"\n Check baseline length and dimension.\n \"\"\"\n if len(baseline) != samples_count:\n raise CatboostError(\"Length of baseline={} and length of data={} are different.\".format(len(baseline), samples_count))\n if not isinstance(baseline[0], Iterable) or isinstance(baseline[0], STRING_TYPES):\n raise CatboostError(\"Baseline must be 2 dimensional data, 1 column for each class.\")\n try:\n if np.array(baseline).dtype not in (np.dtype('float'), np.dtype('float32'), np.dtype('int')):\n raise CatboostError()\n except CatboostError:\n raise CatboostError(\"Invalid baseline value type={}: must be float or int.\".format(np.array(baseline).dtype))\n\n def _check_weight_type(self, weight):\n \"\"\"\n Check type of weight parameter.\n \"\"\"\n if not isinstance(weight, ARRAY_TYPES):\n raise CatboostError(\"Invalid weight type={}: must be array like.\".format(type(weight)))\n\n def _check_weight_shape(self, weight, samples_count):\n \"\"\"\n Check weight 
length.\n \"\"\"\n if len(weight) != samples_count:\n raise CatboostError(\"Length of weight={} and length of data={} are different.\".format(len(weight), samples_count))\n if not isinstance(weight[0], (INTEGER_TYPES, FLOAT_TYPES)):\n raise CatboostError(\"Invalid weight value type={}: must be 1 dimensional data with int, float or long types.\".format(type(weight[0])))\n\n def _check_group_id_type(self, group_id):\n \"\"\"\n Check type of group_id parameter.\n \"\"\"\n if not isinstance(group_id, ARRAY_TYPES):\n raise CatboostError(\"Invalid group_id type={}: must be array like.\".format(type(group_id)))\n\n def _check_group_id_shape(self, group_id, samples_count):\n \"\"\"\n Check group_id length.\n \"\"\"\n if len(group_id) != samples_count:\n raise CatboostError(\"Length of group_id={} and length of data={} are different.\".format(len(group_id), samples_count))\n\n def _check_group_weight_type(self, group_weight):\n \"\"\"\n Check type of group_weight parameter.\n \"\"\"\n if not isinstance(group_weight, ARRAY_TYPES):\n raise CatboostError(\"Invalid group_weight type={}: must be array like.\".format(type(group_weight)))\n\n def _check_group_weight_shape(self, group_weight, samples_count):\n \"\"\"\n Check group_weight length.\n \"\"\"\n if len(group_weight) != samples_count:\n raise CatboostError(\"Length of group_weight={} and length of data={} are different.\".format(len(group_weight), samples_count))\n if not isinstance(group_weight[0], (FLOAT_TYPES)):\n raise CatboostError(\"Invalid group_weight value type={}: must be 1 dimensional data with float types.\".format(type(group_weight[0])))\n\n def _check_subgroup_id_type(self, subgroup_id):\n \"\"\"\n Check type of subgroup_id parameter.\n \"\"\"\n if not isinstance(subgroup_id, ARRAY_TYPES):\n raise CatboostError(\"Invalid subgroup_id type={}: must be array like.\".format(type(subgroup_id)))\n\n def _check_subgroup_id_shape(self, subgroup_id, samples_count):\n \"\"\"\n Check subgroup_id length.\n \"\"\"\n if len(subgroup_id) != samples_count:\n raise CatboostError(\"Length of subgroup_id={} and length of data={} are different.\".format(len(subgroup_id), samples_count))\n\n def _check_feature_names(self, feature_names, num_col=None):\n if num_col is None:\n num_col = self.num_col()\n if not isinstance(feature_names, Sequence):\n raise CatboostError(\"Invalid feature_names type={} : must be list\".format(type(feature_names)))\n if len(feature_names) != num_col:\n raise CatboostError(\"Invalid length feature_names={} : must be equal to number of columns in data={}\".format(len(feature_names), num_col))\n\n def _check_thread_count(self, thread_count):\n if not isinstance(thread_count, INTEGER_TYPES):\n raise CatboostError(\"Invalid thread_count type={} : must be int\".format(type(thread_count)))\n\n def slice(self, rindex):\n if not isinstance(rindex, ARRAY_TYPES):\n raise CatboostError(\"Invalid rindex type={} : must be list or numpy.array\".format(type(rindex)))\n slicedPool = Pool(None)\n slicedPool._take_slice(self, rindex)\n return slicedPool\n\n def set_pairs(self, pairs):\n self._check_pairs_type(pairs)\n if isinstance(pairs, DataFrame):\n pairs = pairs.values\n self._check_pairs_value(pairs)\n self._set_pairs(pairs)\n return self\n\n def set_feature_names(self, feature_names):\n self._check_feature_names(feature_names)\n self._set_feature_names(feature_names)\n return self\n\n def set_baseline(self, baseline):\n self._check_baseline_type(baseline)\n baseline = self._if_pandas_to_numpy(baseline)\n baseline = np.reshape(baseline, 
(self.num_row(), -1))\n self._check_baseline_shape(baseline, self.num_row())\n self._set_baseline(baseline)\n return self\n\n def set_weight(self, weight):\n self._check_weight_type(weight)\n weight = self._if_pandas_to_numpy(weight)\n self._check_weight_shape(weight, self.num_row())\n self._set_weight(weight)\n return self\n\n def set_group_id(self, group_id):\n self._check_group_id_type(group_id)\n group_id = self._if_pandas_to_numpy(group_id)\n self._check_group_id_shape(group_id, self.num_row())\n self._set_group_id(group_id)\n return self\n\n def set_group_weight(self, group_weight):\n self._check_group_weight_type(group_weight)\n group_weight = self._if_pandas_to_numpy(group_weight)\n self._check_group_weight_shape(group_weight, self.num_row())\n self._set_group_weight(group_weight)\n return self\n\n def set_subgroup_id(self, subgroup_id):\n self._check_subgroup_id_type(subgroup_id)\n subgroup_id = self._if_pandas_to_numpy(subgroup_id)\n self._check_subgroup_id_shape(subgroup_id, self.num_row())\n self._set_subgroup_id(subgroup_id)\n return self\n\n def set_pairs_weight(self, pairs_weight):\n self._check_weight_type(pairs_weight)\n pairs_weight = self._if_pandas_to_numpy(pairs_weight)\n self._check_weight_shape(pairs_weight, self.num_pairs())\n self._set_pairs_weight(pairs_weight)\n return self\n\n def _if_pandas_to_numpy(self, array):\n if isinstance(array, Series):\n array = array.values\n if isinstance(array, DataFrame):\n array = np.transpose(array.values)[0]\n return array\n\n def _read(self, pool_file, column_description, pairs, delimiter, has_header, thread_count):\n \"\"\"\n Read Pool from file.\n \"\"\"\n with log_fixup():\n self._check_files(pool_file, column_description, pairs)\n self._check_delimiter(delimiter)\n if column_description is None:\n column_description = ''\n else:\n self._check_column_description_type(column_description)\n if pairs is None:\n pairs = ''\n self._check_thread_count(thread_count)\n self._read_pool(pool_file, column_description, pairs, delimiter[0], has_header, thread_count)\n\n def _init(self, data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names):\n \"\"\"\n Initialize Pool from array like data.\n \"\"\"\n if isinstance(data, DataFrame):\n feature_names = list(data.columns)\n if isinstance(data, Series):\n data = data.values.tolist()\n if isinstance(data, FeaturesData):\n samples_count = data.get_object_count()\n features_count = data.get_feature_count()\n else:\n if len(np.shape(data)) == 1:\n data = np.expand_dims(data, 1)\n samples_count, features_count = np.shape(data)\n pairs_len = 0\n if label is not None:\n self._check_label_type(label)\n self._check_label_empty(label)\n label = self._if_pandas_to_numpy(label)\n self._check_label_shape(label, samples_count)\n if cat_features is not None:\n self._check_cf_type(cat_features)\n self._check_cf_value(cat_features, features_count)\n if pairs is not None:\n self._check_pairs_type(pairs)\n if isinstance(pairs, DataFrame):\n pairs = pairs.values\n self._check_pairs_value(pairs)\n pairs_len = np.shape(pairs)[0]\n if weight is not None:\n self._check_weight_type(weight)\n weight = self._if_pandas_to_numpy(weight)\n self._check_weight_shape(weight, samples_count)\n if group_id is not None:\n self._check_group_id_type(group_id)\n group_id = self._if_pandas_to_numpy(group_id)\n self._check_group_id_shape(group_id, samples_count)\n if group_weight is not None:\n self._check_group_weight_type(group_weight)\n group_weight = 
self._if_pandas_to_numpy(group_weight)\n self._check_group_weight_shape(group_weight, samples_count)\n if subgroup_id is not None:\n self._check_subgroup_id_type(subgroup_id)\n subgroup_id = self._if_pandas_to_numpy(subgroup_id)\n self._check_subgroup_id_shape(subgroup_id, samples_count)\n if pairs_weight is not None:\n self._check_weight_type(pairs_weight)\n pairs_weight = self._if_pandas_to_numpy(pairs_weight)\n self._check_weight_shape(pairs_weight, pairs_len)\n if baseline is not None:\n self._check_baseline_type(baseline)\n baseline = self._if_pandas_to_numpy(baseline)\n baseline = np.reshape(baseline, (samples_count, -1))\n self._check_baseline_shape(baseline, samples_count)\n if feature_names is not None:\n self._check_feature_names(feature_names, features_count)\n self._init_pool(data, label, cat_features, pairs, weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, feature_names)\n\n\ndef _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description):\n train_pool = None\n if isinstance(X, Pool):\n train_pool = X\n if any(v is not None for v in [cat_features, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline]):\n raise CatboostError(\"cat_features, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline should have the None type when X has catboost.Pool type.\")\n if X.get_label() is None and X.num_pairs() == 0:\n raise CatboostError(\"Label in X has not initialized.\")\n if y is not None:\n raise CatboostError(\"Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool.\")\n elif isinstance(X, STRING_TYPES):\n train_pool = Pool(data=X, pairs=pairs, column_description=column_description)\n else:\n if y is None:\n raise CatboostError(\"y has not initialized in fit(): X is not catboost.Pool object, y must be not None in fit().\")\n train_pool = Pool(X, y, cat_features=cat_features, pairs=pairs, weight=sample_weight, group_id=group_id,\n group_weight=group_weight, subgroup_id=subgroup_id, pairs_weight=pairs_weight, baseline=baseline)\n return train_pool\n\n\ndef _clear_training_files(train_dir):\n for filename in ['catboost_training.json']:\n path = os.path.join(train_dir, filename)\n if os.path.exists(path):\n os.remove(path)\n\n\ndef _get_train_dir(params):\n return params.get('train_dir', 'catboost_info')\n\n\ndef _get_catboost_widget(train_dir):\n _clear_training_files(train_dir)\n try:\n from .widget import MetricVisualizer\n return MetricVisualizer(train_dir)\n except ImportError as e:\n warnings.warn(\"To draw plots in fit() method you should install ipywidgets and ipython\")\n raise ImportError(str(e))\n\n\n@contextmanager\ndef plot_wrapper(plot, params):\n if plot:\n widget = _get_catboost_widget(_get_train_dir(params))\n widget._run_update()\n try:\n yield\n finally:\n if plot:\n widget._stop_update()\n\n\n# the first element of the synonyms list is the canonical name\ndef _process_synonyms_group(synonyms, params):\n assert len(synonyms) > 1, 'there should be more than one synonym'\n\n value = None\n for synonym in synonyms:\n if synonym in params:\n if value is not None:\n raise CatboostError('only one of the parameters ' + (', '.join(synonyms)) + ' should be initialized.')\n value = params[synonym]\n del params[synonym]\n\n if value is not None:\n params[synonyms[0]] = value\n\n\ndef _process_synonyms(params):\n if 'objective' in params:\n params['loss_function'] = params['objective']\n del 
params['objective']\n\n if 'scale_pos_weight' in params:\n if 'loss_function' in params and params['loss_function'] != 'Logloss':\n raise CatboostError('scale_pos_weight is only supported for binary classification Logloss loss')\n if 'class_weights' in params:\n raise CatboostError('only one of parameters scale_pos_weight, class_weights should be initialized.')\n params['class_weights'] = [1.0, params['scale_pos_weight']]\n del params['scale_pos_weight']\n\n _process_synonyms_group(['learning_rate', 'eta'], params)\n _process_synonyms_group(['border_count', 'max_bin'], params)\n _process_synonyms_group(['depth', 'max_depth'], params)\n _process_synonyms_group(['rsm', 'colsample_bylevel'], params)\n _process_synonyms_group(['random_seed', 'random_state'], params)\n _process_synonyms_group(['l2_leaf_reg', 'reg_lambda'], params)\n _process_synonyms_group(['iterations', 'n_estimators', 'num_boost_round', 'num_trees'], params)\n _process_synonyms_group(['od_wait', 'early_stopping_rounds'], params)\n _process_synonyms_group(['custom_metric', 'custom_loss'], params)\n\n metric_period = None\n if 'metric_period' in params:\n metric_period = params['metric_period']\n del params['metric_period']\n\n verbose = None\n if 'verbose' in params:\n verbose = params['verbose']\n del params['verbose']\n\n logging_level = None\n if 'logging_level' in params:\n logging_level = params['logging_level']\n del params['logging_level']\n\n verbose_eval = None\n if 'verbose_eval' in params:\n verbose_eval = params['verbose_eval']\n del params['verbose_eval']\n\n silent = None\n if 'silent' in params:\n silent = params['silent']\n del params['silent']\n\n metric_period, verbose, logging_level = _process_verbose(metric_period, verbose, logging_level, verbose_eval, silent)\n\n if metric_period is not None:\n params['metric_period'] = metric_period\n if verbose is not None:\n params['verbose'] = verbose\n if logging_level is not None:\n params['logging_level'] = logging_level\n\n if 'used_ram_limit' in params:\n params['used_ram_limit'] = str(params['used_ram_limit'])\n\n\nclass _CatBoostBase(object):\n def __init__(self, params):\n self._init_params = params.copy() if params is not None else {}\n self._object = _CatBoost()\n\n def __getstate__(self):\n params = self._init_params.copy()\n test_evals = self._object._get_test_evals()\n if test_evals:\n params['_test_evals'] = test_evals\n if self.is_fitted():\n params['__model'] = self._serialize_model()\n for attr in ['_classes', '_feature_importance']:\n if getattr(self, attr, None) is not None:\n params[attr] = getattr(self, attr, None)\n return params\n\n def __setstate__(self, state):\n if '_object' not in dict(self.__dict__.items()):\n self._object = _CatBoost()\n if '_init_params' not in dict(self.__dict__.items()):\n self._init_params = {}\n if '__model' in state:\n self._deserialize_model(state['__model'])\n self._set_trained_model_attributes()\n del state['__model']\n if '_test_eval' in state:\n self._set_test_evals([state['_test_eval']])\n del state['_test_eval']\n if '_test_evals' in state:\n self._set_test_evals(state['_test_evals'])\n del state['_test_evals']\n for attr in ['_classes', '_feature_importance']:\n if attr in state:\n setattr(self, attr, state[attr])\n del state[attr]\n self._init_params.update(state)\n\n def __copy__(self):\n return self.__deepcopy__(None)\n\n def __deepcopy__(self, _):\n state = self.__getstate__()\n model = self.__class__()\n model.__setstate__(state)\n return model\n\n def copy(self):\n return self.__copy__()\n\n def 
is_fitted(self):\n return getattr(self, '_random_seed', None) is not None\n\n def _set_trained_model_attributes(self):\n setattr(self, '_random_seed', self._object._get_random_seed())\n setattr(self, '_learning_rate', self._object._get_learning_rate())\n setattr(self, '_tree_count', self._object._get_tree_count())\n\n def _train(self, train_pool, test_pool, params, allow_clear_pool):\n self._object._train(train_pool, test_pool, params, allow_clear_pool)\n self._set_trained_model_attributes()\n\n def _set_test_evals(self, test_evals):\n self._object._set_test_evals(test_evals)\n\n def get_test_eval(self):\n test_evals = self._object._get_test_evals()\n if len(test_evals) == 0:\n if self.is_fitted():\n raise CatboostError('The model was trained without eval set.')\n else:\n raise CatboostError('You should train the model first.')\n if len(test_evals) > 1:\n raise CatboostError(\"With multiple eval sets use 'get_test_evals()'\")\n test_eval = test_evals[0]\n return test_eval[0] if len(test_eval) == 1 else test_eval\n\n def get_test_evals(self):\n test_evals = self._object._get_test_evals()\n if len(test_evals) == 0:\n if self.is_fitted():\n raise CatboostError('The model was trained without eval set.')\n else:\n raise CatboostError('You should train the model first.')\n return test_evals\n\n def get_evals_result(self):\n return self._object._get_metrics_evals()\n\n def get_best_score(self):\n return self._object._get_best_score()\n\n def get_best_iteration(self):\n return self._object._get_best_iteration()\n\n def _get_float_feature_indices(self):\n return self._object._get_float_feature_indices()\n\n def _get_cat_feature_indices(self):\n return self._object._get_cat_feature_indices()\n\n def _base_predict(self, pool, prediction_type, ntree_start, ntree_end, thread_count, verbose):\n return self._object._base_predict(pool, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def _base_predict_multi(self, pool, prediction_type, ntree_start, ntree_end, thread_count, verbose):\n return self._object._base_predict_multi(pool, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def _staged_predict_iterator(self, pool, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose):\n return self._object._staged_predict_iterator(pool, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def _base_eval_metrics(self, pool, metrics_description, ntree_start, ntree_end, eval_period, thread_count, result_dir, tmp_dir):\n metrics_description_list = metrics_description if isinstance(metrics_description, list) else [metrics_description]\n return self._object._base_eval_metrics(pool, metrics_description_list, ntree_start, ntree_end, eval_period, thread_count, result_dir, tmp_dir)\n\n def _calc_fstr(self, fstr_type, pool, thread_count, verbose):\n \"\"\"returns (fstr_values, feature_ids).\"\"\"\n return self._object._calc_fstr(fstr_type.name, pool, thread_count, verbose)\n\n def _calc_ostr(self, train_pool, test_pool, top_size, ostr_type, update_method, importance_values_sign, thread_count, verbose):\n return self._object._calc_ostr(train_pool, test_pool, top_size, ostr_type, update_method, importance_values_sign, thread_count, verbose)\n\n def _base_shrink(self, ntree_start, ntree_end):\n self._object._base_shrink(ntree_start, ntree_end)\n self._set_trained_model_attributes()\n\n def _base_drop_unused_features(self):\n self._object._base_drop_unused_features()\n\n def _save_model(self, output_file, format, export_parameters, pool):\n 
import json\n if self.is_fitted():\n params_string = \"\"\n if export_parameters:\n params_string = json.dumps(export_parameters, cls=_NumpyAwareEncoder)\n\n self._object._save_model(output_file, format, params_string, pool)\n\n def _load_model(self, model_file, format):\n self._object._load_model(model_file, format)\n self._set_trained_model_attributes()\n for key, value in iteritems(self._get_params()):\n self._init_params[key] = value\n\n def _serialize_model(self):\n return self._object._serialize_model()\n\n def _deserialize_model(self, dump_model_str):\n self._object._deserialize_model(dump_model_str)\n\n def _sum_models(self, models_base, weights=None, ctr_merge_policy='IntersectingCountersAverage'):\n if weights is None:\n weights = [1.0 for _ in models_base]\n models_inner = [model._object for model in models_base]\n self._object._sum_models(models_inner, weights, ctr_merge_policy)\n setattr(self, '_random_seed', 0)\n setattr(self, '_learning_rate', 0)\n setattr(self, '_tree_count', self._object._get_tree_count())\n\n def _get_params(self):\n params = self._object._get_params()\n init_params = self._init_params.copy()\n for key, value in iteritems(init_params):\n if key not in params:\n params[key] = value\n return params\n\n def _is_classification_objective(self, loss_function):\n return isinstance(loss_function, str) and is_classification_objective(loss_function)\n\n def _is_regression_objective(self, loss_function):\n return isinstance(loss_function, str) and is_regression_objective(loss_function)\n\n def get_metadata(self):\n return self._object._get_metadata_wrapper()\n\n @property\n def metadata_(self):\n raise CatboostError(\"metadata_ property is not supported anymore, use get_metadata() method instead.\")\n\n @property\n def is_fitted_(self):\n raise CatboostError(\"is_fitted_ property is not supported anymore, use is_fitted() method instead.\")\n\n @property\n def tree_count_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return getattr(self, '_tree_count')\n\n @property\n def random_seed_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return getattr(self, '_random_seed')\n\n @property\n def learning_rate_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return getattr(self, '_learning_rate')\n\n @property\n def feature_names_(self):\n if not self.is_fitted():\n raise CatboostError('Model is not fitted.')\n return self._object._get_feature_names()\n\n @property\n def evals_result_(self):\n return self.get_evals_result()\n\n @property\n def best_score_(self):\n return self.get_best_score()\n\n @property\n def best_iteration_(self):\n return self.get_best_iteration()\n\n\ndef _check_param_types(params):\n if not isinstance(params, (Mapping, MutableMapping)):\n raise CatboostError(\"Invalid params type={}: must be dict().\".format(type(params)))\n if 'ctr_description' in params:\n if not isinstance(params['ctr_description'], Sequence):\n raise CatboostError(\"Invalid ctr_description type={} : must be list of strings\".format(type(params['ctr_description'])))\n if 'custom_loss' in params:\n if isinstance(params['custom_loss'], STRING_TYPES):\n params['custom_loss'] = [params['custom_loss']]\n if not isinstance(params['custom_loss'], Sequence):\n raise CatboostError(\"Invalid `custom_loss` type={} : must be string or list of strings.\".format(type(params['custom_loss'])))\n if 'custom_metric' in params:\n if isinstance(params['custom_metric'], STRING_TYPES):\n 
params['custom_metric'] = [params['custom_metric']]\n if not isinstance(params['custom_metric'], Sequence):\n raise CatboostError(\"Invalid `custom_metric` type={} : must be string or list of strings.\".format(type(params['custom_metric'])))\n\n\ndef _params_type_cast(params):\n casted_params = {}\n for key, value in iteritems(params):\n value = _cast_to_base_types(value)\n casted_params[key] = value\n return casted_params\n\n\nclass CatBoost(_CatBoostBase):\n \"\"\"\n CatBoost model, that contains training, prediction and evaluation.\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"\n Initialize the CatBoost.\n\n Parameters\n ----------\n params : dict\n Parameters for CatBoost.\n If None, all params are set to their defaults.\n If dict, overriding parameters present in dict.\n \"\"\"\n super(CatBoost, self).__init__(params)\n\n def _fit(self, X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id,\n pairs_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot,\n column_description, verbose_eval, metric_period, silent, early_stopping_rounds,\n save_snapshot, snapshot_file, snapshot_interval):\n\n params = deepcopy(self._init_params)\n if params is None:\n params = {}\n\n _process_synonyms(params)\n\n if 'cat_features' in params:\n if isinstance(X, Pool):\n if set(X.get_cat_feature_indices()) != set(params['cat_features']):\n raise CatboostError(\"categorical features in the model are set to \" + str(params['cat_features']) +\n \" and train dataset categorical features are set to \" +\n str(X.get_cat_feature_indices()))\n elif isinstance(X, FeaturesData):\n raise CatboostError(\"Categorical features are set in the model. It is not allowed to use FeaturesData type for training dataset.\")\n else:\n if cat_features is not None and set(cat_features) != set(params['cat_features']):\n raise CatboostError(\"categorical features in the model are set to \" + str(params['cat_features']) +\n \". 
categorical features passed to fit function are set to \" + str(cat_features))\n cat_features = params['cat_features']\n del params['cat_features']\n\n metric_period, verbose, logging_level = _process_verbose(metric_period, verbose, logging_level, verbose_eval, silent)\n\n if metric_period is not None:\n params['metric_period'] = metric_period\n if logging_level is not None:\n params['logging_level'] = logging_level\n if verbose is not None:\n params['verbose'] = verbose\n if use_best_model is not None:\n params['use_best_model'] = use_best_model\n\n if early_stopping_rounds is not None:\n params['od_type'] = 'Iter'\n params['od_wait'] = early_stopping_rounds\n if 'od_pval' in params:\n del params['od_pval']\n\n if save_snapshot is not None:\n params['save_snapshot'] = save_snapshot\n\n if snapshot_file is not None:\n params['snapshot_file'] = snapshot_file\n\n if snapshot_interval is not None:\n params['snapshot_interval'] = snapshot_interval\n\n _check_param_types(params)\n params = _params_type_cast(params)\n _check_train_params(params)\n\n train_pool = _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description)\n if train_pool.is_empty_:\n raise CatboostError(\"X is empty.\")\n\n allow_clear_pool = not isinstance(X, Pool)\n\n eval_set_list = eval_set if isinstance(eval_set, list) else [eval_set]\n eval_sets = []\n eval_total_row_count = 0\n for eval_set in eval_set_list:\n if isinstance(eval_set, Pool):\n eval_sets.append(eval_set)\n eval_total_row_count += eval_sets[-1].num_row()\n if eval_sets[-1].num_row() == 0:\n raise CatboostError(\"Empty 'eval_set' in Pool\")\n elif isinstance(eval_set, STRING_TYPES):\n eval_sets.append(Pool(eval_set, column_description=column_description))\n eval_total_row_count += eval_sets[-1].num_row()\n if eval_sets[-1].num_row() == 0:\n raise CatboostError(\"Empty 'eval_set' in file {}\".format(eval_set))\n elif isinstance(eval_set, tuple):\n if len(eval_set) != 2:\n raise CatboostError(\"Invalid shape of 'eval_set': {}, must be (X, y).\".format(str(tuple(type(_) for _ in eval_set))))\n eval_sets.append(Pool(eval_set[0], eval_set[1], cat_features=train_pool.get_cat_feature_indices()))\n eval_total_row_count += eval_sets[-1].num_row()\n if eval_sets[-1].num_row() == 0:\n raise CatboostError(\"Empty 'eval_set' in tuple\")\n elif eval_set is None:\n if len(eval_set_list) > 1:\n raise CatboostError(\"Multiple eval set shall not contain None\")\n else:\n raise CatboostError(\"Invalid type of 'eval_set': {}, while expected Pool or (X, y) or filename, or list thereof.\".format(type(eval_set)))\n\n if self.get_param('use_best_model') and eval_total_row_count == 0:\n raise CatboostError(\"To employ param {'use_best_model': True} provide non-empty 'eval_set'.\")\n\n with log_fixup(), plot_wrapper(plot, self.get_params()):\n self._train(train_pool, eval_sets, params, allow_clear_pool)\n\n if (not self._object._has_leaf_weights_in_model()) and allow_clear_pool:\n train_pool = _build_train_pool(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id, pairs_weight, baseline, column_description)\n setattr(\n self,\n \"_feature_importance\",\n self.get_feature_importance(train_pool, EFstrType.FeatureImportance)\n )\n\n if 'loss_function' in params and self._is_classification_objective(params['loss_function']):\n setattr(self, \"_classes\", np.unique(train_pool.get_label()))\n return self\n\n def fit(self, X, y=None, cat_features=None, pairs=None, sample_weight=None, 
group_id=None,\n group_weight=None, subgroup_id=None, pairs_weight=None, baseline=None, use_best_model=None,\n eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,\n verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Fit the CatBoost model.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or catboost.FeaturesData\n or string.\n If not catboost.Pool or catboost.FeaturesData it must be 2 dimensional Feature matrix\n or string - file with dataset.\n\n Must be non-empty (contain > 0 objects)\n\n y : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Labels, 1 dimensional array like.\n Use only if X is not catboost.Pool.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Use only if X is not catboost.Pool and not catboost.FeaturesData\n\n pairs : list or numpy.array or pandas.DataFrame\n The pairs description.\n If list or numpy.arrays or pandas.DataFrame, giving 2 dimensional.\n The shape should be Nx2, where N is the pairs' count. The first element of pair is\n the index of winner object in training set. The second element of pair is\n the index of loser object in training set.\n\n sample_weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Instance weights, 1 dimensional array like.\n\n group_id : list or numpy.array, optional (default=None)\n group id for each instance.\n If not None, giving 1 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n group_weight : list or numpy.array, optional (default=None)\n Group weight for each instance.\n If not None, giving 1 dimensional array like data.\n\n subgroup_id : list or numpy.array, optional (default=None)\n subgroup id for each instance.\n If not None, giving 1 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n pairs_weight : list or numpy.array, optional (default=None)\n Weight for each pair.\n If not None, giving 1 dimensional array like pairs.\n\n baseline : list or numpy.array, optional (default=None)\n If not None, giving 2 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n use_best_model : bool, optional (default=None)\n Flag to use best model\n\n eval_set : catboost.Pool, or list of catboost.Pool, or list of (X, y) tuples, optional (default=None)\n Used as a validation set for early-stopping.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n silent : bool\n If silent is True, logging_level is set to Silent.\n If silent is False, logging_level is set to Verbose.\n\n verbose_eval : bool or int\n Synonym for verbose. 
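# --- Usage sketch: pairwise / grouped training data ---------------------------------
# A minimal, hypothetical illustration of the `pairs` and `group_id` arguments
# described in the fit() docstring above. The toy data, group layout and the
# 'PairLogit' loss are placeholder assumptions, not recommendations.
import numpy as np
from catboost import CatBoost

X = np.random.rand(8, 3)                      # 8 documents, 3 numeric features
y = np.random.rand(8)                         # relevance labels
group_id = [0, 0, 0, 0, 1, 1, 1, 1]           # two query groups
pairs = [[0, 1], [2, 3], [4, 5]]              # rows of (winner index, loser index)

ranker = CatBoost({'loss_function': 'PairLogit', 'iterations': 50, 'logging_level': 'Silent'})
ranker.fit(X, y, group_id=group_id, pairs=pairs)
# --------------------------------------------------------------------------------------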
Only one of these parameters should be set.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait parameter set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost\n \"\"\"\n return self._fit(X, y, cat_features, pairs, sample_weight, group_id, group_weight, subgroup_id,\n pairs_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot,\n column_description, verbose_eval, metric_period, silent, early_stopping_rounds,\n save_snapshot, snapshot_file, snapshot_interval)\n\n def _predict(self, data, prediction_type, ntree_start, ntree_end, thread_count, verbose):\n verbose = verbose or self.get_param('verbose')\n if verbose is None:\n verbose = False\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use predict(). Use fit() to train model. Then use predict().\")\n if data is None:\n raise CatboostError(\"Data to predict must be initialized\")\n is_data_single_object = False\n if not isinstance(data, Pool):\n if isinstance(data, ARRAY_TYPES):\n if not isinstance(data[0], ARRAY_TYPES):\n data = [data]\n is_data_single_object = True\n data = Pool(\n data=data,\n cat_features=self._get_cat_feature_indices() if not isinstance(data, FeaturesData) else None\n )\n if not isinstance(prediction_type, STRING_TYPES):\n raise CatboostError(\"Invalid prediction_type type={}: must be str().\".format(type(prediction_type)))\n if prediction_type not in ('Class', 'RawFormulaVal', 'Probability'):\n raise CatboostError(\"Invalid value of prediction_type={}: must be Class, RawFormulaVal or Probability.\".format(prediction_type))\n loss_function_type = self.get_param('loss_function')\n if loss_function_type is None:\n loss_function_type = self.get_param('objective')\n # TODO(kirillovs): very bad solution. 
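# --- Usage sketch: fitting a CatBoost model from a params dict ------------------------
# A minimal sketch of the fit() flow documented above, using an eval_set tuple and
# early_stopping_rounds. The synthetic data and parameter values are assumptions.
import numpy as np
from catboost import CatBoost

X_train, y_train = np.random.rand(100, 5), np.random.randint(0, 2, 100)
X_val, y_val = np.random.rand(30, 5), np.random.randint(0, 2, 30)

model = CatBoost({'loss_function': 'Logloss', 'iterations': 200, 'logging_level': 'Silent'})
model.fit(X_train, y_train,
          eval_set=(X_val, y_val),       # validation data for the overfitting detector
          early_stopping_rounds=20)      # sets od_type='Iter', od_wait=20 as shown above
raw_scores = model.predict(X_val)        # 'RawFormulaVal' is the default prediction_type
# ----------------------------------------------------------------------------------------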
user should be able to use custom multiclass losses\n if loss_function_type is not None and (loss_function_type == 'MultiClass' or loss_function_type == 'MultiClassOneVsAll'):\n return np.transpose(self._base_predict_multi(data, prediction_type, ntree_start, ntree_end, thread_count, verbose))\n predictions = np.array(self._base_predict(data, prediction_type, ntree_start, ntree_end, thread_count, verbose))\n if prediction_type == 'Probability':\n predictions = np.transpose([1 - predictions, predictions])\n if is_data_single_object:\n return predictions[0]\n return predictions\n\n def predict(self, data, prediction_type='RawFormulaVal', ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n or catboost.FeaturesData or single object\n Data to predict.\n\n prediction_type : string, optional (default='RawFormulaVal')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool, optional (default=False)\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single answer object prediction for single object\n \"\"\"\n return self._predict(data, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def _staged_predict(self, data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose):\n verbose = verbose or self.get_param('verbose')\n if verbose is None:\n verbose = False\n if not self.is_fitted() or self.tree_count_ is None:\n raise CatboostError(\"There is no trained model to use staged_predict(). Use fit() to train model. 
Then use staged_predict().\")\n if data is None:\n raise CatboostError(\"Data to predict must be initialized\")\n is_data_single_object = False\n if not isinstance(data, Pool):\n if isinstance(data, ARRAY_TYPES):\n if not isinstance(data[0], ARRAY_TYPES):\n data = [data]\n is_data_single_object = True\n data = Pool(\n data=data,\n cat_features=self._get_cat_feature_indices() if not isinstance(data, FeaturesData) else None\n )\n if not isinstance(prediction_type, STRING_TYPES):\n raise CatboostError(\"Invalid prediction_type type={}: must be str().\".format(type(prediction_type)))\n if prediction_type not in ('Class', 'RawFormulaVal', 'Probability'):\n raise CatboostError(\"Invalid value of prediction_type={}: must be Class, RawFormulaVal or Probability.\".format(prediction_type))\n if ntree_end == 0:\n ntree_end = self.tree_count_\n staged_predict_iterator = self._staged_predict_iterator(data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n loss_function = self.get_param('loss_function')\n if loss_function is None:\n loss_function = self.get_param('objective')\n while True:\n predictions = staged_predict_iterator.next()\n if loss_function is not None and (loss_function == 'MultiClass' or loss_function == 'MultiClassOneVsAll'):\n predictions = np.transpose(predictions)\n else:\n predictions = np.array(predictions[0])\n if prediction_type == 'Probability':\n predictions = np.transpose([1 - predictions, predictions])\n if is_data_single_object:\n predictions = predictions[0]\n yield predictions\n\n def staged_predict(self, data, prediction_type='RawFormulaVal', ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n prediction_type : string, optional (default='RawFormulaVal')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def get_cat_feature_indices(self):\n if not self.is_fitted():\n raise CatboostError(\"Model is not fitted\")\n return self._get_cat_feature_indices()\n\n def _eval_metrics(self, data, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir, plot):\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use predict(). 
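# --- Usage sketch: staged predictions --------------------------------------------------
# staged_predict(), documented above, yields one prediction array per eval_period
# trees. Assumes `model` and `X_val` from the fit sketch earlier in this file.
stages = list(model.staged_predict(X_val, eval_period=10))
print(len(stages), stages[-1][:3])       # number of snapshots, final raw predictions
# ------------------------------------------------------------------------------------------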
Use fit() to train model. Then use predict().\")\n if not isinstance(data, Pool):\n raise CatboostError(\"Invalid data type={}, must be catboost.Pool.\".format(type(data)))\n if data.is_empty_:\n raise CatboostError(\"Data is empty.\")\n if not isinstance(metrics, ARRAY_TYPES) and not isinstance(metrics, STRING_TYPES):\n raise CatboostError(\"Invalid metrics type={}, must be list() or str().\".format(type(metrics)))\n if not all(map(lambda metric: isinstance(metric, string_types), metrics)):\n raise CatboostError(\"Invalid metric type: must be string().\")\n if tmp_dir is None:\n tmp_dir = tempfile.mkdtemp()\n\n with log_fixup(), plot_wrapper(plot, self.get_params()):\n metrics_score, metric_names = self._base_eval_metrics(data, metrics, ntree_start, ntree_end, eval_period, thread_count, _get_train_dir(self.get_params()), tmp_dir)\n\n return dict(zip(metric_names, metrics_score))\n\n def eval_metrics(self, data, metrics, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, tmp_dir=None, plot=False):\n \"\"\"\n Calculate metrics.\n\n Parameters\n ----------\n data : catboost.Pool\n Data to eval metrics.\n\n metrics : list of strings\n List of eval metrics.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n tmp_dir : string (default=None)\n The name of the temporary directory for intermediate results.\n If None, then the name will be generated.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n Returns\n -------\n prediction : dict: metric -> array of shape [(ntree_end - ntree_start) / eval_period]\n \"\"\"\n return self._eval_metrics(data, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir, plot)\n\n def create_metric_calcer(self, metrics, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, tmp_dir=None):\n \"\"\"\n Create batch metric calcer. Could be used to aggregate metric on several pools\n Parameters\n ----------\n Same as in eval_metrics except data\n Returns\n -------\n BatchMetricCalcer object\n\n Usage example\n -------\n # Large dataset is partitioned into parts [part1, part2]\n model.fit(params)\n batch_calcer = model.create_metric_calcer(['Logloss'])\n batch_calcer.add_pool(part1)\n batch_calcer.add_pool(part2)\n metrics = batch_calcer.eval_metrics()\n \"\"\"\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use predict(). Use fit() to train model. Then use predict().\")\n return BatchMetricCalcer(self._object, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir)\n\n @property\n def feature_importances_(self):\n feature_importances_ = getattr(self, \"_feature_importance\", None)\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use `feature_importances_`. Use fit() to train model. 
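# --- Usage sketch: eval_metrics ----------------------------------------------------------
# Evaluating metrics on a validation Pool, as documented above. Assumes `model`,
# `X_val` and `y_val` from the earlier fit sketch; the metric names are examples.
from catboost import Pool

val_pool = Pool(X_val, y_val)
scores = model.eval_metrics(val_pool, metrics=['Logloss', 'AUC'], eval_period=10)
print(scores['Logloss'][-1], scores['AUC'][-1])   # values at the last evaluated interval
# --------------------------------------------------------------------------------------------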
Then use `feature_importances_`.\")\n return np.array(feature_importances_)\n\n def get_feature_importance(self, data=None, fstr_type=EFstrType.FeatureImportance, prettified=False, thread_count=-1, verbose=False):\n \"\"\"\n Parameters\n ----------\n data : catboost.Pool or None\n Data to get feature importance.\n If type == Shap data is a dataset. For every object in this dataset feature importances will be calculated.\n If type == 'FeatureImportance', data is None or train dataset (in case if model was explicitly trained with flag store no leaf weights).\n\n fstr_type : EFStrType or string (deprecated, converted to EFstrType), optional\n (default=EFstrType.FeatureImportance)\n Possible values:\n - FeatureImportance\n Calculate score for every feature.\n - ShapValues\n Calculate SHAP Values for every object.\n - Interaction\n Calculate pairwise score between every feature.\n\n prettified : bool, optional (default=False)\n used only for FeatureImportance fstr_type\n change returned data format to the list of (feature_id, importance) pairs sorted by importance\n\n thread_count : int, optional (default=-1)\n Number of threads.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool or int\n If False, then evaluation is not logged. If True, then each possible iteration is logged.\n If a positive integer, then it stands for the size of batch N. After processing each batch, print progress\n and remaining time.\n\n\n Returns\n -------\n depends on fstr_type:\n - FeatureImportance with prettified=False (default)\n list of length [n_features] with feature_importance values (float) for feature\n - FeatureImportance with prettified=True\n list of length [n_features] with (feature_id (string), feature_importance (float)) pairs, sorted by feature_importance in descending order\n - ShapValues\n np.array of shape (n_objects, n_features + 1) with Shap values (float) for (object, feature).\n In case of multiclass the returned value is np.array of shape\n (n_objects, classes_count, n_features + 1). 
For each object it contains Shap values (float).\n Values are calculated for RawFormulaVal predictions.\n - Interaction\n list of length [n_features] of 3-element lists of (first_feature_index, second_feature_index, interaction_score (float))\n \"\"\"\n\n if not isinstance(verbose, bool) and not isinstance(verbose, int):\n raise CatboostError('verbose should be bool or int.')\n verbose = int(verbose)\n if verbose < 0:\n raise CatboostError('verbose should be non-negative.')\n\n fstr_type = enum_from_enum_or_str(EFstrType, fstr_type)\n empty_data_is_ok = (((fstr_type == EFstrType.FeatureImportance) and self._object._has_leaf_weights_in_model())\n or (fstr_type == EFstrType.Interaction))\n if not empty_data_is_ok:\n if not isinstance(data, Pool):\n raise CatboostError(\"Invalid metric type={}, must be catboost.Pool.\".format(type(data)))\n if data.is_empty_:\n raise CatboostError(\"data is empty.\")\n\n with log_fixup():\n fstr, feature_names = self._calc_fstr(fstr_type, data, thread_count, verbose)\n if fstr_type == EFstrType.FeatureImportance:\n feature_importances = [value[0] for value in fstr]\n if prettified:\n return sorted(zip(feature_names, feature_importances), key=itemgetter(1), reverse=True)\n else:\n return feature_importances\n if fstr_type == EFstrType.ShapValues:\n if isinstance(fstr[0][0], ARRAY_TYPES):\n return np.array([np.array([np.array([\n value for value in dimension]) for dimension in doc]) for doc in fstr])\n else:\n return np.array([np.array([value for value in doc]) for doc in fstr])\n elif fstr_type == EFstrType.Interaction:\n return [[int(row[0]), int(row[1]), row[2]] for row in fstr]\n\n def get_object_importance(self, pool, train_pool, top_size=-1, ostr_type='Average', update_method='SinglePoint', importance_values_sign='All', thread_count=-1, verbose=False):\n \"\"\"\n This is the implementation of the LeafInfluence algorithm from the following paper:\n https://arxiv.org/pdf/1802.06640.pdf\n\n Parameters\n ----------\n pool : Pool\n The pool for which you want to evaluate the object importances.\n\n train_pool : Pool\n The pool on which the model was trained.\n\n top_size : int (default=-1)\n Method returns the result of the top_size most important train objects.\n If -1, then the top size is not limited.\n\n ostr_type : string, optional (default='Average')\n Possible values:\n - Average (Method returns the mean train objects scores for all input objects)\n - PerObject (Method returns the train objects scores for every input object)\n\n importance_values_sign : string, optional (default='All')\n Method returns only Positive, Negative or All values.\n Possible values:\n - Positive\n - Negative\n - All\n\n update_method : string, optional (default='SinglePoint')\n Possible values:\n - SinglePoint\n - TopKLeaves (It is posible to set top size : TopKLeaves:top=2)\n - AllPoints\n Description of the update set methods are given in section 3.1.3 of the paper.\n\n thread_count : int, optional (default=-1)\n Number of threads.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool or int\n If False, then evaluation is not logged. If True, then each possible iteration is logged.\n If a positive integer, then it stands for the size of batch N. 
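# --- Usage sketch: feature importance and SHAP values --------------------------------------
# The calls below follow the get_feature_importance() options documented above.
# Assumes `model` and `val_pool` from the earlier sketches.
from catboost import EFstrType

plain  = model.get_feature_importance(val_pool)                    # list of floats
ranked = model.get_feature_importance(val_pool, prettified=True)   # sorted (name, score) pairs
shap   = model.get_feature_importance(val_pool, fstr_type=EFstrType.ShapValues)
print(shap.shape)                        # (n_objects, n_features + 1), last column is the bias term
# -----------------------------------------------------------------------------------------------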
After processing each batch, print progress\n and remaining time.\n\n Returns\n -------\n object_importances : tuple of two arrays (indices and scores) of shape = [top_size]\n \"\"\"\n\n if not isinstance(verbose, bool) and not isinstance(verbose, int):\n raise CatboostError('verbose should be bool or int.')\n verbose = int(verbose)\n if verbose < 0:\n raise CatboostError('verbose should be non-negative.')\n\n with log_fixup():\n result = self._calc_ostr(train_pool, pool, top_size, ostr_type, update_method, importance_values_sign, thread_count, verbose)\n return result\n\n def shrink(self, ntree_end, ntree_start=0):\n \"\"\"\n Shrink the model.\n\n Parameters\n ----------\n ntree_end: int\n Leave the trees with indices from the interval [ntree_start, ntree_end) (zero-based indexing).\n ntree_start: int, optional (default=0)\n Leave the trees with indices from the interval [ntree_start, ntree_end) (zero-based indexing).\n \"\"\"\n if ntree_start > ntree_end:\n raise CatboostError(\"ntree_start should be less than ntree_end.\")\n self._base_shrink(ntree_start, ntree_end)\n\n def drop_unused_features(self):\n \"\"\"\n Drop unused features information from model\n \"\"\"\n self._base_drop_unused_features()\n\n def save_model(self, fname, format=\"cbm\", export_parameters=None, pool=None):\n \"\"\"\n Save the model to a file.\n\n Parameters\n ----------\n fname : string\n Output file name.\n format : string\n Either 'cbm' for catboost binary format, or 'coreml' to export into Apple CoreML format, or 'cpp' to export as C++ code, or 'python' to export as Python code.\n export_parameters : dict\n Parameters for CoreML export:\n * prediction_type : string - either 'probability' or 'raw'\n * coreml_description : string\n * coreml_model_version : string\n * coreml_model_author : string\n * coreml_model_license: string\n pool : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or catboost.FeaturesData\n Training pool.\n \"\"\"\n if not self.is_fitted():\n raise CatboostError(\"There is no trained model to use save_model(). Use fit() to train model. 
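# --- Usage sketch: object importance and shrinking ------------------------------------------
# Hypothetical use of get_object_importance() and shrink() documented above. Assumes
# `model`, `val_pool`, `X_train` and `y_train` from the earlier sketches.
from catboost import Pool

train_pool = Pool(X_train, y_train)
indices, scores = model.get_object_importance(val_pool, train_pool, top_size=5)
model.shrink(ntree_end=model.tree_count_ // 2)   # destructive: drops the later half of the trees
# ------------------------------------------------------------------------------------------------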
Then use save_model().\")\n if not isinstance(fname, STRING_TYPES):\n raise CatboostError(\"Invalid fname type={}: must be str().\".format(type(fname)))\n if pool is not None and not isinstance(pool, Pool):\n pool = Pool(\n data=pool,\n cat_features=self._get_cat_feature_indices() if not isinstance(pool, FeaturesData) else None\n )\n self._save_model(fname, format, export_parameters, pool)\n\n def load_model(self, fname, format='catboost'):\n \"\"\"\n Load model from a file.\n\n Parameters\n ----------\n fname : string\n Input file name.\n \"\"\"\n if not isinstance(fname, STRING_TYPES):\n raise CatboostError(\"Invalid fname type={}: must be str().\".format(type(fname)))\n self._load_model(fname, format)\n return self\n\n def get_param(self, key):\n \"\"\"\n Get param value from CatBoost model.\n\n Parameters\n ----------\n key : string\n The key to get param value from.\n\n Returns\n -------\n value :\n The param value of the key, returns None if param do not exist.\n \"\"\"\n params = self.get_params()\n if params is None:\n return {}\n return params.get(key)\n\n def get_params(self, deep=True):\n \"\"\"\n Get all params from CatBoost model.\n\n Returns\n -------\n result : dict\n Dictionary of {param_key: param_value}.\n \"\"\"\n params = self._init_params.copy()\n if deep:\n return deepcopy(params)\n else:\n return params\n\n def set_params(self, **params):\n \"\"\"\n Set parameters into CatBoost model.\n\n Parameters\n ----------\n **params : key=value format\n List of key=value paris. Example: model.set_params(iterations=500, thread_count=2).\n \"\"\"\n for key, value in iteritems(params):\n self._init_params[key] = value\n return self\n\n\nclass CatBoostClassifier(CatBoost):\n \"\"\"\n Implementation of the scikit-learn API for CatBoost classification.\n\n Parameters\n ----------\n iterations : int, [default=500]\n Max count of trees.\n range: [1,+inf]\n learning_rate : float, [default=0.03]\n Step size shrinkage used in update to prevents overfitting.\n range: (0,1]\n depth : int, [default=6]\n Depth of a tree. All trees are the same depth.\n range: [1,+inf]\n l2_leaf_reg : int, [default=3]\n L2 regularization term on weights.\n range: [0,+inf]\n model_size_reg : float, [default=None]\n Model size regularization coefficient.\n range: [0,+inf]\n rsm : float, [default=None]\n Subsample ratio of columns when constructing each tree.\n range: (0,1]\n loss_function : string or object, [default='Logloss']\n The metric to use in training and also selector of the machine learning\n problem to solve. If string, then the name of a supported metric,\n optionally suffixed with parameter description.\n If object, it shall provide methods 'calc_ders_range' or 'calc_ders_multi'.\n border_count : int, [default=32]\n The number of partitions for Num features. Used in the preliminary calculation.\n range: (0,+inf]\n feature_border_type : string, [default='MinEntropy']\n Type of binarization target. Used only in Reggression tasks.\n Possible values:\n - 'Median'\n - 'UniformAndQuantiles'\n - 'GreedyLogSum'\n - 'MaxLogSum'\n - 'MinEntropy'\n fold_permutation_block_size : int, [default=1]\n To accelerate the learning.\n The recommended value is within [1, 256]. 
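# --- Usage sketch: saving, loading and parameter access --------------------------------------
# save_model()/load_model() and the params helpers documented above; 'model.cbm' is a
# hypothetical file name, and `model`, `X_val` come from the earlier sketches.
from catboost import CatBoost

model.save_model('model.cbm')                  # binary CatBoost format by default
restored = CatBoost().load_model('model.cbm')
print(restored.predict(X_val)[:3])             # the restored model predicts like the original
print(model.get_param('iterations'))           # read back a single training parameter
model.set_params(logging_level='Verbose')      # updates stored params only, not the trained trees
# --------------------------------------------------------------------------------------------------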
On small samples, must be set to 1.\n range: [1,+inf]\n od_pval : float, [default=None]\n Use overfitting detector to stop training when reaching a specified threshold.\n Can be used only with eval_set.\n range: [0,1]\n od_wait : int, [default=None]\n Number of iterations which overfitting detector will wait after new best error.\n od_type : string, [default=None]\n Type of overfitting detector which will be used in program.\n Posible values:\n - 'IncToDec'\n - 'Iter'\n For 'Iter' type od_pval must not be set.\n If None, then od_type=IncToDec.\n nan_mode : string, [default=None]\n Way to process nan-values.\n Possible values:\n - 'Forbidden' - raises an exception if there is nan value in dataset.\n - 'Min' - each nan float feature will be processed as minimum value from dataset.\n - 'Max' - each nan float feature will be processed as maximum value from dataset.\n If None, then nan_mode=Min.\n counter_calc_method : string, [default=None]\n The method used to calculate counters for dataset with Counter type.\n Possible values:\n - 'PrefixTest' - only objects up to current in the test dataset are considered\n - 'FullTest' - all objects are considered in the test dataset\n - 'SkipTest' - Objects from test dataset are not considered\n - 'Full' - all objects are considered for both learn and test dataset\n If None, then counter_calc_method=PrefixTest.\n leaf_estimation_iterations : int, [default=None]\n The number of steps in the gradient when calculating the values in the leaves.\n If None, then leaf_estimation_iterations=1.\n range: [1,+inf]\n leaf_estimation_method : string, [default='Gradient']\n The method used to calculate the values in the leaves.\n Possible values:\n - 'Newton'\n - 'Gradient'\n thread_count : int, [default=None]\n Number of parallel threads used to run CatBoost.\n If None, then the number of thread is set to the number of cores.\n range: [1,+inf]\n random_seed : int, [default=None]\n Random number seed.\n If None, used random number.\n range: [0,+inf]\n use_best_model : bool, [default=None]\n To limit the number of trees in predict() using information about the optimal value of the error function.\n Can be used only with eval_set.\n best_model_min_trees : int, [default=None]\n The minimal number of trees the best model should have.\n verbose: bool\n When set to True, logging_level is set to 'Verbose'.\n When set to False, logging_level is set to 'Silent'.\n silent: bool, synonym for verbose\n logging_level : string, [default='Verbose']\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n metric_period : int, [default=1]\n The frequency of iterations to print the information to stdout. 
The value should be a positive integer.\n simple_ctr: list of strings, [default=None]\n Binarization settings for categorical features.\n Format : see documentation\n Example: ['Borders:CtrBorderCount=5:Prior=0:Prior=0.5', 'BinarizedTargetMeanValue:TargetBorderCount=10:TargetBorderType=MinEntropy', ...]\n CTR types:\n CPU and GPU\n - 'Borders'\n - 'Buckets'\n CPU only\n - 'BinarizedTargetMeanValue'\n - 'Counter'\n GPU only\n - 'FloatTargetMeanValue'\n - 'FeatureFreq'\n Number_of_borders, binarization type, target borders and binarizations, priors are optional parametrs\n combinations_ctr: list of strings, [default=None]\n per_feature_ctr: list of strings, [default=None]\n ctr_leaf_count_limit : int, [default=None]\n The maximum number of leaves with categorical features.\n If the number of leaves exceeds the specified limit, some leaves are discarded.\n The leaves to be discarded are selected as follows:\n - The leaves are sorted by the frequency of the values.\n - The top N leaves are selected, where N is the value specified in the parameter.\n - All leaves starting from N+1 are discarded.\n This option reduces the resulting model size\n and the amount of memory required for training.\n Note that the resulting quality of the model can be affected.\n range: [1,+inf] (for zero limit use ignored_features)\n store_all_simple_ctr : bool, [default=None]\n Ignore categorical features, which are not used in feature combinations,\n when choosing candidates for exclusion.\n Use this parameter with ctr_leaf_count_limit only.\n max_ctr_complexity : int, [default=4]\n The maximum number of Categ features that can be combined.\n range: [0,+inf]\n has_time : bool, [default=False]\n To use the order in which objects are represented in the input data\n (do not perform a random permutation on the stages of converting\n the Categ features to Num and the choice of a tree structure).\n allow_const_label : bool, [default=False]\n To allow the constant label value in dataset.\n classes_count : int, [default=None]\n The upper limit for the numeric class label.\n Defines the number of classes for multiclassification.\n Only non-negative integers can be specified.\n The given integer should be greater than any of the target values.\n If this parameter is specified the labels for all classes in the input dataset\n should be smaller than the given value.\n If several of 'classes_count', 'class_weights', 'class_names' parameters are defined\n the numbers of classes specified by each of them must be equal.\n class_weights : list of floats, [default=None]\n Classes weights. The values are used as multipliers for the object weights.\n If None, all classes are supposed to have weight one.\n If several of 'classes_count', 'class_weights', 'class_names' parameters are defined\n the numbers of classes specified by each of them must be equal.\n class_names: list of strings, [default=None]\n Class names. 
Allows to redefine the default values for class labels (integer numbers).\n If several of 'classes_count', 'class_weights', 'class_names' parameters are defined\n the numbers of classes specified by each of them must be equal.\n one_hot_max_size : int, [default=None]\n Convert the feature to float\n if the number of different values that it takes exceeds the specified value.\n Ctrs are not calculated for such features.\n random_strength : float, [default=1]\n Score standard deviation multiplier.\n name : string, [default='experiment']\n The name that should be displayed in the visualization tools.\n ignored_features : list, [default=None]\n Indices of features that should be excluded when training.\n train_dir : string, [default=None]\n The directory in which you want to record generated in the process of learning files.\n custom_metric : string or list of strings, [default=None]\n To use your own metric function.\n custom_loss: alias to custom_metric\n eval_metric : string or object, [default=None]\n To optimize your custom metric in loss.\n bagging_temperature : float, [default=None]\n Controls intensity of Bayesian bagging. The higher the temperature the more aggressive bagging is.\n Typical values are in range [0, 1] (0 - no bagging, 1 - default).\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n fold_len_multiplier : float, [default=None]\n Fold length multiplier. Should be greater than 1\n used_ram_limit : string or number, [default=None]\n Set a limit on memory consumption (value like '1.2gb' or 1.2e9).\n WARNING: Currently this option affects CTR memory usage only.\n gpu_ram_part : float, [default=0.95]\n Fraction of the GPU RAM to use for training, a value from (0, 1].\n pinned_memory_size: int [default=None]\n Size of additional CPU pinned memory used for GPU learning,\n usually is estimated automatically, thus usually should not be set.\n allow_writing_files : bool, [default=True]\n If this flag is set to False, no files with different diagnostic info will be created during training.\n With this flag no snapshotting can be done. Plus visualisation will not\n work, because visualisation uses files that are created and updated during training.\n final_ctr_computation_mode : string, [default='Default']\n Possible values:\n - 'Default' - Compute final ctrs for all pools.\n - 'Skip' - Skip final ctr computation. WARNING: model without ctrs can't be applied.\n approx_on_full_history : bool, [default=False]\n If this flag is set to True, each approximated value is calculated using all the preceeding rows in the fold (slower, more accurate).\n If this flag is set to False, each approximated value is calculated using only the beginning 1/fold_len_multiplier fraction of the fold (faster, slightly less accurate).\n boosting_type : string, default value depends on object count and feature count in train dataset and on learning mode.\n Boosting scheme.\n Possible values:\n - 'Ordered' - Gives better quality, but may slow down the training.\n - 'Plain' - The classic gradient boosting scheme. 
May result in quality degradation, but does not slow down the training.\n task_type : string, [default=None]\n The calcer type used to train the model.\n Possible values:\n - 'CPU'\n - 'GPU'\n device_config : string, [default=None], deprecated, use devices instead\n devices : list or string, [default=None], GPU devices to use.\n String format is: '0' for 1 device or '0:1:3' for multiple devices or '0-3' for range of devices.\n List format is : [0] for 1 device or [0,1,3] for multiple devices.\n\n bootstrap_type : string, Bayesian, Bernoulli, Poisson.\n Default bootstrap is Bayesian.\n Poisson bootstrap is supported only on GPU.\n\n subsample : float, [default=None]\n Sample rate for bagging. This parameter can be used Poisson or Bernoully bootstrap types.\n\n dev_score_calc_obj_block_size: int, [default=5000000]\n CPU only. Size of block of samples in score calculation. Should be > 0\n Used only for learning speed tuning.\n Changing this parameter can affect results due to numerical accuracy differences\n\n max_depth : int, Synonym for depth.\n\n n_estimators : int, synonym for iterations.\n\n num_trees : int, synonym for iterations.\n\n num_boost_round : int, synonym for iterations.\n\n colsample_bylevel : float, synonym for rsm.\n\n random_state : int, synonym for random_seed.\n\n reg_lambda : float, synonym for l2_leaf_reg.\n\n objective : string, synonym for loss_function.\n\n eta : float, synonym for learning_rate.\n\n max_bin : float, synonym for border_count.\n\n scale_pos_weight : float, synonym for class_weights.\n Can be used only for binary classification. Sets weight multiplier for\n class 1 to scale_pos_weight value.\n\n metadata : dict, string to string key-value pairs to be stored in model metadata storage\n\n early_stopping_rounds : int\n Synonym for od_wait. 
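# --- Usage sketch: a parameter-rich classifier ------------------------------------------------
# Illustrates several of the constructor arguments catalogued above; the values are
# arbitrary placeholders, not tuned recommendations.
from catboost import CatBoostClassifier

clf = CatBoostClassifier(
    iterations=500,
    learning_rate=0.03,
    depth=6,
    l2_leaf_reg=3,
    loss_function='Logloss',
    eval_metric='AUC',
    custom_metric=['Precision', 'Recall'],
    class_weights=[1.0, 5.0],          # up-weight the rarer positive class
    one_hot_max_size=10,               # one-hot encode low-cardinality categorical features
    od_type='Iter', od_wait=30,        # overfitting detector, see od_type/od_wait above
    random_seed=42,
    logging_level='Silent')
# ----------------------------------------------------------------------------------------------------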
Only one of these parameters should be set.\n\n cat_features : list of numpy.array of integer feature indices.\n \"\"\"\n def __init__(\n self,\n iterations=None,\n learning_rate=None,\n depth=None,\n l2_leaf_reg=None,\n model_size_reg=None,\n rsm=None,\n loss_function='Logloss',\n border_count=None,\n feature_border_type=None,\n fold_permutation_block_size=None,\n od_pval=None,\n od_wait=None,\n od_type=None,\n nan_mode=None,\n counter_calc_method=None,\n leaf_estimation_iterations=None,\n leaf_estimation_method=None,\n thread_count=None,\n random_seed=None,\n use_best_model=None,\n best_model_min_trees=None,\n verbose=None,\n silent=None,\n logging_level=None,\n metric_period=None,\n ctr_leaf_count_limit=None,\n store_all_simple_ctr=None,\n max_ctr_complexity=None,\n has_time=None,\n allow_const_label=None,\n classes_count=None,\n class_weights=None,\n class_names=None,\n one_hot_max_size=None,\n random_strength=None,\n name=None,\n ignored_features=None,\n train_dir=None,\n custom_loss=None,\n custom_metric=None,\n eval_metric=None,\n bagging_temperature=None,\n save_snapshot=None,\n snapshot_file=None,\n snapshot_interval=None,\n fold_len_multiplier=None,\n used_ram_limit=None,\n gpu_ram_part=None,\n pinned_memory_size=None,\n allow_writing_files=None,\n final_ctr_computation_mode=None,\n approx_on_full_history=None,\n boosting_type=None,\n simple_ctr=None,\n combinations_ctr=None,\n per_feature_ctr=None,\n ctr_description=None,\n task_type=None,\n device_config=None,\n devices=None,\n bootstrap_type=None,\n subsample=None,\n dev_score_calc_obj_block_size=None,\n max_depth=None,\n n_estimators=None,\n num_boost_round=None,\n num_trees=None,\n colsample_bylevel=None,\n random_state=None,\n reg_lambda=None,\n objective=None,\n eta=None,\n max_bin=None,\n scale_pos_weight=None,\n gpu_cat_features_storage=None,\n data_partition=None,\n metadata=None,\n early_stopping_rounds=None,\n cat_features=None\n ):\n params = {}\n not_params = [\"not_params\", \"self\", \"params\", \"__class__\"]\n for key, value in iteritems(locals().copy()):\n if key not in not_params and value is not None:\n params[key] = value\n\n super(CatBoostClassifier, self).__init__(params)\n\n @property\n def classes_(self):\n return getattr(self, \"_classes\", None)\n\n def fit(self, X, y=None, cat_features=None, sample_weight=None, baseline=None, use_best_model=None,\n eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,\n verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Fit the CatBoostClassifier model.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n If not catboost.Pool, 2 dimensional Feature matrix or string - file with dataset.\n\n y : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Labels, 1 dimensional array like.\n Use only if X is not catboost.Pool.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Use only if X is not catboost.Pool.\n\n sample_weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Instance weights, 1 dimensional array like.\n\n baseline : list or numpy.array, optional (default=None)\n If not None, giving 2 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n use_best_model : bool, optional (default=None)\n Flag to use best model\n\n 
eval_set : catboost.Pool or list, optional (default=None)\n A list of (X, y) tuple pairs to use as a validation set for\n early-stopping\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n silent : bool\n If silent is True, logging_level is set to Silent.\n If silent is False, logging_level is set to Verbose.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost\n \"\"\"\n\n params = self._init_params.copy()\n _process_synonyms(params)\n if 'loss_function' in params:\n self._check_is_classification_objective(params['loss_function'])\n\n self._fit(X, y, cat_features, None, sample_weight, None, None, None, None, baseline, use_best_model,\n eval_set, verbose, logging_level, plot, column_description, verbose_eval, metric_period,\n silent, early_stopping_rounds, save_snapshot, snapshot_file, snapshot_interval)\n return self\n\n def predict(self, data, prediction_type='Class', ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n prediction_type : string, optional (default='Class')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. 
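# --- Usage sketch: classifier fit / predict ----------------------------------------------------
# End-to-end use of CatBoostClassifier.fit() and predict() as documented above, with a
# small hypothetical dataset mixing numeric and categorical columns.
from catboost import CatBoostClassifier

X = [[1.0, 'red'], [2.0, 'blue'], [3.0, 'red'], [4.0, 'green']]
y = [0, 1, 0, 1]

clf = CatBoostClassifier(iterations=100, logging_level='Silent')
clf.fit(X, y, cat_features=[1])          # column 1 holds categorical values
labels = clf.predict(X)                  # majority-vote classes ('Class' is the default here)
print(clf.score(X, y))                   # accuracy, per the score() method documented below
# ------------------------------------------------------------------------------------------------------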
This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool, optional (default=False)\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single object\n \"\"\"\n return self._predict(data, prediction_type, ntree_start, ntree_end, thread_count, verbose)\n\n def predict_proba(self, data, ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict class probability with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single object\n \"\"\"\n return self._predict(data, 'Probability', ntree_start, ntree_end, thread_count, verbose)\n\n def staged_predict(self, data, prediction_type='Class', ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n prediction_type : string, optional (default='Class')\n Can be:\n - 'RawFormulaVal' : return raw value.\n - 'Class' : return majority vote class.\n - 'Probability' : return probability for every class.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. 
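# --- Usage sketch: class probabilities -----------------------------------------------------------
# predict_proba(), documented above, returns one row per object with a probability per
# class. Assumes `clf` and `X` from the classifier sketch above; the 0.8 threshold is an
# arbitrary example.
probs = clf.predict_proba(X)             # shape (n_objects, n_classes)
flagged = probs[:, 1] > 0.8              # apply a custom decision threshold to class 1
# --------------------------------------------------------------------------------------------------------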
This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def staged_predict_proba(self, data, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict classification target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, 'Probability', ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def score(self, X, y=None):\n \"\"\"\n Calculate accuracy.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n Data to predict.\n y : list or numpy.array\n True labels.\n\n Returns\n -------\n accuracy : float\n \"\"\"\n if isinstance(X, Pool):\n if X.get_label() is None:\n raise CatboostError(\"Label in X has not initialized.\")\n if y is not None:\n raise CatboostError(\"Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool.\")\n y = X.get_label()\n elif y is None:\n raise CatboostError(\"y should be specified.\")\n correct = []\n y = np.array(y, dtype=np.int32)\n for i, val in enumerate(self.predict(X)):\n correct.append(1 * (y[i] == np.int32(val)))\n return np.mean(correct)\n\n def _check_is_classification_objective(self, loss_function):\n if isinstance(loss_function, str) and not self._is_classification_objective(loss_function):\n raise CatboostError(\"Invalid loss_function='{}': for classifier use \"\n \"Logloss, CrossEntropy, MultiClass, MultiClassOneVsAll or custom objective object\".format(loss_function))\n\n\nclass CatBoostRegressor(CatBoost):\n \"\"\"\n Implementation of the scikit-learn API for CatBoost regression.\n\n Parameters\n ----------\n Like in CatBoostClassifier, except loss_function, classes_count, class_names and class_weights\n\n loss_function : string, [default='RMSE']\n 'RMSE'\n 'MAE'\n 'Quantile:alpha=value'\n 'LogLinQuantile:alpha=value'\n 'Poisson'\n 'MAPE'\n 'Lq:q=value'\n \"\"\"\n def __init__(\n self,\n iterations=None,\n learning_rate=None,\n depth=None,\n l2_leaf_reg=None,\n model_size_reg=None,\n rsm=None,\n loss_function='RMSE',\n border_count=None,\n 
feature_border_type=None,\n fold_permutation_block_size=None,\n od_pval=None,\n od_wait=None,\n od_type=None,\n nan_mode=None,\n counter_calc_method=None,\n leaf_estimation_iterations=None,\n leaf_estimation_method=None,\n thread_count=None,\n random_seed=None,\n use_best_model=None,\n best_model_min_trees=None,\n verbose=None,\n silent=None,\n logging_level=None,\n metric_period=None,\n ctr_leaf_count_limit=None,\n store_all_simple_ctr=None,\n max_ctr_complexity=None,\n has_time=None,\n allow_const_label=None,\n one_hot_max_size=None,\n random_strength=None,\n name=None,\n ignored_features=None,\n train_dir=None,\n custom_metric=None,\n eval_metric=None,\n bagging_temperature=None,\n save_snapshot=None,\n snapshot_file=None,\n snapshot_interval=None,\n fold_len_multiplier=None,\n used_ram_limit=None,\n gpu_ram_part=None,\n pinned_memory_size=None,\n allow_writing_files=None,\n final_ctr_computation_mode=None,\n approx_on_full_history=None,\n boosting_type=None,\n simple_ctr=None,\n combinations_ctr=None,\n per_feature_ctr=None,\n ctr_description=None,\n task_type=None,\n device_config=None,\n devices=None,\n bootstrap_type=None,\n subsample=None,\n dev_score_calc_obj_block_size=None,\n max_depth=None,\n n_estimators=None,\n num_boost_round=None,\n num_trees=None,\n colsample_bylevel=None,\n random_state=None,\n reg_lambda=None,\n objective=None,\n eta=None,\n max_bin=None,\n gpu_cat_features_storage=None,\n data_partition=None,\n metadata=None,\n early_stopping_rounds=None,\n cat_features=None\n ):\n params = {}\n not_params = [\"not_params\", \"self\", \"params\", \"__class__\"]\n for key, value in iteritems(locals().copy()):\n if key not in not_params and value is not None:\n params[key] = value\n\n super(CatBoostRegressor, self).__init__(params)\n\n def fit(self, X, y=None, cat_features=None, sample_weight=None, baseline=None, use_best_model=None,\n eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,\n verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Fit the CatBoost model.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n If not catboost.Pool, 2 dimensional Feature matrix or string - file with dataset.\n\n y : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Labels, 1 dimensional array like.\n Use only if X is not catboost.Pool.\n\n cat_features : list or numpy.array, optional (default=None)\n If not None, giving the list of Categ columns indices.\n Use only if X is not catboost.Pool.\n\n sample_weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Instance weights, 1 dimensional array like.\n\n baseline : list or numpy.array, optional (default=None)\n If not None, giving 2 dimensional array like data.\n Use only if X is not catboost.Pool.\n\n use_best_model : bool, optional (default=None)\n Flag to use best model\n\n eval_set : catboost.Pool or list, optional (default=None)\n A list of (X, y) tuple pairs to use as a validation set for\n early-stopping\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n silent : bool\n If silent is True, 
logging_level is set to Silent.\n If silent is False, logging_level is set to Verbose.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost\n \"\"\"\n\n params = deepcopy(self._init_params)\n _process_synonyms(params)\n if 'loss_function' in params:\n self._check_is_regressor_loss(params['loss_function'])\n\n return self._fit(X, y, cat_features, None, sample_weight, None, None, None, None, baseline,\n use_best_model, eval_set, verbose, logging_level, plot, column_description,\n verbose_eval, metric_period, silent, early_stopping_rounds,\n save_snapshot, snapshot_file, snapshot_interval)\n\n def predict(self, data, ntree_start=0, ntree_end=0, thread_count=-1, verbose=None):\n \"\"\"\n Predict with data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series or single object\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : numpy.array or single object\n \"\"\"\n return self._predict(data, \"RawFormulaVal\", ntree_start, ntree_end, thread_count, verbose)\n\n def staged_predict(self, data, ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None):\n \"\"\"\n Predict target at each stage for data.\n\n Parameters\n ----------\n data : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n Data to predict.\n\n ntree_start: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n ntree_end: int, optional (default=0)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n If value equals to 0 this parameter is ignored and ntree_end equal to tree_count_.\n\n eval_period: int, optional (default=1)\n Model is applyed on the interval [ntree_start, ntree_end) with the step eval_period (zero-based indexing).\n\n thread_count : int (default=-1)\n The number of threads to use when applying the model.\n Allows you to optimize the speed of execution. 
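# --- Usage sketch: regression ----------------------------------------------------------------------
# CatBoostRegressor with the default 'RMSE' loss as documented above; the toy data and
# parameter values are placeholders.
import numpy as np
from catboost import CatBoostRegressor

X = np.random.rand(50, 4)
y = 3.0 * X[:, 0] + np.random.rand(50)

reg = CatBoostRegressor(iterations=100, learning_rate=0.1, logging_level='Silent')
reg.fit(X, y)
preds = reg.predict(X)                   # raw values ('RawFormulaVal')
print(reg.score(X, y))                   # RMSE, per the score() method documented below
# ----------------------------------------------------------------------------------------------------------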
This parameter doesn't affect results.\n If -1, then the number of threads is set to the number of cores.\n\n verbose : bool\n If True, writes the evaluation metric measured set to stderr.\n\n Returns\n -------\n prediction : generator numpy.array or single object for each iteration\n \"\"\"\n return self._staged_predict(data, \"RawFormulaVal\", ntree_start, ntree_end, eval_period, thread_count, verbose)\n\n def score(self, X, y=None):\n \"\"\"\n Calculate RMSE.\n\n Parameters\n ----------\n X : catboost.Pool or list or numpy.array or pandas.DataFrame or pandas.Series\n Data to predict.\n y : list or numpy.array\n True labels.\n\n Returns\n -------\n RMSE : float\n \"\"\"\n if isinstance(X, Pool):\n if X.get_label() is None:\n raise CatboostError(\"Label in X has not initialized.\")\n if y is not None:\n raise CatboostError(\"Wrong initializing y: X is catboost.Pool object, y must be initialized inside catboost.Pool.\")\n y = X.get_label()\n elif y is None:\n raise CatboostError(\"y should be specified.\")\n error = []\n y = np.array(y, dtype=np.float64)\n for i, val in enumerate(self.predict(X)):\n error.append(pow(y[i] - val, 2))\n return np.sqrt(np.mean(error))\n\n def _check_is_regressor_loss(self, loss_function):\n if isinstance(loss_function, str) and not self._is_regression_objective(loss_function):\n raise CatboostError(\"Invalid loss_function='{}': for regressor use \"\n \"RMSE, MAE, Quantile, LogLinQuantile, Poisson, MAPE, Lq or custom objective object\".format(loss_function))\n\n\ndef train(pool=None, params=None, dtrain=None, logging_level=None, verbose=None, iterations=None,\n num_boost_round=None, evals=None, eval_set=None, plot=None, verbose_eval=None, metric_period=None,\n early_stopping_rounds=None, save_snapshot=None, snapshot_file=None, snapshot_interval=None):\n \"\"\"\n Train CatBoost model.\n\n Parameters\n ----------\n params : dict\n Parameters for CatBoost.\n If None, all params are set to their defaults.\n If dict, overriding parameters present in the dict.\n\n pool : catboost.Pool or tuple (X, y)\n Data to train on.\n\n iterations : int\n Number of boosting iterations. Can be set in params dict.\n\n evals : catboost.Pool or tuple (X, y)\n Synonym for eval_set. Only one of these parameters should be set.\n\n dtrain : catboost.Pool or tuple (X, y)\n Synonym for pool parameter. Only one of these parameters should be set.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n iterations : int\n Number of boosting iterations. Can be set in params dict.\n\n num_boost_round : int\n Synonym for iterations. 
Only one of these parameters should be set.\n\n eval_set : catboost.Pool or tuple (X, y) or list [(X, y)]\n Dataset for evaluation.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n Returns\n -------\n model : CatBoost class\n \"\"\"\n\n if params is None:\n raise CatboostError(\"params should be set.\")\n\n if dtrain is not None:\n if pool is None:\n pool = dtrain\n else:\n raise CatboostError(\"Only one of the parameters pool and dtrain should be set.\")\n\n if num_boost_round is not None:\n if iterations is None:\n iterations = num_boost_round\n else:\n raise CatboostError(\"Only one of the parameters iterations and num_boost_round should be set.\")\n if iterations is not None:\n params = deepcopy(params)\n params.update({\n 'iterations': iterations\n })\n\n if early_stopping_rounds is not None:\n params.update({\n 'od_type': 'Iter'\n })\n if 'od_pval' in params:\n del params['od_pval']\n params.update({\n 'od_wait': early_stopping_rounds\n })\n\n if evals is not None:\n if eval_set is not None:\n raise CatboostError('Only one of the parameters evals, eval_set should be set.')\n eval_set = evals\n\n model = CatBoost(params)\n model.fit(X=pool, eval_set=eval_set, logging_level=logging_level, plot=plot, verbose=verbose,\n verbose_eval=verbose_eval, metric_period=metric_period,\n early_stopping_rounds=early_stopping_rounds, save_snapshot=save_snapshot,\n snapshot_file=snapshot_file, snapshot_interval=snapshot_interval)\n return model\n\n\ndef cv(pool=None, params=None, dtrain=None, iterations=None, num_boost_round=None,\n fold_count=None, nfold=None, inverted=False, partition_random_seed=0, seed=None,\n shuffle=True, logging_level=None, stratified=False, as_pandas=True, metric_period=None,\n verbose=None, verbose_eval=None, plot=False, early_stopping_rounds=None,\n save_snapshot=None, snapshot_file=None, snapshot_interval=None, iterations_batch_size=100):\n \"\"\"\n Cross-validate the CatBoost model.\n\n Parameters\n ----------\n pool : catboost.Pool\n Data to cross-validatte.\n\n params : dict\n Parameters for CatBoost.\n CatBoost has many of parameters, all have default values.\n If None, all params still defaults.\n If dict, overriding some (or all) params.\n\n dtrain : catboost.Pool or tuple (X, y)\n Synonym for pool parameter. Only one of these parameters should be set.\n\n iterations : int\n Number of boosting iterations. Can be set in params dict.\n\n num_boost_round : int\n Synonym for iterations. Only one of these parameters should be set.\n\n fold_count : int, optional (default=3)\n The number of folds to split the dataset into.\n\n nfold : int\n Synonym for fold_count.\n\n inverted : bool, optional (default=False)\n Train on the test fold and evaluate the model on the training folds.\n\n partition_random_seed : int, optional (default=0)\n Use this as the seed value for random permutation of the data.\n Permutation is performed before splitting the data for cross validation.\n Each seed generates unique data splits.\n\n seed : int, optional\n Synonym for partition_random_seed. 
This parameter is deprecated. Use\n partition_random_seed instead.\n If both parameters are initialised partition_random_seed parameter is\n ignored.\n\n shuffle : bool, optional (default=True)\n Shuffle the dataset objects before splitting into folds.\n\n logging_level : string, optional (default=None)\n Possible values:\n - 'Silent'\n - 'Verbose'\n - 'Info'\n - 'Debug'\n\n stratified : bool, optional (default=False)\n Perform stratified sampling.\n\n as_pandas : bool, optional (default=True)\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return dict.\n\n metric_period : int\n Frequency of evaluating metrics.\n\n verbose : bool or int\n If verbose is bool, then if set to True, logging_level is set to Verbose,\n if set to False, logging_level is set to Silent.\n If verbose is int, it determines the frequency of writing metrics to output and\n logging_level is set to Verbose.\n\n verbose_eval : bool or int\n Synonym for verbose. Only one of these parameters should be set.\n\n plot : bool, optional (default=False)\n If True, drow train and eval error in Jupyter notebook\n\n early_stopping_rounds : int\n Activates Iter overfitting detector with od_wait set to early_stopping_rounds.\n\n save_snapshot : bool, [default=None]\n Enable progress snapshoting for restoring progress after crashes or interruptions\n\n snapshot_file : string, [default=None]\n Learn progress snapshot file path, if None will use default filename\n\n snapshot_interval: int, [default=600]\n Interval beetween saving snapshots (seconds)\n\n iterations_batch_size: int [default:100]\n Number of iterations to compute for each fold before aggregating results.\n\n Returns\n -------\n cv results : pandas.core.frame.DataFrame with cross-validation results\n columns are: test-error-mean test-error-std train-error-mean train-error-std\n \"\"\"\n if params is None:\n raise CatboostError(\"params should be set.\")\n\n params = deepcopy(params)\n _process_synonyms(params)\n\n metric_period, verbose, logging_level = _process_verbose(metric_period, verbose, logging_level, verbose_eval)\n\n if verbose is not None:\n params.update({\n 'verbose': verbose\n })\n\n if logging_level is not None:\n params.update({\n 'logging_level': logging_level\n })\n\n if metric_period is not None:\n params.update({\n 'metric_period': metric_period\n })\n\n if early_stopping_rounds is not None:\n params.update({\n 'od_type': 'Iter'\n })\n if 'od_pval' in params:\n del params['od_pval']\n params.update({\n 'od_wait': early_stopping_rounds\n })\n\n if dtrain is not None:\n if pool is None:\n pool = dtrain\n else:\n raise CatboostError(\"Only one of the parameters pool and dtrain should be set.\")\n\n if num_boost_round is not None:\n if iterations is None:\n iterations = num_boost_round\n else:\n raise CatboostError(\"Only one of the parameters iterations and num_boost_round should be set.\")\n\n if iterations is not None:\n params.update({\n 'iterations': iterations\n })\n\n if seed is not None:\n partition_random_seed = seed\n\n if save_snapshot is not None:\n params['save_snapshot'] = save_snapshot\n\n if snapshot_file is not None:\n params['snapshot_file'] = snapshot_file\n\n if snapshot_interval is not None:\n params['snapshot_interval'] = snapshot_interval\n\n if nfold is None and fold_count is None:\n fold_count = 3\n elif fold_count is None:\n fold_count = nfold\n else:\n assert nfold is None or nfold == fold_count\n\n with log_fixup(), plot_wrapper(plot, params):\n return _cv(params, pool, fold_count, inverted, 
partition_random_seed, shuffle, stratified,\n as_pandas, iterations_batch_size)\n\n\nclass BatchMetricCalcer(_MetricCalcerBase):\n\n def __init__(self, catboost, metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir):\n super(BatchMetricCalcer, self).__init__(catboost)\n if tmp_dir is None:\n tmp_dir = tempfile.mkdtemp()\n delete_temp_dir_flag = True\n else:\n delete_temp_dir_flag = False\n\n if isinstance(metrics, str):\n metrics = [metrics]\n self._create_calcer(metrics, ntree_start, ntree_end, eval_period, thread_count, tmp_dir, delete_temp_dir_flag)\n\n\ndef sum_models(models, weights=None, ctr_merge_policy='IntersectingCountersAverage'):\n result = CatBoost()\n result._sum_models(models, weights, ctr_merge_policy)\n return result\n" ]
[ [ "numpy.expand_dims", "numpy.reshape", "numpy.int32", "numpy.dtype", "numpy.shape", "numpy.mean", "numpy.transpose", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ygarrot/plantcv
[ "0e11c7f63d96a52487e01e3b67744aa8697eedb2", "f38f7de53663522eb770870b70823d5fc46d0c0f", "f38f7de53663522eb770870b70823d5fc46d0c0f", "f38f7de53663522eb770870b70823d5fc46d0c0f" ]
[ "plantcv/plantcv/spectral_index/spectral_index.py", "tests/plantcv/visualize/test_pseudocolor.py", "tests/plantcv/morphology/test_segment_angle.py", "tests/plantcv/test_report_size_marker.py" ]
[ "# Extract one of the predefined indices from a hyperspectral datacube\n\nimport os\nimport numpy as np\nimport cv2\nfrom plantcv.plantcv import params\nfrom plantcv.plantcv._debug import _debug\nfrom plantcv.plantcv import fatal_error\nfrom plantcv.plantcv import Spectral_data\nfrom plantcv.plantcv.transform import rescale\nfrom plantcv.plantcv.hyperspectral import _find_closest\n\n\ndef ndvi(hsi, distance=20):\n \"\"\"Normalized Difference Vegetation Index.\n\n NDVI = (R800 - R670) / (R800 + R670)\n\n The theoretical range for NDVI is [-1.0, 1.0]\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 670:\n # Obtain index that best represents NIR and red bands\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800 = (hsi.array_data[:, :, r800_index])\n r670 = (hsi.array_data[:, :, r670_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r670) / (r800 + r670)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"NDVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating NDVI. Try increasing distance.\")\n\n\ndef gdvi(hsi, distance=20):\n \"\"\"Green Difference Vegetation Index.\n\n GDVI = R800 - R550\n\n The theoretical range for GDVI is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 550:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r800 = (hsi.array_data[:, :, r800_index])\n r550 = (hsi.array_data[:, :, r550_index])\n # Naturally ranges from -1 to 1\n index_array_raw = r800 - r550\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"GDVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating GDVI. 
Try increasing distance.\")\n\n\ndef savi(hsi, distance=20):\n \"\"\"Soil Adjusted Vegetation Index.\n\n SAVI = (1.5 * (R800 - R680)) / (R800 + R680 + 0.5)\n\n The theoretical range for SAVI is [-1.2, 1.2].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 680:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r680_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 680)\n r800 = (hsi.array_data[:, :, r800_index])\n r680 = (hsi.array_data[:, :, r680_index])\n # Naturally ranges from -1.2 to 1.2\n index_array_raw = (1.5 * (r800 - r680)) / (r800 + r680 + 0.5)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"SAVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating SAVI. Try increasing distance.\")\n\n\ndef pri(hsi, distance=20):\n \"\"\"Photochemical Reflectance Index.\n\n PRI = (R531 - R570) / (R531 + R570)\n\n The theoretical range for PRI is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 570 and (float(hsi.min_wavelength) - distance) <= 531:\n # Obtain index that best approximates 570 and 531 nm bands\n r570_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 570)\n r531_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 531)\n r570 = (hsi.array_data[:, :, r570_index])\n r531 = (hsi.array_data[:, :, r531_index])\n index_array_raw = (r531 - r570) / (r531 + r570)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PRI\")\n fatal_error(\"Available wavelengths are not suitable for calculating PRI. Try increasing distance.\")\n\n\ndef ari(hsi, distance=20):\n \"\"\"Anthocyanin Reflectance Index.\n\n ARI = (1 / R550) - (1 / R700)\n\n The theoretical range for ARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 700 and (float(hsi.min_wavelength) - distance) <= 550:\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r550 = (hsi.array_data[:, :, r550_index])\n r700 = (hsi.array_data[:, :, r700_index])\n index_array_raw = (1 / r550) - (1 / r700)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"ARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating ARI. 
Try increasing distance.\")\n\n\ndef ci_rededge(hsi, distance=20):\n \"\"\"Chlorophyll Index Red Edge.\n\n CI_REDEDGE = (R800 / R700) - 1\n\n The theoretical range for CI_REDEDGE is [-1.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 700:\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r700 = (hsi.array_data[:, :, r700_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to inf\n index_array_raw = (r800 / r700) - 1\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"CI_REDEDGE\")\n fatal_error(\"Available wavelengths are not suitable for calculating CI_REDEDGE. Try increasing distance.\")\n\n\ndef cri550(hsi, distance=20):\n \"\"\"Carotenoid Reflectance Index 550.\n\n CRI550 = (1 / R510) - (1 / R550)\n\n The theoretical range for CRI550 is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 550 and (float(hsi.min_wavelength) - distance) <= 510:\n r510_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 510)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r510 = (hsi.array_data[:, :, r510_index])\n r550 = (hsi.array_data[:, :, r550_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (1 / r510) - (1 / r550)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"CRI510\")\n fatal_error(\"Available wavelengths are not suitable for calculating CRI510. Try increasing distance.\")\n\n\ndef cri700(hsi, distance=20):\n \"\"\"Carotenoid Reflectance Index 700.\n\n CRI700 = (1 / R510) - (1 / R700)\n\n The theoretical range for CRI700 is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 700 and (float(hsi.min_wavelength) - distance) <= 510:\n r510_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 510)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r510 = (hsi.array_data[:, :, r510_index])\n r700 = (hsi.array_data[:, :, r700_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (1 / r510) - (1 / r700)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"CRI700\")\n fatal_error(\"Available wavelengths are not suitable for calculating CRI700. 
Try increasing distance.\")\n\n\ndef egi(rgb_img):\n \"\"\"Excess Green Index.\n\n r = R / (R + G + B)\n g = G / (R + G + B)\n b = B / (R + G + B)\n EGI = 2g - r - b\n\n The theoretical range for EGI is (-1, 2).\n\n Inputs:\n rgb_img = Color image (np.array)\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param rgb_img: np.array\n :return index_array: np.array\n \"\"\"\n # Split the RGB image into component channels\n blue, green, red = cv2.split(rgb_img)\n # Calculate float32 sum of all channels\n total = red.astype(np.float32) + green.astype(np.float32) + blue.astype(np.float32)\n # Calculate normalized channels\n r = red.astype(np.float32) / total\n g = green.astype(np.float32) / total\n b = blue.astype(np.float32) / total\n index_array_raw = (2 * g) - r - b\n\n hsi = Spectral_data(array_data=None, max_wavelength=0, min_wavelength=0, max_value=255, min_value=0,\n d_type=np.uint8, wavelength_dict={}, samples=None, lines=None, interleave=None,\n wavelength_units=None, array_type=None, pseudo_rgb=None, filename=None, default_bands=None)\n\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"EGI\")\n\n\ndef evi(hsi, distance=20):\n \"\"\"Enhanced Vegetation index.\n\n EVI = (2.5 * (R800 - R670)) / (1 + R800 + (6 * R670) - (7.5 * R480))\n\n The theoretical range for EVI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 480:\n r480_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 480)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r480 = (hsi.array_data[:, :, r480_index])\n r670 = (hsi.array_data[:, :, r670_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (2.5 * (r800 - r670)) / (1 + r800 + (6 * r670) - (7.5 * r480))\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"EVI\")\n fatal_error(\"Available wavelengths are not suitable for calculating EVI. 
Try increasing distance.\")\n\n\ndef mari(hsi, distance=20):\n \"\"\"Modified Anthocyanin Reflectance Index.\n\n MARI = ((1 / R550) - (1 / R700)) * R800\n\n The theoretical range for MARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 550:\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r550 = (hsi.array_data[:, :, r550_index])\n r700 = (hsi.array_data[:, :, r700_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -inf to inf\n index_array_raw = ((1 / r550) - (1 / r700)) * r800\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"MARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating MARI. Try increasing distance.\")\n\n\ndef mcari(hsi, distance=20):\n \"\"\"Modified Chlorophyll Absorption in Reflectance Index.\n\n MCARI = ((R700 - R670) - 0.2 * (R700 - R550)) * (R700 / R670)\n\n The theoretical range for MCARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 700 and (float(hsi.min_wavelength) - distance) <= 550:\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r700_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 700)\n r550 = (hsi.array_data[:, :, r550_index])\n r670 = (hsi.array_data[:, :, r670_index])\n r700 = (hsi.array_data[:, :, r700_index])\n # Naturally ranges from -inf to inf\n index_array_raw = ((r700 - r670) - 0.2 * (r700 - r550)) * (r700 / r670)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"MCARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating MCARI. 
Try increasing distance.\")\n\n\ndef mtci(hsi, distance=20):\n \"\"\"MERIS Terrestrial Chlorophyll Index.\n\n MTCI = (R753.75 - R708.75) / (R708.75 - R681.25)\n\n The theoretical range for MTCI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 753.75 and (float(hsi.min_wavelength) - distance) <= 681.25:\n r681_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 681.25)\n r708_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 708.75)\n r753_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 753.75)\n r681 = (hsi.array_data[:, :, r681_index])\n r708 = (hsi.array_data[:, :, r708_index])\n r753 = (hsi.array_data[:, :, r753_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r753 - r708) / (r708 - r681)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"MTCI\")\n fatal_error(\"Available wavelengths are not suitable for calculating MTCI. Try increasing distance.\")\n\n\ndef ndre(hsi, distance=20):\n \"\"\"Normalized Difference Red Edge.\n\n NDRE = (R790 - R720) / (R790 + R720)\n\n The theoretical range for NDRE is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 790 and (float(hsi.min_wavelength) - distance) <= 720:\n r720_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 720)\n r790_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 790)\n r790 = (hsi.array_data[:, :, r790_index])\n r720 = (hsi.array_data[:, :, r720_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r790 - r720) / (r790 + r720)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"NDRE\")\n fatal_error(\"Available wavelengths are not suitable for calculating NDRE. 
Try increasing distance.\")\n\n\ndef psnd_chla(hsi, distance=20):\n \"\"\"Pigment Specific Normalized Difference for Chlorophyll a.\n\n PSND_CHLA = (R800 - R680) / (R800 + R680)\n\n The theoretical range for PSND_CHLA is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 680:\n r680_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 680)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r680 = (hsi.array_data[:, :, r680_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r680) / (r800 + r680)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSND_CHLA\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSND_CHLA. Try increasing distance.\")\n\n\ndef psnd_chlb(hsi, distance=20):\n \"\"\"Pigment Specific Normalized Difference for Chlorophyll b.\n\n PSND_CHLB = (R800 - R635) / (R800 + R635)\n\n The theoretical range for PSND_CHLB is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 635:\n r635_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 635)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r635 = (hsi.array_data[:, :, r635_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r635) / (r800 + r635)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSND_CHLB\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSND_CHLB. 
Try increasing distance.\")\n\n\ndef psnd_car(hsi, distance=20):\n \"\"\"Pigment Specific Normalized Difference for Caroteniods.\n\n PSND_CAR = (R800 - R470) / (R800 + R470)\n\n The theoretical range for PSND_CAR is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 470:\n r470_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 470)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r470 = (hsi.array_data[:, :, r470_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r800 - r470) / (r800 + r470)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSND_CAR\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSND_CAR. Try increasing distance.\")\n\n\ndef psri(hsi, distance=20):\n \"\"\"Plant Senescence Reflectance Index.\n\n PSRI = (R678 - R500) / R750\n\n The theoretical range for PSRI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 750 and (float(hsi.min_wavelength) - distance) <= 500:\n r500_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 500)\n r678_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 678)\n r750_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 750)\n r500 = (hsi.array_data[:, :, r500_index])\n r678 = (hsi.array_data[:, :, r678_index])\n r750 = (hsi.array_data[:, :, r750_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r678 - r500) / r750\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSRI\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSRI. 
Try increasing distance.\")\n\n\ndef pssr_chla(hsi, distance=20):\n \"\"\"Pigment Specific Simple Ratio for Chlorophyll a.\n\n PSSR_CHLA = R800 / R680\n\n The theoretical range for PSSR_CHLA is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 680:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r680_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 680)\n r800 = (hsi.array_data[:, :, r800_index])\n r680 = (hsi.array_data[:, :, r680_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r680\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSSR_CHLA\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSSR_CHLA. Try increasing distance.\")\n\n\ndef pssr_chlb(hsi, distance=20):\n \"\"\"Pigment Specific Simple Ratio for Chlorophyll b.\n\n PSSR_CHLB = R800 / R635\n\n The theoretical range for PSSR_CHLB is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 635:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r635_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 635)\n r800 = (hsi.array_data[:, :, r800_index])\n r635 = (hsi.array_data[:, :, r635_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r635\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSSR_CHLB\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSSR_CHLB. Try increasing distance.\")\n\n\ndef pssr_car(hsi, distance=20):\n \"\"\"Pigment Specific Simple Ratio for Caroteniods.\n\n PSSR_CAR = R800 / R470\n\n The theoretical range for PSSR_CAR is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 470:\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r470_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 470)\n r800 = (hsi.array_data[:, :, r800_index])\n r470 = (hsi.array_data[:, :, r470_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r470\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"PSSR_CAR\")\n fatal_error(\"Available wavelengths are not suitable for calculating PSSR_CAR. 
Try increasing distance.\")\n\n\ndef rgri(hsi, distance=20):\n \"\"\"Red/green ratio index (Gamon and Surfus, 1999)\n The theoretical range for RGRI is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 670 and (float(hsi.min_wavelength) - distance) <= 560:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r560_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 560)\n r670 = (hsi.array_data[:, :, r670_index])\n r560 = (hsi.array_data[:, :, r560_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r670 / r560\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"RGRI\")\n fatal_error(\"Available wavelengths are not suitable for calculating RGRI. Try increasing distance.\")\n\n\ndef rvsi(hsi, distance=20):\n \"\"\"Red-Edge Vegetation Stress Index.\n\n RVSI = ((R714 + R752) / 2) - R733\n\n The theoretical range for RVSI is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 752 and (float(hsi.min_wavelength) - distance) <= 714:\n r714_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 714)\n r733_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 733)\n r752_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 752)\n r714 = (hsi.array_data[:, :, r714_index])\n r733 = (hsi.array_data[:, :, r733_index])\n r752 = (hsi.array_data[:, :, r752_index])\n # Naturally ranges from -1 to 1\n index_array_raw = ((r714 + r752) / 2) - r733\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"RVSI\")\n fatal_error(\"Available wavelengths are not suitable for calculating RVSI. 
Try increasing distance.\")\n\n\ndef sipi(hsi, distance=20):\n \"\"\"Structure-Independent Pigment Index.\n\n SIPI = (R800 - R670) / (R800 - R480)\n\n The theoretical range for SIPI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 480:\n r480_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 480)\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r445 = (hsi.array_data[:, :, r480_index])\n r670 = (hsi.array_data[:, :, r670_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r800 - r670) / (r800 - r445)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"SIPI\")\n fatal_error(\"Available wavelengths are not suitable for calculating SIPI. Try increasing distance.\")\n\n\ndef sr(hsi, distance=20):\n \"\"\"Simple Ratio.\n\n SR = R800 / R670\n\n The theoretical range for SR is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 800 and (float(hsi.min_wavelength) - distance) <= 670:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r800_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 800)\n r670 = (hsi.array_data[:, :, r670_index])\n r800 = (hsi.array_data[:, :, r800_index])\n # Naturally ranges from 0 to inf\n index_array_raw = r800 / r670\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"SR\")\n fatal_error(\"Available wavelengths are not suitable for calculating SR. 
Try increasing distance.\")\n\n\ndef vari(hsi, distance=20):\n \"\"\"Visible Atmospherically Resistant Index.\n\n VARI = (R550 - R670) / (R550 + R670 - R480)\n\n The theoretical range for VARI is (-Inf, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 670 and (float(hsi.min_wavelength) - distance) <= 480:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r480_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 480)\n r670 = (hsi.array_data[:, :, r670_index])\n r550 = (hsi.array_data[:, :, r550_index])\n r480 = (hsi.array_data[:, :, r480_index])\n # Naturally ranges from -inf to inf\n index_array_raw = (r550 - r670) / (r550 + r670 - r480)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"VARI\")\n fatal_error(\"Available wavelengths are not suitable for calculating VARI. Try increasing distance.\")\n\n\ndef vi_green(hsi, distance=20):\n \"\"\"Vegetation Index using green bands.\n\n VIgreen = (R550 - R670) / (R550 + R670)\n\n The theoretical range for VI_GREEN is [-1.0, 1.0].\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 670 and (float(hsi.min_wavelength) - distance) <= 550:\n r670_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 670)\n r550_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 550)\n r670 = (hsi.array_data[:, :, r670_index])\n r550 = (hsi.array_data[:, :, r550_index])\n # Naturally ranges from -1 to 1\n index_array_raw = (r550 - r670) / (r550 + r670)\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"VI_GREEN\")\n fatal_error(\"Available wavelengths are not suitable for calculating VI_GREEN. Try increasing distance.\")\n\n\ndef wi(hsi, distance=20):\n \"\"\"Water Index.\n\n WI = R900 / R970\n\n The theoretical range for WI is [0.0, Inf).\n\n Inputs:\n hsi = hyperspectral image (PlantCV Spectral_data instance)\n distance = how lenient to be if the required wavelengths are not available\n\n Returns:\n index_array = Index data as a Spectral_data instance\n\n :param hsi: __main__.Spectral_data\n :param distance: int\n :return index_array: __main__.Spectral_data\n \"\"\"\n if (float(hsi.max_wavelength) + distance) >= 970 and (float(hsi.min_wavelength) - distance) <= 900:\n r900_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 900)\n r970_index = _find_closest(np.array([float(i) for i in hsi.wavelength_dict.keys()]), 970)\n r900 = (hsi.array_data[:, :, r900_index])\n r970 = (hsi.array_data[:, :, r970_index])\n # Naturally ranges from 0 to Inf\n index_array_raw = r900 / r970\n return _package_index(hsi=hsi, raw_index=index_array_raw, method=\"WI\")\n fatal_error(\"Available wavelengths are not suitable for calculating WBI. 
Try increasing distance.\")\n\n\ndef _package_index(hsi, raw_index, method):\n \"\"\"Private function to package raw index array as a Spectral_data object.\n Inputs:\n hsi = hyperspectral data (Spectral_data object)\n raw_index = raw index array\n method = index method (e.g. NDVI)\n\n Returns:\n index = index image as a Spectral_data object.\n\n :params hsi: __main__.Spectral_data\n :params raw_index: np.array\n :params method: str\n :params index: __main__.Spectral_data\n \"\"\"\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Resulting array is float 32 from varying natural ranges, transform into uint8 for plotting\n all_positive = np.add(raw_index, 2 * np.ones(np.shape(raw_index)))\n scaled = rescale(all_positive)\n\n # Find array min and max values\n obs_max_pixel = float(np.nanmax(raw_index))\n obs_min_pixel = float(np.nanmin(raw_index))\n\n index = Spectral_data(array_data=raw_index, max_wavelength=0,\n min_wavelength=0, max_value=obs_max_pixel,\n min_value=obs_min_pixel, d_type=np.uint8,\n wavelength_dict={}, samples=hsi.samples,\n lines=hsi.lines, interleave=hsi.interleave,\n wavelength_units=hsi.wavelength_units,\n array_type=\"index_\" + method.lower(),\n pseudo_rgb=scaled, filename=hsi.filename, default_bands=None)\n\n # Restore debug mode\n params.debug = debug\n\n _debug(visual=index.pseudo_rgb,\n filename=os.path.join(params.debug_outdir, str(params.device) + method + \"_index.png\"))\n\n return index\n", "import pytest\nimport cv2\nimport numpy as np\nfrom matplotlib.figure import Figure\nfrom plantcv.plantcv.visualize import pseudocolor\nfrom plantcv.plantcv import params\n\n\[email protected](\"debug,axes\", [[\"print\", True], [\"plot\", False], [None, False]])\ndef test_pseudocolor(debug, axes, tmpdir, visualize_test_data):\n \"\"\"Test for PlantCV.\"\"\"\n # Create a tmp directory\n cache_dir = tmpdir.mkdir(\"cache\")\n params.debug_outdir = cache_dir\n # Input image\n img = cv2.imread(visualize_test_data.small_bin_img, -1)\n r, c = img.shape\n # generate \"bad\" pixels\n mask_bad = np.zeros((r, c), dtype=np.uint8)\n mask_bad[0:1, 0:1] = 255\n # Debug mode\n params.debug = debug\n pseudo_img = pseudocolor(gray_img=img, mask=None, title=\"Pseudocolored image\", axes=axes, bad_mask=mask_bad)\n # Assert the output is a matplotlib figure\n assert isinstance(pseudo_img, Figure)\n\n\[email protected](\"bkgrd,axes,pad\", [[\"image\", True, \"auto\"], [\"white\", False, 1], [\"black\", True, \"auto\"]])\ndef test_pseudocolor_mask(bkgrd, axes, pad, visualize_test_data):\n \"\"\"Test for PlantCV.\"\"\"\n # Input image\n img = cv2.imread(visualize_test_data.small_bin_img, -1)\n # Input mask\n mask = cv2.imread(visualize_test_data.small_bin_img, -1)\n # Input contours\n obj_contour = visualize_test_data.load_composed_contours(visualize_test_data.small_composed_contours_file)\n r, c = img.shape\n # generate \"bad\" pixels\n mask_bad = np.zeros((r, c), dtype=np.uint8)\n mask_bad[0:1, 0:1] = 255\n pseudo_img = pseudocolor(gray_img=img, obj=obj_contour, mask=mask, background=bkgrd, bad_mask=mask_bad,\n title=\"Pseudocolored image\", axes=axes, obj_padding=pad)\n # Assert the output is a matplotlib figure\n assert isinstance(pseudo_img, Figure)\n\n\ndef test_pseudocolor_bad_input(visualize_test_data):\n \"\"\"Test for PlantCV.\"\"\"\n img = cv2.imread(visualize_test_data.small_rgb_img)\n with pytest.raises(RuntimeError):\n _ = pseudocolor(gray_img=img)\n\n\ndef test_pseudocolor_bad_background(visualize_test_data):\n \"\"\"Test for PlantCV.\"\"\"\n img = 
cv2.imread(visualize_test_data.small_bin_img, -1)\n mask = cv2.imread(visualize_test_data.small_bin_img, -1)\n with pytest.raises(RuntimeError):\n _ = pseudocolor(gray_img=img, mask=mask, background=\"pink\")\n\n\ndef test_pseudocolor_bad_padding(visualize_test_data):\n \"\"\"Test for PlantCV.\"\"\"\n img = cv2.imread(visualize_test_data.small_bin_img, -1)\n mask = cv2.imread(visualize_test_data.small_bin_img, -1)\n obj_contour = visualize_test_data.load_composed_contours(visualize_test_data.small_composed_contours_file)\n with pytest.raises(RuntimeError):\n _ = pseudocolor(gray_img=img, mask=mask, obj=obj_contour, obj_padding=\"pink\")\n", "import cv2\nimport numpy as np\nfrom plantcv.plantcv import outputs\nfrom plantcv.plantcv.morphology import segment_angle\n\n\ndef test_segment_angle(morphology_test_data):\n \"\"\"Test for PlantCV.\"\"\"\n # Clear previous outputs\n outputs.clear()\n skeleton = cv2.imread(morphology_test_data.skel_img, -1)\n _ = segment_angle(segmented_img=skeleton,\n objects=morphology_test_data.load_segments(morphology_test_data.segments_file, \"leaves\"))\n assert len(outputs.observations['default']['segment_angle']['value']) == 4\n\n\ndef test_segment_angle_overflow():\n \"\"\"Test for PlantCV.\"\"\"\n # Clear previous outputs\n outputs.clear()\n # Don't prune, would usually give overflow error without extra if statement in segment_angle\n skeleton = np.zeros((10, 10), dtype=np.uint8)\n edges = [np.array([[[5, 3]], [[5, 4]], [[5, 5]], [[5, 6]], [[5, 7]], [[5, 6]], [[5, 5]], [[5, 4]]], dtype=np.int32)]\n _ = segment_angle(segmented_img=skeleton, objects=edges)\n assert len(outputs.observations['default']['segment_angle']['value']) == 1\n", "import pytest\nimport cv2\nimport numpy as np\nfrom plantcv.plantcv import report_size_marker_area, outputs\n\n\[email protected](\"marker,exp\", [[\"detect\", 1257], [\"define\", 2601]])\ndef test_report_size_marker(marker, exp, test_data):\n \"\"\"Test for PlantCV.\"\"\"\n # Clear outputs\n outputs.clear()\n # Read in test data\n img = cv2.imread(test_data.small_rgb_img)\n # Draw a marker\n img = cv2.circle(img, (50, 100), 20, (0, 0, 255), thickness=-1)\n # ROI contour\n roi_contour = [np.array([[[25, 75]], [[25, 125]], [[75, 125]], [[75, 75]]], dtype=np.int32)]\n roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)\n _ = report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker=marker,\n objcolor='light', thresh_channel='s', thresh=120)\n assert int(outputs.observations[\"default\"][\"marker_area\"][\"value\"]) == exp\n\n\ndef test_report_size_marker_grayscale_input(test_data):\n \"\"\"Test for PlantCV.\"\"\"\n # Clear outputs\n outputs.clear()\n # Read in test data\n img = cv2.imread(test_data.small_gray_img, -1)\n # ROI contour\n roi_contour = [np.array([[[25, 75]], [[25, 125]], [[75, 125]], [[75, 75]]], dtype=np.int32)]\n roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)\n _ = report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',\n objcolor='light', thresh_channel='s', thresh=120)\n assert int(outputs.observations[\"default\"][\"marker_area\"][\"value\"]) == 2601\n\n\[email protected](\"marker,channel\", [\n [\"none\", \"s\"], # Invalid marker\n [\"detect\", None] # Invalid channel\n ])\ndef test_report_size_marker_bad_inputs(marker, channel, test_data):\n \"\"\"Test for PlantCV.\"\"\"\n # Read in test data\n img = cv2.imread(test_data.small_rgb_img)\n # ROI contour\n roi_contour = [np.array([[[25, 75]], [[25, 
125]], [[75, 125]], [[75, 75]]], dtype=np.int32)]\n roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)\n with pytest.raises(RuntimeError):\n _ = report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker=marker,\n objcolor='light', thresh_channel=channel, thresh=120)\n" ]
[ [ "numpy.nanmax", "numpy.nanmin", "numpy.shape" ], [ "numpy.zeros" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jmagine/rf-selection
[ "ba9dcb5ca550916873ce68baa71da983f2dd4be5" ]
[ "sim/pid.py" ]
[ "'''*-----------------------------------------------------------------------*---\n Author: Jason Ma\n Date : Oct 18 2018\n TODO\n\n File Name : pid.py\n Description: TODO\n---*-----------------------------------------------------------------------*'''\n\nimport time\nimport matplotlib.animation as anim\nimport matplotlib.pyplot as plt\nimport threading\nimport math\nimport numpy as np\n\n'''[Global Vars]------------------------------------------------------------'''\nORIGIN_X = 0.0\nORIGIN_Y = 0.0\nC_R = 10\n\n#plt.autoscale(enable=True, axis=\"both\")\n\nfig = plt.figure()\nax = fig.add_subplot(2,1,1)\nax2 = fig.add_subplot(2,1,2)\nscat = ax.scatter([], [])\nax.set_xlim([-1 * C_R - 1, C_R + 1])\nax.set_ylim([-1 * C_R - 1, C_R + 1])\nscat.set_facecolors(['g', 'r'])\nscat.set_sizes([31, 31])\nprev_time = time.time()\nvel = np.array([0.0, 0.0])\n\nerrors = [0, 1]\nerror_plot, = ax2.plot([i for i in range(len(errors))], errors, color=\"g\")\n\nclass drone(): \n def __init__(self, p, vel):\n self.pos = np.array(p)\n self.v = np.array(vel)\n self.prev_error = np.zeros((2))\n self.integral = np.zeros((2))\n self.dt = 0.01\n self.kp = 0.8 * 2.0\n self.ki = 0\n self.kd = 0\n \n #self.ki = 2.0 * self.kp / 2.0\n #self.kd = self.kp * 2.0 / 8.0\n \n #self.ki = 2 * self.kp / 1.0 \n #self.kd = self.kp * 0.01 / 8\n \n def callback(self):\n pass\n\n def run(self, ref_pos, vx=None, vy=None):\n self.pos += self.v\n\n #print(self.integral)\n\n if vx:\n self.v[0] = vx\n \n if vy:\n self.v[1] = vy\n\n #compute PID output\n error = ref_pos - self.pos\n \n self.integral = self.integral * 0.99 + error * self.dt\n '''\n for i in range(2):\n if self.integral[i] > 1:\n self.integral[i] = 1\n elif self.integral[i] < -1:\n self.integral[i] = -1\n '''\n #print(self.integral)\n\n derivative = (error - self.prev_error) / self.dt\n \n for i in range(2):\n if derivative[i] > 0.1:\n derivative[i] = 0.1\n elif derivative[i] < -0.1:\n derivative[i] = -0.1\n self.prev_error = error\n pid_output = (self.kp * error) + (self.ki * self.integral) + (self.kd * derivative)\n print(self.pos, pid_output, self.kp * error, self.ki * self.integral, self.kd * derivative)\n #print(error[0])\n #errors.append(error[0])\n\n return pid_output\n\nd = drone([ORIGIN_X + C_R, ORIGIN_Y], [0.0, 0.0])\n\ndef dist(x1, y1, x2, y2):\n return ((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))**(1/2)\n\ndef dist(p1, p2):\n assert len(p1) == len(p2)\n dims = len(p1)\n \n total = 0\n for i in range(dims):\n total += (p2[i] - p1[i]) * (p2[i] - p1[i])\n\n return (total)**(1/2)\n\n#def pid_angle(x, y, ref_x, ref_y, d):\n# return math.atan(-1 * (C_R - dist(x, y, ORIGIN_X, ORIGIN_Y)) / d) + math.atan((y - ORIGIN_Y) / (x - ORIGIN_X)) + math.pi / 2\n\ndef ref(t):\n return np.array([ORIGIN_X + C_R * math.cos(t), ORIGIN_Y + C_R * math.sin(t)])\n\ndef update(i):\n global prev_time, vel\n #update reference point position\n curr_time = time.time()\n ref_point = ref(i / 25.0)\n #ref_x = ref_point[0]\n #ref_y = ref_point[1]\n out = d.run(ref_point)\n \n for i in range(2):\n if out[i] > 10 or out[i] < -10:\n out = out * 10 / out[i]\n\n #print(d.pos, out)\n\n d.v = out\n\n while time.time() - prev_time < d.dt:\n time.sleep(d.dt / 10)\n \n prev_time = time.time()\n #print the desired angle of drone\n #pid_ang = pid_angle(d.x, d.y, ref_point[0], ref_point[1], 0.05)\n #print(math.cos(pid_ang), math.sin(pid_ang))\n #d.run(math.cos(pid_ang), math.sin(pid_ang))\n \n scat.set_offsets([[ref_point[0], ref_point[1]], [d.pos[0], d.pos[1]]])\n\n errors.append(dist(ref_point, d.pos))\n 
error_plot.set_xdata([i for i in range(len(errors))])\n error_plot.set_ydata(errors)\n ax2.set_xlim([-1, len(errors) + 1])\n ax2.set_ylim([1, min(errors)])\n\ndef main():\n d = drone(ORIGIN_X + C_R, ORIGIN_Y, 1)\n \n\nif __name__ == '__main__':\n #main()\n a = anim.FuncAnimation(fig, update, range(1000), interval=1, blit=False, repeat=False)\n plt.show()\n\n\n" ]
[ [ "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sfillwo/stog
[ "b965c47c17472eea11ab63aab9aa738af7875f06", "b02e2dbe8989078ccdc3df611d8b08b63d28fcae" ]
[ "stog/modules/initializers.py", "stog/data/fields/label_field.py" ]
[ "\"\"\"\nAdopted from AllenNLP:\n https://github.com/allenai/allennlp/blob/v0.6.1/allennlp/nn/initializers.py\n\nAn initializer is just a PyTorch function.\nHere we implement a proxy class that allows us\nto register them and supply any additional function arguments\n(for example, the ``mean`` and ``std`` of a normal initializer)\nas named arguments to the constructor.\nThe available initialization functions are\n* `\"normal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.normal_>`_\n* `\"uniform\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.uniform_>`_\n* `\"constant\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.constant_>`_\n* `\"eye\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.eye_>`_\n* `\"dirac\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.dirac_>`_\n* `\"xavier_uniform\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_uniform_>`_\n* `\"xavier_normal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_normal_>`_\n* `\"kaiming_uniform\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_uniform_>`_\n* `\"kaiming_normal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_normal_>`_\n* `\"orthogonal\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.orthogonal_>`_\n* `\"sparse\" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.sparse_>`_\n* :func:`\"block_orthogonal\" <block_orthogonal>`\n* :func:`\"uniform_unit_scaling\" <uniform_unit_scaling>`\n\"\"\"\nimport re\nimport math\nfrom typing import Callable, List, Tuple, Type, Iterable\nimport itertools\n\nimport torch\nimport torch.nn.init\n\nfrom stog.utils import logging\nfrom stog.utils.checks import ConfigurationError\n\nlogger = logging.init_logger() # pylint: disable=invalid-name\n\n\ndef uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = \"linear\"):\n \"\"\"\n An initaliser which preserves output variance for approximately gaussian\n distributed inputs. This boils down to initialising layers using a uniform\n distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where\n ``dim[0]`` is equal to the input dimension of the parameter and the ``scale``\n is a constant scaling factor which depends on the non-linearity used.\n See `Random Walk Initialisation for Training Very Deep Feedforward Networks\n <https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_\n for more information.\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n The tensor to initialise.\n nonlinearity : ``str``, optional (default = \"linear\")\n The non-linearity which is performed after the projection that this\n tensor is involved in. This must be the name of a function contained\n in the ``torch.nn.functional`` package.\n Returns\n -------\n The initialised tensor.\n \"\"\"\n size = 1.\n # Estimate the input size. 
This won't work perfectly,\n # but it covers almost all use cases where this initialiser\n # would be expected to be useful, i.e in large linear and\n # convolutional layers, as the last dimension will almost\n # always be the output size.\n for dimension in list(tensor.size())[:-1]:\n size *= dimension\n\n activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)\n max_value = math.sqrt(3 / size) * activation_scaling\n\n return tensor.uniform_(-max_value, max_value)\n\n\ndef block_orthogonal(tensor: torch.Tensor,\n split_sizes: List[int],\n gain: float = 1.0) -> None:\n \"\"\"\n An initializer which allows initializing model parameters in \"blocks\". This is helpful\n in the case of recurrent models which use multiple gates applied to linear projections,\n which can be computed efficiently if they are concatenated together. However, they are\n separate parameters which should be initialized independently.\n Parameters\n ----------\n tensor : ``torch.Tensor``, required.\n A tensor to initialize.\n split_sizes : List[int], required.\n A list of length ``tensor.ndim()`` specifying the size of the\n blocks along that particular dimension. E.g. ``[10, 20]`` would\n result in the tensor being split into chunks of size 10 along the\n first dimension and 20 along the second.\n gain : float, optional (default = 1.0)\n The gain (scaling) applied to the orthogonal initialization.\n \"\"\"\n data = tensor.data\n sizes = list(tensor.size())\n if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):\n raise ConfigurationError(\"tensor dimensions must be divisible by their respective \"\n \"split_sizes. Found size: {} and split_sizes: {}\".format(sizes, split_sizes))\n indexes = [list(range(0, max_size, split))\n for max_size, split in zip(sizes, split_sizes)]\n # Iterate over all possible blocks within the tensor.\n for block_start_indices in itertools.product(*indexes):\n # A list of tuples containing the index to start at for this block\n # and the appropriate step size (i.e split_size[i] for dimension i).\n index_and_step_tuples = zip(block_start_indices, split_sizes)\n # This is a tuple of slices corresponding to:\n # tensor[index: index + step_size, ...]. This is\n # required because we could have an arbitrary number\n # of dimensions. 
The actual slices we need are the\n # start_index: start_index + step for each dimension in the tensor.\n block_slice = tuple([slice(start_index, start_index + step)\n for start_index, step in index_and_step_tuples])\n data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)\n\n\ndef zero(tensor: torch.Tensor) -> None:\n return tensor.data.zero_()\n\ndef lstm_hidden_bias(tensor: torch.Tensor) -> None:\n \"\"\"\n Initialize the biases of the forget gate to 1, and all other gates to 0,\n following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures\n \"\"\"\n # gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)\n tensor.data.zero_()\n hidden_size = tensor.shape[0] // 4\n tensor.data[hidden_size:(2 * hidden_size)] = 1.0\n", "from typing import Dict, Union, Set\nimport logging\n\nfrom overrides import overrides\nimport torch\n\nfrom stog.data.fields.field import Field\nfrom stog.data.vocabulary import Vocabulary\nfrom stog.utils.checks import ConfigurationError\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\nclass LabelField(Field[torch.Tensor]):\n \"\"\"\n A ``LabelField`` is a categorical label of some kind, where the labels are either strings of\n text or 0-indexed integers (if you wish to skip indexing by passing skip_indexing=True).\n If the labels need indexing, we will use a :class:`Vocabulary` to convert the string labels\n into integers.\n\n This field will get converted into an integer index representing the class label.\n\n Parameters\n ----------\n label : ``Union[str, int]``\n label_namespace : ``str``, optional (default=\"labels\")\n The namespace to use for converting label strings into integers. We map label strings to\n integers for you (e.g., \"entailment\" and \"contradiction\" get converted to 0, 1, ...),\n and this namespace tells the ``Vocabulary`` object which mapping from strings to integers\n to use (so \"entailment\" as a label doesn't get the same integer id as \"entailment\" as a\n word). If you have multiple different label fields in your data, you should make sure you\n use different namespaces for each one, always using the suffix \"labels\" (e.g.,\n \"passage_labels\" and \"question_labels\").\n skip_indexing : ``bool``, optional (default=False)\n If your labels are 0-indexed integers, you can pass in this flag, and we'll skip the indexing\n step. If this is ``False`` and your labels are not strings, this throws a ``ConfigurationError``.\n \"\"\"\n # Most often, you probably don't want to have OOV/PAD tokens with a LabelField, so we warn you\n # about it when you pick a namespace that will getting these tokens by default. It is\n # possible, however, that you _do_ actually want OOV/PAD tokens with this Field. This class\n # variable is used to make sure that we only log a single warning for this per namespace, and\n # not every time you create one of these Field objects.\n _already_warned_namespaces: Set[str] = set()\n\n def __init__(self,\n label: Union[str, int],\n label_namespace: str = 'labels',\n skip_indexing: bool = False) -> None:\n self.label = label\n self._label_namespace = label_namespace\n self._label_id = None\n self._maybe_warn_for_namespace(label_namespace)\n\n if skip_indexing:\n if not isinstance(label, int):\n raise ConfigurationError(\"In order to skip indexing, your labels must be integers. 
\"\n \"Found label = {}\".format(label))\n else:\n self._label_id = label\n else:\n if not isinstance(label, str):\n raise ConfigurationError(\"LabelFields must be passed a string label if skip_indexing=False. \"\n \"Found label: {} with type: {}.\".format(label, type(label)))\n\n def _maybe_warn_for_namespace(self, label_namespace: str) -> None:\n if not (self._label_namespace.endswith(\"labels\") or self._label_namespace.endswith(\"tags\")):\n if label_namespace not in self._already_warned_namespaces:\n logger.warning(\"Your label namespace was '%s'. We recommend you use a namespace \"\n \"ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by \"\n \"default to your vocabulary. See documentation for \"\n \"`non_padded_namespaces` parameter in Vocabulary.\",\n self._label_namespace)\n self._already_warned_namespaces.add(label_namespace)\n\n @overrides\n def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):\n if self._label_id is None:\n counter[self._label_namespace][self.label] += 1 # type: ignore\n\n @overrides\n def index(self, vocab: Vocabulary):\n if self._label_id is None:\n self._label_id = vocab.get_token_index(self.label, self._label_namespace) # type: ignore\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]: # pylint: disable=no-self-use\n return {}\n\n @overrides\n def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:\n # pylint: disable=unused-argument,not-callable\n tensor = torch.tensor(self._label_id, dtype=torch.long)\n return tensor\n\n @overrides\n def empty_field(self):\n return LabelField(-1, self._label_namespace, skip_indexing=True)\n\n def __str__(self) -> str:\n return f\"LabelField with label: {self.label} in namespace: '{self._label_namespace}'.'\"\n" ]
[ [ "torch.nn.init.calculate_gain" ], [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mesnardo/petibm-decoupledibpm
[ "675615a882cc8418b15a34e1100ccfb421f1d9d1", "675615a882cc8418b15a34e1100ccfb421f1d9d1", "675615a882cc8418b15a34e1100ccfb421f1d9d1" ]
[ "runs/cylinder2dRe40/scripts/plot_pressure_coefficients.py", "runs/oscillatingcylinderRe100/algo3/scripts/plot_vorticity.py", "runs/oscillatingsphere/scripts/plot_pressure.py" ]
[ "\"\"\"Plot the surface pressure coefficient at final time step.\"\"\"\n\nfrom matplotlib import pyplot\nimport numpy\nimport pathlib\n\nimport petibmpy\n\nimport rodney\n\n\ndef get_pressure(simudir, timestep):\n name = 'p' # name of the field variable to load\n datadir = simudir / 'output'\n # Load the gridlines from file.\n filepath = datadir / 'grid.h5'\n x, y = petibmpy.read_grid_hdf5(filepath, name)\n # Load the field from file.\n filepath = datadir / f'{timestep:0>7}.h5'\n p = petibmpy.read_field_hdf5(filepath, name)\n return (x, y), p\n\n\ndef compute_surface_pressure_coefficient(p, x, y):\n # Define circle outside support region of delta function.\n N = 500\n dx = 1.5 / 90 # grid-spacing size in the uniform region\n R = 0.5 + 3 * dx # radius 3 cells away from real boundary\n theta = numpy.linspace(0.0, 2 * numpy.pi, num=N + 1)[:-1]\n xc, yc = 0.0, 0.0\n xb_ext, yb_ext = xc + R * numpy.cos(theta), yc + R * numpy.sin(theta)\n\n # Interpolate the field on extended boundary.\n pb = numpy.empty_like(xb_ext)\n for i, (xbi, ybi) in enumerate(zip(xb_ext, yb_ext)):\n pi = petibmpy.linear_interpolation(p, y, ybi)\n pb[i] = petibmpy.linear_interpolation(pi, x, xbi)\n\n # Compute the pressure coefficient.\n rho = 1.0 # fluid density\n U_inf = 1.0 # freestream speed\n p_inf = 0.0 # far-away pressure\n cp = (pb - p_inf) / (0.5 * rho * U_inf**2)\n return theta, cp\n\n\ndef split_lower_upper(theta, cp):\n mask = numpy.where((theta >= numpy.pi) & (theta < 2 * numpy.pi))[0]\n theta_lower = theta[mask] % numpy.pi\n cp_lower = cp[mask]\n mask = numpy.where((theta >= 0.0) & (theta < numpy.pi))[0]\n theta_upper = numpy.flip(numpy.pi - theta[mask])\n cp_upper = numpy.flip(cp[mask])\n return (dict(theta=theta_lower, cp=cp_lower),\n dict(theta=theta_upper, cp=cp_upper))\n\n\nargs = rodney.parse_command_line()\nmaindir = pathlib.Path(__file__).absolute().parents[1]\ntimestep = 5000 # final time-step index\n\nlabel1 = r'500 markers ($\\Delta s \\approx 0.38 \\Delta x$)'\nsimudir1 = maindir / '500_markers'\ngrid, p = get_pressure(simudir1, timestep)\ntheta, cp = compute_surface_pressure_coefficient(p, *grid)\nlower1, upper1 = split_lower_upper(theta, cp)\n\nlabel2 = r'189 markers ($\\Delta s \\approx \\Delta x$)'\nsimudir2 = maindir / '189_markers'\ngrid, p = get_pressure(simudir2, timestep)\ntheta, cp = compute_surface_pressure_coefficient(p, *grid)\nlower2, upper2 = split_lower_upper(theta, cp)\n\n# Plot the distribution of the surface pressure coefficient.\npyplot.rc('font', family='serif', size=14)\nfig, (ax1, ax2) = pyplot.subplots(ncols=2, figsize=(10.0, 4.0))\nax1.set_title(label1, fontsize=14)\nax1.set_xlabel(r'$\\theta$')\nax1.set_ylabel('$C_p$')\nax1.plot(numpy.degrees(lower1['theta']), lower1['cp'],\n label='lower surface')\nax1.plot(numpy.degrees(upper1['theta']), upper1['cp'],\n label='upper surface', linestyle='--')\n\nax2.set_title(label2, fontsize=14)\nax2.set_xlabel(r'$\\theta$')\nax2.set_ylabel('$C_p$')\nax2.plot(numpy.degrees(lower2['theta']), lower2['cp'],\n label='lower surface')\nax2.plot(numpy.degrees(upper2['theta']), upper2['cp'],\n label='upper surface', linestyle='--')\n\nif args.extra_data:\n # Load digitized values from Li et al. (2016).\n theta_li, cp_li = rodney.lietal2016_load_cp(40)\n ax1.scatter(theta_li, cp_li, label='Li et al. (2016)',\n c='black', marker='s', s=10)\n ax2.scatter(theta_li, cp_li, label='Li et al. 
(2016)',\n c='black', marker='s', s=10)\n\nfor ax in (ax1, ax2):\n ax.set_xlim(0.0, 180.0)\n ax.set_ylim(-1.5, 1.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\nax2.legend(frameon=False)\nfig.tight_layout()\n\nif args.save_figures:\n # Save the figure.\n figdir = maindir / 'figures'\n figdir.mkdir(parents=True, exist_ok=True)\n filepath = figdir / f'cp_{timestep:0>7}.png'\n fig.savefig(filepath, dpi=300, bbox_inches='tight')\n\nif args.show_figures:\n pyplot.show()\n", "\"\"\"Plot the vorticity at phase 0 and 288 degrees.\"\"\"\n\nfrom matplotlib import pyplot\nimport numpy\nimport pathlib\n\nimport petibmpy\n\n\nshow_figure = True # if True, display the figure(s)\nsimudir = pathlib.Path(__file__).absolute().parents[1]\ndatadir = simudir / 'solution'\n\n# Load the grid from file.\nname = 'wz' # name of the vorticity variable\nfilepath = simudir / 'grid.h5'\nx, y = petibmpy.read_grid_hdf5(filepath, name)\n\n# Load the vorticity at phase angles 0 deg during the last period.\ntimestep = 7500\nfilepath = datadir / '{:0>7}.h5'.format(timestep)\nwz1 = petibmpy.read_field_hdf5(filepath, name)\n# Load the body coordinates at the same time.\nfilepath = datadir / 'circle_{:0>7}.2D'.format(timestep)\nbody1 = petibmpy.read_body(filepath)\n\n# Load the vorticity at phase angles 288 deg during the last period.\ntimestep = 9500\nfilepath = datadir / '{:0>7}.h5'.format(timestep)\nwz2 = petibmpy.read_field_hdf5(filepath, name)\n# Load the body coordinates at the same time.\nfilepath = datadir / 'circle_{:0>7}.2D'.format(timestep)\nbody2 = petibmpy.read_body(filepath)\n\n# Plot the contours of the vorticity at the two time steps.\npyplot.rc('font', family='serif', size=16)\nfig, (ax1, ax2) = pyplot.subplots(ncols=2, figsize=(8.0, 4.0))\nlevels = numpy.linspace(-20.0, 20.0, num=30)\nax1.set_xlabel('x')\nax1.set_ylabel('y')\nax1.contour(x, y, wz1, levels=levels, colors='black', linewidths=0.75)\nax1.plot(*body1, color='red')\nax1.axis('scaled', adjustable='box')\nax1.set_xlim(-1.0, 1.5)\nax1.set_ylim(-1.2, 1.2)\nax2.set_xlabel('x')\nax2.set_ylabel('y')\nax2.contour(x, y, wz2, levels=levels, colors='black', linewidths=0.75)\nax2.plot(*body2, color='red')\nax2.axis('scaled', adjustable='box')\nax2.set_xlim(-1.0, 1.5)\nax2.set_ylim(-1.2, 1.2)\nfig.tight_layout()\n\n# Save the figure.\nfigdir = simudir / 'figures'\nfigdir.mkdir(parents=True, exist_ok=True)\nfilepath = figdir / 'vorticity.png'\nfig.savefig(filepath, dpi=300)\n\nif show_figure:\n pyplot.show()\n", "\"\"\"Plot the contours of the pressure at the middle symmetric plane.\"\"\"\n\nfrom matplotlib import pyplot\nimport numpy\nimport pathlib\n\nimport petibmpy\n\nfrom kinematics import D, Am, Um, rho, f, dt\n\n\nsimudir = pathlib.Path(__file__).absolute().parents[1]\nname = 'p' # name of the pressure variable in HDF5 files\n\n# Load the grid from file.\nfilepath = simudir / 'grid.h5'\nx, y, z = petibmpy.read_grid_hdf5(filepath, name)\n\ntimesteps = [2000, 2200, 2400]\nlabels = ['$0^o$', '$144^o$', '$288^o$']\n\n# Initialize the figure and axes.\npyplot.rc('font', family='serif', size=14)\nfig, ax = pyplot.subplots(ncols=3, figsize=(10.0, 4.0))\n# Define the level of the contours to plot.\nlevels = numpy.linspace(-2 / rho / Um**2, 2 / rho / Um**2, num=30)\n\nfor i, (label, timestep) in enumerate(zip(labels, timesteps)):\n # Load the pressure field from file.\n filepath = simudir / 'solution' / '{:0>7}.h5'.format(timestep)\n p = petibmpy.read_field_hdf5(filepath, name)\n p -= p.mean() # set the mean value to 0.\n # 
Interpolate the field along the z-direction at z=0.\n p = petibmpy.linear_interpolation(p, z, 0.0)\n # Load the boundary coordinates from file.\n filepath = simudir / 'solution' / 'sphere_{:0>7}.3D'.format(timestep)\n xb, yb, zb = petibmpy.read_body(filepath)\n # Generate the circle at present time step.\n t = timestep * dt\n theta = numpy.linspace(0.0, 2 * numpy.pi, num=51)\n xc, yc = D / 2 * numpy.cos(theta), D / 2 * numpy.sin(theta)\n xc += Am * numpy.sin(2 * numpy.pi * f * t)\n # Plot the contours of the pressure and the boundary.\n ax[i].set_title(label)\n ax[i].set_xlabel('$x$')\n ax[i].set_ylabel('$y$')\n ax[i].contour(x, y, p, levels=levels, colors='black', zorder=1)\n ax[i].scatter(xb, yb, c='C0', s=0.1, zorder=3)\n ax[i].fill(xc, yc, color='grey', zorder=2)\n ax[i].axis('scaled', adjustable='box')\n ax[i].set_xlim(-2.0, 2.0)\n ax[i].set_ylim(-2.0, 2.0)\n\nfig.tight_layout()\n\n# Save the figure.\nfigdir = simudir / 'figures'\nfigdir.mkdir(parents=True, exist_ok=True)\nfilepath = figdir / 'pressure.png'\nfig.savefig(filepath, dpi=300, bbox_inches='tight')\n\npyplot.show()\n" ]
[ [ "numpy.linspace", "numpy.empty_like", "numpy.degrees", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.sin", "matplotlib.pyplot.show", "numpy.flip", "numpy.where" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "numpy.linspace" ], [ "numpy.linspace", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.sin", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kristianeschenburg/parcellearning
[ "93811f7d11c1c5583d8f541c7629dbbaa1785304" ]
[ "parcellearning/fn/ops.py" ]
[ "import torch\nimport dgl\n\n\ndef cosine(nodes):\n \"\"\"\n Compute the cosine distance between all pairs of nodes adjacent on a source node.\n \"\"\"\n\n # ```m``` is a matrix of N nodes x E edges x F features\n # representing the messages incident on source nodes with E edges\n m = nodes.mailbox['m']\n\n N = m.shape[1]\n N = (N*(N-1))/2\n\n if m.ndim > 3:\n m = m.transpose(1,2)\n e = torch.matmul(m, m.transpose(2, 3))\n else:\n e = torch.matmul(m, m.transpose(1,2))\n\n e = torch.triu(e, diagonal=1).sum(-1).sum(-1)\n e = e/N\n\n return {'cos': e}\n" ]
[ [ "torch.triu" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jin530/pytorch-widedeep
[ "4ff1008ba6c62e64383267c924cfd3cf3cbc609c", "4ff1008ba6c62e64383267c924cfd3cf3cbc609c", "4ff1008ba6c62e64383267c924cfd3cf3cbc609c" ]
[ "tests/test_data_utils/test_du_deep_image.py", "tests/test_warm_up/test_warm_up_routines.py", "pytorch_widedeep/models/_warmup.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\n\nfrom pytorch_widedeep.preprocessing import ImagePreprocessor\n\n\nfull_path = os.path.realpath(__file__)\npath = os.path.split(full_path)[0]\ndf = pd.DataFrame({\"galaxies\": [\"galaxy1.png\", \"galaxy2.png\"]})\nimg_col = \"galaxies\"\nimd_dir = os.path.join(path, \"images\")\nprocessor = ImagePreprocessor(img_col=img_col, img_path=imd_dir)\nX_imgs = processor.fit_transform(df)\n\n\n###############################################################################\n# There is not much to test here, since I only resize.\n###############################################################################\ndef test_sizes():\n img_width = X_imgs.shape[1]\n img_height = X_imgs.shape[2]\n assert np.all((img_width == processor.width, img_height == processor.height))\n", "import pytest\nimport numpy as np\nimport string\nimport torch\nimport torch.nn.functional as F\n\nfrom torch import nn\nfrom sklearn.utils import Bunch\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom pytorch_widedeep.models import Wide, DeepDense\nfrom pytorch_widedeep.models.deep_image import conv_layer\nfrom pytorch_widedeep.metrics import BinaryAccuracy\nfrom pytorch_widedeep.models._warmup import WarmUp\n\nuse_cuda = torch.cuda.is_available()\n\n\n# Define a series of simple models to quickly test the WarmUp class\nclass TestDeepText(nn.Module):\n def __init__(self):\n super(TestDeepText, self).__init__()\n self.word_embed = nn.Embedding(5, 16, padding_idx=0)\n self.rnn = nn.LSTM(16, 8, batch_first=True)\n self.linear = nn.Linear(8, 1)\n\n def forward(self, X):\n embed = self.word_embed(X.long())\n o, (h, c) = self.rnn(embed)\n return self.linear(h).view(-1, 1)\n\n\nclass TestDeepImage(nn.Module):\n def __init__(self):\n super(TestDeepImage, self).__init__()\n\n self.conv_block = nn.Sequential(\n conv_layer(3, 64, 3),\n conv_layer(64, 128, 1, maxpool=False, adaptiveavgpool=True),\n )\n self.linear = nn.Linear(128, 1)\n\n def forward(self, X):\n x = self.conv_block(X)\n x = x.view(x.size(0), -1)\n return self.linear(x)\n\n\n#  Define a simple WideDeep Dataset\nclass WDset(Dataset):\n def __init__(self, X_wide, X_deep, X_text, X_img, target):\n\n self.X_wide = X_wide\n self.X_deep = X_deep\n self.X_text = X_text\n self.X_img = X_img\n self.Y = target\n\n def __getitem__(self, idx: int):\n\n X = Bunch(wide=self.X_wide[idx])\n X.deepdense = self.X_deep[idx]\n X.deeptext = self.X_text[idx]\n X.deepimage = self.X_img[idx]\n y = self.Y[idx]\n return X, y\n\n def __len__(self):\n return len(self.X_deep)\n\n\n# Remember that the WarmUp class will be instantiated inside the WideDeep and\n# will take, among others, the activation_fn and the loss_fn of that class as\n# parameters. 
Therefore, we define equivalent classes to replicate the\n# scenario\ndef activ_fn(inp):\n return torch.sigmoid(inp)\n\n\ndef loss_fn(y_pred, y_true):\n return F.binary_cross_entropy(y_pred, y_true.view(-1, 1))\n\n\n#  Define the data components:\n\n# target\ntarget = torch.empty(100, 1).random_(0, 2)\n\n# wide\nX_wide = torch.empty(100, 10).random_(0, 2)\n\n# deep\ncolnames = list(string.ascii_lowercase)[:10]\nembed_cols = [np.random.choice(np.arange(5), 100) for _ in range(5)]\ncont_cols = [np.random.rand(100) for _ in range(5)]\nembed_input = [(u, i, j) for u, i, j in zip(colnames[:5], [5] * 5, [16] * 5)]\ndeep_column_idx = {k: v for v, k in enumerate(colnames[:10])}\ncontinuous_cols = colnames[-5:]\nX_deep = torch.from_numpy(np.vstack(embed_cols + cont_cols).transpose())\n\n# text\nX_text = torch.cat((torch.zeros([100, 1]), torch.empty(100, 4).random_(1, 4)), axis=1)\n\n# image\nX_image = torch.rand(100, 3, 28, 28)\n\n# Define the model components\n\n# wide\nwide = Wide(10, 1)\nif use_cuda:\n wide.cuda()\n\n# deep\ndeepdense = DeepDense(\n hidden_layers=[16, 8],\n dropout=[0.5, 0.2],\n deep_column_idx=deep_column_idx,\n embed_input=embed_input,\n continuous_cols=continuous_cols,\n)\ndeepdense = nn.Sequential(deepdense, nn.Linear(8, 1))\nif use_cuda:\n deepdense.cuda()\n\n# text\ndeeptext = TestDeepText()\nif use_cuda:\n deeptext.cuda()\n\n# image\ndeepimage = TestDeepImage()\nif use_cuda:\n deepimage.cuda()\n\n# Define the loader\nwdset = WDset(X_wide, X_deep, X_text, X_image, target)\nwdloader = DataLoader(wdset, batch_size=10, shuffle=True)\n\n# Instantiate the WarmUp class\nwarmer = WarmUp(activ_fn, loss_fn, BinaryAccuracy(), \"binary\", False)\n\n# List the layers for the warm_gradual method\ntext_layers = [c for c in list(deeptext.children())[1:]][::-1]\nimage_layers = [c for c in list(deepimage.children())][::-1]\n\n\n###############################################################################\n#  Simply test that warm_all runs\n###############################################################################\[email protected](\n \"model, modelname, loader, n_epochs, max_lr\",\n [\n (wide, \"wide\", wdloader, 1, 0.01),\n (deepdense, \"deepdense\", wdloader, 1, 0.01),\n (deeptext, \"deeptext\", wdloader, 1, 0.01),\n (deepimage, \"deepimage\", wdloader, 1, 0.01),\n ],\n)\ndef test_warm_all(model, modelname, loader, n_epochs, max_lr):\n has_run = True\n try:\n warmer.warm_all(model, modelname, loader, n_epochs, max_lr)\n except:\n has_run = False\n assert has_run\n\n\n###############################################################################\n#  Simply test that warm_gradual runs\n###############################################################################\[email protected](\n \"model, modelname, loader, max_lr, layers, routine\",\n [\n (deeptext, \"deeptext\", wdloader, 0.01, text_layers, \"felbo\"),\n (deeptext, \"deeptext\", wdloader, 0.01, text_layers, \"howard\"),\n (deepimage, \"deepimage\", wdloader, 0.01, image_layers, \"felbo\"),\n (deepimage, \"deepimage\", wdloader, 0.01, image_layers, \"howard\"),\n ],\n)\ndef test_warm_gradual(model, modelname, loader, max_lr, layers, routine):\n has_run = True\n try:\n warmer.warm_gradual(model, modelname, loader, max_lr, layers, routine)\n except:\n has_run = False\n assert has_run\n", "import numpy as np\nimport torch\n\nfrom ..metrics import Metric, MultipleMetrics\nfrom ..wdtypes import *\n\nfrom tqdm import trange\nfrom torch import nn\n\nuse_cuda = torch.cuda.is_available()\n\n\nclass WarmUp(object):\n r\"\"\"\n 'Warm 
up' methods to be applied to the individual models before the joined\n training. There are 3 warm up routines available:\n 1) Warm up all trainable layers at once\n 2) Gradual warm up inspired by the work of Felbo et al., 2017\n 3) Gradual warm up inspired by the work of Howard & Ruder 2018\n\n The structure of the code in this class is designed to be instantiated within\n the class WideDeep. This is not ideal, but represents a compromise towards\n implementing a 'warm up' functionality for the current overall structure of\n the package without having to re-structure most of the existing code.\n\n Parameters\n ----------\n activation_fn: Any\n any function with the same strucure as '_activation_fn' in the main class\n WideDeep at pytorch_widedeep.models.wide_deep\n loss_fn: Any\n any function with the same strucure as '_loss_fn' in the main class WideDeep\n at pytorch_widedeep.models.wide_deep\n metric: Metric\n object of class Metric (see Metric in pytorch_widedeep.metrics)\n method: str\n one of 'binary', 'regression' or 'multiclass'\n verbose: Boolean\n \"\"\"\n\n def __init__(\n self,\n activation_fn: Any,\n loss_fn: Any,\n metric: Union[Metric, MultipleMetrics],\n method: str,\n verbose: int,\n ):\n super(WarmUp, self).__init__()\n self.activation_fn = activation_fn\n self.loss_fn = loss_fn\n self.metric = metric\n self.method = method\n self.verbose = verbose\n\n def warm_all(\n self,\n model: nn.Module,\n model_name: str,\n loader: DataLoader,\n n_epochs: int,\n max_lr: float,\n ):\n r\"\"\"\n Warm up all trainable layers in a model using a one cyclic learning rate\n with a triangular pattern. This is refereed as Slanted Triangular learing\n rate in Jeremy Howard & Sebastian Ruder 2018\n (https://arxiv.org/abs/1801.06146). The cycle is described as follows:\n 1-The learning rate will gradually increase for 10% of the training steps\n from max_lr/10 to max_lr.\n 2-It will then gradually decrease to max_lr/10 for the remaining 90% of the\n steps.\n The optimizer used in the process is AdamW\n\n Parameters:\n ----------\n model: nn.Module\n nn.Module object containing one the WideDeep model components (wide,\n deepdense, deeptext or deepimage)\n model_name: Str\n string indicating the model name to access the corresponding parameters.\n One of 'wide', 'deepdense', 'deeptext' or 'deepimage'\n loader: DataLoader\n Pytorch DataLoader containing the data used to warm up\n n_epochs: Int\n number of epochs used to warm up the model\n max_lr: Float\n maximum learning rate value during the triangular cycle.\n \"\"\"\n if self.verbose:\n print(\"Warming up {} for {} epochs\".format(model_name, n_epochs))\n model.train()\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=max_lr / 10.0) # type: ignore\n step_size_up, step_size_down = self._steps_up_down(len(loader), n_epochs)\n scheduler = torch.optim.lr_scheduler.CyclicLR(\n optimizer,\n base_lr=max_lr / 10.0,\n max_lr=max_lr,\n step_size_up=step_size_up,\n step_size_down=step_size_down,\n cycle_momentum=False,\n )\n\n self._warm(model, model_name, loader, optimizer, scheduler, n_epochs=n_epochs)\n\n def warm_gradual(\n self,\n model: nn.Module,\n model_name: str,\n loader: DataLoader,\n last_layer_max_lr: float,\n layers: List[nn.Module],\n routine: str,\n ):\n r\"\"\"\n Warm up certain layers within the model following a gradual warm up routine.\n The approaches implemented in this method are inspired by the work of Felbo\n et al., 2017 in their DeepEmoji paper (https://arxiv.org/abs/1708.00524) and\n Howard & Sebastian Ruder 2018 ULMFit 
paper\n (https://arxiv.org/abs/1801.06146).\n\n A one cycle triangular learning rate is used. In both Felbo's and Howard's\n routines a gradually decreasing learning rate is used as we go deeper into\n the network. The 'closest' layer to the output neuron(s) will use a maximum\n learning rate of 'last_layer_max_lr'. The learning rate will then decrease by a factor\n of 2.5 per layer\n\n 1) The 'Felbo' routine:\n warm up the first layer in 'layers' for one epoch. Then warm up the next\n layer in 'layers' for one epoch freezing the already warmed up layer(s).\n Repeat untill all individual layers are warmed. Then warm one last epoch\n with all warmed layers trainable\n 2) The 'Howard' routine:\n warm up the first layer in 'layers' for one epoch. Then warm the next layer\n in the model for one epoch while keeping the already warmed up layer(s)\n trainable. Repeat.\n\n Parameters:\n ----------\n model: nn.Module\n nn.Module object containing one the WideDeep model components (wide,\n deepdense, deeptext or deepimage)\n model_name: Str\n string indicating the model name to access the corresponding parameters.\n One of 'wide', 'deepdense', 'deeptext' or 'deepimage'\n loader: DataLoader\n Pytorch DataLoader containing the data to warm up with.\n last_layer_max_lr: Float\n maximum learning rate value during the triangular cycle for the layer\n closest to the output neuron(s). Deeper layers in 'model' will be trained\n with a gradually descending learning rate. The descending factor is fixed\n and is 2.5\n layers: List\n List of nn.Module objects containing the layers that will be warmed up.\n This must be in 'WARM-UP ORDER'.\n routine: str\n one of 'howard' or 'felbo'\n \"\"\"\n model.train()\n step_size_up, step_size_down = self._steps_up_down(len(loader))\n original_setup = {}\n for n, p in model.named_parameters():\n original_setup[n] = p.requires_grad\n layers_max_lr = [last_layer_max_lr] + [\n last_layer_max_lr / (2.5 * n) for n in range(1, len(layers))\n ]\n\n for layer in layers:\n for p in layer.parameters():\n p.requires_grad = False\n\n if routine == \"howard\":\n params: List = []\n max_lr: List = []\n base_lr: List = []\n\n for i, (lr, layer) in enumerate(zip(layers_max_lr, layers)):\n if self.verbose:\n print(\n \"Warming up {}, layer {} of {}\".format(\n model_name, i + 1, len(layers)\n )\n )\n for p in layer.parameters():\n p.requires_grad = True\n if routine == \"felbo\":\n params, max_lr, base_lr = layer.parameters(), lr, lr / 10.0 # type: ignore\n elif routine == \"howard\":\n params += [{\"params\": layer.parameters(), \"lr\": lr / 10.0}]\n max_lr += [lr]\n base_lr += [lr / 10.0]\n optimizer = torch.optim.AdamW(params) # type: ignore\n scheduler = torch.optim.lr_scheduler.CyclicLR(\n optimizer,\n base_lr=base_lr, # type: ignore\n max_lr=max_lr, # type: ignore\n step_size_up=step_size_up,\n step_size_down=step_size_down, # type: ignore\n cycle_momentum=False,\n )\n self._warm(model, model_name, loader, optimizer, scheduler)\n if routine == \"felbo\":\n for p in layer.parameters():\n p.requires_grad = False\n\n if routine == \"felbo\":\n if self.verbose:\n print(\"Warming up one last epoch with all warmed up layers trainable\")\n for layer in layers:\n for p in layer.parameters():\n p.requires_grad = True\n params, max_lr, base_lr = [], [], []\n for lr, layer in zip(layers_max_lr, layers):\n params += [{\"params\": layer.parameters(), \"lr\": lr / 10.0}]\n max_lr += [lr]\n base_lr += [lr / 10.0]\n optimizer = torch.optim.AdamW(params) # type: ignore\n scheduler = 
torch.optim.lr_scheduler.CyclicLR(\n optimizer,\n base_lr=base_lr, # type: ignore\n max_lr=max_lr, # type: ignore\n step_size_up=step_size_up,\n step_size_down=step_size_down, # type: ignore\n cycle_momentum=False,\n )\n self._warm(model, model_name, loader, optimizer, scheduler)\n\n for n, p in model.named_parameters():\n p.requires_grad = original_setup[n]\n\n def _warm(\n self,\n model: nn.Module,\n model_name: str,\n loader: DataLoader,\n optimizer: Optimizer,\n scheduler: LRScheduler,\n n_epochs: int = 1,\n ):\n r\"\"\"\n Standard Pytorch training loop\n \"\"\"\n steps = len(loader)\n for epoch in range(n_epochs):\n running_loss = 0.0\n with trange(steps, disable=self.verbose != 1) as t:\n for batch_idx, (data, target) in zip(t, loader):\n t.set_description(\"epoch %i\" % (epoch + 1))\n X = data[model_name].cuda() if use_cuda else data[model_name]\n y = target.float() if self.method != \"multiclass\" else target\n y = y.cuda() if use_cuda else y\n\n optimizer.zero_grad()\n y_pred = self.activation_fn(model(X))\n loss = self.loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n scheduler.step() # type: ignore\n\n running_loss += loss.item()\n avg_loss = running_loss / (batch_idx + 1)\n\n if self.metric is not None:\n acc = self.metric(y_pred, y)\n t.set_postfix(metrics=acc, loss=avg_loss)\n else:\n t.set_postfix(loss=np.sqrt(avg_loss))\n\n def _steps_up_down(self, steps: int, n_epochs: int = 1) -> Tuple[int, int]:\n r\"\"\"\n Calculate the number of steps up and down during the one cycle warm up for a\n given number of epochs\n\n Parameters:\n ----------\n steps: Int\n steps per epoch\n n_epochs: Int. Default=1\n number of warm up epochs\n\n Returns:\n -------\n up, down: Tuple, Int\n number of steps increasing/decreasing the learning rate during the cycle\n \"\"\"\n up = round((steps * n_epochs) * 0.1)\n down = (steps * n_epochs) - up\n return up, down\n" ]
[ [ "numpy.all", "pandas.DataFrame" ], [ "torch.sigmoid", "sklearn.utils.Bunch", "torch.empty", "torch.nn.LSTM", "torch.zeros", "numpy.arange", "torch.utils.data.DataLoader", "torch.nn.Embedding", "torch.nn.Linear", "numpy.random.rand", "torch.rand", "torch.cuda.is_available", "numpy.vstack" ], [ "torch.optim.lr_scheduler.CyclicLR", "torch.optim.AdamW", "numpy.sqrt", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ethansimpson285/StarlingPy
[ "0afba0444b695c431227f4e28d6d3edde5b56af7" ]
[ "src/starlingpy/StarlingClass.py" ]
[ "from requests import get\nimport datetime\n\nimport pandas as pd \n\nfrom starlingpy.StarlingAPIs import Account_APIs\n\nBASE_PATH = \"https://api.starlingbank.com/api/v2/\"\n\n\nclass TransactionHistory:\n\n \"\"\"\n A history of transactions associated with the Starling account, between stipulated datetimes.\n Requires the StarlingAccount object to be passed\n \"\"\"\n\n\n def __init__(self,Account,**kwargs):\n\n self.associated_Account = Account \n output = Account.get_transactions(**kwargs)\n self.start_date , self.end_date = output[0] , output[1]\n self.transaction_List = output[2]\n self.full_Dataframe = self.generate_transaction_dataframe(**kwargs)\n self.summary_Dataframe = self.summary_transaction_dataframe()\n\n\n def generate_transaction_dataframe(self,**kwargs):\n\n \"\"\" Generates full transaction dataframe between dates \"\"\"\n\n df = pd.DataFrame(self.transaction_List)\n running_balance = self.generate_running_balance_list(self.transaction_List)\n df['Balance Before'] = running_balance[1:]\n df['Balance After'] = running_balance[:-1]\n df[\"transactionTime\"]= pd.to_datetime(df[\"transactionTime\"])\n\n return df\n\n \n def summary_transaction_dataframe(self):\n\n \"\"\" Generates an abridged summary dataframe for using with plotting macros \"\"\"\n\n df = self.full_Dataframe\n\n Amounts_df=df[\"amount\"].apply(pd.Series)\n dfN = pd.concat([Amounts_df,df[\"transactionTime\"],df[\"spendingCategory\"]],ignore_index=False,axis=1)\n pd.to_numeric(dfN['minorUnits'],downcast='float')\n dfN.loc[dfN['spendingCategory'].isin(['INCOME']),'minorUnits'] = -dfN[\"minorUnits\"]\n\n return dfN\n\n\n def generate_running_balance_list(self,transaction_list,**kwargs):\n\n \"\"\" Computes running balance based on totalEffectiveBalance field \"\"\"\n\n balance = self.associated_Account.get_balance()[\"totalEffectiveBalance\"][\"minorUnits\"]\n running_balance = [balance]\n for trans in transaction_list:\n amount = trans[\"amount\"][\"minorUnits\"] \n if trans[\"spendingCategory\"]=='INCOME': amount = -amount\n balance += amount\n running_balance.append(balance)\n \n return running_balance\n\n\n def discrete_time_summary(self,time_block):\n\n # Get the range of times\n rng = pd.date_range(self.start_date, self.end_date, freq=time_block)\n\n # Split the summary_Dataframe by the time blcok\n g=self.summary_Dataframe.groupby(pd.Grouper(key='transactionTime', freq=time_block))\n dfs = [group for _,group in g]\n\n summary_dict = {}\n\n for i,T_df in enumerate(dfs):\n\n if T_df.empty:\n continue\n\n m = T_df['spendingCategory'] != 'INCOME'\n out_df, in_df = T_df[m], T_df[~m]\n\n total_expend = out_df['minorUnits'].sum()/1e2\n total_income = in_df['minorUnits'].sum()/1e2\n if total_income < 0: total_income = -total_income\n\n summary_dict[rng[i]] = {'outgoings': out_df , 'incomings': in_df, 'expenditure': total_expend,'income':total_income} \n \n return pd.DataFrame(summary_dict).T\n\n\n \n\n\n\nclass StarlingAccount:\n\n \"\"\"\n Class which aligns to Starling bank account, with relevant attributes for that bank account, \n Class methods for extracting information from the \n \"\"\"\n\n def fetch(self,url):\n r = get(url,headers=self.headers)\n r.raise_for_status()\n return r.json()\n\n def access_account_details(self):\n url = BASE_PATH + \"accounts\"\n return self.fetch(url)\n\n def __init__(self,PAT,**kwargs):\n self.PAT = PAT\n self.headers = {\"Authorization\": \"Bearer \" + PAT}\n self.requests_object = self.access_account_details()\n self.account_details = 
self.requests_object['accounts'][0]\n self.accountUid = self.account_details['accountUid']\n self.defaultCategory = self.account_details['defaultCategory']\n\n def get_balance(self):\n url = BASE_PATH + Account_APIs[\"Account Balance\"].format(self.accountUid)\n return self.fetch(url)\n\n def show_balance(self):\n tEF = self.get_balance()[\"totalEffectiveBalance\"]\n print(str( tEF[\"minorUnits\"]/1e2) + \" \" + tEF[\"currency\"])\n\n\n def get_recurring_payments(self):\n url = BASE_PATH + Account_APIs[\"Recurring Payments\"].format(self.accountUid)\n return self.fetch(url) \n\n\n def get_feed(self):\n url = BASE_PATH + Account_APIs[\"Feed\"].format(self.accountUid,self.defaultCategory)\n return self.fetch(url) \n\n\n def get_payees(self):\n url = BASE_PATH + Account_APIs[\"Payees\"]\n return self.fetch(url) \n\n\n def get_transactions(self,**kwargs):\n start_date = kwargs[\"start_date\"] if \"start_date\" in kwargs else (datetime.datetime.now()-datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\") + \"T00:00:00Z\"\n end_date = kwargs[\"end_date\"] if \"end_date\" in kwargs else datetime.datetime.now().strftime(\"%Y-%m-%d\") + \"T00:00:00Z\"\n url = BASE_PATH + Account_APIs[\"Transactions Between\"].format(self.accountUid,self.defaultCategory,start_date,end_date)\n return start_date,end_date,self.fetch(url)['feedItems']\n\n\n\n\n\n\n\n \n\n\n\n\n\n" ]
[ [ "pandas.concat", "pandas.to_datetime", "pandas.Grouper", "pandas.DataFrame", "pandas.date_range", "pandas.to_numeric" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
RichardOkubo/DS-Scripts
[ "adf3845802b52e8901d381ffff60f9c1276dabe1" ]
[ "machine-learning-algorithms/reducao-rbm.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.neural_network import BernoulliRBM\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.pipeline import Pipeline\n\nbase = datasets.load_digits()\nprevisores = np.asarray(base.data, 'float32')\nclasse = base.target\n\nnormalizador = MinMaxScaler(feature_range=(0,1))\nprevisores = normalizador.fit_transform(previsores)\n\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size = 0.2, random_state=0)\n\nrbm = BernoulliRBM(random_state = 0)\nrbm.n_iter = 25\nrbm.n_components = 50\nnaive_rbm = GaussianNB()\nclassificador_rbm = Pipeline(steps = [('rbm', rbm), ('naive', naive_rbm)])\nclassificador_rbm.fit(previsores_treinamento, classe_treinamento)\n\nplt.figure(figsize=(20,20))\nfor i, comp in enumerate(rbm.components_):\n plt.subplot(10, 10, i + 1)\n plt.imshow(comp.reshape((8,8)), cmap=plt.cm.gray_r)\n plt.xticks(())\n plt.yticks(())\nplt.show()\n\nprevisoes_rbm = classificador_rbm.predict(previsores_teste)\nprecisao_rbm = metrics.accuracy_score(previsoes_rbm, classe_teste)\n\nnaive_simples = GaussianNB()\nnaive_simples.fit(previsores_treinamento, classe_treinamento)\nprevisoes_naive = naive_simples.predict(previsores_teste)\nprecisao_naive = metrics.accuracy_score(previsoes_naive, classe_teste)" ]
[ [ "matplotlib.pyplot.yticks", "sklearn.naive_bayes.GaussianNB", "numpy.asarray", "sklearn.metrics.accuracy_score", "sklearn.model_selection.train_test_split", "sklearn.pipeline.Pipeline", "sklearn.neural_network.BernoulliRBM", "matplotlib.pyplot.subplot", "sklearn.datasets.load_digits", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mmezic/netket
[ "629e885212d981d7748d155310abca4a1f9d5481", "629e885212d981d7748d155310abca4a1f9d5481" ]
[ "test/jax/test_vjp_batched.py", "netket/sampler/base.py" ]
[ "import pytest\n\nimport jax\nimport netket as nk\nimport numpy as np\nfrom functools import partial\n\n\[email protected](\"jit\", [False, True])\[email protected](\"batch_size\", [None, 16, 10000, 1000000])\[email protected](\"return_forward\", [False, True])\[email protected](\"batch_argnums\", [1, (1,)])\[email protected](\"nondiff_argnums\", [1, (1,)])\ndef test_vjp_batched(batch_size, jit, return_forward, batch_argnums, nondiff_argnums):\n @partial(jax.vmap, in_axes=(None, 0))\n def f(p, x):\n return jax.lax.log(p.dot(jax.lax.sin(x)))\n\n k = jax.random.split(jax.random.PRNGKey(123), 4)\n p = jax.random.uniform(k[0], shape=(8,))\n X = jax.random.uniform(k[2], shape=(10000, 8))\n w = jax.random.uniform(k[3], shape=(10000,))\n\n vjp_fun_batched = nk.jax.vjp_batched(\n f,\n p,\n X,\n batch_argnums=batch_argnums,\n batch_size=batch_size,\n nondiff_argnums=nondiff_argnums,\n return_forward=return_forward,\n )\n y_expected, vjp_fun = jax.vjp(f, p, X)\n\n if jit:\n vjp_fun_batched = jax.jit(vjp_fun_batched)\n vjp_fun = jax.jit(vjp_fun)\n\n res_expected = vjp_fun(w)[:1]\n\n if return_forward:\n y, res = vjp_fun_batched(w)\n np.testing.assert_allclose(y, y_expected)\n else:\n res = vjp_fun_batched(w)\n\n np.testing.assert_allclose(res, res_expected)\n", "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nfrom typing import Optional, Union, Tuple, Callable, Iterator\nfrom functools import partial\n\nimport jax\nimport numpy as np\n\nfrom flax import linen as nn\nfrom jax import numpy as jnp\n\nfrom netket import jax as nkjax\nfrom netket.hilbert import AbstractHilbert\nfrom netket.utils import mpi, get_afun_if_module, wrap_afun\nfrom netket.utils.types import PyTree, DType, SeedT\nfrom netket.jax import HashablePartial\nfrom netket.utils import struct, numbers\n\nfancy = []\n\n\[email protected]\nclass SamplerState:\n \"\"\"\n Base class holding the state of a sampler.\n \"\"\"\n\n pass\n\n\ndef autodoc(clz):\n pass\n\n\[email protected]\nclass Sampler(abc.ABC):\n \"\"\"\n Abstract base class for all samplers.\n\n It contains the fields that all of them should possess, defining the common\n API.\n Note that fields marked with pytree_node=False are treated as static arguments\n when jitting.\n \"\"\"\n\n hilbert: AbstractHilbert = struct.field(pytree_node=False)\n \"\"\"Hilbert space to be sampled.\"\"\"\n\n n_chains_per_rank: int = struct.field(pytree_node=False, default=None)\n \"\"\"Number of independent chains on every MPI rank.\"\"\"\n\n machine_pow: int = struct.field(default=2)\n \"\"\"Exponent of the pdf sampled.\"\"\"\n\n dtype: DType = struct.field(pytree_node=False, default=np.float64)\n \"\"\"DType of the computed samples.\"\"\"\n\n def __pre_init__(\n self, hilbert: AbstractHilbert, n_chains: Optional[int] = None, **kwargs\n ):\n \"\"\"\n Construct a Monte Carlo sampler.\n\n Args:\n hilbert: Hilbert space to be sampled\n n_chains: The total number of independent chains across all MPI ranks\n n_chains_per_rank: 
Number of independent chains on every MPI rank\n machine_pow: Exponent of the pdf sampled\n dtype: DType of the computed samples\n \"\"\"\n\n # Default number of total chains\n if \"n_chains_per_rank\" in kwargs:\n if n_chains is not None:\n raise ValueError(\n \"Cannot specify both `n_chains` and `n_chains_per_rank`\"\n )\n else:\n\n # DEFAULT VALUE\n if n_chains is None:\n n_chains = 16\n\n n_chains_per_rank = max(int(np.ceil(n_chains / mpi.n_nodes)), 1)\n if mpi.n_nodes > 1 and mpi.rank == 0:\n if n_chains_per_rank * mpi.n_nodes != n_chains:\n import warnings\n\n warnings.warn(\n f\"Using {n_chains_per_rank} chains per rank among {mpi.n_nodes} ranks (total=\"\n f\"{n_chains_per_rank*mpi.n_nodes} instead of n_chains={n_chains}).\"\n f\"To directly control the number of chains on every rank, specify \"\n f\"`n_chains_per_rank` when constructing the sampler. \"\n f\"To silence this warning, either use `n_chains_per_rank` or use `n_chains` \"\n f\"that is a multiple of the number of mpi ranks.\",\n category=UserWarning,\n )\n\n kwargs[\"n_chains_per_rank\"] = n_chains_per_rank\n\n return (hilbert,), kwargs\n\n def __post_init__(self):\n # Raise errors if hilbert is not an Hilbert\n if not isinstance(self.hilbert, AbstractHilbert):\n raise ValueError(\n \"hilbert must be a subtype of netket.hilbert.AbstractHilbert, \"\n + \"instead, type {} is not.\".format(type(self.hilbert))\n )\n\n # workaround Jax bug under pmap\n # might be removed in the future\n if not type(self.machine_pow) == object:\n if not np.issubdtype(numbers.dtype(self.machine_pow), np.integer):\n raise ValueError(\n f\"machine_pow ({self.machine_pow}) must be a positive integer\"\n )\n\n @property\n def n_chains(self) -> int:\n \"\"\"\n The total number of chains across all MPI ranks.\n\n If you are not using MPI, this is equal to `n_chains_per_rank`\n \"\"\"\n return self.n_chains_per_rank * mpi.n_nodes\n\n @property\n def n_batches(self) -> int:\n r\"\"\"\n The batch size of the configuration $\\sigma$ used by this sampler.\n\n In general, it is equivalent to :attr:`~Sampler.n_chains`.\n \"\"\"\n return self.n_chains_per_rank\n\n @property\n def is_exact(self) -> bool:\n \"\"\"\n Returns `True` if the sampler is exact.\n\n The sampler is exact if all the samples are exactly distributed according to the\n chosen power of the variational state, and there is no correlation among them.\n \"\"\"\n return False\n\n def log_pdf(self, model: Union[Callable, nn.Module]) -> Callable:\n \"\"\"\n Returns a closure with the log_pdf function encoded by this sampler.\n\n Note: the result is returned as an HashablePartial so that the closure\n does not trigger recompilation.\n\n Args:\n model: The machine, or apply_fun\n\n Returns:\n the log probability density function\n \"\"\"\n apply_fun = get_afun_if_module(model)\n log_pdf = HashablePartial(\n lambda apply_fun, pars, σ: self.machine_pow * apply_fun(pars, σ).real,\n apply_fun,\n )\n return log_pdf\n\n def init_state(\n sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n seed: Optional[SeedT] = None,\n ) -> SamplerState:\n \"\"\"\n Creates the structure holding the state of the sampler.\n\n If you want reproducible samples, you should specify `seed`, otherwise the state\n will be initialised randomly.\n\n If running across several MPI processes, all sampler_states are guaranteed to be\n in a different (but deterministic) state.\n This is achieved by first reducing (summing) the seed provided to every MPI rank,\n then generating n_rank seeds starting from the reduced 
one, and every rank is\n initialized with one of those seeds.\n\n The resulting state is guaranteed to be a frozen python dataclass (in particular,\n a flax's dataclass), and it can be serialized using Flax serialization methods.\n\n Args:\n machine: a Flax module or callable with the forward pass of the log-pdf.\n parameters: The PyTree of parameters of the model.\n seed: An optional seed or jax PRNGKey. If not specified, a random seed will be used.\n\n Returns:\n The structure holding the state of the sampler. In general you should not expect\n it to be in a valid state, and should reset it before use.\n \"\"\"\n key = nkjax.PRNGKey(seed)\n key = nkjax.mpi_split(key)\n\n return sampler._init_state(wrap_afun(machine), parameters, key)\n\n def reset(\n sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n state: Optional[SamplerState] = None,\n ) -> SamplerState:\n \"\"\"\n Resets the state of the sampler. To be used every time the parameters are changed.\n\n Args:\n machine: a Flax module or callable with the forward pass of the log-pdf.\n parameters: The PyTree of parameters of the model.\n state: The current state of the sampler. If it's not provided, it will be constructed\n by calling :code:`sampler.init_state(machine, parameters)` with a random seed.\n\n Returns:\n A valid sampler state.\n \"\"\"\n if state is None:\n state = sampler_state(sampler, machine, parameters)\n\n return sampler._reset(wrap_afun(machine), parameters, state)\n\n def sample_next(\n sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n state: Optional[SamplerState] = None,\n ) -> Tuple[jnp.ndarray, SamplerState]:\n \"\"\"\n Samples the next state in the markov chain.\n\n Args:\n machine: a Flax module or callable apply function with the forward pass of the log-pdf.\n parameters: The PyTree of parameters of the model.\n state: The current state of the sampler. If it's not provided, it will be constructed\n by calling :code:`sampler.reset(machine, parameters)` with a random seed.\n\n Returns:\n state: The new state of the sampler\n σ: The next batch of samples.\n \"\"\"\n # Note: the return order is inverted wrt `.sample` because when called inside of\n # a scan function the first returned argument should be the state.\n\n if state is None:\n state = sampler_state(sampler, machine, parameters)\n\n return sampler._sample_next(wrap_afun(machine), parameters, state)\n\n def sample(\n sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n *,\n state: Optional[SamplerState] = None,\n chain_length: int = 1,\n ) -> Tuple[jnp.ndarray, SamplerState]:\n \"\"\"\n Samples chain_length elements along the chains.\n\n Arguments:\n sampler: The Monte Carlo sampler.\n machine: The model or callable to sample from (if it's a function it should have\n the signature :code:`f(parameters, σ) -> jnp.ndarray`).\n parameters: The PyTree of parameters of the model.\n state: current state of the sampler. 
If None, then initialises it.\n chain_length: (default=1), the length of the chains.\n\n Returns:\n σ: The next batch of samples.\n state: The new state of the sampler\n \"\"\"\n\n return sample(\n sampler, machine, parameters, state=state, chain_length=chain_length\n )\n\n def _sample_chain(\n sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n state: SamplerState,\n chain_length: int,\n ) -> Tuple[jnp.ndarray, SamplerState]:\n \"\"\"\n Samples chain_length elements along the chains.\n\n In general this should not be overridden unless you want to modify the logic by which\n the whole sampling is performed.\n If using Jax, this function should be jitted\n\n Arguments:\n sampler: The Monte Carlo sampler.\n machine: The model or callable to sample from (if it's a function it should have\n the signature :code:`f(parameters, σ) -> jnp.ndarray`).\n parameters: The PyTree of parameters of the model.\n state: current state of the sampler. If None, then initialises it.\n chain_length: (default=1), the length of the chains.\n\n Returns:\n σ: The next batch of samples.\n state: The new state of the sampler\n \"\"\"\n return _sample_chain(sampler, machine, parameters, state, chain_length)\n\n @abc.abstractmethod\n def _init_state(sampler, machine, params, seed) -> SamplerState:\n \"\"\"\n Implementation of init_state for subclasses of Sampler.\n\n If you sub-class Sampler, you should define this and not init_state\n itself, because init_state contains some common logic.\n \"\"\"\n raise NotImplementedError(\"init_state Not Implemented\")\n\n @abc.abstractmethod\n def _reset(sampler, machine, parameters, state):\n \"\"\"\n Implementation of reset for subclasses of Sampler.\n\n If you sub-class Sampler, you should define _reset and not reset\n itself, because reset contains some common logic.\n \"\"\"\n raise NotImplementedError(\"reset Not Implemented\")\n\n @abc.abstractmethod\n def _sample_next(sampler, machine, parameters, state=None):\n \"\"\"\n Implementation of sample_next for subclasses of Sampler.\n\n If you sub-class Sampler, you should define _sample_next and not sample_next\n itself, because reset contains some common logic.\n \"\"\"\n raise NotImplementedError(\"sample_next Not Implemented\")\n\n\ndef sampler_state(\n sampler: Sampler, machine: Union[Callable, nn.Module], parameters: PyTree\n) -> SamplerState:\n \"\"\"\n Creates the structure holding the state of the sampler.\n\n If you want reproducible samples, you should specify `seed`, otherwise the state\n will be initialised randomly.\n\n If running across several MPI processes, all sampler_states are guaranteed to be\n in a different (but deterministic) state.\n\n This is achieved by first reducing (summing) the seed provided to every MPI rank,\n then generating n_rank seeds starting from the reduced one, and every rank is\n initialized with one of those seeds.\n\n Args:\n sampler: The Monte Carlo sampler.\n machine: a Flax module or callable with the forward pass of the log-pdf.\n parameters: The PyTree of parameters of the model.\n seed: An optional seed or jax PRNGKey. If not specified, a random seed will be used.\n\n Returns:\n The structure holding the state of the sampler. 
In general you should not expect\n it to be in a valid state, and should reset it before use.\n \"\"\"\n return sampler.init_state(machine, parameters)\n\n\ndef reset(\n sampler: Sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n state: Optional[SamplerState] = None,\n) -> SamplerState:\n \"\"\"\n Resets the state of the sampler. To be used every time the parameters are changed.\n\n Args:\n sampler: The Monte Carlo sampler.\n machine: a Flax module or Callable with the forward pass of the log-pdf.\n parameters: The PyTree of parameters of the model.\n state: The current state of the sampler. If it's not provided, it will be constructed\n by calling :code:`sampler.init_state(machine, parameters)` with a random seed.\n\n Returns:\n A valid sampler state.\n \"\"\"\n sampler.reset(machine, parameters, state)\n\n\ndef sample_next(\n sampler: Sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n state: Optional[SamplerState] = None,\n) -> Tuple[jnp.ndarray, SamplerState]:\n \"\"\"\n Samples the next state in the markov chain.\n\n Args:\n sampler: The Monte Carlo sampler.\n machine: a Flax module or callable with the forward pass of the log-pdf.\n parameters: The PyTree of parameters of the model.\n state: The current state of the sampler. If it's not provided, it will be constructed\n by calling :code:`sampler.reset(machine, parameters)` with a random seed.\n\n Returns:\n state: The new state of the sampler\n σ: The next batch of samples.\n \"\"\"\n return sampler.sample_next(machine, parameters, state)\n\n\ndef sample(\n sampler: Sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n *,\n state: Optional[SamplerState] = None,\n chain_length: int = 1,\n) -> Tuple[jnp.ndarray, SamplerState]:\n \"\"\"\n Samples chain_length elements along the chains.\n\n Arguments:\n sampler: The Monte Carlo sampler.\n machine: The model or Callable to sample from (if it's a function it should have\n the signature :code:`f(parameters, σ) -> jnp.ndarray`).\n parameters: The PyTree of parameters of the model.\n state: current state of the sampler. If None, then initialises it.\n chain_length: (default=1), the length of the chains.\n\n Returns:\n σ: The next batch of samples.\n state: The new state of the sampler\n \"\"\"\n if state is None:\n state = sampler.reset(machine, parameters, state)\n\n return sampler._sample_chain(machine, parameters, state, chain_length)\n\n\n@partial(jax.jit, static_argnums=(1, 4))\ndef _sample_chain(\n sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n state: SamplerState,\n chain_length: int,\n) -> Tuple[jnp.ndarray, SamplerState]:\n \"\"\"\n Samples chain_length elements along the chains.\n\n Internal method used for jitting calls\n\n Arguments:\n sampler: The Monte Carlo sampler.\n machine: The model or Callable to sample from (if it's a function it should have\n the signature :code:`f(parameters, σ) -> jnp.ndarray`).\n parameters: The PyTree of parameters of the model.\n state: current state of the sampler. 
If None, then initialises it.\n chain_length: (default=1), the length of the chains.\n\n Returns:\n σ: The next batch of samples.\n state: The new state of the sampler\n \"\"\"\n _sample_next = lambda state, _: sampler.sample_next(machine, parameters, state)\n\n state, samples = jax.lax.scan(\n _sample_next,\n state,\n xs=None,\n length=chain_length,\n )\n\n return samples, state\n\n\ndef samples(\n sampler: Sampler,\n machine: Union[Callable, nn.Module],\n parameters: PyTree,\n *,\n state: Optional[SamplerState] = None,\n chain_length: int = 1,\n) -> Iterator[np.ndarray]:\n \"\"\"\n Returns a generator sampling chain_length elements along the chains.\n\n Arguments:\n sampler: The Monte Carlo sampler.\n machine: The model or Callable to sample from (if it's a function it should have\n the signature :code:`f(parameters, σ) -> jnp.ndarray`).\n parameters: The PyTree of parameters of the model.\n state: current state of the sampler. If None, then initialises it.\n chain_length: (default=1), the length of the chains.\n \"\"\"\n if state is None:\n state = sampler.reset(machine, parameters, state)\n\n for i in range(chain_length):\n samples, state = sampler._sample_chain(machine, parameters, state, 1)\n yield samples[0, :, :]\n" ]
[ [ "numpy.testing.assert_allclose" ], [ "numpy.ceil" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FSEC-Photovoltaics/pvrpm-lcoe
[ "dbe0bb30ffa1041ec004f84c57aac44f47bdf6d2" ]
[ "pvrpm/core/simulation.py" ]
[ "import os\nimport time\nimport warnings\nimport multiprocessing as mp\nfrom typing import List\n\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime\nfrom tqdm import tqdm\n\nfrom pvrpm.core.enums import ConfigKeys as ck\nfrom pvrpm.core.case import SamCase\nfrom pvrpm.core.components import Components\nfrom pvrpm.core.utils import summarize_dc_energy, component_degradation\nfrom pvrpm.core.logger import logger\n\n\ndef cf_interval(alpha: float, std: float, num_samples: int) -> float:\n \"\"\"\n Calculates the two tails margin of error given the desired input. The margin of error is the value added and subtracted by the sample mean to obtain the confidence interval\n\n Sample sizes less then equal to 30 use t score, greater then 30 use z score\n\n Args:\n alpha (float): The significance level for the interval\n std (float): The standard deviation of the data\n num_samples (int): The number of samples in the data\n\n Returns:\n float: The margin of error\n \"\"\"\n # two tails\n alpha = alpha / 2\n\n if num_samples > 30:\n score = stats.norm.ppf(alpha)\n else:\n score = stats.t.ppf(1 - alpha, num_samples - 1)\n\n return score * std / np.sqrt(num_samples)\n\n\ndef simulate_day(case: SamCase, comp: Components, day: int):\n \"\"\"\n Updates and increments the simulation by a day, performing all neccesary component updates.\n\n Args:\n case (:obj:`SamCase`): The current Sam Case of the simulation\n comp (:obj:`Components`): The components class containing all the outputs for this simulation\n day (int): Current day in the simulation\n \"\"\"\n # static monitoring starts the day, if available. This is updated independently of component levels\n comp.update_indep_monitor(day)\n\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n\n df = comp.comps[c]\n # if component can't fail, just continue\n if case.config[c][ck.CAN_FAIL]:\n # decrement time to failures for operational modules\n # fail components when their time has come\n comp.update_fails(c, day)\n\n # update monitoring\n comp.update_monitor(c, day)\n\n if case.config[c][ck.CAN_REPAIR]:\n # repair components when they are done and can be repaired\n comp.update_repairs(c, day)\n\n if case.config[c].get(ck.WARRANTY, None):\n df[\"time_left_on_warranty\"] -= 1\n\n # availability\n if c == ck.GRID:\n # for the grid only, the availability is based on the full 24-hour day.\n df.loc[df[\"state\"] == 0, \"avail_downtime\"] += 24\n else:\n # else, use the sun hours for this day\n df.loc[df[\"state\"] == 0, \"avail_downtime\"] += case.daylight_hours[day % 365]\n\n # module can still degrade even if it cant fail\n if case.config[c].get(ck.DEGRADE, None):\n df[\"days_of_degradation\"] += 1\n df[\"degradation_factor\"] = [\n component_degradation(case.config[c][ck.DEGRADE] / 365, d) for d in df[\"days_of_degradation\"]\n ]\n\n\ndef run_system_realization(\n case: SamCase, seed: bool = False, realization_num: int = 0, progress_bar: bool = False, debug: int = 0,\n) -> Components:\n \"\"\"\n Run a full realization for calculating costs\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n seed (bool, Optional): Whether to seed the random number generator, for multiprocessing\n realization_num (int, Optional): Current realization number, used for multiprocessing\n progress_bar (bool, Optional): Whether to display progress bar during the 
realization\n debug (int, Optional): Whether to save simulation state every `debug` days (0 to turn off)\n\n Returns:\n :obj:`Components`: The components object which contains all the data for this realization\n \"\"\"\n if seed:\n np.random.seed()\n\n # data storage\n comp = Components(case)\n lifetime = case.config[ck.LIFETIME_YRS]\n\n if case.config[ck.TRACKING]:\n comp.tracker_power_loss_factor[0] = 1\n comp.tracker_availability[0] = 1\n\n # initial timestep\n comp.module_degradation_factor[0] = comp.current_degradation()\n comp.dc_power_availability[0] = comp.dc_availability()\n comp.ac_power_availability[0] = comp.ac_availability()\n\n if progress_bar:\n iterator = tqdm(\n range(1, lifetime * 365),\n ascii=True,\n desc=f\"Running realization {realization_num}\",\n unit=\"day\",\n position=mp.current_process()._identity[0],\n leave=False,\n )\n else:\n logger.info(f\"Running realization {realization_num}...\")\n iterator = range(1, lifetime * 365)\n\n for i in iterator:\n # calculate new labor rate each year\n if i == 1 or i % 365 == 0:\n year = np.floor(i / 365)\n inflation = np.power(1 + case.config[ck.INFLATION] / 100, year)\n comp.update_labor_rates(case.config[ck.LABOR_RATE] * inflation)\n # Decided to remove since it doesnt make sense for only trackers to rise with inflation and not\n # all other failures. Plus, this was broken.\n # need to store original cost of tracker failures for each failure and increase based on that cost\n # also need to take in concurrent failures\n # if case.config[ck.TRACKING]:\n # for fail in case.config[ck.TRACKER][ck.FAILURE].keys():\n # case.config[ck.TRACKER][ck.FAILURE][fail][ck.COST] *= inflation\n\n # save state if debugging\n if debug > 0 and i % debug == 0:\n state_dict = comp.snapshot()\n folder = f\"debug_day_{i}\"\n save_path = os.path.join(case.config[ck.RESULTS_FOLDER], folder)\n os.makedirs(save_path, exist_ok=True)\n for key, val in state_dict.items():\n val.to_csv(os.path.join(save_path, f\"{key}_state.csv\"), index=True)\n\n # timestep is applied each day\n simulate_day(case, comp, i)\n\n if case.config[ck.TRACKING]:\n comp.tracker_availability[i], comp.tracker_power_loss_factor[i] = comp.tracker_power_loss(i)\n\n comp.module_degradation_factor[i] = comp.current_degradation()\n comp.dc_power_availability[i] = comp.dc_availability()\n comp.ac_power_availability[i] = comp.ac_availability()\n\n # create same performance adjustment tables for avail, degradation, tracker losses\n if case.config[ck.TRACKING]:\n daily_dc_loss = 100 * (\n 1 - (comp.dc_power_availability * comp.module_degradation_factor * comp.tracker_power_loss_factor)\n )\n else:\n daily_dc_loss = 100 * (1 - (comp.dc_power_availability * comp.module_degradation_factor))\n\n daily_ac_loss = 100 * (1 - comp.ac_power_availability)\n\n case.value(\"en_dc_lifetime_losses\", 1)\n case.value(\"dc_lifetime_losses\", list(daily_dc_loss))\n\n case.value(\"en_ac_lifetime_losses\", 1)\n case.value(\"ac_lifetime_losses\", list(daily_ac_loss))\n\n o_m_yearly_costs = np.zeros(lifetime)\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n\n comp_yearly_cost = np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1)\n o_m_yearly_costs += comp_yearly_cost\n\n case.value(\"om_fixed\", list(o_m_yearly_costs))\n\n case.simulate()\n\n # add the results of the simulation to the components class and return\n comp.timeseries_dc_power = case.output(\"dc_net\")\n comp.timeseries_ac_power = case.value(\"gen\")\n comp.lcoe = case.output(\"lcoe_real\")\n comp.npv = 
case.get_npv()\n\n # remove the first element from cf_energy_net because it is always 0, representing year 0\n comp.annual_energy = np.array(case.output(\"cf_energy_net\")[1:])\n\n # more results, for graphing and what not\n try:\n comp.tax_cash_flow = case.output(\"cf_after_tax_cash_flow\")\n except AttributeError:\n comp.tax_cash_flow = case.output(\"cf_pretax_cashflow\")\n\n for loss in ck.losses:\n try:\n comp.losses[loss] = case.output(loss)\n except:\n comp.losses[loss] = 0\n\n return comp\n\n\ndef gen_results(case: SamCase, results: List[Components]) -> List[pd.DataFrame]:\n \"\"\"\n Generates results for the given SAM case and list of component objects containing the results of each realization.\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n results (:obj:`list(Components)`): List of component objects that contain the results for each realization\n\n Returns:\n :obj:`list(pd.DataFrame)`: List of dataframes containing the results.\n\n Note:\n The order of the returned dataframes is:\n - Summary Results\n - Degradation Results\n - DC Power\n - AC Power\n - Yearly Costs\n \"\"\"\n summary_index = [\"Base Case\"]\n summary_data = {\"lcoe\": [case.base_lcoe], \"npv\": [case.base_npv]}\n lifetime = case.config[ck.LIFETIME_YRS]\n p_vals = [99, 95, 90, 75, 50, 10]\n\n # ac energy\n cumulative_ac_energy = np.cumsum(case.base_annual_energy)\n\n for i in range(int(lifetime)):\n summary_data[f\"annual_ac_energy_{i+1}\"] = [case.base_annual_energy[i]]\n # split up so the order of columns is nicer\n for i in range(int(lifetime)):\n summary_data[f\"cumulative_ac_energy_{i+1}\"] = [cumulative_ac_energy[i]]\n\n # dc energy\n for i in range(len(case.base_dc_energy)):\n summary_data[f\"dc_energy_{i+1}\"] = [case.base_dc_energy[i]]\n\n # TODO: also, need to clean this up, i just use dictionaries and fill in blanks for base case, but this can be much cleaner\n # per realization results\n day_index = np.arange(lifetime * 365) + 1\n timeseries_index = np.arange(len(results[0].timeseries_dc_power))\n year_index = np.arange(lifetime) + 1\n yearly_cost_index = []\n degradation_data = {}\n timeseries_dc_data = {}\n timeseries_ac_data = {}\n yearly_cost_data = {}\n yearly_fail_data = {}\n for i, comp in enumerate(results):\n # daily degradation\n degradation_data[f\"Realization {i+1}\"] = comp.module_degradation_factor\n\n # power\n timeseries_dc_data[f\"Realization {i+1}\"] = comp.timeseries_dc_power\n timeseries_ac_data[f\"Realization {i+1}\"] = comp.timeseries_ac_power\n\n # yearly cost and total fails for each component\n yearly_cost_index.append(f\"Realization {i+1}\")\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n if c not in yearly_cost_data:\n yearly_cost_data[c] = []\n if c not in yearly_fail_data:\n yearly_fail_data[c] = []\n\n yearly_cost_data[c] += list(np.sum(np.reshape(comp.costs[c], (lifetime, 365)), axis=1))\n # add total fails per year for each failure mode for this component level\n total_fails = np.zeros(lifetime * 365)\n for f in comp.summarize_failures(c).values():\n total_fails += f\n yearly_fail_data[c] += list(np.sum(np.reshape(total_fails, (lifetime, 365)), axis=1))\n\n # summary\n summary_index.append(f\"Realization {i+1}\")\n summary_data[\"lcoe\"] += [comp.lcoe]\n summary_data[\"npv\"] += [comp.npv]\n\n # ac energy\n # remove the first element from cf_energy_net because it is always 0, representing year 0\n cumulative_ac_energy = np.cumsum(comp.annual_energy)\n\n for i in range(int(lifetime)):\n 
summary_data[f\"annual_ac_energy_{i+1}\"] += [comp.annual_energy[i]]\n summary_data[f\"cumulative_ac_energy_{i+1}\"] += [cumulative_ac_energy[i]]\n\n # dc energy\n dc_energy = summarize_dc_energy(comp.timeseries_dc_power, lifetime)\n for i in range(len(dc_energy)):\n summary_data[f\"dc_energy_{i+1}\"] += [dc_energy[i]]\n\n # calculate total failures, availability, mttr, mtbf, etc\n for c in ck.component_keys:\n if not case.config.get(c, None):\n continue\n\n if f\"{c}_total_failures\" not in summary_data:\n summary_data[f\"{c}_total_failures\"] = [None] # no failures for base case\n if f\"{c}_mtbf\" not in summary_data:\n summary_data[f\"{c}_mtbf\"] = [None]\n if f\"{c}_mttr\" not in summary_data:\n summary_data[f\"{c}_mttr\"] = [None]\n if f\"{c}_mttd\" not in summary_data:\n summary_data[f\"{c}_mttd\"] = [None]\n\n if case.config[c][ck.CAN_FAIL]:\n sum_fails = comp.comps[c][\"cumulative_failures\"].sum()\n summary_data[f\"{c}_total_failures\"] += [sum_fails]\n for fail in case.config[c].get(ck.FAILURE, {}).keys():\n if f\"{c}_failures_by_type_{fail}\" not in summary_data:\n summary_data[f\"{c}_failures_by_type_{fail}\"] = [None]\n summary_data[f\"{c}_failures_by_type_{fail}\"] += [comp.comps[c][f\"failure_by_type_{fail}\"].sum()]\n\n # partial failures\n for fail in case.config[c].get(ck.PARTIAL_FAIL, {}).keys():\n if f\"{c}_failures_by_type_{fail}\" not in summary_data:\n summary_data[f\"{c}_failures_by_type_{fail}\"] = [None]\n summary_data[f\"{c}_failures_by_type_{fail}\"] += [comp.comps[c][f\"failure_by_type_{fail}\"].sum()]\n\n # if the component had no failures, set everything here and continue\n if sum_fails == 0:\n summary_data[f\"{c}_mtbf\"] += [lifetime * 365]\n summary_data[f\"{c}_mttr\"] += [0]\n summary_data[f\"{c}_mttd\"] += [0]\n else:\n # mean time between failure\n summary_data[f\"{c}_mtbf\"] += [lifetime * 365 * case.config[c][ck.NUM_COMPONENT] / sum_fails]\n\n # mean time to repair\n if case.config[c][ck.CAN_REPAIR]:\n # take the number of fails minus whatever components have not been repaired by the end of the simulation to get the number of repairs\n sum_repairs = sum_fails - len(comp.comps[c].loc[(comp.comps[c][\"state\"] == 0)])\n if sum_repairs > 0:\n summary_data[f\"{c}_mttr\"] += [comp.total_repair_time[c] / sum_repairs]\n else:\n summary_data[f\"{c}_mttr\"] += [0]\n else:\n summary_data[f\"{c}_mttr\"] += [0]\n\n # mean time to detection (mean time to acknowledge)\n if (\n case.config[c][ck.CAN_MONITOR]\n or case.config[c].get(ck.COMP_MONITOR, None)\n or case.config[c].get(ck.INDEP_MONITOR, None)\n ):\n # take the number of fails minus the components that have not been repaired and also not be detected by monitoring\n mask = (comp.comps[c][\"state\"] == 0) & (comp.comps[c][\"time_to_detection\"] > 1)\n sum_monitor = sum_fails - len(comp.comps[c].loc[mask])\n if sum_monitor > 0:\n summary_data[f\"{c}_mttd\"] += [comp.total_monitor_time[c] / sum_monitor]\n else:\n summary_data[f\"{c}_mttd\"] += [0]\n else:\n summary_data[f\"{c}_mttd\"] += [0]\n else:\n # mean time between failure\n summary_data[f\"{c}_total_failures\"] += [0]\n summary_data[f\"{c}_mtbf\"] += [lifetime * 365]\n summary_data[f\"{c}_mttr\"] += [0]\n summary_data[f\"{c}_mttd\"] += [0]\n\n # availability\n if f\"{c}_availability\" not in summary_data:\n summary_data[f\"{c}_availability\"] = [None]\n summary_data[f\"{c}_availability\"] += [\n (\n 1\n - (comp.comps[c][\"avail_downtime\"].sum() / (lifetime * case.annual_daylight_hours))\n / case.config[c][ck.NUM_COMPONENT]\n )\n ]\n\n # generate 
dataframes\n summary_results = pd.DataFrame(index=summary_index, data=summary_data)\n summary_results.index.name = \"Realization\"\n # reorder columns for summary results\n reorder = list(summary_results.columns[0:2]) # lcoe and npv\n reorder += list(summary_results.columns[lifetime * 3 + 2 :]) # failures and avail\n reorder += list(summary_results.columns[2 : lifetime * 3 + 2]) # energy\n summary_results = summary_results[reorder]\n\n degradation_results = pd.DataFrame(index=day_index, data=degradation_data)\n dc_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_dc_data)\n ac_power_results = pd.DataFrame(index=timeseries_index, data=timeseries_ac_data)\n dc_power_results.index.name = \"Hour\"\n ac_power_results.index.name = \"Hour\"\n degradation_results.index.name = \"Day\"\n\n cost_index = pd.MultiIndex.from_product([yearly_cost_index, year_index], names=[\"Realization\", \"Year\"])\n yearly_cost_results = pd.DataFrame(index=cost_index, data=yearly_cost_data)\n yearly_cost_results[\"total\"] = yearly_cost_results.sum(axis=1)\n\n # fails per year, same multi index as cost\n yearly_fail_results = pd.DataFrame(index=cost_index, data=yearly_fail_data)\n yearly_fail_results[\"total\"] = yearly_fail_results.sum(axis=1)\n\n stats_append = []\n summary_no_base = summary_results.iloc[1:]\n min = summary_no_base.min()\n min.name = \"min\"\n stats_append.append(min)\n\n max = summary_no_base.max()\n max.name = \"max\"\n stats_append.append(max)\n\n mean = summary_no_base.mean()\n mean.name = \"mean\"\n stats_append.append(mean)\n\n median = summary_no_base.median()\n median.name = \"median\"\n stats_append.append(median)\n\n std = summary_no_base.std()\n std.name = \"stddev\"\n stats_append.append(std)\n\n conf_interval = case.config[ck.CONF_INTERVAL]\n conf_int = cf_interval(1 - (conf_interval / 100), std, case.config[ck.NUM_REALIZATION])\n\n lower_conf = mean - conf_int\n lower_conf.name = f\"{conf_interval}% lower confidence interval of mean\"\n stats_append.append(lower_conf)\n\n upper_conf = mean + conf_int\n upper_conf.name = f\"{conf_interval}% upper confidence interval of mean\"\n stats_append.append(upper_conf)\n\n # p test, which is using the ppf of the normal distribituion with our calculated mean and std. 
We use scipy's functions for this\n # see https://help.helioscope.com/article/141-creating-a-p50-and-p90-with-helioscope\n for p in p_vals:\n values = []\n # calculate the p value for every column\n for m, s in zip(mean, std):\n if s != 0: # for columns with no STDDEV\n values.append(stats.norm.ppf((1 - p / 100), loc=m, scale=s))\n else:\n values.append(None)\n # save results\n values = pd.Series(values, index=mean.index)\n values.name = f\"P{p}\"\n stats_append.append(values)\n\n # since pandas wants to depercate append, gotta convert series into dataframes\n summary_results = pd.concat([summary_results, *[s.to_frame().transpose() for s in stats_append]])\n\n return [\n summary_results,\n degradation_results,\n dc_power_results,\n ac_power_results,\n yearly_cost_results,\n yearly_fail_results,\n ]\n\n\ndef graph_results(case: SamCase, results: List[Components], save_path: str = None) -> None:\n \"\"\"\n Generate graphs from a list of Component objects from each realization\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n results (:obj:`list(Components)`): List of component objects that contain the results for each realization\n save_path (str, Optional): Path to save graphs to, if provided\n \"\"\"\n lifetime = case.config[ck.LIFETIME_YRS]\n colors = [\n \"r\",\n \"g\",\n \"b\",\n \"c\",\n \"m\",\n \"y\",\n \"k\",\n \"tab:orange\",\n \"tab:brown\",\n \"lime\",\n \"tab:gray\",\n \"indigo\",\n \"navy\",\n \"pink\",\n \"coral\",\n \"yellow\",\n \"teal\",\n \"fuchsia\",\n \"palegoldenrod\",\n \"darkgreen\",\n ]\n # base case data to compare to\n base_losses = case.base_losses\n base_load = np.array(case.base_load) if case.base_load is not None else None\n base_ac_energy = np.array(case.base_ac_energy)\n base_annual_energy = np.array(case.base_annual_energy)\n base_tax_cash_flow = np.array(case.base_tax_cash_flow)\n\n # parse data\n avg_ac_energy = np.zeros(len(case.base_ac_energy)) # since length is variable based on frequency of weather file\n avg_annual_energy = np.zeros(lifetime)\n avg_losses = np.zeros(len(ck.losses))\n avg_tax_cash_flow = np.zeros(lifetime + 1) # add 1 for year 0\n avg_failures = np.zeros((len(ck.component_keys), lifetime * 365)) # 7 types of components\n\n # computing the average across every realization\n for comp in results:\n avg_ac_energy += np.array(comp.timeseries_ac_power)\n avg_annual_energy += np.array(comp.annual_energy)\n avg_losses += np.array(list(comp.losses.values()))\n avg_tax_cash_flow += np.array(comp.tax_cash_flow)\n for i, c in enumerate(ck.component_keys):\n if not case.config.get(c, None):\n continue\n for f in comp.summarize_failures(c).values():\n avg_failures[i] += f\n\n # monthly and annual energy\n avg_ac_energy /= len(results)\n avg_annual_energy /= len(results)\n avg_losses /= len(results)\n avg_tax_cash_flow /= len(results)\n avg_failures /= len(results)\n\n # sum up failures to be per year\n avg_failures = np.sum(np.reshape(avg_failures, (len(ck.component_keys), lifetime, 365)), axis=2)\n # determine the frequency of the data, same as frequncy of supplied weather file\n total = int(len(avg_ac_energy) / lifetime)\n if total == 8760:\n freq = 1\n else:\n freq = 0\n while total > 8760:\n freq += 1\n total /= freq\n\n avg_ac_energy = np.reshape(avg_ac_energy[0::freq], (lifetime, 8760)) # yearly energy by hour\n avg_ac_energy = np.sum(avg_ac_energy, axis=0) / lifetime # yearly energy average\n avg_ac_energy = np.reshape(avg_ac_energy, (365, 24)) # day energy by hour\n avg_day_energy_by_hour = 
avg_ac_energy.copy() # copy for heatmap yearly energy generation\n avg_ac_energy = np.sum(avg_ac_energy, axis=1) # energy per day\n\n base_ac_energy = np.reshape(base_ac_energy[0::freq], (lifetime, 8760))\n base_ac_energy = np.sum(base_ac_energy, axis=0) / lifetime\n base_ac_energy = np.reshape(base_ac_energy, (365, 24))\n base_day_energy_by_hour = base_ac_energy.copy() # copy for heatmap yearly energy generation\n base_ac_energy = np.sum(base_ac_energy, axis=1)\n\n # daily load, load is the same between realizations and base\n if base_load is not None:\n base_load = np.reshape(base_load, (365, 24))\n base_load = np.sum(base_load, axis=1)\n\n avg_losses = {k: v for k, v in zip(ck.losses, avg_losses)} # create losses dictionary\n\n # calculate per month energy averaged across every year on every realization\n current_month = datetime(datetime.utcnow().year, 1, 1)\n # relative deltas allow dynamic month lengths such that each month has the proper number of days\n delta = relativedelta(months=1)\n start = 0\n monthly_energy = {}\n monthly_load = {}\n base_monthly_energy = {}\n for _ in range(12):\n month = current_month.strftime(\"%b\")\n num_days = ((current_month + delta) - current_month).days # number of days in this month\n\n monthly_energy[month] = np.sum(avg_ac_energy[start : start + num_days])\n base_monthly_energy[month] = np.sum(base_ac_energy[start : start + num_days])\n\n if base_load is not None:\n monthly_load[month] = np.sum(base_load[start : start + num_days])\n\n current_month += delta\n start += num_days\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n ax1.bar(list(monthly_energy.keys()), list(monthly_energy.values()))\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Month\")\n ax1.set_ylabel(\"kWh\")\n\n ax2.bar(list(monthly_energy.keys()), list(base_monthly_energy.values()))\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Month\")\n ax2.set_ylabel(\"kWh\")\n\n fig.suptitle(\"Monthly Energy Production\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Average Monthly Energy Production.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # graph the monthly energy against the monthly load\n if base_load is not None:\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n ind = np.arange(len(monthly_energy))\n ax1.bar(ind - 0.2, list(monthly_energy.values()), width=0.4, label=\"AC Energy\")\n ax1.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color=\"tab:gray\", label=\"Electricity Load\")\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Month\")\n ax1.set_xticks(ind)\n ax1.set_xticklabels(labels=list(monthly_energy.keys()))\n ax1.set_ylabel(\"kWh\")\n\n ax2.bar(ind - 0.2, list(base_monthly_energy.values()), width=0.4)\n ax2.bar(ind + 0.2, list(monthly_load.values()), width=0.4, color=\"tab:gray\")\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Month\")\n ax2.set_xticks(ind)\n ax2.set_xticklabels(labels=list(monthly_energy.keys()))\n ax2.set_ylabel(\"kWh\")\n\n fig.legend()\n fig.suptitle(\"Monthly Energy and Load\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Average Monthly Energy and Load.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n # add 1 to have years 1->25\n ax1.bar(np.arange(lifetime) + 1, 
avg_annual_energy)\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Year\")\n ax1.set_ylabel(\"kWh\")\n\n ax2.bar(np.arange(lifetime) + 1, base_annual_energy)\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Year\")\n ax2.set_ylabel(\"kWh\")\n\n fig.suptitle(\"Annual Energy Production\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Average Annual Energy Production.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # this helper function just makes it easier since the base case requires this as well\n def gen_loss_data(losses):\n # losses\n loss_data = {\n \"POA front-side shading loss\": losses[\"annual_poa_shading_loss_percent\"],\n \"POA front-side soiling loss\": losses[\"annual_poa_soiling_loss_percent\"],\n \"POA front-side reflection (IAM) loss\": losses[\"annual_poa_cover_loss_percent\"],\n \"DC module deviation from STC\": losses[\"annual_dc_module_loss_percent\"],\n \"DC inverter MPPT clipping loss\": losses[\"annual_dc_mppt_clip_loss_percent\"],\n \"DC mismatch loss\": losses[\"annual_dc_mismatch_loss_percent\"],\n \"DC diodes and connections loss\": losses[\"annual_dc_diodes_loss_percent\"],\n \"DC wiring loss\": losses[\"annual_dc_wiring_loss_percent\"],\n \"DC tracking loss\": losses[\"annual_dc_tracking_loss_percent\"],\n \"DC nameplate loss\": losses[\"annual_dc_nameplate_loss_percent\"],\n \"DC power optimizer loss\": losses[\"annual_dc_optimizer_loss_percent\"],\n \"DC performance adjustment loss\": losses[\"annual_dc_perf_adj_loss_percent\"],\n \"AC inverter power clipping loss\": losses[\"annual_ac_inv_clip_loss_percent\"],\n \"AC inverter power consumption loss\": losses[\"annual_ac_inv_pso_loss_percent\"],\n \"AC inverter night tare loss\": losses[\"annual_ac_inv_pnt_loss_percent\"],\n \"AC inverter efficiency loss\": losses[\"annual_ac_inv_eff_loss_percent\"],\n \"AC wiring loss\": losses[\"ac_loss\"],\n \"Transformer loss percent\": losses[\"annual_xfmr_loss_percent\"],\n \"AC performance adjustment loss\": losses[\"annual_ac_perf_adj_loss_percent\"],\n \"AC transmission loss\": losses[\"annual_transmission_loss_percent\"],\n }\n\n return loss_data\n\n loss_data = gen_loss_data(avg_losses)\n base_loss_data = gen_loss_data(base_losses)\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n for i, (k, c) in enumerate(zip(sorted(list(loss_data.keys())), colors)):\n ax1.bar(i, loss_data[k], width=0.3, color=c, label=k)\n ax2.bar(i, base_loss_data[k], width=0.3, color=c)\n\n ax1.set_title(\"Realization Average\")\n ax2.set_title(\"Base Case\")\n\n # remove x axis labels\n ax1.xaxis.set_visible(False)\n ax2.xaxis.set_visible(False)\n\n ax1.set_ylabel(\"Percent\")\n ax2.set_ylabel(\"Percent\")\n\n fig.legend(bbox_to_anchor=(0.8, 0.0, 0.5, 0.5))\n fig.suptitle(\"Annual Energy Loss\")\n fig.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, \"Annual Energy Loss.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # heatmap of ac energy averaged throughout each year\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n # calculate the min/max value of the base case and realizations for coloring consistency\n vmin = np.amin([np.amin(avg_day_energy_by_hour), np.amin(base_day_energy_by_hour)])\n vmax = np.amax([np.amax(avg_day_energy_by_hour), np.amax(base_day_energy_by_hour)])\n\n # transpose so the x axis is day\n cb = ax1.pcolormesh(\n 
np.arange(365), np.arange(24), avg_day_energy_by_hour.T, cmap=\"plasma\", vmin=vmin, vmax=vmax, shading=\"auto\"\n )\n ax2.pcolormesh(\n np.arange(365), np.arange(24), base_day_energy_by_hour.T, cmap=\"plasma\", vmin=vmin, vmax=vmax, shading=\"auto\"\n )\n\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Day\")\n ax1.set_ylabel(\"Hour\")\n\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Day\")\n ax2.set_ylabel(\"Hour\")\n\n fig.suptitle(\"Yearly System Power Generated (kW)\")\n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([1, 0.15, 0.05, 0.7])\n fig.colorbar(cb, cax=cbar_ax)\n\n with warnings.catch_warnings(): # matplotlib sucks\n warnings.simplefilter(\"ignore\")\n fig.tight_layout()\n\n if save_path:\n plt.savefig(os.path.join(save_path, \"Yearly System Power Generated.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n fig.set_figheight(5)\n fig.set_figwidth(10)\n\n ax1.bar(np.arange(lifetime + 1), avg_tax_cash_flow)\n ax1.set_title(\"Realization Average\")\n ax1.set_xlabel(\"Year\")\n ax1.set_ylabel(\"USD\")\n\n ax2.bar(np.arange(lifetime + 1), base_tax_cash_flow)\n ax2.set_title(\"Base Case\")\n ax2.set_xlabel(\"Year\")\n ax2.set_ylabel(\"USD\")\n\n # determine if stored value is pretax or after tax cash flow, depending on financial model\n flow = None\n try:\n case.output(\"cf_after_tax_cash_flow\")\n flow = \"After\"\n except AttributeError:\n case.output(\"cf_pretax_cashflow\")\n flow = \"Pre\"\n\n fig.suptitle(f\"{flow} Tax Cash Flow for System Lifetime\")\n fig.tight_layout()\n if save_path:\n plt.savefig(\n os.path.join(save_path, f\"{flow} Tax Cash Flow for System Lifetime.png\"), bbox_inches=\"tight\", dpi=200\n )\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # box plot for lcoe\n lcoe = np.array([comp.lcoe for comp in results])\n plt.boxplot(lcoe, vert=True, labels=[\"LCOE\"])\n plt.title(\"LCOE Box Plot for Realizations\")\n plt.ylabel(\"LCOE (cents/kWh)\")\n plt.tight_layout()\n\n if save_path:\n plt.savefig(os.path.join(save_path, \"LCOE Box Plot.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close() # clear plot\n\n # number of failures per component per year averaged across the realizations\n for i, c in enumerate(ck.component_keys):\n if not case.config.get(c, None) or np.count_nonzero(avg_failures[i]) == 0:\n continue\n plt.plot(np.arange(lifetime) + 1, avg_failures[i], marker=\"o\", markersize=5, color=colors[i])\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of Failures\")\n plt.title(f\"Number of failures for {c} per year\")\n plt.tight_layout()\n if save_path:\n plt.savefig(\n os.path.join(save_path, f\"{c.capitalize()} Failures Per Year.png\"), bbox_inches=\"tight\", dpi=200\n )\n else:\n plt.show()\n\n plt.close()\n\n # plot total number of failures\n plt.plot(\n np.arange(lifetime) + 1, np.sum(avg_failures, axis=0).T, label=\"total\", marker=\"o\", markersize=5, color=\"lime\"\n )\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of Failures\")\n plt.title(f\"Total number of failures per year\")\n plt.tight_layout()\n if save_path:\n plt.savefig(os.path.join(save_path, f\"Total Failures Per Year.png\"), bbox_inches=\"tight\", dpi=200)\n else:\n plt.show()\n\n plt.close()\n\n\ndef pvrpm_sim(\n case: SamCase,\n save_results: bool = False,\n save_graphs: bool = False,\n progress_bar: bool = False,\n debug: int = 0,\n threads: int = 1,\n) -> List[Components]:\n \"\"\"\n Run the PVRPM simulation on a specific case. 
Results will be saved to the folder specified in the configuration.\n\n Args:\n case (:obj:`SamCase`): The loaded and verified case to use with the simulation\n save_results (bool, Optional): Whether to save output csv results\n save_graphs (bool, Optional): Whether to save output graphs\n progress_bar (bool, Optional): Whether to display progress bar for each realization\n debug (int, Optional): Whether to save simulation state every `debug` days (0 to turn off)\n threads (int, Optional): Number of threads to use for paralizing realizations\n\n Returns:\n :obj:`list(Components)`: Returns the list of results Component objects for each realization\n \"\"\"\n # tqdm multiprocessing setup\n mp.freeze_support() # for Windows support\n tqdm.set_lock(mp.RLock()) # for managing output contention\n\n save_path = case.config[ck.RESULTS_FOLDER]\n lifetime = case.config[ck.LIFETIME_YRS]\n if threads <= -1:\n threads = mp.cpu_count()\n elif threads == 0:\n threads = 1\n\n logger.info(\"Running base case simulation...\")\n start = time.time()\n case.base_case_sim()\n logger.info(\"Base case simulation took: {:.2f} seconds\".format(time.time() - start))\n\n # realize what we are doing in life\n results = []\n args = [(case, True, i + 1, progress_bar, debug) for i in range(case.config[ck.NUM_REALIZATION])]\n with mp.Pool(threads, initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),)) as p:\n results = p.starmap(run_system_realization, args)\n\n logger.info(\"Generating results...\")\n\n # gen all those results\n (\n summary_results,\n degradation_results,\n dc_power_results,\n ac_power_results,\n yearly_cost_results,\n yearly_fail_results,\n ) = gen_results(case, results,)\n\n # finally, graph results\n if save_graphs:\n graph_results(case, results, save_path=save_path)\n logger.info(f\"Graphs saved to {save_path}\")\n else:\n graph_results(case, results)\n\n # save results\n if save_results:\n summary_results.to_csv(os.path.join(save_path, \"PVRPM_Summary_Results.csv\"), index=True)\n degradation_results.to_csv(os.path.join(save_path, \"Daily_Degradation.csv\"), index=True)\n dc_power_results.to_csv(os.path.join(save_path, \"Timeseries_DC_Power.csv\"), index=True)\n ac_power_results.to_csv(os.path.join(save_path, \"Timeseries_AC_Power.csv\"), index=True)\n yearly_cost_results.to_csv(os.path.join(save_path, \"Yearly_Costs_By_Component.csv\"), index=True)\n yearly_fail_results.to_csv(os.path.join(save_path, \"Yearly_Failures_By_Component.csv\"), index=True)\n logger.info(f\"Results saved to {save_path}\")\n\n return results\n" ]
[ [ "scipy.stats.norm.ppf", "numpy.amax", "numpy.sqrt", "pandas.Series", "numpy.cumsum", "pandas.DataFrame", "matplotlib.pyplot.tight_layout", "numpy.reshape", "numpy.arange", "matplotlib.pyplot.close", "numpy.count_nonzero", "numpy.zeros", "matplotlib.pyplot.title", "numpy.power", "numpy.amin", "pandas.MultiIndex.from_product", "numpy.floor", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.boxplot", "numpy.random.seed", "matplotlib.pyplot.subplots", "scipy.stats.t.ppf", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
tempcyc/networkx
[ "cae83ba501c242567cb2454f97f851898276f06e" ]
[ "networkx/linalg/laplacianmatrix.py" ]
[ "\"\"\"Laplacian matrix of graphs.\n\"\"\"\n# Copyright (C) 2004-2013 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n__author__ = \"\\n\".join(['Aric Hagberg <[email protected]>',\n 'Pieter Swart ([email protected])',\n 'Dan Schult ([email protected])',\n 'Alejandro Weinstein <[email protected]>'])\n__all__ = ['laplacian_matrix',\n 'normalized_laplacian_matrix',\n 'directed_laplacian_matrix']\n\n@not_implemented_for('directed')\ndef laplacian_matrix(G, nodelist=None, weight='weight'):\n \"\"\"Return the Laplacian matrix of G.\n\n The graph Laplacian is the matrix L = D - A, where\n A is the adjacency matrix and D is the diagonal matrix of node degrees.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n L : SciPy sparse matrix\n The Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges weights are summed.\n\n See Also\n --------\n to_numpy_matrix\n normalized_laplacian_matrix\n \"\"\"\n import scipy.sparse\n if nodelist is None:\n nodelist = G.nodes()\n A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,\n format='csr')\n n,m = A.shape\n diags = A.sum(axis=1)\n D = scipy.sparse.spdiags(diags.flatten(), [0], m, n, format='csr')\n return D - A\n\n@not_implemented_for('directed')\ndef normalized_laplacian_matrix(G, nodelist=None, weight='weight'):\n r\"\"\"Return the normalized Laplacian matrix of G.\n\n The normalized graph Laplacian is the matrix\n\n .. math::\n\n NL = D^{-1/2} L D^{-1/2}\n\n where `L` is the graph Laplacian and `D` is the diagonal matrix of\n node degrees.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n L : NumPy matrix\n The normalized Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges weights are summed.\n See to_numpy_matrix for other options.\n\n If the Graph contains selfloops, D is defined as diag(sum(A,1)), where A is\n the adjencency matrix [2]_.\n\n See Also\n --------\n laplacian_matrix\n\n References\n ----------\n .. [1] Fan Chung-Graham, Spectral Graph Theory,\n CBMS Regional Conference Series in Mathematics, Number 92, 1997.\n .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized\n Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,\n March 2007.\n \"\"\"\n import scipy\n import scipy.sparse\n if nodelist is None:\n nodelist = G.nodes()\n A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,\n format='csr')\n # the convention for normalized Laplacian is to not count self loops\n # twice in the diagonal. 
So we remove one here.\n for n,_ in G.selfloop_edges():\n A[n,n] -= 1\n n,m = A.shape\n diags = A.sum(axis=1).flatten()\n D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')\n L = D - A\n with scipy.errstate(divide='ignore'):\n diags_sqrt = 1.0/scipy.sqrt(diags)\n diags_sqrt[scipy.isinf(diags_sqrt)] = 0\n DH = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format='csr')\n return DH.dot(L.dot(DH))\n\n###############################################################################\n# Code based on\n# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/\n\n@not_implemented_for('undirected')\n@not_implemented_for('multigraph')\ndef directed_laplacian_matrix(G, nodelist=None, weight='weight',\n walk_type=None, alpha=0.95):\n r\"\"\"Return the directed Laplacian matrix of G.\n\n The graph directed Laplacian is the matrix\n\n .. math::\n\n L = I - (\\Phi^{1/2} P \\Phi^{-1/2} + \\Phi^{-1/2} P^T \\Phi^{1/2} ) / 2\n\n where `I` is the identity matrix, `P` is the transition matrix of the\n graph, and `\\Phi` a matrix with the Perron vector of `P` in the diagonal and\n zeros elsewhere.\n\n Depending on the value of walk_type, `P` can be the transition matrix\n induced by a random walk, a lazy random walk, or a random walk with\n teleportation (PageRank).\n\n Parameters\n ----------\n G : DiGraph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n walk_type : string or None, optional (default=None)\n If None, `P` is selected depending on the properties of the\n graph. Otherwise is one of 'random', 'lazy', or 'pagerank'\n\n alpha : real\n (1 - alpha) is the teleportation probability used with pagerank\n\n Returns\n -------\n L : NumPy array\n Normalized Laplacian of G.\n\n Raises\n ------\n NetworkXError\n If NumPy cannot be imported\n\n NetworkXNotImplemnted\n If G is not a DiGraph\n\n Notes\n -----\n Only implemented for DiGraphs\n\n See Also\n --------\n laplacian_matrix\n\n References\n ----------\n .. 
[1] Fan Chung (2005).\n Laplacians and the Cheeger inequality for directed graphs.\n Annals of Combinatorics, 9(1), 2005\n \"\"\"\n import scipy as sp\n from scipy.sparse import identity, spdiags, linalg\n if walk_type is None:\n if nx.is_strongly_connected(G):\n if nx.is_aperiodic(G):\n walk_type = \"random\"\n else:\n walk_type = \"lazy\"\n else:\n walk_type = \"pagerank\"\n\n M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,\n dtype=float)\n n, m = M.shape\n if walk_type in [\"random\", \"lazy\"]:\n DI = spdiags(1.0/sp.array(M.sum(axis=1).flat), [0], n, n)\n if walk_type == \"random\":\n P = DI * M\n else:\n I = identity(n)\n P = (I + DI * M) / 2.0\n\n elif walk_type == \"pagerank\":\n if not (0 < alpha < 1):\n raise nx.NetworkXError('alpha must be between 0 and 1')\n # this is using a dense representation\n M = M.todense()\n # add constant to dangling nodes' row\n dangling = sp.where(M.sum(axis=1) == 0)\n for d in dangling[0]:\n M[d] = 1.0 / n\n # normalize\n M = M / M.sum(axis=1)\n P = alpha * M + (1 - alpha) / n\n else:\n raise nx.NetworkXError(\"walk_type must be random, lazy, or pagerank\")\n\n evals, evecs = linalg.eigs(P.T, k=1)\n v = evecs.flatten().real\n p = v / v.sum()\n sqrtp = sp.sqrt(p)\n Q = spdiags(sqrtp, [0], n, n) * P * spdiags(1.0/sqrtp, [0], n, n)\n I = sp.identity(len(G))\n\n return I - (Q + Q.T) /2.0\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import numpy\n except:\n raise SkipTest(\"NumPy not available\")\n" ]
[ [ "scipy.errstate", "scipy.isinf", "scipy.sparse.spdiags", "scipy.sqrt", "scipy.sparse.identity", "scipy.sparse.linalg.eigs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
simo-tuomisto/portfolio
[ "7f70bdfe027fcab75970e5f8a81036ca905c893b" ]
[ "Computational Nanoscience 2013 - Final project/Code/plot_data.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as mpl\nfrom matplotlib.ticker import NullFormatter\nimport sys\nfrom glob import glob\nimport re\nimport os\n\nif __name__==\"__main__\":\n\n\t# Plot these ranges\n\tplotranges = {\n\t5.0\t:\t[0.009\t,\t0.011],\n\t10.0:\t[0.007\t,\t0.011],\n\t15.0:\t[0.005\t,\t0.010],\n\t20.0:\t[0.005\t,\t0.010],\n\t25.0:\t[0.005\t,\t0.010],\n\t30.0:\t[0.005\t,\t0.010]\n\t}\n\t\n\tcolormap_f={\n\t0.005 \t: 'b',\n\t0.006\t: 'c',\n\t0.007\t: 'g',\n\t0.008\t: 'y',\n\t0.009\t: 'r',\n\t0.010\t: 'm'\n\t}\n\t\n\tcolormap_r={\n\t5 \t: 'b',\n\t10\t: 'c',\n\t15\t: 'g',\n\t20\t: 'y',\n\t25\t: 'r',\n\t30\t: 'm'\n\t}\n\n\tstrainreg\t= re.compile('.*-r_(?P<r>.+)-f_(?P<f>.+)-t_(?P<t>.+)_strain.npy')\n\t\n\tstrainfiles = glob('*_strain.npy')\n\t\n\tavglen = 3\n\t\n\trdict = dict()\n\t\n\tfor strainfile in strainfiles:\n\t\tmatch\t= strainreg.match(strainfile)\n\t\tif match != None:\n\t\t\tstressfile = strainfile[:-10] + 'stress.npy'\n\t\t\tif os.path.exists(stressfile):\n\t\t\t\tgroups \t\t= match.groupdict()\n\t\t\t\tr\t\t\t= float(groups['r'])\n\t\t\t\tif r not in rdict:\n\t\t\t\t\trdict[r] = []\n\t\t\t\tf\t\t\t= float(groups['f'])\n\t\t\t\tt\t\t\t= int(groups['t'])\n\t\t\t\tstraindata\t= np.load(strainfile)\n\t\t\t\tstressdata\t= np.load(stressfile)\n\t\t\t\trdict[r].append([f,t,straindata,stressdata])\n\t\t\t\t\n\tmeasured_data\t= []\n\t\t\t\t\n\tfor r,dataarrays in rdict.items():\n\t\t\n\t\tif r not in plotranges:\n\t\t\tcontinue\n\t\t\n\t\tlowlimit \t= plotranges[r][0]\n\t\thighlimit \t= plotranges[r][1]\n\t\t\n\t\tfig1 = mpl.figure(facecolor='white',figsize=(12,9))\n\t\tfig2 = mpl.figure(facecolor='white',figsize=(12,9))\n\t\tfig3 = mpl.figure(facecolor='white',figsize=(12,9))\n\t\tfor dataarray in dataarrays:\n\t\t\tf,t,straindata,stressdata = dataarray\n\t\t\tstressdata = stressdata/(np.pi*np.power(r,2))\n\t\t\tif ((f<lowlimit) or (f>highlimit)):\n\t\t\t\tcontinue\n\t\t\tavgstress\t= np.zeros_like(stressdata)\n\t\t\tfor i in np.arange(avglen,len(stressdata)-avglen-1):\n\t\t\t\tavgstress[i] = np.average(stressdata[i-avglen:i+avglen+1])\n\t\t\t#mpl.loglog(straindata, stressdata)\n\t\t\tstressmax\t= np.amax(stressdata)\n\t\t\tstrain = (straindata - straindata[0])/straindata[0]\n\t\t\tstrainmax\t= np.amax(strain)\n\t\t\tstrainrate\t= strainmax/len(strain)\n\t\t\tmeasured_data.append([r,f,stressmax,strainmax,strainrate])\n\t\t\tmpl.figure(fig1.number)\n\t\t\tmpl.plot(strain[avglen:-avglen-1], avgstress[avglen:-avglen-1], label='f=%f' % f, color=colormap_f.get(f,'k'))\n\n\t\t\tmpl.figure(fig2.number)\n\t\t\tmpl.plot(0.5*np.arange(0, len(strain)),strain, label='f=%f' % f, color=colormap_f.get(f,'k'))\n\t\t\t\n\t\t\tif (f == 0.008):\n\t\t\t\tmpl.figure(fig3.number)\n\t\t\t\tt = 0.5*np.arange(avglen, len(avgstress)+avglen)\n\t\t\t\tmpl.plot(t[avgstress>0],avgstress[avgstress>0],label='f=%f' % f, color=colormap_f.get(f,'k'))\n\t\tmpl.figure(fig1.number)\n\t\tmpl.title('r=%d' % int(r))\n\t\tmpl.xlabel('strain')\n\t\tmpl.ylabel('stress')\n\t\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\t\tmpl.legend(loc=1)\n\t\tmpl.savefig('strain_vs_stress-r_%d.pdf' % int(r))\n\t\t\n\t\tmpl.figure(fig2.number)\n\t\tmpl.title('r=%d' % int(r))\n\t\tmpl.xlabel('time')\n\t\tmpl.ylabel('strain')\n\t\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\t\tmpl.legend(loc=3)\n\t\tmpl.savefig('time_vs_strain-r_%d.pdf' % int(r))\n\t\t\n\t\tmpl.figure(fig3.number)\n\t\tmpl.title('r=%d' % 
int(r))\n\t\tmpl.xlabel('time')\n\t\tmpl.ylabel('strain')\n\t\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\t\t#mpl.legend(loc=3)\n\t\tmpl.savefig('time_vs_stress-r_%d.pdf' % int(r))\n\t\t#break\n\t\t\n\tmeasured_data = np.asfarray(measured_data)\n\t\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstressmax = measured_data[measured_data[:,1] == f,2]\n\t\tif (f==0.009):\n\t\t\tfit \t= np.polyfit(np.log(r), np.log(stressmax), deg=1)\n\t\t\tfitr \t= r\n\t\tmpl.plot(r,stressmax,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.plot(r,stressmax,linestyle='--', color=colormap_f.get(f,'k'))\n\tmpl.plot(fitr,np.exp(np.polyval(fit,np.log(fitr))), label='Fit with exponent %f' % fit[0])\n\tmpl.xlabel('r')\n\tmpl.ylabel('Maximum stress')\n\tmpl.legend(loc=1)\n\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_stressmax.pdf')\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\t\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstressmax = measured_data[measured_data[:,1] == f,2]\n\t\tmpl.loglog(r,stressmax,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.loglog(r,stressmax,linestyle='--', color=colormap_f.get(f,'k'))\n\tmpl.xlabel('r')\n\tmpl.ylabel('Maximum stress')\n\tmpl.legend(loc=4)\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_stressmax_loglog.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor r in np.unique(measured_data[:,0]):\n\t\tf = measured_data[measured_data[:,0] == r,1]\n\t\tstressmax = measured_data[measured_data[:,0] == r,2]\n\t\tmpl.plot(f,stressmax,'^', color=colormap_r.get(r,'k'),label='r=%d' % r)\n\t\tmpl.plot(f,stressmax,linestyle='--', color=colormap_r.get(r,'k'))\n\t\tmpl.xlabel('f')\n\t\tmpl.ylabel('Maximum stress')\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.legend(loc=4)\n\tmpl.savefig('f_vs_stressmax.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstrainmax = measured_data[measured_data[:,1] == f,3]\n\t\tmpl.plot(r,strainmax,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.plot(r,strainmax,linestyle='--', color=colormap_f.get(f,'k'))\n\tmpl.xlabel('r')\n\tmpl.ylabel('Strain at the time of failure')\n\tmpl.legend(loc=0)\n\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_strainmax.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor r in np.unique(measured_data[:,0]):\n\t\tf = measured_data[measured_data[:,0] == r,1]\n\t\tstrainmax = measured_data[measured_data[:,0] == r,3]\n\t\tmpl.plot(f,strainmax,'^', color=colormap_r.get(r,'k'),label='r=%d' % r)\n\t\tmpl.plot(f,strainmax,linestyle='--', color=colormap_r.get(r,'k'))\n\t\tmpl.xlabel('f')\n\t\tmpl.ylabel('Strain at the time of failure')\n\t#mpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.legend(loc=0)\n\tmpl.savefig('f_vs_strainmax.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor f in np.unique(measured_data[:,1]):\n\t\tr = measured_data[measured_data[:,1] == f,0]\n\t\tstrainrate = measured_data[measured_data[:,1] == f,4]\n\t\tif (f==0.010):\n\t\t\tfit \t= np.polyfit(np.log(r), np.log(strainrate), deg=1)\n\t\t\tfitr \t= r\n\t\tmpl.plot(r,strainrate,'^', color=colormap_f.get(f,'k'),label='f=%f' % f)\n\t\tmpl.plot(r,strainrate,linestyle='--', 
color=colormap_f.get(f,'k'))\n\tmpl.plot(fitr,np.exp(np.polyval(fit,np.log(fitr))), label='Fit with exponent %f' % fit[0])\n\tmpl.xlabel('r')\n\tmpl.ylabel('Strain rate')\n\tmpl.legend(loc=0)\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.savefig('r_vs_strainrate.pdf')\n\t\n\tmpl.figure(facecolor='white',figsize=(12,9))\n\tfor r in np.unique(measured_data[:,0]):\n\t\tf = measured_data[measured_data[:,0] == r,1]\n\t\tstrainrate = measured_data[measured_data[:,0] == r,4]\n\t\tmpl.plot(f,strainrate,'^', color=colormap_r.get(r,'k'),label='r=%d' % r)\n\t\tmpl.plot(f,strainrate,linestyle='--', color=colormap_r.get(r,'k'))\n\t\tmpl.xlabel('f')\n\t\tmpl.ylabel('Strain rate')\n\tmpl.gca().yaxis.set_major_formatter(NullFormatter())\n\tmpl.legend(loc=3)\n\tmpl.savefig('f_vs_strainrate.pdf')\n\t" ]
[ [ "matplotlib.pyplot.legend", "numpy.amax", "numpy.log", "matplotlib.pyplot.gca", "numpy.unique", "numpy.power", "matplotlib.pyplot.figure", "numpy.load", "matplotlib.pyplot.savefig", "numpy.asfarray", "matplotlib.ticker.NullFormatter", "numpy.zeros_like", "matplotlib.pyplot.xlabel", "numpy.average", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Guangxuan-Xiao/deepsnap
[ "b0b222c8093b6273c648c51585c9ebbb2f4112fa" ]
[ "deepsnap/batch.py" ]
[ "import torch\nfrom deepsnap.graph import Graph\nfrom deepsnap.hetero_graph import HeteroGraph\nfrom typing import (\n Callable,\n Dict,\n List\n)\n\n\nclass Batch(Graph):\n r\"\"\"\n A plain old python object modeling a batch of\n :class:`deepsnap.graph.Graph` objects as one big (disconnected) graph,\n with :class:`torch_geometric.data.Data` being the\n base class, all its methods can also be used here.\n In addition, single graphs can be reconstructed via the assignment vector\n :obj:`batch`, which maps each node to its respective graph identifier.\n \"\"\"\n def __init__(self, batch=None, **kwargs):\n super(Batch, self).__init__(**kwargs)\n\n self.batch = batch\n self.__data_class__ = Graph\n self.__slices__ = None\n\n @staticmethod\n def collate(follow_batch=[], transform=None, **kwargs):\n return lambda batch: Batch.from_data_list(\n batch, follow_batch, transform, **kwargs\n )\n\n @staticmethod\n def from_data_list(\n data_list: List[Graph],\n follow_batch: List = None,\n transform: Callable = None,\n **kwargs\n ):\n r\"\"\"\n Constructs A :class:`deepsnap.batch.Batch` object from a python list\n holding :class:`torch_geometric.data.Data` objects.\n The assignment vector :obj:`batch` is created on the fly.\n Additionally, creates assignment batch vectors for each key in\n :obj:`follow_batch`.\n\n Args:\n data_list (list): A list of `deepsnap.graph.Graph` objects.\n follow_batch (list, optional): Creates assignment batch vectors\n for each key.\n transform: If apply transform when batching.\n **kwargs: Other parameters.\n \"\"\"\n if follow_batch is None:\n follow_batch = []\n if transform is not None:\n data_list = [\n data.apply_transform(\n transform,\n deep_copy=True,\n **kwargs,\n )\n for data in data_list\n ]\n # is_train is in data.keys, but it shouldn't be.\n keys = [set(data.keys) for data in data_list]\n keys = list(set.union(*keys))\n assert \"batch\" not in keys\n\n batch, cumsum = Batch._init_batch_fields(keys, follow_batch)\n batch.__data_class__ = data_list[0].__class__\n batch.batch = []\n for i, data in enumerate(data_list):\n # Note: in heterogeneous graph, __inc__ logic is different\n Batch._collate_dict(\n data, cumsum,\n batch.__slices__, batch,\n data, follow_batch, i=i\n )\n if isinstance(data, Graph):\n if isinstance(data, HeteroGraph):\n num_nodes = sum(data.num_nodes().values())\n else:\n num_nodes = data.num_nodes\n else:\n raise TypeError(\n \"element in self.graphs of unexpected type\"\n )\n if num_nodes is not None:\n item = torch.full((num_nodes, ), i, dtype=torch.long)\n batch.batch.append(item)\n\n if num_nodes is None:\n batch.batch = None\n\n Batch._dict_list_to_tensor(batch, data_list[0])\n\n return batch.contiguous()\n\n @staticmethod\n def _init_batch_fields(keys, follow_batch):\n batch = Batch()\n batch.__slices__ = {key: [0] for key in keys}\n\n for key in keys:\n batch[key] = []\n\n for key in follow_batch:\n batch[f\"{key}_batch\"] = []\n\n cumsum = {key: 0 for key in keys}\n return batch, cumsum\n\n @staticmethod\n def _collate_dict(\n curr_dict,\n cumsum: Dict[str, int],\n slices,\n batched_dict,\n graph,\n follow_batch,\n i=None\n ):\n r\"\"\" Called in from_data_list to collate a dictionary.\n This can also be applied to Graph object, since it has support for\n keys and __getitem__().\n\n Args:\n curr_dict: current dictionary to be added to the\n collated dictionary.\n cumsum: cumulative sum to be used for indexing.\n slices: a dictionary of the same structure as batched_dict,\n slices[key] indicates the indices to slice batch[key] 
into\n tensors for all graphs in the batch.\n batched_dict: the batched dictionary of the same structure\n as curr_dict. But all graph data are batched together.\n \"\"\"\n if isinstance(curr_dict, dict):\n keys = curr_dict.keys()\n else:\n keys = curr_dict.keys\n for key in keys:\n item = curr_dict[key]\n if isinstance(item, dict):\n # recursively collate every key in the dictionary\n if isinstance(batched_dict[key], list):\n # nested dictionary not initialized yet\n assert len(batched_dict[key]) == 0\n # initialize the nested dictionary for batch\n cumsum[key] = {inner_key: 0 for inner_key in item.keys()}\n slices[key] = {inner_key: [0] for inner_key in item.keys()}\n batched_dict[key] = {}\n for inner_key in item.keys():\n batched_dict[key][inner_key] = []\n for inner_key in follow_batch:\n batched_dict[key][f\"{key}_batch\"] = []\n Batch._collate_dict(\n item, cumsum[key],\n slices[key], batched_dict[key],\n graph, follow_batch, i=i\n )\n continue\n if torch.is_tensor(item) and item.dtype != torch.bool:\n item = item + cumsum[key]\n if torch.is_tensor(item):\n size = item.size(graph.__cat_dim__(key, curr_dict[key]))\n else:\n size = 1\n slices[key].append(size + slices[key][-1])\n cumsum[key] = cumsum[key] + graph.__inc__(key, item)\n batched_dict[key].append(item)\n\n if key in follow_batch:\n item = torch.full((size, ), i, dtype=torch.long)\n batched_dict[f\"{key}_batch\"].append(item)\n\n @staticmethod\n def _dict_list_to_tensor(dict_of_list, graph):\n r\"\"\"Convert a dict/Graph with list as values to a dict/Graph with\n concatenated/stacked tensor as values.\n \"\"\"\n if isinstance(dict_of_list, dict):\n keys = dict_of_list.keys()\n else:\n keys = dict_of_list.keys\n for key in keys:\n if isinstance(dict_of_list[key], dict):\n # recursively convert the dictionary of list to dict of tensor\n Batch._dict_list_to_tensor(dict_of_list[key], graph)\n continue\n item = dict_of_list[key][0]\n if torch.is_tensor(item):\n if (\n Graph._is_graph_attribute(key)\n and item.ndim == 1\n and (not item.dtype == torch.long)\n and \"feature\" in key\n ):\n # special consideration: 1D tensor for graph\n # attribute (classification)\n # named as: \"graph_xx_feature\"\n # batch by stacking the first dim\n dict_of_list[key] = torch.stack(\n dict_of_list[key],\n dim=0\n )\n else:\n # concat at the __cat_dim__\n dict_of_list[key] = torch.cat(\n dict_of_list[key],\n dim=graph.__cat_dim__(key, item)\n )\n elif isinstance(item, (float, int)):\n dict_of_list[key] = torch.tensor(dict_of_list[key])\n\n def to_data_list(self):\n r\"\"\"\n Reconstructs the list of :class:`torch_geometric.data.Data` objects\n from the batch object.\n The batch object must have been created via :meth:`from_data_list` in\n order to be able reconstruct the initial objects.\n \"\"\"\n if self.__slices__ is None:\n raise RuntimeError(\n \"Cannot reconstruct data list from batch because the \"\n \"batch object was not created using Batch.from_data_list()\"\n )\n\n keys = [key for key in self.keys if key[-5:] != \"batch\"]\n cumsum = {key: 0 for key in keys}\n data_list = []\n for i in range(len(self.__slices__[keys[0]]) - 1):\n # i: from 0 up to num graphs in the batch\n data = self.__data_class__()\n self._reconstruct_dict(\n i, keys, data, cumsum, self.__slices__, self, data\n )\n data_list.append(data)\n\n return data_list\n\n def _reconstruct_dict(\n self, graph_idx: int, keys, data_dict,\n cumsum: Dict[str, int], slices, batched_dict, graph):\n\n for key in keys:\n if isinstance(batched_dict[key], dict):\n # recursively unbatch 
the dict\n data_dict[key] = {}\n inner_keys = [\n inner_key\n for inner_key in batched_dict[key].keys()\n if inner_key[-5:] != \"batch\"\n ]\n inner_cumsum = {inner_key: 0 for inner_key in inner_keys}\n inner_slices = slices[key]\n self._reconstruct_dict(\n graph_idx, inner_keys,\n data_dict[key], inner_cumsum,\n inner_slices, batched_dict[key], graph\n )\n continue\n\n if torch.is_tensor(batched_dict[key]):\n data_dict[key] = batched_dict[key].narrow(\n graph.__cat_dim__(key, batched_dict[key]),\n slices[key][graph_idx],\n slices[key][graph_idx + 1] - slices[key][graph_idx]\n )\n if batched_dict[key].dtype != torch.bool:\n data_dict[key] = data_dict[key] - cumsum[key]\n else:\n data_dict[key] = (\n batched_dict[key][\n slices[key][graph_idx]:slices[key][graph_idx + 1]\n ]\n )\n cumsum[key] = cumsum[key] + graph.__inc__(key, data_dict[key])\n\n @property\n def num_graphs(self) -> int:\n r\"\"\"\n Returns the number of graphs in the batch.\n\n Returns:\n int: The number of graphs in the batch.\n \"\"\"\n return self.batch[-1].item() + 1\n\n def apply_transform(\n self,\n transform,\n update_tensor: bool = True,\n update_graph: bool = False,\n deep_copy: bool = False,\n **kwargs\n ):\n r\"\"\"\n Applies a transformation to each graph object in parallel by first\n calling `to_data_list`, applying the transform, and then perform\n re-batching again to a `Batch`.\n A transform should edit the graph object,\n including changing the graph structure, and adding\n node/edge/graph attributes.\n The rest are automatically handled by the\n :class:`deepsnap.graph.Graph` object, including everything\n ended with index.\n\n Args:\n transform: Transformation function applied to each graph object.\n update_tensor: Whether use nx graph to update tensor attributes.\n update_graph: Whether use tensor attributes to update nx graphs.\n deep_copy: :obj:`True` if a new deep copy of batch is returned.\n This option allows modifying the batch of graphs without\n changing the graphs in the original dataset.\n kwargs: Parameters used in transform function in\n :class:`deepsnap.graph.Graph` objects.\n\n Returns:\n a batch object containing all transformed graph objects.\n\n \"\"\"\n # TODO: transductive setting, assert update_tensor == True\n return self.from_data_list(\n [\n Graph(graph).apply_transform(\n transform, update_tensor, update_graph, deep_copy, **kwargs\n )\n for graph in self.G\n ]\n )\n\n def apply_transform_multi(\n self,\n transform,\n update_tensors: bool = True,\n update_graphs: bool = False,\n deep_copy: bool = False,\n **kwargs\n ):\n r\"\"\"\n Comparison to apply_transform, this allows multiple graph objects\n to be returned by the supplied transform function.\n\n Args:\n transform: (Multiple return value) tranformation function\n applied to each graph object. It needs to return a tuple of\n Graph objects or internal .G (NetworkX) objects.\n\n Returns:\n a tuple of batch objects. The i-th batch object contains the i-th\n return value of the transform function applied to all graphs\n in the batch.\n \"\"\"\n g_lists = (\n zip(\n *[\n Graph(graph).apply_transform_multi(\n transform, update_tensors, update_graphs,\n deep_copy, **kwargs,\n )\n for graph in self.G\n ]\n )\n )\n return (self.from_data_list(g_list) for g_list in g_lists)\n" ]
[ [ "torch.stack", "torch.is_tensor", "torch.full", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
furkanc/Yolov3-Face-Recognition
[ "d3074490a6a7bf83925319ed521b557919d0af7e", "d3074490a6a7bf83925319ed521b557919d0af7e" ]
[ "face_module/mtcnn_pytorch/src/get_nets.py", "face_module/utils.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\nimport numpy as np\n\n\nclass Flatten(nn.Module):\n\n def __init__(self):\n super(Flatten, self).__init__()\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, c, h, w].\n Returns:\n a float tensor with shape [batch_size, c*h*w].\n \"\"\"\n\n # without this pretrained model isn't working\n x = x.transpose(3, 2).contiguous()\n\n return x.view(x.size(0), -1)\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n\n super(PNet, self).__init__()\n\n # suppose we have input with size HxW, then\n # after first layer: H - 2,\n # after pool: ceil((H - 2)/2),\n # after second conv: ceil((H - 2)/2) - 2,\n # after last conv: ceil((H - 2)/2) - 4,\n # and the same for W\n\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 10, 3, 1)),\n ('prelu1', nn.PReLU(10)),\n ('pool1', nn.MaxPool2d(2, 2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(10, 16, 3, 1)),\n ('prelu2', nn.PReLU(16)),\n\n ('conv3', nn.Conv2d(16, 32, 3, 1)),\n ('prelu3', nn.PReLU(32))\n ]))\n\n self.conv4_1 = nn.Conv2d(32, 2, 1, 1)\n self.conv4_2 = nn.Conv2d(32, 4, 1, 1)\n\n weights = np.load('face_module/mtcnn_pytorch/src/weights/pnet.npy')[()]\n for n, p in self.named_parameters():\n p.data = torch.FloatTensor(weights[n])\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, 3, h, w].\n Returns:\n b: a float tensor with shape [batch_size, 4, h', w'].\n a: a float tensor with shape [batch_size, 2, h', w'].\n \"\"\"\n x = self.features(x)\n a = self.conv4_1(x)\n b = self.conv4_2(x)\n a = F.softmax(a, dim=-1)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n\n super(RNet, self).__init__()\n\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 28, 3, 1)),\n ('prelu1', nn.PReLU(28)),\n ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(28, 48, 3, 1)),\n ('prelu2', nn.PReLU(48)),\n ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv3', nn.Conv2d(48, 64, 2, 1)),\n ('prelu3', nn.PReLU(64)),\n\n ('flatten', Flatten()),\n ('conv4', nn.Linear(576, 128)),\n ('prelu4', nn.PReLU(128))\n ]))\n\n self.conv5_1 = nn.Linear(128, 2)\n self.conv5_2 = nn.Linear(128, 4)\n\n weights = np.load('face_module/mtcnn_pytorch/src/weights/rnet.npy')[()]\n for n, p in self.named_parameters():\n p.data = torch.FloatTensor(weights[n])\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, 3, h, w].\n Returns:\n b: a float tensor with shape [batch_size, 4].\n a: a float tensor with shape [batch_size, 2].\n \"\"\"\n x = self.features(x)\n a = self.conv5_1(x)\n b = self.conv5_2(x)\n a = F.softmax(a, dim=-1)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n\n super(ONet, self).__init__()\n\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 32, 3, 1)),\n ('prelu1', nn.PReLU(32)),\n ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv2', nn.Conv2d(32, 64, 3, 1)),\n ('prelu2', nn.PReLU(64)),\n ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),\n\n ('conv3', nn.Conv2d(64, 64, 3, 1)),\n ('prelu3', nn.PReLU(64)),\n ('pool3', nn.MaxPool2d(2, 2, ceil_mode=True)),\n\n ('conv4', nn.Conv2d(64, 128, 2, 1)),\n ('prelu4', nn.PReLU(128)),\n\n ('flatten', Flatten()),\n ('conv5', nn.Linear(1152, 256)),\n ('drop5', nn.Dropout(0.25)),\n ('prelu5', nn.PReLU(256)),\n ]))\n\n self.conv6_1 = nn.Linear(256, 2)\n self.conv6_2 = nn.Linear(256, 4)\n self.conv6_3 = 
nn.Linear(256, 10)\n\n weights = np.load('face_module/mtcnn_pytorch/src/weights/onet.npy')[()]\n for n, p in self.named_parameters():\n p.data = torch.FloatTensor(weights[n])\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x: a float tensor with shape [batch_size, 3, h, w].\n Returns:\n c: a float tensor with shape [batch_size, 10].\n b: a float tensor with shape [batch_size, 4].\n a: a float tensor with shape [batch_size, 2].\n \"\"\"\n x = self.features(x)\n a = self.conv6_1(x)\n b = self.conv6_2(x)\n c = self.conv6_3(x)\n a = F.softmax(a, dim = -1)\n return c, b, a\n", "from datetime import datetime\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport io\nfrom torchvision import transforms as trans\nfrom face_module.data.data_pipe import de_preprocess\nimport torch\nfrom face_module.model import l2_norm\nimport pdb\nimport cv2\n\ndef separate_bn_paras(modules):\n if not isinstance(modules, list):\n modules = [*modules.modules()]\n paras_only_bn = []\n paras_wo_bn = []\n for layer in modules:\n if 'model' in str(layer.__class__):\n continue\n if 'container' in str(layer.__class__):\n continue\n else:\n if 'batchnorm' in str(layer.__class__):\n paras_only_bn.extend([*layer.parameters()])\n else:\n paras_wo_bn.extend([*layer.parameters()])\n return paras_only_bn, paras_wo_bn\n\ndef prepare_facebank(conf, model, mtcnn, tta = True):\n model.eval()\n embeddings = []\n names = [\"person\"]\n for path in conf.facebank_path.iterdir():\n if path.is_file():\n continue\n else:\n embs = []\n for file in path.iterdir():\n if not file.is_file():\n continue\n else:\n try:\n img = Image.open(file)\n except:\n continue\n if img.size != (112, 112):\n _, img = mtcnn.align(img)\n with torch.no_grad():\n if tta:\n mirror = trans.functional.hflip(img)\n emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))\n emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))\n embs.append(l2_norm(emb + emb_mirror))\n else:\n embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))\n if len(embs) == 0:\n continue\n embedding = torch.cat(embs).mean(0,keepdim=True)\n embeddings.append(embedding)\n names.append(path.name)\n embeddings = torch.cat(embeddings)\n names = np.array(names)\n torch.save(embeddings, str(conf.facebank_path/'facebank.pth'))\n np.save(conf.facebank_path/'names', names)\n return embeddings, names\n\ndef load_facebank(conf):\n embeddings = torch.load(str(conf.facebank_path/'facebank.pth'))\n names = np.load(conf.facebank_path/'names.npy')\n return embeddings, names\n\ndef face_reader(conf, conn, flag, boxes_arr, result_arr, learner, mtcnn, targets, tta):\n while True:\n try:\n image = conn.recv()\n except:\n continue\n try:\n bboxes, faces = mtcnn.align_multi(image, limit=conf.face_limit)\n except:\n bboxes = []\n\n results = learner.infer(conf, faces, targets, tta)\n\n if len(bboxes) > 0:\n print('bboxes in reader : {}'.format(bboxes))\n bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibiity faces\n bboxes = bboxes.astype(int)\n bboxes = bboxes + [-1,-1,1,1] # personal choice\n assert bboxes.shape[0] == results.shape[0],'bbox and faces number not same'\n bboxes = bboxes.reshape([-1])\n for i in range(len(boxes_arr)):\n if i < len(bboxes):\n boxes_arr[i] = bboxes[i]\n else:\n boxes_arr[i] = 0\n for i in range(len(result_arr)):\n if i < len(results):\n result_arr[i] = results[i]\n else:\n result_arr[i] = -1\n else:\n for i in range(len(boxes_arr)):\n boxes_arr[i] = 0 # by 
default,it's all 0\n for i in range(len(result_arr)):\n result_arr[i] = -1 # by default,it's all -1\n print('boxes_arr : {}'.format(boxes_arr[:4]))\n print('result_arr : {}'.format(result_arr[:4]))\n flag.value = 0\n\nhflip = trans.Compose([\n de_preprocess,\n trans.ToPILImage(),\n trans.functional.hflip,\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n\ndef hflip_batch(imgs_tensor):\n hfliped_imgs = torch.empty_like(imgs_tensor)\n for i, img_ten in enumerate(imgs_tensor):\n hfliped_imgs[i] = hflip(img_ten)\n return hfliped_imgs\n\ndef get_time():\n return (str(datetime.now())[:-10]).replace(' ','-').replace(':','-')\n\ndef gen_plot(fpr, tpr):\n \"\"\"Create a pyplot plot and save to buffer.\"\"\"\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n return buf\n\ndef draw_box_name(bbox,name,frame):\n frame = cv2.rectangle(frame,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),6)\n frame = cv2.putText(frame,\n name,\n (bbox[0],bbox[1]),\n cv2.FONT_HERSHEY_SIMPLEX,\n 2,\n (0,255,0),\n 3,\n cv2.LINE_AA)\n return frame\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.Dropout", "torch.nn.PReLU", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.FloatTensor", "numpy.load" ], [ "torch.empty_like", "matplotlib.pyplot.title", "torch.cat", "matplotlib.pyplot.switch_backend", "numpy.save", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "torch.no_grad", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.close", "numpy.load", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Xarthisius/yt
[ "aad3cfa3b4ebab7838352ab467275a27c26ff363", "321643c3abff64a6f132d98d0747f3558f7552a3", "321643c3abff64a6f132d98d0747f3558f7552a3", "aad3cfa3b4ebab7838352ab467275a27c26ff363", "aad3cfa3b4ebab7838352ab467275a27c26ff363", "aad3cfa3b4ebab7838352ab467275a27c26ff363", "321643c3abff64a6f132d98d0747f3558f7552a3", "aad3cfa3b4ebab7838352ab467275a27c26ff363" ]
[ "yt/frontends/halo_catalog/io.py", "yt/frontends/boxlib/io.py", "doc/source/cookbook/hse_field.py", "yt/frontends/moab/io.py", "yt/visualization/volume_rendering/utils.py", "yt/geometry/tests/test_particle_deposit.py", "yt/fields/field_detector.py", "yt/utilities/lib/cykdtree/tests/scaling.py" ]
[ "from collections import defaultdict\n\nimport numpy as np\n\nfrom yt.frontends.gadget_fof.io import IOHandlerGadgetFOFHaloHDF5\nfrom yt.funcs import parse_h5_attr\nfrom yt.units.yt_array import uvstack\nfrom yt.utilities.io_handler import BaseIOHandler\nfrom yt.utilities.on_demand_imports import _h5py as h5py\n\n\nclass IOHandlerYTHaloCatalog(BaseIOHandler):\n _dataset_type = \"ythalocatalog\"\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n raise NotImplementedError\n\n def _read_particle_coords(self, chunks, ptf):\n # This will read chunks and yield the results.\n chunks = list(chunks)\n data_files = set()\n # Only support halo reading for now.\n assert len(ptf) == 1\n assert list(ptf.keys())[0] == \"halos\"\n ptype = \"halos\"\n for chunk in chunks:\n for obj in chunk.objs:\n data_files.update(obj.data_files)\n pn = \"particle_position_%s\"\n for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):\n with h5py.File(data_file.filename, mode=\"r\") as f:\n units = parse_h5_attr(f[pn % \"x\"], \"units\")\n pos = data_file._get_particle_positions(ptype, f=f)\n x, y, z = (self.ds.arr(pos[:, i], units) for i in range(3))\n yield \"halos\", (x, y, z)\n\n def _yield_coordinates(self, data_file):\n pn = \"particle_position_%s\"\n with h5py.File(data_file.filename, mode=\"r\") as f:\n units = parse_h5_attr(f[pn % \"x\"], \"units\")\n x, y, z = (\n self.ds.arr(f[pn % ax][()].astype(\"float64\"), units) for ax in \"xyz\"\n )\n pos = uvstack([x, y, z]).T\n pos.convert_to_units(\"code_length\")\n yield \"halos\", pos\n\n def _read_particle_fields(self, chunks, ptf, selector):\n # Now we have all the sizes, and we can allocate\n chunks = list(chunks)\n data_files = set()\n # Only support halo reading for now.\n assert len(ptf) == 1\n assert list(ptf.keys())[0] == \"halos\"\n for chunk in chunks:\n for obj in chunk.objs:\n data_files.update(obj.data_files)\n pn = \"particle_position_%s\"\n for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):\n si, ei = data_file.start, data_file.end\n with h5py.File(data_file.filename, mode=\"r\") as f:\n for ptype, field_list in sorted(ptf.items()):\n units = parse_h5_attr(f[pn % \"x\"], \"units\")\n pos = data_file._get_particle_positions(ptype, f=f)\n x, y, z = (self.ds.arr(pos[:, i], units) for i in range(3))\n mask = selector.select_points(x, y, z, 0.0)\n del x, y, z\n if mask is None:\n continue\n for field in field_list:\n data = f[field][si:ei][mask].astype(\"float64\")\n yield (ptype, field), data\n\n def _count_particles(self, data_file):\n si, ei = data_file.start, data_file.end\n nhalos = data_file.header[\"num_halos\"]\n if None not in (si, ei):\n nhalos = np.clip(nhalos - si, 0, ei - si)\n return {\"halos\": nhalos}\n\n def _identify_fields(self, data_file):\n with h5py.File(data_file.filename, mode=\"r\") as f:\n fields = [\n (\"halos\", field) for field in f if not isinstance(f[field], h5py.Group)\n ]\n units = {(\"halos\", field): parse_h5_attr(f[field], \"units\") for field in f}\n return fields, units\n\n\nclass HaloDatasetIOHandler:\n \"\"\"\n Base class for io handlers to load halo member particles.\n \"\"\"\n\n def _read_particle_coords(self, chunks, ptf):\n pass\n\n def _read_particle_fields(self, dobj, ptf):\n # separate member particle fields from scalar fields\n scalar_fields = defaultdict(list)\n member_fields = defaultdict(list)\n for ptype, field_list in sorted(ptf.items()):\n for field in field_list:\n if (ptype, field) in self.ds.scalar_field_list:\n 
scalar_fields[ptype].append(field)\n else:\n member_fields[ptype].append(field)\n\n all_data = self._read_scalar_fields(dobj, scalar_fields)\n all_data.update(self._read_member_fields(dobj, member_fields))\n\n for field, field_data in all_data.items():\n yield field, field_data\n\n # This will be refactored.\n _read_particle_selection = IOHandlerGadgetFOFHaloHDF5._read_particle_selection\n\n\nclass IOHandlerYTHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog):\n _dataset_type = \"ythalo\"\n\n def _identify_fields(self, data_file):\n with h5py.File(data_file.filename, mode=\"r\") as f:\n scalar_fields = [\n (\"halos\", field) for field in f if not isinstance(f[field], h5py.Group)\n ]\n units = {(\"halos\", field): parse_h5_attr(f[field], \"units\") for field in f}\n if \"particles\" in f:\n id_fields = [(\"halos\", field) for field in f[\"particles\"]]\n else:\n id_fields = []\n\n return scalar_fields + id_fields, scalar_fields, id_fields, units\n\n def _read_member_fields(self, dobj, member_fields):\n all_data = defaultdict(lambda: np.empty(dobj.particle_number, dtype=np.float64))\n if not member_fields:\n return all_data\n field_start = 0\n for i, data_file in enumerate(dobj.field_data_files):\n start_index = dobj.field_data_start[i]\n end_index = dobj.field_data_end[i]\n pcount = end_index - start_index\n if pcount == 0:\n continue\n field_end = field_start + end_index - start_index\n with h5py.File(data_file.filename, mode=\"r\") as f:\n for ptype, field_list in sorted(member_fields.items()):\n for field in field_list:\n field_data = all_data[(ptype, field)]\n my_data = f[\"particles\"][field][start_index:end_index].astype(\n \"float64\"\n )\n field_data[field_start:field_end] = my_data\n field_start = field_end\n return all_data\n\n def _read_scalar_fields(self, dobj, scalar_fields):\n all_data = {}\n if not scalar_fields:\n return all_data\n with h5py.File(dobj.scalar_data_file.filename, mode=\"r\") as f:\n for ptype, field_list in sorted(scalar_fields.items()):\n for field in field_list:\n data = np.array([f[field][dobj.scalar_index]]).astype(\"float64\")\n all_data[(ptype, field)] = data\n return all_data\n", "import os\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom yt.frontends.chombo.io import parse_orion_sinks\nfrom yt.funcs import mylog\nfrom yt.geometry.selection_routines import GridSelector\nfrom yt.utilities.io_handler import BaseIOHandler\n\n\ndef _remove_raw(all_fields, raw_fields):\n centered_fields = set(all_fields)\n for raw in raw_fields:\n centered_fields.discard(raw)\n return list(centered_fields)\n\n\nclass IOHandlerBoxlib(BaseIOHandler):\n\n _dataset_type = \"boxlib_native\"\n\n def __init__(self, ds, *args, **kwargs):\n super().__init__(ds)\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n chunks = list(chunks)\n if any((not (ftype == \"boxlib\" or ftype == \"raw\") for ftype, fname in fields)):\n raise NotImplementedError\n rv = {}\n raw_fields = []\n for field in fields:\n if field[0] == \"raw\":\n nodal_flag = self.ds.nodal_flags[field[1]]\n num_nodes = 2 ** sum(nodal_flag)\n rv[field] = np.empty((size, num_nodes), dtype=\"float64\")\n raw_fields.append(field)\n else:\n rv[field] = np.empty(size, dtype=\"float64\")\n centered_fields = _remove_raw(fields, raw_fields)\n ng = sum(len(c.objs) for c in chunks)\n mylog.debug(\n \"Reading %s cells of %s fields in %s grids\",\n size,\n [f2 for f1, f2 in fields],\n ng,\n )\n ind = 0\n for chunk in chunks:\n data = self._read_chunk_data(chunk, centered_fields)\n for g in 
chunk.objs:\n for field in fields:\n if field in centered_fields:\n ds = data[g.id].pop(field)\n else:\n ds = self._read_raw_field(g, field)\n nd = g.select(selector, ds, rv[field], ind)\n ind += nd\n data.pop(g.id)\n return rv\n\n def _read_raw_field(self, grid, field):\n field_name = field[1]\n base_dir = self.ds.index.raw_file\n\n nghost = self.ds.index.raw_field_nghost[field_name]\n box_list = self.ds.index.raw_field_map[field_name][0]\n fn_list = self.ds.index.raw_field_map[field_name][1]\n offset_list = self.ds.index.raw_field_map[field_name][2]\n\n lev = grid.Level\n filename = base_dir + \"Level_%d/\" % lev + fn_list[grid.id]\n offset = offset_list[grid.id]\n box = box_list[grid.id]\n\n lo = box[0] - nghost\n hi = box[1] + nghost\n shape = hi - lo + 1\n with open(filename, \"rb\") as f:\n f.seek(offset)\n f.readline() # always skip the first line\n arr = np.fromfile(f, \"float64\", np.product(shape))\n arr = arr.reshape(shape, order=\"F\")\n return arr[\n tuple(\n slice(None) if (nghost[dim] == 0) else slice(nghost[dim], -nghost[dim])\n for dim in range(self.ds.dimensionality)\n )\n ]\n\n def _read_chunk_data(self, chunk, fields):\n data = {}\n grids_by_file = defaultdict(list)\n if len(chunk.objs) == 0:\n return data\n for g in chunk.objs:\n if g.filename is None:\n continue\n grids_by_file[g.filename].append(g)\n dtype = self.ds.index._dtype\n bpr = dtype.itemsize\n for filename in grids_by_file:\n grids = grids_by_file[filename]\n grids.sort(key=lambda a: a._offset)\n f = open(filename, \"rb\")\n for grid in grids:\n data[grid.id] = {}\n local_offset = grid._get_offset(f) - f.tell()\n count = grid.ActiveDimensions.prod()\n size = count * bpr\n for field in self.ds.index.field_order:\n if field in fields:\n # We read it ...\n f.seek(local_offset, os.SEEK_CUR)\n v = np.fromfile(f, dtype=dtype, count=count)\n v = v.reshape(grid.ActiveDimensions, order=\"F\")\n data[grid.id][field] = v\n local_offset = 0\n else:\n local_offset += size\n return data\n\n def _read_particle_coords(self, chunks, ptf):\n yield from self._read_particle_fields(chunks, ptf, None)\n\n def _read_particle_fields(self, chunks, ptf, selector):\n for chunk in chunks: # These should be organized by grid filename\n for g in chunk.objs:\n for ptype, field_list in sorted(ptf.items()):\n npart = g._pdata[ptype][\"NumberOfParticles\"]\n if npart == 0:\n continue\n\n fn = g._pdata[ptype][\"particle_filename\"]\n offset = g._pdata[ptype][\"offset\"]\n pheader = self.ds.index.particle_headers[ptype]\n\n with open(fn, \"rb\") as f:\n # read in the position fields for selection\n f.seek(offset + pheader.particle_int_dtype.itemsize * npart)\n rdata = np.fromfile(\n f, pheader.real_type, pheader.num_real * npart\n )\n x = np.asarray(rdata[0 :: pheader.num_real], dtype=np.float64)\n y = np.asarray(rdata[1 :: pheader.num_real], dtype=np.float64)\n if g.ds.dimensionality == 2:\n z = np.ones_like(y)\n z *= 0.5 * (g.LeftEdge[2] + g.RightEdge[2])\n else:\n z = np.asarray(\n rdata[2 :: pheader.num_real], dtype=np.float64\n )\n\n if selector is None:\n # This only ever happens if the call is made from\n # _read_particle_coords.\n yield ptype, (x, y, z)\n continue\n mask = selector.select_points(x, y, z, 0.0)\n if mask is None:\n continue\n for field in field_list:\n # handle the case that this is an integer field\n int_fnames = [\n fname for _, fname in pheader.known_int_fields\n ]\n if field in int_fnames:\n ind = int_fnames.index(field)\n f.seek(offset)\n idata = np.fromfile(\n f, pheader.int_type, pheader.num_int * npart\n )\n 
data = np.asarray(\n idata[ind :: pheader.num_int], dtype=np.float64\n )\n yield (ptype, field), data[mask].flatten()\n\n # handle case that this is a real field\n real_fnames = [\n fname for _, fname in pheader.known_real_fields\n ]\n if field in real_fnames:\n ind = real_fnames.index(field)\n data = np.asarray(\n rdata[ind :: pheader.num_real], dtype=np.float64\n )\n yield (ptype, field), data[mask].flatten()\n\n\nclass IOHandlerOrion(IOHandlerBoxlib):\n _dataset_type = \"orion_native\"\n\n _particle_filename = None\n\n @property\n def particle_filename(self):\n fn = self.ds.output_dir + \"/StarParticles\"\n if not os.path.exists(fn):\n fn = self.ds.output_dir + \"/SinkParticles\"\n self._particle_filename = fn\n return self._particle_filename\n\n _particle_field_index = None\n\n @property\n def particle_field_index(self):\n\n index = parse_orion_sinks(self.particle_filename)\n\n self._particle_field_index = index\n return self._particle_field_index\n\n def _read_particle_selection(self, chunks, selector, fields):\n rv = {}\n chunks = list(chunks)\n\n if isinstance(selector, GridSelector):\n\n if not (len(chunks) == len(chunks[0].objs) == 1):\n raise RuntimeError\n\n grid = chunks[0].objs[0]\n\n for ftype, fname in fields:\n rv[ftype, fname] = self._read_particles(grid, fname)\n\n return rv\n\n rv = {f: np.array([]) for f in fields}\n for chunk in chunks:\n for grid in chunk.objs:\n for ftype, fname in fields:\n data = self._read_particles(grid, fname)\n rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))\n return rv\n\n def _read_particles(self, grid, field):\n \"\"\"\n parses the Orion Star Particle text files\n\n \"\"\"\n\n particles = []\n\n if grid.NumberOfParticles == 0:\n return np.array(particles)\n\n def read(line, field):\n entry = line.strip().split(\" \")[self.particle_field_index[field]]\n return float(entry)\n\n try:\n lines = self._cached_lines\n for num in grid._particle_line_numbers:\n line = lines[num]\n particles.append(read(line, field))\n return np.array(particles)\n except AttributeError:\n fn = self.particle_filename\n with open(fn) as f:\n lines = f.readlines()\n self._cached_lines = lines\n for num in grid._particle_line_numbers:\n line = lines[num]\n particles.append(read(line, field))\n return np.array(particles)\n", "import numpy as np\n\nimport yt\n\n# Open a dataset from when there's a lot of sloshing going on.\n\nds = yt.load(\"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350\")\n\n# Define the components of the gravitational acceleration vector field by\n# taking the gradient of the gravitational potential\ngrad_fields = ds.add_gradient_fields((\"gas\", \"gravitational_potential\"))\n\n# We don't need to do the same for the pressure field because yt already\n# has pressure gradient fields. 
Now, define the \"degree of hydrostatic\n# equilibrium\" field.\n\n\ndef _hse(field, data):\n # Remember that g is the negative of the potential gradient\n gx = -data[(\"gas\", \"density\")] * data[(\"gas\", \"gravitational_potential_gradient_x\")]\n gy = -data[(\"gas\", \"density\")] * data[(\"gas\", \"gravitational_potential_gradient_y\")]\n gz = -data[(\"gas\", \"density\")] * data[(\"gas\", \"gravitational_potential_gradient_z\")]\n hx = data[(\"gas\", \"pressure_gradient_x\")] - gx\n hy = data[(\"gas\", \"pressure_gradient_y\")] - gy\n hz = data[(\"gas\", \"pressure_gradient_z\")] - gz\n h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))\n return h\n\n\nds.add_field(\n (\"gas\", \"HSE\"),\n function=_hse,\n units=\"\",\n take_log=False,\n display_name=\"Hydrostatic Equilibrium\",\n sampling_type=\"cell\",\n)\n\n# The gradient operator requires periodic boundaries. This dataset has\n# open boundary conditions.\nds.force_periodicity()\n\n# Take a slice through the center of the domain\nslc = yt.SlicePlot(ds, 2, [(\"gas\", \"density\"), (\"gas\", \"HSE\")], width=(1, \"Mpc\"))\n\nslc.save(\"hse\")\n", "import numpy as np\n\nfrom yt.funcs import mylog\nfrom yt.utilities.io_handler import BaseIOHandler\n\n\ndef field_dname(field_name):\n return f\"/tstt/elements/Hex8/tags/{field_name}\"\n\n\n# TODO all particle bits were removed\nclass IOHandlerMoabH5MHex8(BaseIOHandler):\n _dataset_type = \"moab_hex8\"\n\n def __init__(self, ds):\n super().__init__(ds)\n self._handle = ds._handle\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n chunks = list(chunks)\n assert len(chunks) == 1\n fhandle = self._handle\n rv = {}\n for field in fields:\n ftype, fname = field\n rv[field] = np.empty(size, dtype=fhandle[field_dname(fname)].dtype)\n ngrids = sum(len(chunk.objs) for chunk in chunks)\n mylog.debug(\n \"Reading %s cells of %s fields in %s blocks\",\n size,\n [fname for ft, fn in fields],\n ngrids,\n )\n for field in fields:\n ftype, fname = field\n ds = np.array(fhandle[field_dname(fname)][:], dtype=\"float64\")\n ind = 0\n for chunk in chunks:\n for g in chunk.objs:\n ind += g.select(selector, ds, rv[field], ind) # caches\n return rv\n\n\nclass IOHandlerMoabPyneHex8(BaseIOHandler):\n _dataset_type = \"moab_hex8_pyne\"\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n chunks = list(chunks)\n assert len(chunks) == 1\n rv = {}\n pyne_mesh = self.ds.pyne_mesh\n for field in fields:\n rv[field] = np.empty(size, dtype=\"float64\")\n ngrids = sum(len(chunk.objs) for chunk in chunks)\n mylog.debug(\n \"Reading %s cells of %s fields in %s blocks\",\n size,\n [fname for ftype, fname in fields],\n ngrids,\n )\n for field in fields:\n ftype, fname = field\n if pyne_mesh.structured:\n tag = pyne_mesh.mesh.tag_get_handle(\"idx\")\n hex_list = [ent for ent in pyne_mesh.structured_iterate_hex()]\n indices = pyne_mesh.mesh.tag_get_data(tag, hex_list).flatten()\n else:\n indices = slice(None)\n ds = np.asarray(getattr(pyne_mesh, fname)[indices], \"float64\")\n\n ind = 0\n for chunk in chunks:\n for g in chunk.objs:\n ind += g.select(selector, ds, rv[field], ind) # caches\n return rv\n", "import numpy as np\n\nfrom yt.data_objects.selection_objects.data_selection_objects import (\n YTSelectionContainer3D,\n)\nfrom yt.data_objects.static_output import Dataset\nfrom yt.utilities.lib import bounding_volume_hierarchy\nfrom yt.utilities.lib.image_samplers import (\n InterpolatedProjectionSampler,\n ProjectionSampler,\n VolumeRenderSampler,\n)\nfrom 
yt.utilities.on_demand_imports import NotAModule\n\ntry:\n from yt.utilities.lib.embree_mesh import mesh_traversal\n# Catch ValueError in case size of objects in Cython change\nexcept (ImportError, ValueError):\n mesh_traversal = NotAModule(\"pyembree\")\n\n\ndef data_source_or_all(data_source):\n if isinstance(data_source, Dataset):\n data_source = data_source.all_data()\n if not isinstance(data_source, (YTSelectionContainer3D, type(None))):\n raise RuntimeError(\n \"The data_source is not a valid 3D data container.\\n\"\n \"Expected an object of type YTSelectionContainer3D but received \"\n \"an object of type %s.\" % type(data_source)\n )\n return data_source\n\n\ndef new_mesh_sampler(camera, render_source, engine):\n params = ensure_code_unit_params(camera._get_sampler_params(render_source))\n args = (\n np.atleast_3d(params[\"vp_pos\"]),\n np.atleast_3d(params[\"vp_dir\"]),\n params[\"center\"],\n params[\"bounds\"],\n np.atleast_3d(params[\"image\"]).astype(\"float64\"),\n params[\"x_vec\"],\n params[\"y_vec\"],\n params[\"width\"],\n render_source.volume_method,\n )\n kwargs = {\"lens_type\": params[\"lens_type\"]}\n if engine == \"embree\":\n sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs)\n elif engine == \"yt\":\n sampler = bounding_volume_hierarchy.BVHMeshSampler(*args, **kwargs)\n return sampler\n\n\ndef new_volume_render_sampler(camera, render_source):\n params = ensure_code_unit_params(camera._get_sampler_params(render_source))\n params.update(transfer_function=render_source.transfer_function)\n params.update(transfer_function=render_source.transfer_function)\n params.update(num_samples=render_source.num_samples)\n args = (\n np.atleast_3d(params[\"vp_pos\"]),\n np.atleast_3d(params[\"vp_dir\"]),\n params[\"center\"],\n params[\"bounds\"],\n params[\"image\"],\n params[\"x_vec\"],\n params[\"y_vec\"],\n params[\"width\"],\n render_source.volume_method,\n params[\"transfer_function\"],\n params[\"num_samples\"],\n )\n kwargs = {\n \"lens_type\": params[\"lens_type\"],\n }\n if \"camera_data\" in params:\n kwargs[\"camera_data\"] = params[\"camera_data\"]\n if render_source.zbuffer is not None:\n kwargs[\"zbuffer\"] = render_source.zbuffer.z\n args[4][:] = np.reshape(\n render_source.zbuffer.rgba[:],\n (camera.resolution[0], camera.resolution[1], 4),\n )\n else:\n kwargs[\"zbuffer\"] = np.ones(params[\"image\"].shape[:2], \"float64\")\n sampler = VolumeRenderSampler(*args, **kwargs)\n return sampler\n\n\ndef new_interpolated_projection_sampler(camera, render_source):\n params = ensure_code_unit_params(camera._get_sampler_params(render_source))\n params.update(transfer_function=render_source.transfer_function)\n params.update(num_samples=render_source.num_samples)\n args = (\n np.atleast_3d(params[\"vp_pos\"]),\n np.atleast_3d(params[\"vp_dir\"]),\n params[\"center\"],\n params[\"bounds\"],\n params[\"image\"],\n params[\"x_vec\"],\n params[\"y_vec\"],\n params[\"width\"],\n render_source.volume_method,\n params[\"num_samples\"],\n )\n kwargs = {\"lens_type\": params[\"lens_type\"]}\n if render_source.zbuffer is not None:\n kwargs[\"zbuffer\"] = render_source.zbuffer.z\n else:\n kwargs[\"zbuffer\"] = np.ones(params[\"image\"].shape[:2], \"float64\")\n sampler = InterpolatedProjectionSampler(*args, **kwargs)\n return sampler\n\n\ndef new_projection_sampler(camera, render_source):\n params = ensure_code_unit_params(camera._get_sampler_params(render_source))\n params.update(transfer_function=render_source.transfer_function)\n 
params.update(num_samples=render_source.num_samples)\n args = (\n np.atleast_3d(params[\"vp_pos\"]),\n np.atleast_3d(params[\"vp_dir\"]),\n params[\"center\"],\n params[\"bounds\"],\n params[\"image\"],\n params[\"x_vec\"],\n params[\"y_vec\"],\n params[\"width\"],\n render_source.volume_method,\n params[\"num_samples\"],\n )\n kwargs = {\n \"lens_type\": params[\"lens_type\"],\n }\n if render_source.zbuffer is not None:\n kwargs[\"zbuffer\"] = render_source.zbuffer.z\n else:\n kwargs[\"zbuffer\"] = np.ones(params[\"image\"].shape[:2], \"float64\")\n sampler = ProjectionSampler(*args, **kwargs)\n return sampler\n\n\ndef get_corners(le, re):\n return np.array(\n [\n [le[0], le[1], le[2]],\n [re[0], le[1], le[2]],\n [re[0], re[1], le[2]],\n [le[0], re[1], le[2]],\n [le[0], le[1], re[2]],\n [re[0], le[1], re[2]],\n [re[0], re[1], re[2]],\n [le[0], re[1], re[2]],\n ],\n dtype=\"float64\",\n )\n\n\ndef ensure_code_unit_params(params):\n for param_name in [\"center\", \"vp_pos\", \"vp_dir\", \"width\"]:\n param = params[param_name]\n if hasattr(param, \"in_units\"):\n params[param_name] = param.in_units(\"code_length\")\n bounds = params[\"bounds\"]\n if hasattr(bounds[0], \"units\"):\n params[\"bounds\"] = tuple(b.in_units(\"code_length\").d for b in bounds)\n return params\n", "from numpy.testing import assert_allclose, assert_array_less, assert_raises\n\nimport yt\nfrom yt.loaders import load\nfrom yt.testing import fake_random_ds, requires_file\nfrom yt.utilities.exceptions import YTBoundsDefinitionError\n\n\ndef test_cic_deposit():\n ds = fake_random_ds(64, nprocs=8, particles=64 ** 3)\n my_reg = ds.arbitrary_grid(\n ds.domain_left_edge, ds.domain_right_edge, dims=[1, 800, 800]\n )\n f = (\"deposit\", \"all_cic\")\n assert_raises(YTBoundsDefinitionError, my_reg.__getitem__, f)\n\n\nRAMSES = \"output_00080/info_00080.txt\"\nRAMSES_small = \"ramses_new_format/output_00002/info_00002.txt\"\nISOGAL = \"IsolatedGalaxy/galaxy0030/galaxy0030\"\n\n\n@requires_file(RAMSES)\ndef test_one_zone_octree_deposit():\n ds = load(RAMSES)\n\n # Get a sphere centred on the main halo\n hpos = ds.arr(\n [0.5215110772898429, 0.5215110772898429, 0.5215110772898429], \"code_length\"\n )\n hrvir = ds.quan(0.042307235300540924, \"Mpc\")\n\n sp = ds.sphere(hpos, hrvir * 10)\n assert sp[\"deposit\", \"io_cic\"].shape == (1,)\n\n\n@requires_file(RAMSES)\n@requires_file(ISOGAL)\ndef test_mesh_sampling():\n for fn in (RAMSES, ISOGAL):\n ds = yt.load(fn)\n ds.add_mesh_sampling_particle_field((\"index\", \"x\"), ptype=\"all\")\n ds.add_mesh_sampling_particle_field((\"index\", \"dx\"), ptype=\"all\")\n\n dx = ds.r[\"all\", \"cell_index_dx\"]\n xc = ds.r[\"all\", \"cell_index_x\"]\n xp = ds.r[\"all\", \"particle_position_x\"]\n\n dist = xp - xc\n\n assert_array_less(dist, dx)\n assert_array_less(-dist, dx)\n\n\n@requires_file(RAMSES)\n@requires_file(ISOGAL)\ndef test_mesh_sampling_for_filtered_particles():\n for fn in (RAMSES, ISOGAL):\n ds = yt.load(fn)\n\n @yt.particle_filter(requires=[\"particle_position_x\"], filtered_type=\"io\")\n def left(pfilter, data):\n return (\n data[(pfilter.filtered_type, \"particle_position_x\")].to(\"code_length\")\n < 0.5\n )\n\n ds.add_particle_filter(\"left\")\n\n for f in ((\"index\", \"x\"), (\"index\", \"dx\"), (\"gas\", \"density\")):\n ds.add_mesh_sampling_particle_field(f, ptype=\"io\")\n ds.add_mesh_sampling_particle_field(f, ptype=\"left\")\n\n data_sources = (ds.all_data(), ds.box([0] * 3, [0.1] * 3))\n\n def test_source(ptype, src):\n # Test accessing\n src[ptype, 
\"cell_index_x\"]\n src[ptype, \"cell_index_dx\"]\n src[ptype, \"cell_gas_density\"]\n\n for ptype in (\"io\", \"left\"):\n for src in data_sources:\n test_source(ptype, src)\n\n\n@requires_file(RAMSES)\ndef test_mesh_sampling_with_indexing():\n # Access with index caching\n ds = yt.load(RAMSES)\n ds.add_mesh_sampling_particle_field((\"gas\", \"density\"), ptype=\"all\")\n\n ad = ds.all_data()\n ad[\"all\", \"cell_index\"]\n v1 = ad[\"all\", \"cell_gas_density\"]\n\n # Access with no index caching\n ds = yt.load(RAMSES)\n ds.add_mesh_sampling_particle_field((\"gas\", \"density\"), ptype=\"all\")\n\n ad = ds.all_data()\n v2 = ad[\"all\", \"cell_gas_density\"]\n\n # Check same answer is returned\n assert_allclose(v1, v2)\n\n\n@requires_file(RAMSES_small)\ndef test_mesh_sampling_vs_field_value_at_point():\n all_ds = (fake_random_ds(ndims=3, particles=500), yt.load(RAMSES_small))\n\n for ds in all_ds:\n ds.add_mesh_sampling_particle_field((\"gas\", \"density\"), ptype=\"all\")\n\n val = ds.r[\"all\", \"cell_gas_density\"]\n ref = ds.find_field_values_at_points(\n (\"gas\", \"density\"), ds.r[\"all\", \"particle_position\"]\n )\n\n assert_allclose(val, ref)\n", "from collections import defaultdict\n\nimport numpy as np\n\nfrom yt.units.yt_array import YTArray\nfrom yt.utilities.io_handler import io_registry\n\nfrom .field_exceptions import NeedsGridType\n\nfp_units = {\n \"bulk_velocity\": \"cm/s\",\n \"center\": \"cm\",\n \"normal\": \"\",\n \"cp_x_vec\": \"\",\n \"cp_y_vec\": \"\",\n \"cp_z_vec\": \"\",\n \"x_hat\": \"\",\n \"y_hat\": \"\",\n \"z_hat\": \"\",\n \"omega_baryon\": \"\",\n \"virial_radius\": \"cm\",\n \"observer_redshift\": \"\",\n \"source_redshift\": \"\",\n}\n\n\nclass FieldDetector(defaultdict):\n Level = 1\n NumberOfParticles = 1\n _read_exception = None\n _id_offset = 0\n domain_id = 0\n\n def __init__(self, nd=16, ds=None, flat=False, field_parameters=None):\n self.nd = nd\n self.flat = flat\n self._spatial = not flat\n self.ActiveDimensions = [nd, nd, nd]\n self.shape = tuple(self.ActiveDimensions)\n self.size = np.prod(self.ActiveDimensions)\n self.LeftEdge = [0.0, 0.0, 0.0]\n self.RightEdge = [1.0, 1.0, 1.0]\n self.dds = np.ones(3, \"float64\")\n if field_parameters is None:\n self.field_parameters = {}\n else:\n self.field_parameters = field_parameters\n\n class fake_dataset(defaultdict):\n pass\n\n if ds is None:\n # required attrs\n ds = fake_dataset(lambda: 1)\n ds[\"Massarr\"] = np.ones(6)\n ds.current_redshift = 0.0\n ds.omega_lambda = 0.0\n ds.omega_matter = 0.0\n ds.cosmological_simulation = 0\n ds.gamma = 5.0 / 3.0\n ds.hubble_constant = 0.7\n ds.domain_left_edge = np.zeros(3, \"float64\")\n ds.domain_right_edge = np.ones(3, \"float64\")\n ds.dimensionality = 3\n ds.force_periodicity()\n self.ds = ds\n\n class fake_index:\n class fake_io:\n def _read_data_set(io_self, data, field):\n return self._read_data(field)\n\n _read_exception = RuntimeError\n\n io = fake_io()\n\n def get_smallest_dx(self):\n return 1.0\n\n self.index = fake_index()\n self.requested = []\n self.requested_parameters = []\n if not self.flat:\n defaultdict.__init__(\n self,\n lambda: np.ones((nd, nd, nd), dtype=\"float64\")\n + 1e-4 * np.random.random((nd, nd, nd)),\n )\n else:\n defaultdict.__init__(\n self,\n lambda: np.ones((nd * nd * nd), dtype=\"float64\")\n + 1e-4 * np.random.random(nd * nd * nd),\n )\n\n def _reshape_vals(self, arr):\n if not self._spatial:\n return arr\n if len(arr.shape) == 3:\n return arr\n return arr.reshape(self.ActiveDimensions, order=\"C\")\n\n def 
__missing__(self, item):\n if not isinstance(item, tuple):\n field = (\"unknown\", item)\n else:\n field = item\n finfo = self.ds._get_field_info(*field)\n params, permute_params = finfo._get_needed_parameters(self)\n self.field_parameters.update(params)\n # For those cases where we are guessing the field type, we will\n # need to re-update -- otherwise, our item will always not have the\n # field type. This can lead to, for instance, \"unknown\" particle\n # types not getting correctly identified.\n # Note that the *only* way this works is if we also fix our field\n # dependencies during checking. Bug #627 talks about this.\n item = self.ds._last_freq\n if finfo is not None and finfo._function.__name__ != \"NullFunc\":\n try:\n for param, param_v in permute_params.items():\n for v in param_v:\n self.field_parameters[param] = v\n vv = finfo(self)\n if not permute_params:\n vv = finfo(self)\n except NeedsGridType as exc:\n ngz = exc.ghost_zones\n nfd = FieldDetector(\n self.nd + ngz * 2,\n ds=self.ds,\n field_parameters=self.field_parameters.copy(),\n )\n nfd._num_ghost_zones = ngz\n vv = finfo(nfd)\n if ngz > 0:\n vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]\n for i in nfd.requested:\n if i not in self.requested:\n self.requested.append(i)\n for i in nfd.requested_parameters:\n if i not in self.requested_parameters:\n self.requested_parameters.append(i)\n if vv is not None:\n if not self.flat:\n self[item] = vv\n else:\n self[item] = vv.ravel()\n return self[item]\n elif finfo is not None and finfo.sampling_type == \"particle\":\n io = io_registry[self.ds.dataset_type](self.ds)\n if hasattr(io, \"_vector_fields\") and (\n item in io._vector_fields or item[1] in io._vector_fields\n ):\n try:\n cols = io._vector_fields[item]\n except KeyError:\n cols = io._vector_fields[item[1]]\n # A vector\n self[item] = YTArray(\n np.ones((self.NumberOfParticles, cols)),\n finfo.units,\n registry=self.ds.unit_registry,\n )\n else:\n # Not a vector\n self[item] = YTArray(\n np.ones(self.NumberOfParticles),\n finfo.units,\n registry=self.ds.unit_registry,\n )\n if item == (\"STAR\", \"BIRTH_TIME\"):\n # hack for the artio frontend so we pass valid times to\n # the artio functions for calculating physical times\n # from internal times\n self[item] *= -0.1\n self.requested.append(item)\n return self[item]\n self.requested.append(item)\n if item not in self:\n self[item] = self._read_data(item)\n return self[item]\n\n def _debug(self):\n # We allow this to pass through.\n return\n\n def deposit(self, *args, **kwargs):\n from yt.data_objects.static_output import ParticleDataset\n from yt.frontends.stream.data_structures import StreamParticlesDataset\n\n if kwargs[\"method\"] == \"mesh_id\":\n if isinstance(self.ds, (StreamParticlesDataset, ParticleDataset)):\n raise ValueError\n return np.random.random((self.nd, self.nd, self.nd))\n\n def mesh_sampling_particle_field(self, *args, **kwargs):\n pos = args[0]\n npart = len(pos)\n return np.random.rand(npart)\n\n def smooth(self, *args, **kwargs):\n tr = np.random.random((self.nd, self.nd, self.nd))\n if kwargs[\"method\"] == \"volume_weighted\":\n return [tr]\n\n return tr\n\n def particle_operation(self, *args, **kwargs):\n return None\n\n def _read_data(self, field_name):\n self.requested.append(field_name)\n finfo = self.ds._get_field_info(*field_name)\n if finfo.sampling_type == \"particle\":\n self.requested.append(field_name)\n return np.ones(self.NumberOfParticles)\n return YTArray(\n defaultdict.__missing__(self, field_name),\n units=finfo.units,\n 
registry=self.ds.unit_registry,\n )\n\n def get_field_parameter(self, param, default=0.0):\n if self.field_parameters and param in self.field_parameters:\n return self.field_parameters[param]\n self.requested_parameters.append(param)\n if param in [\"center\", \"normal\"] or param.startswith(\"bulk\"):\n if param == \"bulk_magnetic_field\":\n if self.ds.unit_system.has_current_mks:\n unit = \"T\"\n else:\n unit = \"G\"\n else:\n unit = fp_units[param]\n return self.ds.arr(np.random.random(3) * 1e-2, unit)\n elif param in [\"surface_height\"]:\n return self.ds.quan(0.0, \"code_length\")\n elif param in [\"axis\"]:\n return 0\n elif param.startswith(\"cp_\"):\n ax = param[3]\n rv = self.ds.arr((0.0, 0.0, 0.0), fp_units[param])\n rv[\"xyz\".index(ax)] = 1.0\n return rv\n elif param.endswith(\"_hat\"):\n ax = param[0]\n rv = YTArray((0.0, 0.0, 0.0), fp_units[param])\n rv[\"xyz\".index(ax)] = 1.0\n return rv\n elif param == \"fof_groups\":\n return None\n elif param == \"mu\":\n return 1.0\n else:\n return default\n\n _num_ghost_zones = 0\n id = 1\n\n def apply_units(self, arr, units):\n return self.ds.arr(arr, units=units)\n\n def has_field_parameter(self, param):\n return param in self.field_parameters\n\n @property\n def fcoords(self):\n fc = np.array(\n np.mgrid[0 : 1 : self.nd * 1j, 0 : 1 : self.nd * 1j, 0 : 1 : self.nd * 1j]\n )\n if self.flat:\n fc.shape = (self.nd * self.nd * self.nd, 3)\n else:\n fc = fc.transpose()\n return self.ds.arr(fc, units=\"code_length\")\n\n @property\n def fcoords_vertex(self):\n fc = np.random.random((self.nd, self.nd, self.nd, 8, 3))\n if self.flat:\n fc.shape = (self.nd * self.nd * self.nd, 8, 3)\n return self.ds.arr(fc, units=\"code_length\")\n\n @property\n def icoords(self):\n ic = np.mgrid[\n 0 : self.nd - 1 : self.nd * 1j,\n 0 : self.nd - 1 : self.nd * 1j,\n 0 : self.nd - 1 : self.nd * 1j,\n ]\n if self.flat:\n ic.shape = (self.nd * self.nd * self.nd, 3)\n else:\n ic = ic.transpose()\n return ic\n\n @property\n def ires(self):\n ir = np.ones(self.nd ** 3, dtype=\"int64\")\n if not self.flat:\n ir.shape = (self.nd, self.nd, self.nd)\n return ir\n\n @property\n def fwidth(self):\n fw = np.ones((self.nd ** 3, 3), dtype=\"float64\") / self.nd\n if not self.flat:\n fw.shape = (self.nd, self.nd, self.nd, 3)\n return self.ds.arr(fw, units=\"code_length\")\n", "r\"\"\"Routines for tracking the scaling of the triangulation routines.\"\"\"\nimport cProfile\nimport os\nimport pstats\nimport time\n\nimport numpy as np\n\nfrom yt.utilities.lib.cykdtree.tests import run_test\n\n\ndef stats_run(\n npart,\n nproc,\n ndim,\n periodic=False,\n overwrite=False,\n display=False,\n suppress_final_output=False,\n):\n r\"\"\"Get timing stats using :package:`cProfile`.\n\n Args:\n npart (int): Number of particles.\n nproc (int): Number of processors.\n ndim (int): Number of dimensions.\n periodic (bool, optional): If True, the domain is assumed to be\n periodic. Defaults to False.\n overwrite (bool, optional): If True, the existing file for this\n set of input parameters if overwritten. Defaults to False.\n suppress_final_output (bool, optional): If True, the final output\n from spawned MPI processes is suppressed. This is mainly for\n timing purposes. 
Defaults to False.\n display (bool, optional): If True, display the profile results.\n Defaults to False.\n\n \"\"\"\n perstr = \"\"\n outstr = \"\"\n if periodic:\n perstr = \"_periodic\"\n if suppress_final_output:\n outstr = \"_noout\"\n fname_stat = f\"stat_{npart}part_{nproc}proc_{ndim}dim{perstr}{outstr}.txt\"\n if overwrite or not os.path.isfile(fname_stat):\n cProfile.run(\n \"from yt.utilities.lib.cykdtree.tests import run_test; \"\n + f\"run_test({npart}, {ndim}, nproc={nproc}, \"\n + f\"periodic={periodic}, \"\n + f\"suppress_final_output={suppress_final_output})\",\n fname_stat,\n )\n if display:\n p = pstats.Stats(fname_stat)\n p.sort_stats(\"time\").print_stats(10)\n return p\n return fname_stat\n\n\ndef time_run(\n npart, nproc, ndim, nrep=1, periodic=False, leafsize=10, suppress_final_output=False\n):\n r\"\"\"Get runing times using :package:`time`.\n\n Args:\n npart (int): Number of particles.\n nproc (int): Number of processors.\n ndim (int): Number of dimensions.\n nrep (int, optional): Number of times the run should be performed to\n get an average. Defaults to 1.\n periodic (bool, optional): If True, the domain is assumed to be\n periodic. Defaults to False.\n leafsize (int, optional): The maximum number of points that should be\n in any leaf in the tree. Defaults to 10.\n suppress_final_output (bool, optional): If True, the final output\n from spawned MPI processes is suppressed. This is mainly for\n timing purposes. Defaults to False.\n\n \"\"\"\n times = np.empty(nrep, \"float\")\n for i in range(nrep):\n t1 = time.time()\n run_test(\n npart,\n ndim,\n nproc=nproc,\n periodic=periodic,\n leafsize=leafsize,\n suppress_final_output=suppress_final_output,\n )\n t2 = time.time()\n times[i] = t2 - t1\n return np.mean(times), np.std(times)\n\n\ndef strong_scaling(\n npart=1e6,\n nrep=1,\n periodic=False,\n leafsize=10,\n overwrite=True,\n suppress_final_output=False,\n):\n r\"\"\"Plot the scaling with number of processors for a particular function.\n\n Args:\n npart (int, optional): Number of particles. Defaults to 1e6.\n nrep (int, optional): Number of times the run should be performed to\n get an average. Defaults to 1.\n periodic (bool, optional): If True, the domain is assumed to be\n periodic. Defaults to False.\n leafsize (int, optional): The maximum number of points that should be\n in any leaf in the tree. Defaults to 10.\n overwrite (bool, optional): If True, the existing file for this\n set of input parameters if overwritten. Defaults to False.\n suppress_final_output (bool, optional): If True, the final output\n from spawned MPI processes is suppressed. This is mainly for\n timing purposes. 
Defaults to False.\n\n \"\"\"\n import matplotlib.pyplot as plt\n\n npart = int(npart)\n perstr = \"\"\n outstr = \"\"\n if periodic:\n perstr = \"_periodic\"\n if suppress_final_output:\n outstr = \"_noout\"\n fname_plot = \"plot_strong_scaling_nproc_{}part{}_{}leafsize{}.png\".format(\n npart, perstr, leafsize, outstr\n )\n nproc_list = [1, 2, 4, 8] # , 16]\n ndim_list = [2, 3, 4]\n clr_list = [\"b\", \"r\", \"g\", \"m\"]\n times = np.empty((len(nproc_list), len(ndim_list), 2), \"float\")\n for j, nproc in enumerate(nproc_list):\n for i, ndim in enumerate(ndim_list):\n times[j, i, 0], times[j, i, 1] = time_run(\n npart,\n nproc,\n ndim,\n nrep=nrep,\n periodic=periodic,\n leafsize=leafsize,\n suppress_final_output=suppress_final_output,\n )\n print(f\"Finished {ndim}D on {nproc}.\")\n fig, axs = plt.subplots(1, 1)\n for i in range(len(ndim_list)):\n ndim = ndim_list[i]\n clr = clr_list[i]\n axs.errorbar(\n nproc_list,\n times[:, i, 0],\n yerr=times[:, i, 1],\n fmt=clr,\n label=f\"ndim = {ndim}\",\n )\n axs.set_xlabel(\"# of Processors\")\n axs.set_ylabel(\"Time (s)\")\n axs.legend()\n fig.savefig(fname_plot)\n print(\" \" + fname_plot)\n\n\ndef weak_scaling(\n npart=1e4,\n nrep=1,\n periodic=False,\n leafsize=10,\n overwrite=True,\n suppress_final_output=False,\n):\n r\"\"\"Plot the scaling with number of processors with a constant number of\n particles per processor for a particular function.\n\n Args:\n npart (int, optional): Number of particles per processor. Defaults to\n 1e4.\n nrep (int, optional): Number of times the run should be performed to\n get an average. Defaults to 1.\n periodic (bool, optional): If True, the domain is assumed to be\n periodic. Defaults to False.\n leafsize (int, optional): The maximum number of points that should be\n in any leaf in the tree. Defaults to 10.\n overwrite (bool, optional): If True, the existing file for this\n set of input parameters if overwritten. Defaults to False.\n suppress_final_output (bool, optional): If True, the final output\n from spawned MPI processes is suppressed. This is mainly for\n timing purposes. Defaults to False.\n\n \"\"\"\n import matplotlib.pyplot as plt\n\n npart = int(npart)\n perstr = \"\"\n outstr = \"\"\n if periodic:\n perstr = \"_periodic\"\n if suppress_final_output:\n outstr = \"_noout\"\n fname_plot = \"plot_weak_scaling_nproc_{}part{}_{}leafsize{}.png\".format(\n npart, perstr, leafsize, outstr\n )\n nproc_list = [1, 2, 4, 8, 16]\n ndim_list = [2, 3]\n clr_list = [\"b\", \"r\", \"g\", \"m\"]\n times = np.empty((len(nproc_list), len(ndim_list), 2), \"float\")\n for j, nproc in enumerate(nproc_list):\n for i, ndim in enumerate(ndim_list):\n times[j, i, 0], times[j, i, 1] = time_run(\n npart * nproc,\n nproc,\n ndim,\n nrep=nrep,\n periodic=periodic,\n leafsize=leafsize,\n suppress_final_output=suppress_final_output,\n )\n fig, axs = plt.subplots(1, 1)\n for i in range(len(ndim_list)):\n ndim = ndim_list[i]\n clr = clr_list[i]\n axs.errorbar(\n nproc_list,\n times[:, i, 0],\n yerr=times[:, i, 1],\n fmt=clr,\n label=f\"ndim = {ndim}\",\n )\n axs.set_xlabel(\"# of Processors\")\n axs.set_ylabel(\"Time (s)\")\n axs.legend()\n fig.savefig(fname_plot)\n print(\" \" + fname_plot)\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.clip" ], [ "numpy.product", "numpy.fromfile", "numpy.ones_like", "numpy.asarray", "numpy.concatenate", "numpy.array", "numpy.empty" ], [ "numpy.sqrt" ], [ "numpy.empty" ], [ "numpy.reshape", "numpy.atleast_3d", "numpy.array", "numpy.ones" ], [ "numpy.testing.assert_array_less", "numpy.testing.assert_raises", "numpy.testing.assert_allclose" ], [ "numpy.random.random", "numpy.ones", "numpy.random.rand", "numpy.prod", "numpy.array", "numpy.zeros" ], [ "numpy.std", "numpy.mean", "matplotlib.pyplot.subplots", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
d813s909q/tensortflow
[ "ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5", "ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5", "ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5", "ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5" ]
[ "tensorflow/python/ops/while_v2.py", "tensorflow/python/data/kernel_tests/multi_device_iterator_test.py", "tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py", "tensorflow/python/saved_model/save_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"while_v2 and gradient.\n\nThis is a version of while_loop that emits a single While op, as well as the\ngradient function for While ops produced by while_loop. This will eventually\nreplace the current tf.while_loop implementation once it reaches feature and\nperformance parity.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import func_graph as func_graph_module\nfrom tensorflow.python.framework import function_def_to_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import control_flow_util_v2 as util\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import gen_functional_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import list_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.util import nest\n\n# pylint: disable=protected-access\n\n# TODO(b/79881896): Handle external control dependencies. tf.while_loop allows\n# control dependencies on external nodes with at least 1 output.\n# Another idea is to create const nodes outside the loop and add control edges\n# to them and then pass those in as data inputs. This should probably be\n# handled in the CapturingGraph itself.\n\n\ndef while_loop(cond,\n body,\n loop_vars,\n shape_invariants=None,\n maximum_iterations=None,\n name=None,\n return_same_structure=True):\n \"\"\"Like tf.while_loop, except emits a single While op.\"\"\"\n maximum_iterations = _validate_and_convert_to_tensor(maximum_iterations)\n # Keep the original loop_vars around to know which args were TensorArrays.\n orig_loop_vars = loop_vars\n # Cache its length since we use it at multiple places below.\n len_orig_loop_vars = len(orig_loop_vars)\n\n # Convert TensorArrays to their flow variables. These get converted back to\n # TensorArrays before calling `cond` and `body`. 
See `wrapped_cond` and\n # `wrapped_body` below.\n loop_vars = list(_tensor_array_to_flow(orig_loop_vars))\n loop_vars = nest.map_structure(\n ops.internal_convert_to_tensor_or_indexed_slices, loop_vars)\n if shape_invariants is not None:\n nest.assert_same_structure(orig_loop_vars, shape_invariants)\n else:\n shape_invariants = nest.map_structure(lambda t: t.shape, loop_vars)\n\n if not name:\n name = \"while\"\n\n with ops.name_scope(name) as scope:\n with ops.name_scope(None):\n cond_name = util.unique_fn_name(scope, \"cond\")\n body_name = util.unique_fn_name(scope, \"body\")\n\n loop_counter = constant_op.constant(\n 0,\n dtype=maximum_iterations.dtype\n if maximum_iterations is not None else None,\n name=\"loop_counter\")\n # Add loop counter needed for computing gradients.\n loop_vars = [loop_counter] + loop_vars\n\n shape_invariants = type(shape_invariants)([tensor_shape.scalar()\n ]) + shape_invariants\n\n # Automatic control dependencies are added in defuns, but not in v1\n # graphs. Propagate that behavior here.\n add_control_dependencies = util.in_defun()\n\n # Build a `cond` wrapper that can handle the extra counter loop_var.\n def wrapped_cond(loop_counter, *args):\n # Convert the flow variables in `args` to TensorArrays. `args` should\n # already have the same structure as `orig_loop_vars` but currently there\n # is no nest.zip so we call `_pack_sequence_as` which flattens both\n # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays\n # and packs it into the structure of `orig_loop_vars`.\n if maximum_iterations is None:\n return cond(*_pack_sequence_as(orig_loop_vars, args))\n else:\n return math_ops.logical_and(\n loop_counter < maximum_iterations,\n cond(*_pack_sequence_as(orig_loop_vars, args)))\n\n cond_graph = func_graph_module.func_graph_from_py_func(\n cond_name,\n wrapped_cond,\n loop_vars, {},\n signature=_build_signature(loop_vars, shape_invariants),\n func_graph=util.WhileCondFuncGraph(cond_name),\n add_control_dependencies=add_control_dependencies)\n\n # Add external_captures of cond to the list of loop vars.\n # Note that external tensors will be treated as loop invariants, i.e.,\n # the value of that tensor in each iteration is the same as it was at the\n # beginning of the loop execution.\n loop_vars = loop_vars + cond_graph.external_captures\n shape_invariants = shape_invariants + type(shape_invariants)(\n [t.shape for t in cond_graph.external_captures])\n\n def wrapped_body(loop_counter, *args):\n \"\"\"Loop body augmented with counter update.\n\n Args:\n loop_counter: Loop counter which needs to be incremented in the body.\n *args: List of args\n args[:len_orig_loop_vars] - Args for the original loop body.\n args[len_orig_loop_vars:] - External captures of cond. These get\n passed through as is.\n\n Returns:\n A list of tensors the same length as args.\n \"\"\"\n # Convert the flow variables in `args` to TensorArrays. 
`args` should\n # already have the same structure as `orig_loop_vars` but currently there\n # is no nest.zip so we call `_pack_sequence_as` which flattens both\n # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays\n # and packs it into the structure of `orig_loop_vars`.\n outputs = body(\n *_pack_sequence_as(orig_loop_vars, args[:len_orig_loop_vars]))\n if not nest.is_sequence(outputs):\n outputs = [outputs]\n # Compare the structure of input and output of body converting the\n # top-level tuples to list to be compatible with legacy while_loop.\n nest.assert_same_structure(list(outputs), list(orig_loop_vars))\n\n outputs = _tensor_array_to_flow(outputs)\n\n # Return the external_captures of cond_graph as is, i.e., treat them as\n # loop invariants.\n # TODO(srbs): Update lowering code to create _Enter nodes with\n # is_constant=True for inputs that are directly passed to outputs.\n return [loop_counter + 1] + list(outputs) + list(\n args[len_orig_loop_vars:])\n\n body_graph = func_graph_module.func_graph_from_py_func(\n body_name,\n wrapped_body,\n loop_vars, {},\n signature=_build_signature(loop_vars, shape_invariants),\n func_graph=util.WhileBodyFuncGraph(body_name),\n add_control_dependencies=add_control_dependencies)\n # Add external captures of body to the list of loop vars.\n # Note that external tensors will be treated as loop invariants, i.e.,\n # the value of that tensor in each iteration is the same as it was at the\n # beginning of the loop execution.\n loop_vars = loop_vars + body_graph.external_captures\n # TODO(srbs): Update lowering code to create _Enter nodes with\n # is_constant=True for inputs that are directly passed to outputs.\n body_graph.outputs.extend(body_graph.internal_captures)\n\n # Capture `external_captures` of `body_graph` in `cond_graph` so that it\n # expects to receive those as arguments.\n # TODO(b/118457764): Dedup tensors that are captured in both the cond and\n # body. This logic already exists in cond_v2.\n with cond_graph.as_default():\n for external_capture in body_graph.external_captures:\n assert external_capture not in cond_graph.captures, (\n \"Looks like both cond and body are capturing the same tensor %s. \"\n \"This is not supported yet. For now consider passing,\"\n \" this as a loop variable.\" % str(external_capture))\n cond_graph.capture(external_capture)\n\n # Make sure that the shapes of the loop outputs are compatible with the\n # shape invariants, or the shapes of the loop vars if the invariants are not\n # specified.\n num_flattened_outputs = len(nest.flatten(orig_loop_vars))\n _check_shapes_compat(\n body_graph.outputs[1:1 + num_flattened_outputs],\n nest.flatten(shape_invariants[1:1 + len_orig_loop_vars]),\n nest.flatten(loop_vars[1:1 + len_orig_loop_vars]))\n flattened_loop_vars = nest.flatten(loop_vars)\n _check_num_inputs_outputs(cond_graph, body_graph,\n len(flattened_loop_vars))\n\n outputs = gen_functional_ops._while(\n flattened_loop_vars,\n util.create_new_tf_function(cond_graph),\n util.create_new_tf_function(body_graph),\n output_shapes=[t.shape for t in body_graph.outputs],\n name=scope)\n\n _copy_handle_data(body_graph.outputs, outputs)\n util.maybe_set_lowering_attr(outputs[0].op)\n _maybe_set_maximum_iterations_attr(outputs[0].op, maximum_iterations)\n\n # Return identities for each output of the While op, rather than the output\n # of the While op directly. 
This makes pruning work if the output of\n # while_loop() is fetched: the lowering pass converts the While outputs into\n # IdentityN outputs, which if fetched will cause all ops in the body to be\n # run (since it takes all exit ops as input). After lowering, each output\n # identity op will end up with only the appropriate exit op as input.\n outputs = tuple(array_ops.identity(t) for t in outputs)\n\n # First var is loop counter.\n outputs = _pack_sequence_as(orig_loop_vars,\n outputs[1:1 + num_flattened_outputs])\n\n if return_same_structure:\n return outputs\n\n flattened_outputs = nest.flatten(outputs)\n if len(flattened_outputs) == 1:\n return flattened_outputs[0]\n else:\n return outputs\n\n\[email protected](\"While\")\ndef _WhileGrad(op, *grads): # pylint: disable=invalid-name\n \"\"\"The gradient of a While op produced by while_loop.\"\"\"\n cond_graph = _get_graph(op, \"cond\")\n body_graph = _get_graph(op, \"body\")\n orig_num_params = len(body_graph.outputs)\n\n maximum_iterations = op.get_attr(\n \"_maximum_iterations\") if _is_in_xla_context() else None\n assert not _is_in_xla_context() or maximum_iterations is not None\n\n # Set the incoming gradient of non-trainable inputs to None. It is possible\n # that we receive non-None gradients for non-trainable types in nested while\n # loops because we accumulate outputs of the inner while as variant tensors\n # which are trainable and hence receive zeros_like tensors in the gradient\n # pass. The non-trainable tensors then receive the popped zeros tensor from\n # this zeros variant. The gradient for the loop vars corresponding to these\n # tensors is None or zeros (this happens only if the loop var is accumulated\n # as well) in _grad_fn so we reset these.\n # TODO(b/118712257): Remove the IsTrainable filter once we can handle None\n # output grads in _grad_fn.\n grads = [\n None if not _is_trainable(output) else grad\n for grad, output in zip(grads, body_graph.outputs)\n ]\n\n # Ensure that all non-resource trainable outputs have incoming gradients.\n assert all(g is not None or o.dtype == dtypes.resource or not _is_trainable(o)\n for o, g in zip(body_graph.outputs, grads)\n ), \"All trainable loop vars must receive incoming gradients.\"\n # We compute the gradient for the sub-graph between trainable ys and xs\n # with non-None incoming gradients. 
We later pad the None's to the list of\n # outputs.\n ys, xs, non_none_grads = zip(*[(y, x, grad) for (y, x, grad) in zip(\n body_graph.outputs, body_graph.inputs, grads) if grad is not None])\n\n body_grad_graph, args = _create_grad_func(\n ys, xs, non_none_grads, cond_graph, body_graph,\n util.unique_grad_fn_name(body_graph.name), op, maximum_iterations)\n\n if body_grad_graph.while_op_needs_rewrite:\n # Modify 'op' to output the intermediate accumulators needed by the grad\n # function.\n # NOTE(skyewm): if there are any active sessions, this modification to `op`\n # may make them unrunnable!\n\n cond_graph.name += \"_rewritten\"\n body_graph.name += \"_rewritten\"\n\n new_inputs = body_grad_graph.empty_tensor_lists\n new_outputs = body_graph.outputs[orig_num_params:]\n\n op._set_func_attr(\"cond\", util.create_new_tf_function(cond_graph))\n op._set_func_attr(\"body\", util.create_new_tf_function(body_graph))\n op._set_type_list_attr(\"T\", body_graph.output_types)\n op._set_shape_list_attr(\"output_shapes\", body_graph.output_shapes)\n op._add_while_inputs(new_inputs)\n op._add_outputs([t.dtype for t in new_outputs],\n [t.shape for t in new_outputs])\n _copy_handle_data(new_outputs, op.outputs[orig_num_params:])\n\n captured_inputs = _resolve_grad_captures(body_graph, body_grad_graph, op)\n loop_vars = args + captured_inputs\n\n def grad_cond(counter, max_iters, *unused_args):\n return counter < max_iters\n\n grad_cond_name = util.unique_grad_fn_name(op.get_attr(\"cond\").name)\n cond_grad_graph = func_graph_module.func_graph_from_py_func(\n grad_cond_name, grad_cond, loop_vars, {},\n func_graph=util.WhileCondFuncGraph(grad_cond_name))\n\n _check_num_inputs_outputs(cond_grad_graph, body_grad_graph, len(loop_vars))\n\n outputs = gen_functional_ops._while(\n loop_vars,\n util.create_new_tf_function(cond_grad_graph),\n util.create_new_tf_function(body_grad_graph),\n output_shapes=[t.shape for t in body_grad_graph.outputs],\n name=\"%s_grad\" % op.name)\n\n _copy_handle_data(body_grad_graph.outputs, outputs)\n util.maybe_set_lowering_attr(outputs[0].op)\n _maybe_set_maximum_iterations_attr(outputs[0].op, maximum_iterations)\n\n # See comment in while_loop.\n outputs = [array_ops.identity(t) for t in outputs]\n\n # Set None as the output gradient for tensors with None input gradient.\n # outputs[0] is the loop counter.\n # outputs[1] is the total number of loop iterations.\n index = 2\n none_padded_outputs = []\n for g in grads:\n if g is None:\n none_padded_outputs.append(None)\n else:\n none_padded_outputs.append(outputs[index])\n index += 1\n return none_padded_outputs\n\n\ndef _is_trainable(tensor):\n \"\"\"Returns whether the given tensor is trainable.\"\"\"\n if not gradients_impl.IsTrainable(tensor):\n return False\n\n # Special case: untrainable accumulator output. The gradients algorithm\n # doesn't know about tensor lists of untrainable elements. 
In theory the\n # tensor list gradient functions should return None as appropriate, but\n # because we can't return None from the gradient function we filter out\n # untrainable accumulator output here to avoid computing the gradient at all.\n if tensor.op.type == \"TensorListPopBack\" and tensor.value_index == 0:\n assert tensor.dtype == dtypes.variant\n element_type = tensor.op.get_attr(\"element_dtype\")\n return gradients_impl.IsTrainable(element_type)\n\n return True\n\n\ndef _validate_and_convert_to_tensor(maximum_iterations):\n \"\"\"Checks that `maximum_iterations` is valid.\n\n In XLA context, `maximum_iterations` is required and must be statically\n inferable, e.g. output tensor of a Const node.\n\n Args:\n maximum_iterations: The maximum_iterations passed to while_loop.\n\n Returns:\n A scalar valued tensor of type int32 or None.\n\n Raises:\n ValueError: If `maximum_iterations` is invalid.\n \"\"\"\n if _is_in_xla_context():\n if maximum_iterations is None:\n raise ValueError(\"maximum_iterations is None. It is required and must \"\n \"be statically known (e.g. a constant value or known \"\n \"shape dimension) when building while_loop in XLA \"\n \"context.\")\n if isinstance(maximum_iterations, ops.Tensor):\n # Get the constant value from the `maximum_iterations` tensor to avoid\n # capturing a Const tensor from outside this graph.\n maximum_iterations = tensor_util.constant_value(maximum_iterations)\n if maximum_iterations is None:\n raise ValueError(\"maximum_iterations must be statically known (e.g. a \"\n \"constant value or known shape dimension) when \"\n \"building while_loop in XLA context.\")\n\n if maximum_iterations is not None:\n # EmptyTensorList expects `max_num_elements` to be of type int32.\n maximum_iterations = ops.convert_to_tensor(\n maximum_iterations, dtype=dtypes.int32, name=\"maximum_iterations\")\n if maximum_iterations.shape.ndims != 0:\n raise ValueError(\"maximum_iterations must be a scalar, saw shape: %s\" %\n maximum_iterations.shape)\n return maximum_iterations\n\n\n# TODO(srbs): Pull this into common utils for cond_v2 and while_v2.\ndef _get_graph(while_op, func_attr_name):\n \"\"\"Returns `FuncGraph` for the given function attribute.\n\n Args:\n while_op: The While Operation.\n func_attr_name: string\n\n Returns:\n `FuncGraph`\n \"\"\"\n # TODO(srbs): Handle TensorShapeProto in function_def_to_graph.input_shapes.\n input_shapes = [\n tensor_shape.TensorShape(s) for s in while_op.get_attr(\"output_shapes\")\n ]\n func_name = while_op.get_attr(func_attr_name).name\n fdef = while_op.graph._get_function(func_name).definition\n # `while_op.graph` may not be the same as `ops.get_default_graph()` e.g.\n # if the `while_op` is in the body of another if/while/defun. We build the\n # `func_graph` with `while_op.graph` as its `outer_graph`. This resembles how\n # the `FuncGraph` was built in the forward pass. 
We need this so that we can\n # appropriately capture references to outer tensors in the nested grad graphs.\n with while_op.graph.as_default():\n func_graph = function_def_to_graph.function_def_to_graph(fdef, input_shapes)\n func_graph._while = while_op\n return func_graph\n\n\ndef _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op,\n max_iters):\n \"\"\"Builds and returns the gradient FuncGraph of `func_graph` and its args.\n\n The returned grad_func_graph must be called with the returned\n args + grad_func_graph.captures.\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n grads: The incoming grads for `ys`.\n cond_graph: FuncGraph for the forward cond function.\n body_graph: FuncGraph for the forward body function.\n name: Name of the returned gradient function.\n while_op: The forward While op.\n max_iters: the maximum number of iterations, or None if no limit.\n\n Returns:\n 2-tuple of (grad_func_graph, args).\n \"\"\"\n assert len(ys) == len(grads)\n\n total_iters = while_op.outputs[0]\n counter = constant_op.constant(\n 0, dtype=total_iters.dtype, name=\"grad_counter\")\n\n args = [counter, total_iters] + list(grads)\n # Note: The returned function does not have `args` in the list of\n # `external_captures`.\n grad_func_graph = func_graph_module.func_graph_from_py_func(\n name,\n lambda *args: _grad_fn(ys, xs, args, body_graph),\n args, {},\n func_graph=_WhileBodyGradFuncGraph(name, cond_graph, body_graph,\n max_iters))\n\n # Add the popped accumulators to the list of outputs.\n for internal_capture in grad_func_graph.internal_captures:\n if internal_capture in grad_func_graph.popped_tensor_lists:\n grad_func_graph.outputs.append(\n grad_func_graph.popped_tensor_lists[internal_capture])\n elif internal_capture.dtype == dtypes.resource:\n grad_func_graph.outputs.append(internal_capture)\n else:\n raise ValueError(\"Tensor %s is in list of internal_captures but is\"\n \" neither a resource nor is in popped_tensor_lists.\" %\n str(internal_capture))\n\n return grad_func_graph, args\n\n\ndef _grad_fn(ys, xs, args, func_graph):\n \"\"\"Computes the gradient of `func_graph` in the current graph.\n\n This function builds the gradient graph of the corresponding forward-pass\n `func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs.\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n args: The input arguments.\n args[0] - Loop counter\n args[1] - Total number of iterations.\n args[2:] - Incoming gradients for `ys`.\n func_graph: function.FuncGraph. The corresponding forward-pass function.\n\n Returns:\n The output gradient Tensors.\n \"\"\"\n grad_ys = args[2:]\n\n # Build the gradient graph. Note that this builds the gradient computation of\n # func_graph in the current graph, which requires capturing tensors from\n # func_graph. The captured func_graph tensors are resolved to external tensors\n # after the forward While op has been rewritten in _resolve_grad_captures.\n # TODO(srbs): Mark GradientsHelper as public?\n grad_outs = gradients_impl._GradientsHelper(\n ys, xs, grad_ys=grad_ys, src_graph=func_graph,\n unconnected_gradients=\"zero\")\n\n # TODO(b/118712257): Handle the case when grad_outs has None's e.g. 
when there\n # is a tf.StopGradient in the loop body.\n assert all(g is not None for g in grad_outs)\n counter = args[0]\n total_iters = args[1]\n return [counter + 1, total_iters] + grad_outs\n\n\ndef _resolve_grad_captures(body_graph, body_grad_graph, while_op):\n \"\"\"Returns the tensors to pass as captured inputs to `body_grad_graph`.\n\n `body_grad_graph` may have external references to:\n 1. Its outer graph containing the input gradients. These are left as-is.\n 2. Accumulators captured from the forward-pass graph. These should have been\n added as `while_op` outputs after the gradient graph was built. We replace\n these with the corresponding output of `while_op`, i.e. a tensor in\n `body_graph.outer_graph`. In the case of nested control flow or functions,\n the gradient logic handling `body_grad_graph.outer_graph` will make sure\n the tensor from `body_graph.outer_graph` is also correctly captured.\n\n Args:\n body_graph: FuncGraph. The forward-pass body function.\n body_grad_graph: FuncGraph. The body gradients function.\n while_op: The forward-pass While Operation calling `body_graph`.\n\n Returns:\n A list of input tensors to be passed as the captured inputs to\n `body_grad_graph`.\n \"\"\"\n new_capture_inputs = []\n for t in body_grad_graph.external_captures:\n # All values captured by gradient computation should be from the forward\n # graph or a captured resource variable (note that input gradients are\n # regular non-captured inputs).\n if t.graph == body_graph:\n # Captured accumulator\n t = while_op.outputs[t.graph.outputs.index(t)]\n # Note: We rely on the capturing logic of the gradient While op graph to\n # correctly capture the tensors in `body_graph.outer_graph`. Both cond_v2\n # and while_v2 handle this while building their gradient functions.\n assert t.graph == body_graph.outer_graph\n else:\n # Captured resource variable\n assert t.dtype == dtypes.resource\n\n new_capture_inputs.append(t)\n return new_capture_inputs\n\n\ndef _get_accumulator(tensor):\n r\"\"\"Returns TensorList if any containing accumulated values of tensor.\n\n We try to find a pattern of the form:\n\n input_tl tensor\n \\ /\n (TensorListPushBack)\n |\n output_tl\n\n which satisfies the following conditions:\n\n 1. input_tl must be in tensor.graph.inputs.\n 2. output_tl or Identity(output_tl) must be in tensor.graph.outputs.\n 3. 
tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_t).\n\n output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is\n returned if such a pattern is found else None is returned.\n\n Args:\n tensor: The Tensor to be accumulated.\n\n Returns:\n A variant tensor in the same graph as `tensor` or None if no accumulator is\n found.\n \"\"\"\n assert isinstance(tensor.graph, func_graph_module.FuncGraph)\n\n def get_func_graph_output(t):\n \"\"\"Returns t or Identity(t) whichever exists in graph outputs else None.\"\"\"\n if t in tensor.graph.outputs:\n return t\n # tf.defun adds an Identity for each output, check whether that is the case.\n identity_op = t.consumers()[0]\n if (identity_op.type == \"Identity\" and\n identity_op.outputs[0] in tensor.graph.outputs):\n return identity_op.outputs[0]\n return None\n\n for consumer in tensor.consumers():\n # Find the consumer that is a TensorListPushBack node whose TensorList input\n # is in the list of function inputs.\n if (consumer.type != \"TensorListPushBack\" or\n consumer.inputs[0] not in tensor.graph.inputs):\n continue\n\n output = get_func_graph_output(consumer.outputs[0])\n if output is None:\n # The TensorList output of `consumer` is not in the list of function\n # outputs.\n continue\n\n accum_input_idx = tensor.graph.inputs.index(consumer.inputs[0])\n accum_output_idx = tensor.graph.outputs.index(output)\n if accum_input_idx == accum_output_idx:\n return output\n return None\n\n\nclass _WhileBodyGradFuncGraph(util.WhileBodyFuncGraph):\n \"\"\"FuncGraph for the gradient function of the body of a While op.\n\n Contains the logic for capturing the tensors from the body of the forward\n While op which is as follows:\n 1. If the tensor is of resource type (these are not accumulated):\n a. Ensure that the tensor is a loop invariant, i.e., it exists in both loop\n inputs and outputs at the same index.\n b. Lookup the corresponding resource tensor in the forward outer graph and\n try to capture that.\n 2. If the tensor is not of resource type:\n a. Create an accumulator for that tensor and output it from the forward\n pass. Note this also requires adding it as an input to the forward pass.\n b. Capture the accumulator from the forward pass in this FuncGraph. This\n will later be resolved to the correct output of the forward While op.\n c. Pop a value from the captured placeholder and use it as the captured\n value for the forward pass tensor.\n\n This only allows capturing tensors in the forward graph. A ValueError is\n raised if an attempt is made to capture a tensor not in the forward graph.\n To manually capture capture a tensor that is not in the forward graph, call\n `capture` with `whitelisted=True`.\n\n Note: The `captures` dict does not contain the forward tensor since it is not\n directly captured. It contains the accumulator corresponding to this forward\n tensor.\n\n Attributes:\n while_op_needs_rewrite: True if any non-resource intermediates were\n captured, meaning the forward While op needs to be rewritten to output the\n corresponding accumulators.\n empty_tensor_lists: list of EmptyTensorList tensors to be used as initial\n input to the new accumulators in the forward graph.\n popped_tensor_lists: dict from the captured accumulator placeholder to the\n TensorList obtained after popping the intermediate tensor from it. 
The\n values of this dict need to be added to the list of outputs.\n \"\"\"\n\n def __init__(self, name, forward_cond_graph, forward_body_graph, max_iters):\n super(_WhileBodyGradFuncGraph, self).__init__(name)\n self.empty_tensor_lists = []\n self.popped_tensor_lists = {}\n # FuncGraph for the body of the forward While op.\n self._forward_graph = forward_body_graph\n # FuncGraph for the cond of the forward While op.\n self._forward_cond_graph = forward_cond_graph\n self._maximum_iterations = max_iters\n # Dict from forward intermediate tensor to its indirectly captured tensor\n # in this graph. Indirect capturing happens in two ways:\n # 1. For non-resource tensors we capture their accumulators from the forward\n # outer graph and pop values from that accumulator inside this graph\n # using TensorListPopBack.\n # 2. For resource tensors we directly capture their corresponding tensor\n # in the forward outer graph.\n self._indirect_captures = {}\n\n @property\n def while_op_needs_rewrite(self):\n return self.empty_tensor_lists\n\n def capture(self, tensor, name=None, whitelisted=False):\n \"\"\"Selectively captures external tensors.\n\n If `whitelisted` is False only allows capturing tensors in the\n `_forward_graph`.\n\n Args:\n tensor: Tensor. May be from this FuncGraph or a different graph.\n name: Optional name if a placeholder is created.\n whitelisted: If False (default), only allows capturing tensors from the\n forward graph.\n\n Returns:\n The placeholder in this graph for the tensor.\n\n Raises:\n ValueError: If attempting to capture an external tensor not in the forward\n graph with `whitelisted` set to False.\n \"\"\"\n if (not whitelisted and tensor.graph is not self and\n tensor.graph != self._forward_graph):\n raise ValueError(\"Attempting to capture tensor\", str(tensor),\n \" which is not in the forward graph but in \",\n _graph_name(tensor.graph), \".\")\n return super(_WhileBodyGradFuncGraph, self).capture(tensor, name)\n\n def _capture_helper(self, tensor, name):\n if tensor.graph is not self._forward_graph:\n return super(_WhileBodyGradFuncGraph, self)._capture_helper(tensor, name)\n\n while tensor.op.type == \"Identity\":\n # We do not accumulate the output of identity nodes so we try to capture\n # the input of the Identity node instead.\n tensor = tensor.op.inputs[0]\n\n captured_tensor = self._indirect_captures.get(tensor)\n if captured_tensor is not None:\n return captured_tensor\n\n if tensor.dtype == dtypes.resource:\n # Resource-type tensors are not accumulated.\n # If a resource tensor exists in the loop body it must either be a loop\n # input or an output of a nested While op inside the loop body which\n # had captured the external resource.\n if tensor in self._forward_graph.inputs:\n index = self._forward_graph.inputs.index(tensor)\n elif tensor.op.type == \"While\":\n # Captured resources occur at the same index in the lists of inputs and\n # outputs of a while op. 
So we lookup the input of `tensor.op` at the\n # same index as the index of `tensor` in the `tensor.op.outputs`.\n index = self._forward_graph.inputs.index(\n tensor.op.inputs[tensor.value_index])\n else:\n raise ValueError(\n \"Taking gradient of a while loop which creates\"\n \" a resource in its body is not supported: %s\" % str(tensor))\n # This must be a loop invariant.\n assert self._forward_graph.inputs[index] == self._forward_graph.outputs[\n index], \"Resource tensors must be loop invariants %s.\" % str(\n self._forward_graph._while.inputs[index])\n tensor_in_outer_graph = self._forward_graph._while.inputs[index]\n self._indirect_captures[tensor] = self.capture(\n tensor_in_outer_graph, whitelisted=True)\n return self._indirect_captures[tensor]\n\n # Create or find an existing accumulator output for `tensor` in the forward\n # graph, and fetch from this accumulator in the gradient graph to get the\n # raw intermediate value.\n accumulator = _get_accumulator(tensor)\n if accumulator is None:\n # Create the initial empty tensor list.\n with self._forward_graph.outer_graph.as_default():\n tensor_list = list_ops.empty_tensor_list(\n element_dtype=tensor.dtype, element_shape=tensor.shape,\n max_num_elements=self._maximum_iterations)\n self.empty_tensor_lists.append(tensor_list)\n\n # Push the intermediate tensor to the tensor list. This captures\n # `tensor_list`.\n with self._forward_graph.as_default():\n accumulator = list_ops.tensor_list_push_back(tensor_list, tensor)\n # Add the modified tensor list to the list of outputs. This output will be\n # all the accumulated values.\n self._forward_graph.outputs.append(accumulator)\n\n # Capture in the cond graph as well so the forward cond and body inputs\n # match.\n with self._forward_cond_graph.as_default():\n self._forward_cond_graph.capture(tensor_list)\n\n # Capture the accumulator tensor list in the gradient graph directly from\n # the forward graph -- we'll later modify this to capture the final list\n # output by the forward While op instead.\n captured_accumulator = super(_WhileBodyGradFuncGraph, self)._capture_helper(\n accumulator, name)\n\n # Pop the intermediate value from the tensor list in the gradient graph.\n new_tensor_list, captured_tensor = list_ops.tensor_list_pop_back(\n captured_accumulator, element_dtype=tensor.dtype)\n\n self._indirect_captures[tensor] = captured_tensor\n self.popped_tensor_lists[captured_accumulator] = new_tensor_list\n return captured_tensor\n\n\ndef _check_shapes_compat(output_tensors, shape_invariants, input_tensors):\n for (t, shape, input_t) in zip(output_tensors, shape_invariants,\n input_tensors):\n if not control_flow_ops._ShapeLessThanOrEqual(t.shape, shape):\n raise ValueError(\n \"Input tensor '%s' enters the loop with shape %s, but has \"\n \"shape %s after one iteration. 
To allow the shape to vary across \"\n \"iterations, use the `shape_invariants` argument of tf.while_loop to \"\n \"specify a less-specific shape.\" % (input_t.name, shape, t.shape))\n\n\ndef _check_num_inputs_outputs(cond_graph, body_graph, num_flattened_loop_vars):\n \"\"\"Checks the number of inputs/outputs of `cond_graph` and `body_graph`.\"\"\"\n assert len(cond_graph.inputs) == num_flattened_loop_vars, (\n \"cond_graph takes %d inputs; Expected: %d\" % (len(cond_graph.inputs),\n num_flattened_loop_vars))\n assert len(cond_graph.outputs) == 1, (\n \"cond_graph has %d outputs; Expected: 1\" % len(cond_graph.outputs))\n assert len(body_graph.inputs) == num_flattened_loop_vars, (\n \"body_graph takes %d inputs; Expected: %d\" % (len(cond_graph.inputs),\n num_flattened_loop_vars))\n assert len(body_graph.outputs) == num_flattened_loop_vars, (\n \"body_graph has %d outputs; Expected: %d\" % (len(body_graph.outputs),\n num_flattened_loop_vars))\n\n\ndef _copy_handle_data(src_tensors, tgt_tensors):\n for src_t, tgt_t in zip(src_tensors, tgt_tensors):\n custom_gradient.copy_handle_data(src_t, tgt_t)\n\n\ndef _maybe_set_maximum_iterations_attr(op, maximum_iterations):\n if control_flow_util.IsInXLAContext(op):\n # Store the maximum_iterations to use in the gradient pass.\n op._set_attr( # pylint: disable=protected-access\n \"_maximum_iterations\",\n attr_value_pb2.AttrValue(\n i=tensor_util.constant_value(maximum_iterations)))\n\n\n# TODO(srbs): This method should be in control_flow_util but that introduces\n# a circular dependency ops -> control_flow_util -> ops.\ndef _is_in_xla_context():\n \"\"\"Returns whether the current context is inside an XLA context.\"\"\"\n outer_graph = ops.get_default_graph()\n # The `_control_flow_context` is not copied when building a FuncGraph so\n # we look it up from the base graph.\n while isinstance(outer_graph, func_graph_module.FuncGraph):\n outer_graph = outer_graph.outer_graph\n cur_ctxt = outer_graph._get_control_flow_context() # pylint: disable=protected-access\n return control_flow_util.GetContainingXLAContext(cur_ctxt) is not None\n\n\ndef _graph_name(graph):\n if isinstance(graph, func_graph_module.FuncGraph):\n return graph.name\n return \"Base\"\n\n\ndef _pack_sequence_as(structure_with_tas, loop_vars):\n \"\"\"Like `nest.pack_sequence_as` but also replaces flows with TensorArrays.\"\"\"\n\n def flow_to_tensor_array(flow, ta): # pylint: disable=missing-docstring\n if isinstance(ta, tensor_array_ops.TensorArray):\n # pylint: disable=protected-access\n new_ta = tensor_array_ops.TensorArray(\n dtype=ta.dtype,\n handle=ta.handle,\n flow=flow,\n infer_shape=ta._infer_shape,\n colocate_with_first_write_call=ta._colocate_with_first_write_call)\n new_ta._colocate_with = ta._colocate_with\n new_ta._element_shape = ta._element_shape\n # pylint: enable=protected-access\n return new_ta\n return flow\n\n flattened_loop_vars = [\n flow_to_tensor_array(*z)\n for z in zip(nest.flatten(loop_vars), nest.flatten(structure_with_tas))\n ]\n return nest.pack_sequence_as(structure_with_tas, flattened_loop_vars)\n\n\ndef _tensor_array_to_flow(loop_vars):\n\n def f(maybe_ta):\n if isinstance(maybe_ta, tensor_array_ops.TensorArray):\n return maybe_ta.flow\n return maybe_ta\n\n return nest.map_structure(f, loop_vars)\n\n\ndef _build_signature(loop_vars, shape_invariants):\n return nest.pack_sequence_as(loop_vars, [\n tensor_spec.TensorSpec(s, t.dtype, name=t.op.name)\n for s, t in zip(nest.flatten(shape_invariants), nest.flatten(loop_vars))\n ])\n\n\n# pylint: 
enable=protected-access\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.MultiDeviceIterator`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.data.experimental.ops import optimization\nfrom tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import multi_device_iterator_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\n# TODO(b/117581999): Add eager coverage.\nclass MultiDeviceIteratorTest(test_base.DatasetTestBase):\n\n @test_util.run_v1_only(\"b/120545219\")\n def testNoGetNext(self):\n dataset = dataset_ops.Dataset.range(10)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"])\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testBasic(self):\n dataset = dataset_ops.Dataset.range(10)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"])\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 10, 2):\n self.assertEqual(i, self.evaluate(elem_on_1))\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testOneOnSameDevice(self):\n with ops.device(\"/cpu:0\"):\n dataset = dataset_ops.Dataset.range(10)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:0\", \"/cpu:1\"])\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 10, 2):\n self.assertEqual(i, self.evaluate(elem_on_1))\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testRepeatDevices(self):\n with ops.device(\"/cpu:0\"):\n 
dataset = dataset_ops.Dataset.range(20)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\", \"/cpu:1\", \"/cpu:2\"])\n elements = multi_device_iterator.get_next()\n elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 20, 4):\n self.assertEqual(i, self.evaluate(elem_on_1))\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n self.assertEqual(i + 2, self.evaluate(elem_on_3))\n self.assertEqual(i + 3, self.evaluate(elem_on_4))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n self.evaluate(elem_on_3)\n self.evaluate(elem_on_4)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testNotFullyDivisible(self):\n dataset = dataset_ops.Dataset.range(9)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"])\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 8, 2):\n self.assertEqual(i, self.evaluate(elem_on_1))\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n self.assertEqual(8, self.evaluate(elem_on_1))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testGetNextAsOptional(self):\n dataset = dataset_ops.Dataset.range(9)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"])\n elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()\n elem_on_1_has_value_t = elem_on_1.has_value()\n elem_on_1_t = elem_on_1.get_value()\n elem_on_2_has_value_t = elem_on_2.has_value()\n elem_on_2_t = elem_on_2.get_value()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 8, 2):\n elem_on_1_has_value, elem_on_1_value = sess.run(\n [elem_on_1_has_value_t, elem_on_1_t])\n self.assertTrue(elem_on_1_has_value)\n self.assertEqual(i, elem_on_1_value)\n elem_on_2_has_value, elem_on_2_value = sess.run(\n [elem_on_2_has_value_t, elem_on_2_t])\n self.assertTrue(elem_on_2_has_value)\n self.assertEqual(i + 1, elem_on_2_value)\n elem_on_1_has_value, elem_on_1_value = sess.run(\n [elem_on_1_has_value_t, elem_on_1_t])\n self.assertTrue(elem_on_1_has_value)\n self.assertEqual(8, elem_on_1_value)\n self.assertFalse(self.evaluate(elem_on_1_has_value_t))\n self.assertFalse(self.evaluate(elem_on_2_has_value_t))\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(elem_on_1_t)\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(elem_on_2_t)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testUneven(self):\n dataset = dataset_ops.Dataset.range(10)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"], max_buffer_size=4)\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 10, 2):\n self.assertEqual(i, self.evaluate(elem_on_1))\n 
for i in range(0, 10, 2):\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testMultipleInitializations(self):\n with ops.device(\"/cpu:0\"):\n epoch = array_ops.placeholder(dtypes.int64, shape=[])\n dataset1 = dataset_ops.Dataset.from_tensors(epoch).repeat(1000)\n dataset2 = dataset_ops.Dataset.range(1000)\n dataset = dataset_ops.Dataset.zip((dataset1, dataset2))\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"], prefetch_buffer_size=4)\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n init_op = multi_device_iterator.initializer\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n for i in range(1000):\n sess.run(init_op, feed_dict={epoch: i})\n self.assertEqual([(i, 0), (i, 1)], self.evaluate([elem_on_1,\n elem_on_2]))\n\n def testBasicGpu(self):\n if not test_util.is_gpu_available():\n self.skipTest(\"No GPU available\")\n\n dataset = dataset_ops.Dataset.range(10)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/gpu:0\"])\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 2, \"GPU\": 1})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 10, 2):\n self.assertEqual(i, self.evaluate(elem_on_1))\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n def testUnevenGpu(self):\n if not test_util.is_gpu_available():\n self.skipTest(\"No GPU available\")\n\n dataset = dataset_ops.Dataset.range(10)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/gpu:0\"], max_buffer_size=4)\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 2, \"GPU\": 1})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 10, 2):\n self.assertEqual(i, self.evaluate(elem_on_1))\n for i in range(0, 10, 2):\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n def testGetNextAsOptionalGpu(self):\n if not test_util.is_gpu_available():\n self.skipTest(\"No GPU available\")\n\n dataset = dataset_ops.Dataset.range(9)\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/gpu:0\"])\n elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()\n elem_on_1_has_value_t = elem_on_1.has_value()\n elem_on_1_t = elem_on_1.get_value()\n elem_on_2_has_value_t = elem_on_2.has_value()\n elem_on_2_t = elem_on_2.get_value()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 2, \"GPU\": 1})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 8, 2):\n elem_on_1_has_value, elem_on_1_value = sess.run(\n [elem_on_1_has_value_t, elem_on_1_t])\n self.assertTrue(elem_on_1_has_value)\n self.assertEqual(i, elem_on_1_value)\n elem_on_2_has_value, elem_on_2_value = sess.run(\n [elem_on_2_has_value_t, elem_on_2_t])\n self.assertTrue(elem_on_2_has_value)\n self.assertEqual(i + 1, 
elem_on_2_value)\n elem_on_1_has_value, elem_on_1_value = sess.run(\n [elem_on_1_has_value_t, elem_on_1_t])\n self.assertTrue(elem_on_1_has_value)\n self.assertEqual(8, elem_on_1_value)\n self.assertFalse(self.evaluate(elem_on_1_has_value_t))\n self.assertFalse(self.evaluate(elem_on_2_has_value_t))\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(elem_on_1_t)\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(elem_on_2_t)\n\n @test_util.run_v1_only(\"b/120545219\")\n def testOptimization(self):\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.apply(optimization.assert_next([\"MemoryCacheImpl\"]))\n dataset = dataset.skip(0) # this should be optimized away\n dataset = dataset.cache()\n\n options = dataset_ops.Options()\n options.experimental_optimization = OptimizationOptions()\n options.experimental_optimization.noop_elimination = True\n dataset = dataset.with_options(options)\n\n multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(\n dataset, [\"/cpu:1\", \"/cpu:2\"])\n elem_on_1, elem_on_2 = multi_device_iterator.get_next()\n\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.test_session(config=config) as sess:\n self.evaluate(multi_device_iterator.initializer)\n for i in range(0, 10, 2):\n self.assertEqual(i, self.evaluate(elem_on_1))\n self.assertEqual(i + 1, self.evaluate(elem_on_2))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(elem_on_1)\n self.evaluate(elem_on_2)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ragged.to_tensor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import ragged\nfrom tensorflow.python.ops.ragged import ragged_test_util\nfrom tensorflow.python.platform import googletest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedTensorToTensorOpTest(ragged_test_util.RaggedTensorTestCase,\n parameterized.TestCase):\n\n def testDocStringExamples(self):\n \"\"\"Example from ragged_to_tensor.__doc__.\"\"\"\n rt = ragged.constant([[9, 8, 7], [], [6, 5], [4]])\n dt = rt.to_tensor()\n self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])\n\n @parameterized.parameters(\n {\n 'rt_input': [],\n 'ragged_rank': 1,\n 'expected': [],\n 'expected_shape': [0, 0],\n },\n {\n 'rt_input': [[1, 2, 3], [], [4], [5, 6]],\n 'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]\n },\n {\n 'rt_input': [[1, 2, 3], [], [4], [5, 6]],\n 'default': 9,\n 'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]\n },\n {\n 'rt_input': [[[1], [2], [3]], [], [[4]], 
[[5], [6]]],\n 'ragged_rank':\n 1,\n 'default': [9],\n 'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],\n [[5], [6], [9]]]\n },\n {\n 'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],\n 'expected': [\n [[1, 2], [0, 0], [3, 4]], #\n [[0, 0], [0, 0], [0, 0]], #\n [[5, 0], [0, 0], [0, 0]], #\n [[6, 7], [8, 0], [0, 0]], #\n ]\n },\n {\n 'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],\n 'default':\n 9,\n 'expected': [\n [[1, 2], [9, 9], [3, 4]], #\n [[9, 9], [9, 9], [9, 9]], #\n [[5, 9], [9, 9], [9, 9]], #\n [[6, 7], [8, 9], [9, 9]], #\n ]\n },\n {\n 'rt_input': [[[1], [2], [3]]],\n 'ragged_rank': 1,\n 'default': 0,\n 'expected': [[[1], [2], [3]]],\n },\n {\n 'rt_input': [[[[1], [2]], [], [[3]]]],\n 'default': 9,\n 'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],\n },\n )\n def testRaggedTensorToTensor(self,\n rt_input,\n expected,\n ragged_rank=None,\n default=None,\n expected_shape=None):\n rt = ragged.constant(rt_input, ragged_rank=ragged_rank)\n dt = rt.to_tensor(default)\n self.assertIsInstance(dt, ops.Tensor)\n self.assertEqual(rt.dtype, dt.dtype)\n self.assertTrue(dt.shape.is_compatible_with(rt.shape))\n self.assertAllEqual(self.eval_to_list(dt), expected)\n if expected_shape is not None:\n dt_shape = array_ops.shape(dt)\n self.assertAllEqual(dt_shape, expected_shape)\n\n @parameterized.parameters(\n {\n 'rt_input': [[1, 2, 3]],\n 'default': [0],\n 'error': (ValueError, r'Shape \\(1,\\) must have rank at most 0'),\n },\n {\n 'rt_input': [[[1, 2], [3, 4]], [[5, 6]]],\n 'ragged_rank': 1,\n 'default': [7, 8, 9],\n 'error': (ValueError, r'Shapes \\(3,\\) and \\(2,\\) are incompatible'),\n },\n {\n 'rt_input': [[1, 2, 3]],\n 'default': 'a',\n 'error': (TypeError, '.*'),\n },\n )\n def testError(self, rt_input, default, error, ragged_rank=None):\n rt = ragged.constant(rt_input, ragged_rank=ragged_rank)\n with self.assertRaisesRegexp(error[0], error[1]):\n rt.to_tensor(default)\n\n\nif __name__ == '__main__':\n googletest.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for checkpointable object SavedModel save.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.layers import core\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.saved_model import loader\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training.checkpointable import tracking\nfrom tensorflow.python.training.checkpointable import util\n\n\nclass _ModelWithOptimizer(util.Checkpoint):\n\n def __init__(self):\n self.dense = core.Dense(1)\n self.optimizer = adam.AdamOptimizer(0.01)\n\n @def_function.function(\n input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),\n tensor_spec.TensorSpec([None], dtypes.float32)))\n def call(self, x, y):\n with backprop.GradientTape() as tape:\n loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)\n trainable_variables = self.dense.trainable_variables\n gradients = tape.gradient(loss, trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, trainable_variables))\n return {\"loss\": loss}\n\n\ndef _import_and_infer(\n save_dir, inputs,\n signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):\n \"\"\"Import a SavedModel into a TF 1.x-style graph and run `signature_key`.\"\"\"\n graph = ops.Graph()\n with graph.as_default(), session_lib.Session() as session:\n model = loader.load(session, [tag_constants.SERVING], save_dir)\n signature = model.signature_def[signature_key]\n assert set(inputs.keys()) == set(signature.inputs.keys())\n feed_dict = {}\n for arg_name in inputs.keys():\n feed_dict[graph.get_tensor_by_name(signature.inputs[arg_name].name)] = (\n inputs[arg_name])\n output_dict = {}\n for output_name, output_tensor_info in signature.outputs.items():\n output_dict[output_name] = graph.get_tensor_by_name(\n output_tensor_info.name)\n return session.run(output_dict, feed_dict=feed_dict)\n\n\nclass SaveTest(test.TestCase):\n\n def test_method_save_signature(self):\n root = tracking.Checkpointable()\n root.f = def_function.function(\n lambda x: 2. 
* x,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n root.f(constant_op.constant(1.))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(root, save_dir, root.f)\n self.assertEqual(\n {\"output_0\": 2.},\n _import_and_infer(save_dir, {\"x\": 1.}))\n\n def test_method_save_concrete(self):\n root = tracking.Checkpointable()\n root.f = def_function.function(\n lambda z: {\"out\": 2. * z})\n root.f(constant_op.constant(1.))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(\n root,\n save_dir,\n {\"non_default_key\": root.f.get_concrete_function(\n tensor_spec.TensorSpec(None, dtypes.float32))})\n self.assertEqual(\n {\"out\": 2.},\n _import_and_infer(\n save_dir, {\"z\": 1.}, signature_key=\"non_default_key\"))\n\n def test_non_concrete_error(self):\n root = tracking.Checkpointable()\n root.f = def_function.function(lambda x: 2. * x)\n root.f(constant_op.constant(1.))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n with self.assertRaisesRegexp(\n ValueError, \"must be converted to concrete functions\"):\n save.save(root, save_dir, root.f)\n\n def test_nested_inputs(self):\n root = tracking.Checkpointable()\n root.f = def_function.function(\n lambda x: 2. * x[0],\n input_signature=([tensor_spec.TensorSpec(None, dtypes.float32),\n tensor_spec.TensorSpec(None, dtypes.float32)],))\n root.f([constant_op.constant(1.), constant_op.constant(1.)])\n # Concrete functions must always have uniquely named Tensor inputs. Save\n # relies on this.\n with self.assertRaisesRegexp(\n ValueError, \"two arguments named 'x'\"):\n root.f.get_concrete_function()\n\n def test_nested_outputs(self):\n root = tracking.Checkpointable()\n root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x)))\n root.f(constant_op.constant(1.))\n to_save = root.f.get_concrete_function(constant_op.constant(1.))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n with self.assertRaisesRegexp(\n ValueError, \"non-flat outputs\"):\n save.save(root, save_dir, to_save)\n\n def test_nested_dict_outputs(self):\n root = util.Checkpoint(\n f=def_function.function(\n lambda x: {\"a\": 2. * x, \"b\": (3. * x, 4. 
* x)}))\n root.f(constant_op.constant(1.))\n to_save = root.f.get_concrete_function(constant_op.constant(1.))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n with self.assertRaisesRegexp(\n ValueError, \"dictionary containing non-Tensor value\"):\n save.save(root, save_dir, to_save)\n\n def test_variable(self):\n root = tracking.Checkpointable()\n root.v1 = variables.Variable(3.)\n root.v2 = variables.Variable(2.)\n root.f = def_function.function(\n lambda x: root.v1 * root.v2 * x)\n root.f(constant_op.constant(1.))\n to_save = root.f.get_concrete_function(constant_op.constant(1.))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(root, save_dir, to_save)\n self.assertAllEqual({\"output_0\": 12.},\n _import_and_infer(save_dir, {\"x\": 2.}))\n\n def test_optimizer(self):\n x = constant_op.constant([[3., 4.]])\n y = constant_op.constant([2.])\n model = _ModelWithOptimizer()\n first_loss = model.call(x, y)\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(model, save_dir, model.call)\n second_loss = model.call(x, y)\n self.assertNotEqual(first_loss, second_loss)\n self.assertAllClose(\n second_loss,\n _import_and_infer(save_dir, {\"x\": [[3., 4.]], \"y\": [2.]}))\n\n def test_trivial_save_exception(self):\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n with self.assertRaisesRegexp(ValueError, \"signature\"):\n save.save(tracking.Checkpointable(), save_dir)\n\n def test_single_method_default_signature(self):\n model = _ModelWithOptimizer()\n x = constant_op.constant([[3., 4.]])\n y = constant_op.constant([2.])\n model.call(x, y)\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(model, save_dir)\n self.assertIn(\"loss\",\n _import_and_infer(save_dir,\n {\"x\": [[3., 4.]], \"y\": [2.]}))\n\n def test_single_function_default_signature(self):\n model = tracking.Checkpointable()\n model.f = def_function.function(lambda: 3., input_signature=())\n model.f()\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(model, save_dir)\n self.assertAllClose({\"output_0\": 3.},\n _import_and_infer(save_dir, {}))\n\n def test_ambiguous_signatures(self):\n model = _ModelWithOptimizer()\n x = constant_op.constant([[3., 4.]])\n y = constant_op.constant([2.])\n model.call(x, y)\n model.second_function = def_function.function(lambda: 1.)\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n with self.assertRaisesRegexp(ValueError, \"call.*second_function\"):\n save.save(model, save_dir)\n\n def test_no_signature(self):\n\n class Model(util.Checkpoint):\n\n def call(self, inputs):\n return inputs * 2.\n\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n model = Model()\n with self.assertRaisesRegexp(\n ValueError, \"no @tf.function-decorated methods\"):\n save.save(model, save_dir)\n\n def test_find_default_save_function(self):\n\n class ObjWithDefaultSignature(util.Checkpoint):\n\n @def_function.function(input_signature=[tensor_spec.TensorSpec(\n shape=None, dtype=dtypes.float32)])\n def _default_save_signature(self, x):\n return x + x + 1\n\n obj = ObjWithDefaultSignature()\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(obj, save_dir)\n self.assertAllClose(\n {\"output_0\": 7.}, _import_and_infer(save_dir, {\"x\": 3.}))\n\n def test_docstring(self):\n\n class Adder(util.Checkpoint):\n\n @def_function.function(input_signature=[tensor_spec.TensorSpec(\n shape=None, dtype=dtypes.float32)])\n def add(self, x):\n return x + x + 
1.\n\n to_save = Adder()\n to_save.add(constant_op.constant(1.))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(to_save, save_dir)\n self.assertAllClose({\"output_0\": 7.},\n _import_and_infer(save_dir, {\"x\": 3.}))\n\n def test_default_attr_stripping(self):\n\n class Complex(util.Checkpoint):\n\n @def_function.function(input_signature=[])\n def __call__(self):\n return math_ops.complex(\n constant_op.constant(1.),\n constant_op.constant(2.),\n name=\"complex\")\n\n to_save = Complex()\n to_save()\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(to_save, save_dir)\n graph = ops.Graph()\n with graph.as_default(), self.session(graph) as session:\n loader.load(session, [tag_constants.SERVING], save_dir)\n func, = graph._functions.values()\n complex_node, = [\n node for node in func.definition.node_def if node.op == \"Complex\"]\n self.assertNotIn(\"T\", complex_node.attr)\n self.assertNotIn(\"Tout\", complex_node.attr)\n\n\nclass AssetTests(test.TestCase):\n\n def setUp(self):\n super(AssetTests, self).setUp()\n self._vocab_path = os.path.join(self.get_temp_dir(), \"vocab.txt\")\n with open(self._vocab_path, \"w\") as f:\n f.write(\"alpha\\nbeta\\ngamma\\n\")\n\n def test_table(self):\n initializer = lookup_ops.TextFileInitializer(\n self._vocab_path,\n key_dtype=dtypes.string,\n key_index=lookup_ops.TextFileIndex.WHOLE_LINE,\n value_dtype=dtypes.int64,\n value_index=lookup_ops.TextFileIndex.LINE_NUMBER)\n root = util.Checkpoint(table=lookup_ops.HashTable(\n initializer, default_value=-1))\n root.table_user = def_function.function(\n root.table.lookup,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])\n self.assertEqual(\n 2,\n self.evaluate(root.table_user(constant_op.constant(\"gamma\"))))\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(root, save_dir)\n file_io.delete_file(self._vocab_path)\n self.assertAllClose(\n {\"output_0\": [2, 0]},\n _import_and_infer(save_dir, {\"keys\": [\"gamma\", \"alpha\"]}))\n second_dir = os.path.join(self.get_temp_dir(), \"second_dir\")\n # Asset paths should track the location the SavedModel is loaded from.\n file_io.rename(save_dir, second_dir)\n self.assertAllClose(\n {\"output_0\": [2, 1]},\n _import_and_infer(second_dir, {\"keys\": [\"gamma\", \"beta\"]}))\n\n def test_unused_asset(self):\n root = tracking.Checkpointable()\n root.f = def_function.function(\n lambda x: 2. * x,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n root.asset = tracking.TrackableAsset(self._vocab_path)\n\n export_dir = os.path.join(self.get_temp_dir(), \"save_dir\")\n save.save(root, export_dir)\n self.assertAllClose(\n {\"output_0\": [0.2]},\n _import_and_infer(export_dir, {\"x\": [0.1]}))\n\n\nclass MemoryTests(test.TestCase):\n\n def setUp(self):\n self._model = _ModelWithOptimizer()\n\n @test_util.assert_no_garbage_created\n def test_no_reference_cycles(self):\n x = constant_op.constant([[3., 4.]])\n y = constant_op.constant([2.])\n self._model.call(x, y)\n if sys.version_info[0] < 3:\n # TODO(allenl): debug reference cycles in Python 2.x\n self.skipTest(\"This test only works in Python 3+. Reference cycles are \"\n \"created in older Python versions.\")\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save.save(self._model, save_dir, self._model.call)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.control_flow_util_v2.create_new_tf_function", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.gradients_impl.IsTrainable", "tensorflow.python.util.nest.map_structure", "tensorflow.python.ops.control_flow_util.IsInXLAContext", "tensorflow.python.ops.gradients_impl._GradientsHelper", "tensorflow.python.ops.control_flow_util_v2.WhileBodyFuncGraph", "tensorflow.python.ops.control_flow_util_v2.WhileCondFuncGraph", "tensorflow.python.ops.tensor_array_ops.TensorArray", "tensorflow.python.ops.custom_gradient.copy_handle_data", "tensorflow.python.ops.control_flow_util_v2.unique_fn_name", "tensorflow.python.util.nest.is_sequence", "tensorflow.python.ops.control_flow_util.GetContainingXLAContext", "tensorflow.python.ops.list_ops.tensor_list_pop_back", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.ops.list_ops.tensor_list_push_back", "tensorflow.python.ops.control_flow_util_v2.in_defun", "tensorflow.python.ops.control_flow_util_v2.maybe_set_lowering_attr", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.ops.control_flow_util_v2.unique_grad_fn_name", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.ops.control_flow_ops._ShapeLessThanOrEqual", "tensorflow.python.ops.list_ops.empty_tensor_list", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.function_def_to_graph.function_def_to_graph", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.ops.device", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.data.ops.dataset_ops.Options", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.data.ops.dataset_ops.Dataset.zip", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.multi_device_iterator_ops.MultiDeviceIterator", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.data.experimental.ops.optimization.assert_next", "tensorflow.python.data.experimental.ops.optimization_options.OptimizationOptions", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.test_util.is_gpu_available" ], [ "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.ragged.constant", "tensorflow.python.platform.googletest.main" ], [ "tensorflow.python.lib.io.file_io.rename", "tensorflow.python.training.checkpointable.tracking.Checkpointable", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.eager.test.main", "tensorflow.python.saved_model.save.save", "tensorflow.python.eager.def_function.function", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.lookup_ops.HashTable", "tensorflow.python.lib.io.file_io.delete_file", "tensorflow.python.ops.variables.Variable", "tensorflow.python.client.session.Session", "tensorflow.python.training.adam.AdamOptimizer", "tensorflow.python.keras.layers.core.Dense", "tensorflow.python.saved_model.loader.load", "tensorflow.python.training.checkpointable.tracking.TrackableAsset", "tensorflow.python.eager.backprop.GradientTape", 
"tensorflow.python.ops.lookup_ops.TextFileInitializer", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "2.7", "2.6", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13" ] } ]
larksq/Lane-Lines-Detection-and-Localization
[ "f07d7feaca7c5e1132bf22c27745328968441ff0" ]
[ "Scripts/lines.py" ]
[ "import numpy as np\n\nclass lines:\n\n maxNum = 50\n threshold = 1\n insist = True\n\n def __init__(self):\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_xfitted = []\n #average x values of the fitted line over the last n iterations\n self.bestx = None\n #polynomial coefficients averaged over the last n iterations\n self.best_fit = None\n\n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n #radius of curvature of the line in some units\n self.radius_of_curvature = None\n #distance in meters of vehicle center from the line\n self.line_base_pos = None\n\n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float')\n\n #x values for detected line pixels\n self.allx = None\n #y values for detected line pixels\n self.ally = None\n\n\n\n\n def add_rst(self, detected, fit, radius, bias, linepix, frame):\n\n resonableCurve = self.isReasonable(fit)\n\n if resonableCurve == False:\n self.insist = False\n\n else:\n\n # for starting 50 is to init\n self.recent_xfitted.append(linepix)\n multiplier = min(frame, self.maxNum)\n\n if frame < 2:\n self.bestx =linepix\n self.best_fit = fit\n self.radius_of_curvature = radius\n\n else:\n\n self.insist = True\n\n for index in range(0,2):\n diff = self.best_fit[0][index] - fit[0][index]\n if abs(diff)>self.threshold:\n self.insist = False\n print(\"\\n [Huge Jump] left not inconsist! Redetecting!\", index)\n\n for index in range(0,2):\n diff = self.best_fit[1][index] - fit[1][index]\n if abs(diff)>self.threshold:\n self.insist = False\n print(\"\\n [Huge Jump] right not insist! Redetecting!\", index)\n\n self.bestx = (self.bestx*multiplier+linepix)/(multiplier+1)\n self.best_fit = ((self.best_fit[0]*multiplier+fit[0])/(multiplier+1), (self.best_fit[1]*multiplier+fit[1])/(multiplier+1))\n self.radius_of_curvature = (self.radius_of_curvature*multiplier+radius)/(multiplier+1)\n\n if frame > self.maxNum:\n self.recent_xfitted.pop(0)\n\n self.line_base_pos = bias\n self.current_fit = fit\n\n return self.insist # return False to redetect\n\n def isReasonable(self, fit):\n\n # check left and right parrell\n diff = abs(fit[0][0]-fit[1][0])\n if diff > 0.01:\n print(\"\\n [OUTLIERS] NOT PARRELL! Discarding\")\n return False\n\n # check if curl too much\n if max(abs(fit[0][0]), abs(fit[1][0])) > 0.01:\n print(\"\\n [OUTLIERS] CRUL TOO MUCH! Discarding\")\n return False\n\n return True\n\n def smooth(self):\n pass\n\n\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LancerWang001/v8
[ "a0f0ebd7a876e8cb2210115adbfcffe900e99540" ]
[ "tools/callstats.py" ]
[ "#!/usr/bin/env python\n# Copyright 2016 the V8 project authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n'''\nUsage: callstats.py [-h] <command> ...\n\nOptional arguments:\n -h, --help show this help message and exit\n\nCommands:\n run run chrome with --runtime-call-stats and generate logs\n stats process logs and print statistics\n json process logs from several versions and generate JSON\n help help information\n\nFor each command, you can try ./runtime-call-stats.py help command.\n'''\n\n# for py2/py3 compatibility\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport operator\nfrom callstats_groups import RUNTIME_CALL_STATS_GROUPS\n\nimport numpy\nfrom math import sqrt\n\n\nMAX_NOF_RETRIES = 5\n\n\n# Run benchmarks.\n\ndef print_command(cmd_args):\n def fix_for_printing(arg):\n m = re.match(r'^--([^=]+)=(.*)$', arg)\n if m and (' ' in m.group(2) or m.group(2).startswith('-')):\n arg = \"--{}='{}'\".format(m.group(1), m.group(2))\n elif ' ' in arg:\n arg = \"'{}'\".format(arg)\n return arg\n print(\" \".join(map(fix_for_printing, cmd_args)))\n\n\ndef start_replay_server(args, sites, discard_output=True):\n with tempfile.NamedTemporaryFile(prefix='callstats-inject-', suffix='.js',\n mode='wt', delete=False) as f:\n injection = f.name\n generate_injection(f, sites, args.refresh)\n http_port = 4080 + args.port_offset\n https_port = 4443 + args.port_offset\n cmd_args = [\n args.replay_bin,\n \"--port=%s\" % http_port,\n \"--ssl_port=%s\" % https_port,\n \"--no-dns_forwarding\",\n \"--use_closest_match\",\n \"--no-diff_unknown_requests\",\n \"--inject_scripts=deterministic.js,{}\".format(injection),\n args.replay_wpr,\n ]\n print(\"=\" * 80)\n print_command(cmd_args)\n if discard_output:\n with open(os.devnull, 'w') as null:\n server = subprocess.Popen(cmd_args, stdout=null, stderr=null)\n else:\n server = subprocess.Popen(cmd_args)\n print(\"RUNNING REPLAY SERVER: %s with PID=%s\" % (args.replay_bin, server.pid))\n print(\"=\" * 80)\n return {'process': server, 'injection': injection}\n\n\ndef stop_replay_server(server):\n print(\"SHUTTING DOWN REPLAY SERVER %s\" % server['process'].pid)\n server['process'].terminate()\n os.remove(server['injection'])\n\n\ndef generate_injection(f, sites, refreshes=0):\n print(\"\"\"\\\n(function() {\n var s = window.sessionStorage.getItem(\"refreshCounter\");\n var refreshTotal = \"\"\", refreshes, \"\"\";\n var refreshCounter = s ? parseInt(s) : refreshTotal;\n var refreshId = refreshTotal - refreshCounter;\n if (refreshCounter > 0) {\n window.sessionStorage.setItem(\"refreshCounter\", refreshCounter-1);\n }\n function match(url, item) {\n if ('regexp' in item) { return url.match(item.regexp) !== null };\n var url_wanted = item.url;\n /* Allow automatic redirections from http to https. */\n if (url_wanted.startsWith(\"http://\") && url.startsWith(\"https://\")) {\n url_wanted = \"https://\" + url_wanted.substr(7);\n }\n return url.startsWith(url_wanted);\n };\n function onLoad(url) {\n for (var item of sites) {\n if (!match(url, item)) continue;\n var timeout = 'timeline' in item ? 2000 * item.timeline\n : 'timeout' in item ? 
1000 * (item.timeout - 3)\n : 10000;\n console.log(\"Setting time out of \" + timeout + \" for: \" + url);\n window.setTimeout(function() {\n console.log(\"Time is out for: \" + url);\n var msg = \"STATS: (\" + refreshId + \") \" + url;\n %GetAndResetRuntimeCallStats(1, msg);\n if (refreshCounter > 0) {\n console.log(\n \"Refresh counter is \" + refreshCounter + \", refreshing: \" + url);\n window.location.reload();\n }\n }, timeout);\n return;\n }\n console.log(\"Ignoring: \" + url);\n };\n var sites =\n \"\"\", json.dumps(sites), \"\"\";\n onLoad(window.location.href);\n})();\"\"\", file=f)\n\ndef get_chrome_flags(js_flags, user_data_dir, arg_delimiter=\"\"):\n return [\n \"--no-default-browser-check\",\n \"--no-sandbox\",\n \"--disable-translate\",\n \"--enable-benchmarking\",\n \"--enable-stats-table\",\n \"--js-flags={}{}{}\".format(arg_delimiter, js_flags, arg_delimiter),\n \"--no-first-run\",\n \"--user-data-dir={}{}{}\".format(arg_delimiter, user_data_dir,\n arg_delimiter),\n \"--data-path={}{}{}\".format(arg_delimiter,\n os.path.join(user_data_dir, 'content-shell-data'), arg_delimiter),\n ]\n\ndef get_chrome_replay_flags(args, arg_delimiter=\"\"):\n http_port = 4080 + args.port_offset\n https_port = 4443 + args.port_offset\n return [\n \"--host-resolver-rules=%sMAP *:80 localhost:%s, \" \\\n \"MAP *:443 localhost:%s, \" \\\n \"EXCLUDE localhost%s\" % (\n arg_delimiter, http_port, https_port,\n arg_delimiter),\n \"--ignore-certificate-errors\",\n \"--disable-seccomp-sandbox\",\n \"--disable-web-security\",\n \"--reduce-security-for-testing\",\n \"--allow-insecure-localhost\",\n ]\n\ndef run_site(site, domain, args, timeout=None):\n print(\"=\"*80)\n print(\"RUNNING DOMAIN %s\" % domain)\n print(\"=\"*80)\n result_template = \"{domain}#{count}.txt\" if args.repeat else \"{domain}.txt\"\n count = 0\n if timeout is None: timeout = args.timeout\n if args.replay_wpr:\n timeout *= 1 + args.refresh\n timeout += 1\n retries_since_good_run = 0\n while count == 0 or args.repeat is not None and count < args.repeat:\n count += 1\n result = result_template.format(domain=domain, count=count)\n retries = 0\n while args.retries is None or retries < args.retries:\n retries += 1\n try:\n if args.user_data_dir:\n user_data_dir = args.user_data_dir\n else:\n user_data_dir = tempfile.mkdtemp(prefix=\"chr_\")\n js_flags = \"--runtime-call-stats\"\n if args.replay_wpr: js_flags += \" --allow-natives-syntax\"\n if args.js_flags: js_flags += \" \" + args.js_flags\n chrome_flags = get_chrome_flags(js_flags, user_data_dir)\n if args.replay_wpr:\n chrome_flags += get_chrome_replay_flags(args)\n else:\n chrome_flags += [ \"--single-process\", ]\n if args.chrome_flags:\n chrome_flags += args.chrome_flags.split()\n cmd_args = [\n \"timeout\", str(timeout),\n args.with_chrome\n ] + chrome_flags + [ site ]\n print(\"- \" * 40)\n print_command(cmd_args)\n print(\"- \" * 40)\n with open(result, \"wt\") as f:\n with open(args.log_stderr or os.devnull, 'at') as err:\n status = subprocess.call(cmd_args, stdout=f, stderr=err)\n # 124 means timeout killed chrome, 0 means the user was bored first!\n # If none of these two happened, then chrome apparently crashed, so\n # it must be called again.\n if status != 124 and status != 0:\n print(\"CHROME CRASHED, REPEATING RUN\");\n continue\n # If the stats file is empty, chrome must be called again.\n if os.path.isfile(result) and os.path.getsize(result) > 0:\n if args.print_url:\n with open(result, \"at\") as f:\n print(file=f)\n print(\"URL: {}\".format(site), file=f)\n 
retries_since_good_run = 0\n break\n if retries_since_good_run > MAX_NOF_RETRIES:\n # Abort after too many retries, no point in ever increasing the\n # timeout.\n print(\"TOO MANY EMPTY RESULTS ABORTING RUN\")\n return\n timeout += 2 ** retries_since_good_run\n retries_since_good_run += 1\n print(\"EMPTY RESULT, REPEATING RUN ({})\".format(\n retries_since_good_run));\n finally:\n if not args.user_data_dir:\n shutil.rmtree(user_data_dir)\n\n\ndef read_sites_file(args):\n try:\n sites = []\n try:\n with open(args.sites_file, \"rt\") as f:\n for item in json.load(f):\n if 'timeout' not in item:\n # This is more-or-less arbitrary.\n item['timeout'] = int(1.5 * item['timeline'] + 7)\n if item['timeout'] > args.timeout: item['timeout'] = args.timeout\n sites.append(item)\n except ValueError:\n args.error(\"Warning: Could not read sites file as JSON, falling back to \"\n \"primitive file\")\n with open(args.sites_file, \"rt\") as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'): continue\n sites.append({'url': line, 'timeout': args.timeout})\n return sites\n except IOError as e:\n args.error(\"Cannot read from {}. {}.\".format(args.sites_file, e.strerror))\n sys.exit(1)\n\n\ndef read_sites(args):\n # Determine the websites to benchmark.\n if args.sites_file:\n return read_sites_file(args)\n return [{'url': site, 'timeout': args.timeout} for site in args.sites]\n\ndef do_run(args):\n sites = read_sites(args)\n replay_server = start_replay_server(args, sites) if args.replay_wpr else None\n # Disambiguate domains, if needed.\n L = []\n domains = {}\n for item in sites:\n site = item['url']\n domain = None\n if args.domain:\n domain = args.domain\n elif 'domain' in item:\n domain = item['domain']\n else:\n m = re.match(r'^(https?://)?([^/]+)(/.*)?$', site)\n if not m:\n args.error(\"Invalid URL {}.\".format(site))\n continue\n domain = m.group(2)\n entry = [site, domain, None, item['timeout']]\n if domain not in domains:\n domains[domain] = entry\n else:\n if not isinstance(domains[domain], int):\n domains[domain][2] = 1\n domains[domain] = 1\n domains[domain] += 1\n entry[2] = domains[domain]\n L.append(entry)\n try:\n # Run them.\n for site, domain, count, timeout in L:\n if count is not None: domain = \"{}%{}\".format(domain, count)\n print((site, domain, timeout))\n run_site(site, domain, args, timeout)\n finally:\n if replay_server:\n stop_replay_server(replay_server)\n\n\ndef do_run_replay_server(args):\n sites = read_sites(args)\n print(\"- \" * 40)\n print(\"Available URLs:\")\n for site in sites:\n print(\" \"+site['url'])\n print(\"- \" * 40)\n print(\"Launch chromium with the following commands for debugging:\")\n flags = get_chrome_flags(\"--runtime-call-stats --allow-natives-syntax\",\n \"/var/tmp/`date +%s`\", '\"')\n flags += get_chrome_replay_flags(args, \"'\")\n print(\" $CHROMIUM_DIR/out/Release/chrome \" + (\" \".join(flags)) + \" <URL>\")\n print(\"- \" * 40)\n replay_server = start_replay_server(args, sites, discard_output=False)\n try:\n replay_server['process'].wait()\n finally:\n stop_replay_server(replay_server)\n\n\n# Calculate statistics.\n\ndef statistics(data):\n # NOTE(V8:10269): imports moved here to mitigate the outage.\n import scipy\n import scipy.stats\n\n N = len(data)\n average = numpy.average(data)\n median = numpy.median(data)\n low = numpy.min(data)\n high= numpy.max(data)\n if N > 1:\n # evaluate sample variance by setting delta degrees of freedom (ddof) to\n # 1. 
The degree used in calculations is N - ddof\n stddev = numpy.std(data, ddof=1)\n # Get the endpoints of the range that contains 95% of the distribution\n t_bounds = scipy.stats.t.interval(0.95, N-1)\n #assert abs(t_bounds[0] + t_bounds[1]) < 1e-6\n # sum mean to the confidence interval\n ci = {\n 'abs': t_bounds[1] * stddev / sqrt(N),\n 'low': average + t_bounds[0] * stddev / sqrt(N),\n 'high': average + t_bounds[1] * stddev / sqrt(N)\n }\n else:\n stddev = 0\n ci = { 'abs': 0, 'low': average, 'high': average }\n if abs(stddev) > 0.0001 and abs(average) > 0.0001:\n ci['perc'] = t_bounds[1] * stddev / sqrt(N) / average * 100\n else:\n ci['perc'] = 0\n return { 'samples': N, 'average': average, 'median': median,\n 'stddev': stddev, 'min': low, 'max': high, 'ci': ci }\n\n\ndef add_category_total(entries, groups, category_prefix):\n group_data = { 'time': 0, 'count': 0 }\n for group_name, regexp in groups:\n if not group_name.startswith('Group-' + category_prefix): continue\n group_data['time'] += entries[group_name]['time']\n group_data['count'] += entries[group_name]['count']\n entries['Group-' + category_prefix + '-Total'] = group_data\n\n\ndef read_stats(path, domain, args):\n groups = [];\n if args.aggregate:\n groups = [\n ('Group-IC', re.compile(\".*IC_.*\")),\n ('Group-OptimizeBackground', re.compile(\".*OptimizeBackground.*\")),\n ('Group-Optimize',\n re.compile(\"StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*\")),\n ('Group-CompileBackground', re.compile(\"(.*CompileBackground.*)\")),\n ('Group-Compile', re.compile(\"(^Compile.*)|(.*_Compile.*)\")),\n ('Group-ParseBackground', re.compile(\".*ParseBackground.*\")),\n ('Group-Parse', re.compile(\".*Parse.*\")),\n ('Group-Callback', re.compile(\".*Callback.*\")),\n ('Group-API', re.compile(\".*API.*\")),\n ('Group-GC-Custom', re.compile(\"GC_Custom_.*\")),\n ('Group-GC-Background', re.compile(\".*GC.*BACKGROUND.*\")),\n ('Group-GC', re.compile(\"GC_.*|AllocateInTargetSpace\")),\n ('Group-JavaScript', re.compile(\"JS_Execution\")),\n ('Group-Runtime', re.compile(\".*\"))]\n with open(path, \"rt\") as f:\n # Process the whole file and sum repeating entries.\n entries = { 'Sum': {'time': 0, 'count': 0} }\n for group_name, regexp in groups:\n entries[group_name] = { 'time': 0, 'count': 0 }\n for line in f:\n line = line.strip()\n # Discard headers and footers.\n if not line: continue\n if line.startswith(\"Runtime Function\"): continue\n if line.startswith(\"====\"): continue\n if line.startswith(\"----\"): continue\n if line.startswith(\"URL:\"): continue\n if line.startswith(\"STATS:\"): continue\n # We have a regular line.\n fields = line.split()\n key = fields[0]\n time = float(fields[1].replace(\"ms\", \"\"))\n count = int(fields[3])\n if key not in entries: entries[key] = { 'time': 0, 'count': 0 }\n entries[key]['time'] += time\n entries[key]['count'] += count\n # We calculate the sum, if it's not the \"total\" line.\n if key != \"Total\":\n entries['Sum']['time'] += time\n entries['Sum']['count'] += count\n for group_name, regexp in groups:\n if not regexp.match(key): continue\n entries[group_name]['time'] += time\n entries[group_name]['count'] += count\n break\n # Calculate the V8-Total (all groups except Callback)\n group_data = { 'time': 0, 'count': 0 }\n for group_name, regexp in groups:\n if group_name == 'Group-Callback': continue\n group_data['time'] += entries[group_name]['time']\n group_data['count'] += entries[group_name]['count']\n entries['Group-Total-V8'] = group_data\n # Calculate the Parse-Total, Compile-Total and 
Optimize-Total groups\n add_category_total(entries, groups, 'Parse')\n add_category_total(entries, groups, 'Compile')\n add_category_total(entries, groups, 'Optimize')\n # Append the sums as single entries to domain.\n for key in entries:\n if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }\n domain[key]['time_list'].append(entries[key]['time'])\n domain[key]['count_list'].append(entries[key]['count'])\n\n\ndef print_stats(S, args):\n # Sort by ascending/descending time average, then by ascending/descending\n # count average, then by ascending name.\n def sort_asc_func(item):\n return (item[1]['time_stat']['average'],\n item[1]['count_stat']['average'],\n item[0])\n def sort_desc_func(item):\n return (-item[1]['time_stat']['average'],\n -item[1]['count_stat']['average'],\n item[0])\n # Sorting order is in the commend-line arguments.\n sort_func = sort_asc_func if args.sort == \"asc\" else sort_desc_func\n # Possibly limit how many elements to print.\n L = [item for item in sorted(S.items(), key=sort_func)\n if item[0] not in [\"Total\", \"Sum\"]]\n N = len(L)\n if args.limit == 0:\n low, high = 0, N\n elif args.sort == \"desc\":\n low, high = 0, args.limit\n else:\n low, high = N-args.limit, N\n # How to print entries.\n def print_entry(key, value):\n def stats(s, units=\"\"):\n conf = \"{:0.1f}({:0.2f}%)\".format(s['ci']['abs'], s['ci']['perc'])\n return \"{:8.1f}{} +/- {:15s}\".format(s['average'], units, conf)\n print(\"{:>50s} {} {}\".format(\n key,\n stats(value['time_stat'], units=\"ms\"),\n stats(value['count_stat'])\n ))\n # Print and calculate partial sums, if necessary.\n for i in range(low, high):\n print_entry(*L[i])\n if args.totals and args.limit != 0 and not args.aggregate:\n if i == low:\n partial = { 'time_list': [0] * len(L[i][1]['time_list']),\n 'count_list': [0] * len(L[i][1]['count_list']) }\n assert len(partial['time_list']) == len(L[i][1]['time_list'])\n assert len(partial['count_list']) == len(L[i][1]['count_list'])\n for j, v in enumerate(L[i][1]['time_list']):\n partial['time_list'][j] += v\n for j, v in enumerate(L[i][1]['count_list']):\n partial['count_list'][j] += v\n # Print totals, if necessary.\n if args.totals:\n print('-' * 80)\n if args.limit != 0 and not args.aggregate:\n partial['time_stat'] = statistics(partial['time_list'])\n partial['count_stat'] = statistics(partial['count_list'])\n print_entry(\"Partial\", partial)\n print_entry(\"Sum\", S[\"Sum\"])\n print_entry(\"Total\", S[\"Total\"])\n\n\ndef do_stats(args):\n domains = {}\n for path in args.logfiles:\n filename = os.path.basename(path)\n m = re.match(r'^([^#]+)(#.*)?$', filename)\n domain = m.group(1)\n if domain not in domains: domains[domain] = {}\n read_stats(path, domains[domain], args)\n if args.aggregate:\n create_total_page_stats(domains, args)\n for i, domain in enumerate(sorted(domains)):\n if len(domains) > 1:\n if i > 0: print()\n print(\"{}:\".format(domain))\n print('=' * 80)\n domain_stats = domains[domain]\n for key in domain_stats:\n domain_stats[key]['time_stat'] = \\\n statistics(domain_stats[key]['time_list'])\n domain_stats[key]['count_stat'] = \\\n statistics(domain_stats[key]['count_list'])\n print_stats(domain_stats, args)\n\n\n# Create a Total page with all entries summed up.\ndef create_total_page_stats(domains, args):\n total = {}\n def sum_up(parent, key, other):\n sums = parent[key]\n for i, item in enumerate(other[key]):\n if i >= len(sums):\n sums.extend([0] * (i - len(sums) + 1))\n if item is not None:\n sums[i] += item\n # Exclude adwords 
and speedometer pages from aggrigate total, since adwords\n # dominates execution time and speedometer is measured elsewhere.\n excluded_domains = ['adwords.google.com', 'speedometer-angular',\n 'speedometer-jquery', 'speedometer-backbone',\n 'speedometer-ember', 'speedometer-vanilla'];\n # Sum up all the entries/metrics from all non-excluded domains\n for domain, entries in domains.items():\n if domain in excluded_domains:\n continue;\n for key, domain_stats in entries.items():\n if key not in total:\n total[key] = {}\n total[key]['time_list'] = list(domain_stats['time_list'])\n total[key]['count_list'] = list(domain_stats['count_list'])\n else:\n sum_up(total[key], 'time_list', domain_stats)\n sum_up(total[key], 'count_list', domain_stats)\n # Add a new \"Total\" page containing the summed up metrics.\n domains['Total'] = total\n\n# Generate Raw JSON file.\n\ndef _read_logs(args):\n versions = {}\n for path in args.logdirs:\n if os.path.isdir(path):\n for root, dirs, files in os.walk(path):\n version = os.path.basename(root)\n if version not in versions: versions[version] = {}\n for filename in files:\n if filename.endswith(\".txt\"):\n m = re.match(r'^([^#]+)(#.*)?\\.txt$', filename)\n domain = m.group(1)\n if domain not in versions[version]: versions[version][domain] = {}\n read_stats(os.path.join(root, filename),\n versions[version][domain], args)\n\n return versions\n\ndef do_raw_json(args):\n versions = _read_logs(args)\n\n for version, domains in versions.items():\n if args.aggregate:\n create_total_page_stats(domains, args)\n for domain, entries in domains.items():\n raw_entries = []\n for name, value in entries.items():\n # We don't want the calculated sum in the JSON file.\n if name == \"Sum\": continue\n raw_entries.append({\n 'name': name,\n 'duration': value['time_list'],\n 'count': value['count_list'],\n })\n\n domains[domain] = raw_entries\n\n print(json.dumps(versions, separators=(',', ':')))\n\n\n# Generate JSON file.\n\ndef do_json(args):\n versions = _read_logs(args)\n\n for version, domains in versions.items():\n if args.aggregate:\n create_total_page_stats(domains, args)\n for domain, entries in domains.items():\n stats = []\n for name, value in entries.items():\n # We don't want the calculated sum in the JSON file.\n if name == \"Sum\": continue\n entry = [name]\n for x in ['time_list', 'count_list']:\n s = statistics(entries[name][x])\n entry.append(round(s['average'], 1))\n entry.append(round(s['ci']['abs'], 1))\n entry.append(round(s['ci']['perc'], 2))\n stats.append(entry)\n domains[domain] = stats\n print(json.dumps(versions, separators=(',', ':')))\n\n\n# Help.\n\ndef do_help(parser, subparsers, args):\n if args.help_cmd:\n if args.help_cmd in subparsers:\n subparsers[args.help_cmd].print_help()\n else:\n args.error(\"Unknown command '{}'\".format(args.help_cmd))\n else:\n parser.print_help()\n\n\n# Main program, parse command line and execute.\n\ndef coexist(*l):\n given = sum(1 for x in l if x)\n return given == 0 or given == len(l)\n\ndef main():\n parser = argparse.ArgumentParser()\n subparser_adder = parser.add_subparsers(title=\"commands\", dest=\"command\",\n metavar=\"<command>\")\n subparsers = {}\n # Command: run.\n subparsers[\"run\"] = subparser_adder.add_parser(\n \"run\", help=\"Replay websites and collect runtime stats data.\")\n subparsers[\"run\"].set_defaults(\n func=do_run, error=subparsers[\"run\"].error)\n subparsers[\"run\"].add_argument(\n \"--chrome-flags\", type=str, default=\"\",\n help=\"specify additional chrome flags\")\n 
subparsers[\"run\"].add_argument(\n \"--js-flags\", type=str, default=\"\",\n help=\"specify additional V8 flags\")\n subparsers[\"run\"].add_argument(\n \"-u\", \"--user-data-dir\", type=str, metavar=\"<path>\",\n help=\"specify user data dir (default is temporary)\")\n subparsers[\"run\"].add_argument(\n \"-c\", \"--with-chrome\", type=str, metavar=\"<path>\",\n default=\"/usr/bin/google-chrome\",\n help=\"specify chrome executable to use\")\n subparsers[\"run\"].add_argument(\n \"-r\", \"--retries\", type=int, metavar=\"<num>\",\n help=\"specify retries if website is down (default: forever)\")\n subparsers[\"run\"].add_argument(\n \"--no-url\", dest=\"print_url\", action=\"store_false\", default=True,\n help=\"do not include url in statistics file\")\n subparsers[\"run\"].add_argument(\n \"--domain\", type=str, default=\"\",\n help=\"specify the output file domain name\")\n subparsers[\"run\"].add_argument(\n \"-n\", \"--repeat\", type=int, metavar=\"<num>\",\n help=\"specify iterations for each website (default: once)\")\n\n def add_replay_args(subparser):\n subparser.add_argument(\n \"-k\", \"--refresh\", type=int, metavar=\"<num>\", default=0,\n help=\"specify refreshes for each iteration (default: 0)\")\n subparser.add_argument(\n \"--replay-wpr\", type=str, metavar=\"<path>\",\n help=\"use the specified web page replay (.wpr) archive\")\n subparser.add_argument(\n \"--replay-bin\", type=str, metavar=\"<path>\",\n help=\"specify the replay.py script typically located in \" \\\n \"$CHROMIUM/src/third_party/webpagereplay/replay.py\")\n subparser.add_argument(\n \"-f\", \"--sites-file\", type=str, metavar=\"<path>\",\n help=\"specify file containing benchmark websites\")\n subparser.add_argument(\n \"-t\", \"--timeout\", type=int, metavar=\"<seconds>\", default=60,\n help=\"specify seconds before chrome is killed\")\n subparser.add_argument(\n \"-p\", \"--port-offset\", type=int, metavar=\"<offset>\", default=0,\n help=\"specify the offset for the replay server's default ports\")\n subparser.add_argument(\n \"-l\", \"--log-stderr\", type=str, metavar=\"<path>\",\n help=\"specify where chrome's stderr should go (default: /dev/null)\")\n subparser.add_argument(\n \"--sites\", type=str, metavar=\"<URL>\", nargs=\"*\",\n help=\"specify benchmark website\")\n add_replay_args(subparsers[\"run\"])\n\n # Command: replay-server\n subparsers[\"replay\"] = subparser_adder.add_parser(\n \"replay\", help=\"Run the replay server for debugging purposes\")\n subparsers[\"replay\"].set_defaults(\n func=do_run_replay_server, error=subparsers[\"replay\"].error)\n add_replay_args(subparsers[\"replay\"])\n\n # Command: stats.\n subparsers[\"stats\"] = subparser_adder.add_parser(\n \"stats\", help=\"Analize the results file create by the 'run' command.\")\n subparsers[\"stats\"].set_defaults(\n func=do_stats, error=subparsers[\"stats\"].error)\n subparsers[\"stats\"].add_argument(\n \"-l\", \"--limit\", type=int, metavar=\"<num>\", default=0,\n help=\"limit how many items to print (default: none)\")\n subparsers[\"stats\"].add_argument(\n \"-s\", \"--sort\", choices=[\"asc\", \"desc\"], default=\"asc\",\n help=\"specify sorting order (default: ascending)\")\n subparsers[\"stats\"].add_argument(\n \"-n\", \"--no-total\", dest=\"totals\", action=\"store_false\", default=True,\n help=\"do not print totals\")\n subparsers[\"stats\"].add_argument(\n \"logfiles\", type=str, metavar=\"<logfile>\", nargs=\"*\",\n help=\"specify log files to parse\")\n subparsers[\"stats\"].add_argument(\n \"--aggregate\", 
dest=\"aggregate\", action=\"store_true\", default=False,\n help=\"Create aggregated entries. Adds Group-* entries at the toplevel. \" \\\n \"Additionally creates a Total page with all entries.\")\n\n # Command: json.\n subparsers[\"json\"] = subparser_adder.add_parser(\n \"json\", help=\"Collect results file created by the 'run' command into\" \\\n \"a single json file.\")\n subparsers[\"json\"].set_defaults(\n func=do_json, error=subparsers[\"json\"].error)\n subparsers[\"json\"].add_argument(\n \"logdirs\", type=str, metavar=\"<logdir>\", nargs=\"*\",\n help=\"specify directories with log files to parse\")\n subparsers[\"json\"].add_argument(\n \"--aggregate\", dest=\"aggregate\", action=\"store_true\", default=False,\n help=\"Create aggregated entries. Adds Group-* entries at the toplevel. \" \\\n \"Additionally creates a Total page with all entries.\")\n\n # Command: raw-json.\n subparsers[\"raw-json\"] = subparser_adder.add_parser(\n \"raw-json\", help=\"Collect raw results from 'run' command into\" \\\n \"a single json file.\")\n subparsers[\"raw-json\"].set_defaults(\n func=do_raw_json, error=subparsers[\"json\"].error)\n subparsers[\"raw-json\"].add_argument(\n \"logdirs\", type=str, metavar=\"<logdir>\", nargs=\"*\",\n help=\"specify directories with log files to parse\")\n subparsers[\"raw-json\"].add_argument(\n \"--aggregate\", dest=\"aggregate\", action=\"store_true\", default=False,\n help=\"Create aggregated entries. Adds Group-* entries at the toplevel. \" \\\n \"Additionally creates a Total page with all entries.\")\n\n # Command: help.\n subparsers[\"help\"] = subparser_adder.add_parser(\n \"help\", help=\"help information\")\n subparsers[\"help\"].set_defaults(\n func=lambda args: do_help(parser, subparsers, args),\n error=subparsers[\"help\"].error)\n subparsers[\"help\"].add_argument(\n \"help_cmd\", type=str, metavar=\"<command>\", nargs=\"?\",\n help=\"command for which to display help\")\n\n # Execute the command.\n args = parser.parse_args()\n setattr(args, 'script_path', os.path.dirname(sys.argv[0]))\n if args.command == \"run\" and coexist(args.sites_file, args.sites):\n args.error(\"use either option --sites-file or site URLs\")\n sys.exit(1)\n elif args.command == \"run\" and not coexist(args.replay_wpr, args.replay_bin):\n args.error(\"options --replay-wpr and --replay-bin must be used together\")\n sys.exit(1)\n else:\n args.func(args)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" ]
[ [ "numpy.min", "numpy.median", "numpy.max", "numpy.std", "numpy.average", "scipy.stats.t.interval" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jboilard1994/disentanglement_lib
[ "a64b8b9994a28fafd47ccd866b0318fa30a3c76c", "a64b8b9994a28fafd47ccd866b0318fa30a3c76c" ]
[ "disentanglement_lib/evaluation/metrics/beta_vae_test.py", "disentanglement_lib/evaluation/metrics/reduced_downstream_task.py" ]
[ "# coding=utf-8\n# Copyright 2018 The DisentanglementLib Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for beta_vae.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import absltest\nfrom disentanglement_lib.data.ground_truth import dummy_data\nfrom disentanglement_lib.evaluation.metrics import beta_vae\nimport numpy as np\n\n\nclass BetaVaeTest(absltest.TestCase):\n\n def test_metric(self):\n ground_truth_data = dummy_data.IdentityObservationsData()\n representation_function = lambda x: x\n random_state = np.random.RandomState(0)\n scores = beta_vae.compute_beta_vae_sklearn(\n ground_truth_data, representation_function, random_state, None, 5,\n 2000, 2000)\n self.assertBetween(scores[\"train_accuracy\"], 0.9, 1.0)\n self.assertBetween(scores[\"eval_accuracy\"], 0.9, 1.0)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n", "# coding=utf-8\n# Copyright 2018 The DisentanglementLib Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Reduced downstream classification task.\n\nTest downstream performance after removing the k most predictive features for\neach factor of variation.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom disentanglement_lib.evaluation.metrics import dci\nfrom disentanglement_lib.evaluation.metrics import utils\nimport numpy as np\nfrom six.moves import range\nimport gin.tf\n\n\[email protected](\n \"reduced_downstream_task\",\n blacklist=[\"ground_truth_data\", \"representation_function\", \"random_state\",\n \"artifact_dir\"])\ndef compute_reduced_downstream_task(ground_truth_data,\n representation_function,\n random_state,\n artifact_dir=None,\n num_factors_to_remove=gin.REQUIRED,\n num_train=gin.REQUIRED,\n num_test=gin.REQUIRED,\n batch_size=16):\n \"\"\"Computes loss of a reduced downstream task.\n\n Measure the information leakage in each latent component after removing the\n k (\"factors_to_remove\") most informative features for the prediction task.\n\n Args:\n ground_truth_data: GroundTruthData to be sampled from.\n representation_function: Function that takes observations as input and\n outputs a dim_representation sized representation for each observation.\n random_state: Numpy random state used for randomness.\n artifact_dir: Optional path to directory where artifacts can be saved.\n 
num_factors_to_remove: Number of factors to remove from the latent\n representation.\n num_train: Number of points used for training.\n num_test: Number of points used for testing.\n batch_size: Batch size for sampling.\n\n Returns:\n Dictionary with scores.\n \"\"\"\n del artifact_dir\n scores = {}\n # Loop on different sizes of the training 'batch', as specified with gin.\n for train_size in num_train:\n size_string = str(train_size)\n mus_train, ys_train = utils.generate_batch_factor_code(\n ground_truth_data, representation_function, train_size, random_state,\n batch_size)\n mus_test, ys_test = utils.generate_batch_factor_code(\n ground_truth_data, representation_function, num_test, random_state,\n batch_size)\n # Create variables for aggregated scores.\n reduced_factor_train_scores = []\n other_factors_train_scores = []\n reduced_factor_test_scores = []\n other_factors_test_scores = []\n # Compute the reduced representation and test it for each factor of\n # variation.\n for factor_of_interest in range(ground_truth_data.num_factors):\n # Copy the training data and eliminate the k most informative factors.\n reduced_mus_train = mus_train.copy()\n reduced_mus_test = mus_test.copy()\n for _ in range(num_factors_to_remove):\n reduced_mus_train, reduced_mus_test =\\\n compute_reduced_representation(reduced_mus_train, ys_train,\n reduced_mus_test, ys_test,\n factor_of_interest)\n predictor_model = utils.make_predictor_fn()\n train_acc, test_acc = compute_predictive_accuracy(\n np.transpose(reduced_mus_train), ys_train,\n np.transpose(reduced_mus_test), ys_test, predictor_model)\n # Save scores for reduced factor.\n scores[size_string +\n \":reduced_factor_{}:mean_train_accuracy_reduced_factor\".format(\n factor_of_interest)] = train_acc[factor_of_interest]\n scores[size_string +\n \":reduced_factor_{}:mean_test_accuracy_reduced_factor\".format(\n factor_of_interest)] = test_acc[factor_of_interest]\n reduced_factor_train_scores.append(train_acc[factor_of_interest])\n reduced_factor_test_scores.append(test_acc[factor_of_interest])\n\n # Save the scores (accuracies) in the score dictionary.\n local_other_factors_train_scores = []\n local_other_factors_test_scores = []\n for i in range(len(train_acc)):\n scores[size_string +\n \":reduced_factor_{}:mean_train_accuracy_factor_{}\".format(\n factor_of_interest, i)] = train_acc[i]\n scores[size_string +\n \":reduced_factor_{}:mean_test_accuracy_factor_{}\".format(\n factor_of_interest, i)] = test_acc[i]\n if i != factor_of_interest:\n local_other_factors_train_scores.append(train_acc[i])\n local_other_factors_test_scores.append(test_acc[i])\n # Save mean score for non-reduced factors.\n scores[size_string +\n \":reduced_factor_{}:mean_train_accuracy_non_reduced_factor\".format(\n factor_of_interest)] = np.mean(\n local_other_factors_train_scores)\n scores[size_string +\n \":reduced_factor_{}:mean_test_accuracy_non_reduced_factor\".format(\n factor_of_interest)] = np.mean(local_other_factors_test_scores)\n other_factors_train_scores.append(\n np.mean(local_other_factors_train_scores))\n other_factors_test_scores.append(np.mean(local_other_factors_test_scores))\n\n # Compute the aggregate scores.\n scores[size_string + \":mean_train_accuracy_reduced_factor\"] = np.mean(\n reduced_factor_train_scores)\n scores[size_string + \":mean_test_accuracy_reduced_factor\"] = np.mean(\n reduced_factor_test_scores)\n scores[size_string + \":mean_train_accuracy_other_factors\"] = np.mean(\n other_factors_train_scores)\n scores[size_string + 
\":mean_test_accuracy_other_factors\"] = np.mean(\n other_factors_test_scores)\n return scores\n\n\[email protected](\"reduced_representation\")\ndef compute_reduced_representation(mus_train,\n ys_train,\n mus_test,\n ys_test,\n factor_of_interest,\n correlation_measure=gin.REQUIRED):\n \"\"\"Computes a reduced representation of the data.\n\n The most informative factor with respect to the labels is deleted.\n\n Args:\n mus_train: latent means of the training batch.\n ys_train: labels of the training batch.\n mus_test: latent means of the test batch.\n ys_test: labels of the test batch.\n factor_of_interest: index of the factor of interest.\n correlation_measure: measure of correlation.\n\n Returns:\n Tuple with reduced representations for the training and test set.\n \"\"\"\n importance_matrix = correlation_measure(mus_train, ys_train, mus_test,\n ys_test)\n factor_of_interest_importance = importance_matrix[:, factor_of_interest]\n factor_to_remove_index = np.argmax(factor_of_interest_importance)\n # Remove the factor of variation above from the representation\n reduced_representation_train = np.delete(\n mus_train.copy(), factor_to_remove_index, axis=0)\n reduced_representation_test = np.delete(\n mus_test.copy(), factor_to_remove_index, axis=0)\n return reduced_representation_train, reduced_representation_test\n\n\[email protected](\n \"factorwise_dci\",\n blacklist=[\"mus_train\", \"ys_train\", \"mus_test\", \"ys_test\"])\ndef compute_factorwise_dci(mus_train, ys_train, mus_test, ys_test):\n \"\"\"Computes the DCI importance matrix of the attributes.\n\n Args:\n mus_train: latent means of the training batch.\n ys_train: labels of the training batch.\n mus_test: latent means of the test batch.\n ys_test: labels of the test batch.\n\n Returns:\n Matrix with importance scores.\n \"\"\"\n importance_matrix, _, _ = dci.compute_importance_gbt(mus_train, ys_train,\n mus_test, ys_test)\n assert importance_matrix.shape[0] == mus_train.shape[0]\n assert importance_matrix.shape[1] == ys_train.shape[0]\n return importance_matrix\n\n\ndef compute_predictive_accuracy(x_train, y_train, x_test, y_test, predictor_fn):\n \"\"\"Computes average predictive accuracy for train and test set.\n\n Args:\n x_train: data x of the training batch.\n y_train: labels y of the training batch.\n x_test: data x of the test batch.\n y_test: labels y of the test batch.\n predictor_fn: function that is used to fit and predict the labels.\n\n Returns:\n Tuple with lists of training and test set accuracies.\n \"\"\"\n num_factors = y_train.shape[0]\n train_acc = []\n test_acc = []\n # Loop on the generative factors to predict\n for i in range(num_factors):\n model = predictor_fn()\n model.fit(x_train, y_train[i, :])\n train_acc.append(np.mean(model.predict(x_train) == y_train[i, :]))\n test_acc.append(np.mean(model.predict(x_test) == y_test[i, :]))\n return train_acc, test_acc\n" ]
[ [ "numpy.random.RandomState" ], [ "numpy.argmax", "numpy.mean", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HuiminHe/BugBot
[ "ac121a37ac0b4858e5ed3849062c9bfaa47cb0fa" ]
[ "test_box.py" ]
[ "from simulator import Simulator, Map, Agent\nfrom devices import Device\nimport numpy as np\nimport simulator_config\n\nenv = Simulator(simulator_config)\nmap = Map()\nmap.get_map_from_geom2d(env, kp=np.array([[-100, 100], [-100, -100], [100, -100], [100, 100]]))\n\nrobot = Agent(env, kp=np.array([[-2, 0], [2, 0]]), color=(1, 0, 0, 0.5), v_max=5)\nrobot.reset(init_state=np.array([0, 40, 0]))\ndevice = Device(env, parent=robot, kp=np.array([[-10, 0], [10, 0]]), color=[0, 1, 0, 1], filled=False)\nwhile True:\n robot.update(v=np.array([5, 0]))\n env._render()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OnsenTamagoYoshi/DeepLearningFromScratch
[ "006f80b63130829b142c04a88632287bcf5a61b0" ]
[ "ch04/gradient_2d.py" ]
[ "# -*- coding: utf-8 -*-\n# cf.http://d.hatena.ne.jp/white_wheels/20100327/p3\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef _numerical_gradient_no_batch(f, x):\n h = 1e-4 #0.0001\n grad = np.zeros_like(x) #xと同じ形状の配列を作成\n \n for idx in range(x.size):\n tmp_val = x[idx]\n #f(x + h)の計算\n x[idx] = tmp_val + h\n fxh1 = f(x)\n \n #f(x - h)の計算\n x[idx] = tmp_val - h\n fxh2 = f(x)\n \n grad[idx] = (fxh1 - fxh2) / (2 * h)\n x[idx] = tmp_val #値を元に戻す\n \n return grad\n\ndef numerical_gradient(f, X):\n if X.ndim == 1:\n return _numerical_gradient_no_batch(f, X)\n else:\n grad = np.zeros_like(X)\n \n for idx, x in enumerate(X):\n grad[idx] = _numerical_gradient_no_batch(f, x)\n \n return grad \n\ndef function_2(x):\n if x.ndim == 1:\n return x[0] ** 2 + x[1] ** 2 #または return np.sum(x**2)\n else:\n return np.sum(x ** 2, axis=1)\n \ndef tangent_line(f, x):\n d = numerical_gradient(f, x)\n print(d)\n y = f(x) - d * x\n return lambda t: d * t + y\n\nif __name__ == '__main__':\n x0 = np.arange(-2, 2.5, 0.25)\n x1 = np.arange(-2, 2.5, 0.25)\n X, Y = np.meshgrid(x0, x1)\n \n X = X.flatten()\n Y = Y.flatten()\n \n grad = numerical_gradient(function_2, np.array([X, Y]))\n \n plt.figure()\n plt.quiver(X, Y, -grad[0], -grad[1], angles=\"xy\", color=\"#666666\") #,headwidth=10,scale=40,color=\"#444444\")\n plt.xlim([-2, 2])\n plt.ylim([-2, 2])\n plt.xlabel('x0')\n plt.xlabel('x1')\n plt.grid()\n plt.legend()\n plt.draw()\n plt.show()\n" ]
[ [ "matplotlib.pylab.show", "matplotlib.pylab.grid", "matplotlib.pylab.xlim", "numpy.arange", "matplotlib.pylab.legend", "matplotlib.pylab.draw", "matplotlib.pylab.xlabel", "matplotlib.pylab.figure", "numpy.zeros_like", "matplotlib.pylab.ylim", "matplotlib.pylab.quiver", "numpy.array", "numpy.meshgrid", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JianhengHou/Medical-Sieve
[ "cafb69054ef98cf2f42229ff73c93b6796f9fa91" ]
[ "Medical_Sieve_Pipeline/Medical_Sieve_Model_Pipeline/estimator.py" ]
[ "from sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import hamming_loss\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import multilabel_confusion_matrix\nfrom sklearn.metrics import roc_auc_score\nimport numpy as np\nimport copy\n\ndef combinations(nums):\n ans = [[]]\n for row in nums:\n curr = []\n for combination in ans:\n for element in row:\n new_combination = copy.deepcopy(combination)\n new_combination.append(element)\n curr.append(new_combination)\n ans = curr\n return ans\n\ndef f1(matrix):\n precision = matrix[1][1]*1.0 / (matrix[0][1] + matrix[1][1])\n recall = matrix[1][1]*1.0 / (matrix[1][0] + matrix[1][1])\n return 2*((precision*recall)/(precision+recall))\n\ndef model_evaluation(val_preds, aspect_vectors, thresholds_set):\n mlb_aspect = MultiLabelBinarizer()\n mlb_aspect.fit([aspect_vectors.columns.values.tolist()]) \n\n max_avg_f1 = 0\n max_hamming_score = 0\n max_exact_accuracy = 0\n max_fuzzy_accuracy = 0\n max_fuzzy_accuracy_pos = 0\n max_exact_accuracy_pos = 0\n max_avg_rocauc = 0\n max_confusion_matrix = None\n max_threshold_set = []\n\n for threshold_set in thresholds_set:\n predict_softmax = np.zeros(aspect_vectors.shape, dtype=int)\n for row_index, row in enumerate(val_preds):\n for index, each in enumerate(row):\n if each >= threshold_set[index]:\n predict_softmax[row_index][index] = 1\n\n hamming_score = 1 - hamming_loss(predict_softmax, aspect_vectors) \n num_fuzzy_match = 0\n num_fuzzy_match_pos = 0\n num_exact_match_pos = 0\n num_pos = 0\n for true, pre in zip(mlb_aspect.inverse_transform(aspect_vectors.values), mlb_aspect.inverse_transform(predict_softmax)):\n if len(true) != 0: \n num_pos += 1\n intersect = set(pre).intersection(set(true))\n if (len(true)>0 and len(pre)>0 and len(intersect) > 0) or (len(true) == 0 and len(pre) == 0):\n num_fuzzy_match += 1\n if len(true)>0 and len(pre)>0 and len(intersect) > 0:\n num_fuzzy_match_pos += 1\n if len(true)>0 and len(pre)>0 and pre == true: \n num_exact_match_pos += 1\n fuzzy_accuracy = num_fuzzy_match*1.0/len(predict_softmax)\n exact_accuracy = accuracy_score(predict_softmax, aspect_vectors)\n fuzzy_accuracy_pos = num_fuzzy_match_pos*1.0/num_pos\n exact_accuracy_pos = num_exact_match_pos*1.0/num_pos\n\n class_f1 = []\n for aspect, confusion_matrix in zip(mlb_aspect.classes_, multilabel_confusion_matrix(aspect_vectors, predict_softmax)):\n # print(aspect, ':',f1(confusion_matrix),'\\n', confusion_matrix, '\\n')\n class_f1.append(f1(confusion_matrix))\n \n rocauc_score = roc_auc_score(aspect_vectors, val_preds, 'weighted')\n if np.mean(class_f1) > max_avg_f1:\n max_threshold_set = threshold_set\n max_avg_f1 = max(max_avg_f1, np.mean(class_f1))\n max_hamming_score = hamming_score\n max_exact_accuracy = exact_accuracy\n max_fuzzy_accuracy = fuzzy_accuracy \n max_exact_accuracy_pos = exact_accuracy_pos\n max_fuzzy_accuracy_pos = fuzzy_accuracy_pos\n max_avg_rocauc = rocauc_score\n max_confusion_matrix = multilabel_confusion_matrix(aspect_vectors, predict_softmax)\n \n print(\"threshold set:\", max_threshold_set)\n print(\"Confusion Matrix for Each Aspect:\\n\" + \"=\"*60)\n print(max_confusion_matrix)\n print(\"Result of Metrics for Evaluation:\\n\" + \"=\"*60)\n print(\"Hamming score:\", max_hamming_score)\n print(\"Exact accuracy:\", max_exact_accuracy)\n print(\"Fuzzy accuracy:\", max_fuzzy_accuracy)\n print(\"Exact accuracy (exclude negative):\", max_exact_accuracy_pos )\n print(\"Fuzzy accuracy (exclude negative):\", max_fuzzy_accuracy_pos)\n print(\"Average F1 
Score: \", max_avg_f1)\n print(\"ROC AUC Score: \", max_avg_rocauc)\n" ]
[ [ "sklearn.metrics.roc_auc_score", "sklearn.metrics.hamming_loss", "sklearn.preprocessing.MultiLabelBinarizer", "sklearn.metrics.multilabel_confusion_matrix", "numpy.mean", "numpy.zeros", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mike0sv/catalyst
[ "54597a3b3d78e5b6c3084dfc3c28185600c79c90" ]
[ "catalyst/rl/scripts/run_samplers.py" ]
[ "#!/usr/bin/env python\n# isort:skip_file\n\nimport os\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\n\nimport argparse # noqa E402\nimport atexit # noqa E402\nimport copy # noqa E402\nimport multiprocessing as mp # noqa E402\nimport time # noqa E402\n\nimport torch # noqa E402\ntorch.set_num_threads(1)\n\nfrom catalyst.rl.core import ( # noqa E402\n ExplorationHandler, Sampler, ValidSampler\n)\nfrom catalyst.rl.registry import ( # noqa E402\n DATABASES, ENVIRONMENTS, OFFPOLICY_ALGORITHMS, ONPOLICY_ALGORITHMS\n)\nfrom catalyst.rl.scripts.misc import ( # noqa E402\n OFFPOLICY_ALGORITHMS_NAMES, ONPOLICY_ALGORITHMS_NAMES\n)\nfrom catalyst.utils import ( # noqa E402\n boolean_flag, prepare_cudnn, set_global_seed\n)\nfrom catalyst.utils.config import parse_args_uargs # noqa E402\nfrom catalyst.utils.scripts import import_module # noqa E402\n\n\ndef build_args(parser):\n parser.add_argument(\n \"--config\",\n \"--configs\",\n \"-C\",\n nargs=\"+\",\n help=\"path to config/configs\",\n metavar=\"CONFIG_PATH\",\n dest=\"configs\",\n required=True\n )\n parser.add_argument(\"--expdir\", type=str, default=None)\n parser.add_argument(\"--logdir\", type=str, default=None)\n parser.add_argument(\"--resume\", type=str, default=None)\n parser.add_argument(\"--seed\", type=int, default=42)\n\n parser.add_argument(\"--train\", type=int, default=None)\n parser.add_argument(\"--valid\", type=int, default=None)\n parser.add_argument(\"--infer\", type=int, default=None)\n parser.add_argument(\"--vis\", type=int, default=None)\n\n boolean_flag(parser, \"check\", default=False)\n boolean_flag(parser, \"db\", default=True)\n\n parser.add_argument(\"--run-delay\", type=int, default=1)\n boolean_flag(parser, \"daemon\", default=True)\n parser.add_argument(\"--sampler-id\", type=int, default=0)\n\n boolean_flag(\n parser, \"deterministic\",\n default=None,\n help=\"Deterministic mode if running in CuDNN backend\"\n )\n boolean_flag(\n parser, \"benchmark\",\n default=None,\n help=\"Use CuDNN benchmark\"\n )\n\n return parser\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n build_args(parser)\n args, unknown_args = parser.parse_known_args()\n return args, unknown_args\n\n\ndef run_sampler(\n *,\n config,\n logdir,\n algorithm_fn,\n environment_fn,\n visualize,\n mode,\n seed=42,\n id=None,\n resume=None,\n db=True,\n exploration_power=1.0,\n sync_epoch=False\n):\n config_ = copy.deepcopy(config)\n id = 0 if id is None else id\n seed = seed + id\n set_global_seed(seed)\n\n db_server = DATABASES.get_from_params(\n **config.get(\"db\", {}), sync_epoch=sync_epoch\n ) if db else None\n\n env = environment_fn(\n **config_[\"environment\"],\n visualize=visualize,\n mode=mode,\n sampler_id=id,\n )\n agent = algorithm_fn.prepare_for_sampler(env_spec=env, config=config_)\n\n exploration_params = config_[\"sampler\"].pop(\"exploration_params\", None)\n exploration_handler = ExplorationHandler(env=env, *exploration_params) \\\n if exploration_params is not None \\\n else None\n if exploration_handler is not None:\n exploration_handler.set_power(exploration_power)\n\n seeds = dict(\n (k, config_[\"sampler\"].pop(f\"{k}_seeds\", None))\n for k in [\"train\", \"valid\", \"infer\"]\n )\n seeds = seeds[mode]\n\n if algorithm_fn in OFFPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"critic\" if env.discrete_actions else \"actor\"\n elif algorithm_fn in ONPOLICY_ALGORITHMS.values():\n weights_sync_mode = \"actor\"\n else:\n # @TODO: add registry for algorithms, trainers, samplers\n 
raise NotImplementedError()\n\n if mode in [\"valid\"]:\n sampler_fn = ValidSampler\n else:\n sampler_fn = Sampler\n\n monitoring_params = config.get(\"monitoring_params\", None)\n\n sampler = sampler_fn(\n agent=agent,\n env=env,\n db_server=db_server,\n exploration_handler=exploration_handler,\n logdir=logdir,\n id=id,\n mode=mode,\n weights_sync_mode=weights_sync_mode,\n sampler_seed=seed,\n trajectory_seeds=seeds,\n monitoring_params=monitoring_params,\n **config_[\"sampler\"],\n )\n\n if resume is not None:\n sampler.load_checkpoint(filepath=resume)\n\n sampler.run()\n\n\ndef main(args, unknown_args):\n args, config = parse_args_uargs(args, unknown_args)\n set_global_seed(args.seed)\n prepare_cudnn(args.deterministic, args.benchmark)\n\n args.vis = args.vis or 0\n args.infer = args.infer or 0\n args.valid = args.valid or 0\n args.train = args.train or 0\n\n if args.expdir is not None:\n module = import_module(expdir=args.expdir) # noqa: F841\n\n environment_name = config[\"environment\"].pop(\"environment\")\n environment_fn = ENVIRONMENTS.get(environment_name)\n\n algorithm_name = config[\"algorithm\"].pop(\"algorithm\")\n\n if algorithm_name in OFFPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = OFFPOLICY_ALGORITHMS\n sync_epoch = False\n elif algorithm_name in ONPOLICY_ALGORITHMS_NAMES:\n ALGORITHMS = ONPOLICY_ALGORITHMS\n sync_epoch = True\n else:\n raise NotImplementedError()\n\n algorithm_fn = ALGORITHMS.get(algorithm_name)\n\n processes = []\n sampler_id = args.sampler_id\n\n def on_exit():\n for p in processes:\n p.terminate()\n\n atexit.register(on_exit)\n\n params = dict(\n seed=args.seed,\n logdir=args.logdir,\n algorithm_fn=algorithm_fn,\n environment_fn=environment_fn,\n config=config,\n resume=args.resume,\n db=args.db,\n sync_epoch=sync_epoch\n )\n\n if args.check:\n mode = \"train\"\n mode = \"valid\" if (args.valid is not None and args.valid > 0) else mode\n mode = \"infer\" if (args.infer is not None and args.infer > 0) else mode\n params_ = dict(\n visualize=(args.vis is not None and args.vis > 0),\n mode=mode,\n id=sampler_id\n )\n run_sampler(**params, **params_)\n return\n\n for i in range(args.vis):\n params_ = dict(\n visualize=True, mode=\"infer\", id=sampler_id, exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.infer):\n params_ = dict(\n visualize=False,\n mode=\"infer\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(args.valid):\n params_ = dict(\n visualize=False,\n mode=\"valid\",\n id=sampler_id,\n exploration_power=0.0\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for i in range(1, args.train + 1):\n exploration_power = i / args.train\n params_ = dict(\n visualize=False,\n mode=\"train\",\n id=sampler_id,\n exploration_power=exploration_power\n )\n p = mp.Process(\n target=run_sampler,\n kwargs=dict(**params, **params_),\n daemon=args.daemon,\n )\n p.start()\n processes.append(p)\n sampler_id += 1\n time.sleep(args.run_delay)\n\n for p in processes:\n p.join()\n\n\nif __name__ == \"__main__\":\n args, unknown_args = parse_args()\n main(args, 
unknown_args)\n" ]
[ [ "torch.set_num_threads" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
todd-deshane/aihwkit
[ "07269e29731f9a6482d25326400437f6bef2fc94", "07269e29731f9a6482d25326400437f6bef2fc94", "07269e29731f9a6482d25326400437f6bef2fc94" ]
[ "src/aihwkit/nn/modules/linear_mapped.py", "src/aihwkit/nn/functions.py", "examples/20_mnist_ddp.py" ]
[ "# -*- coding: utf-8 -*-\n\n# (C) Copyright 2020, 2021 IBM. All Rights Reserved.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Analog mapped layers.\"\"\"\n\nfrom typing import Optional, Tuple, List\n\nfrom torch import Tensor, cat, split, no_grad\nfrom torch.nn import Linear\n\nfrom aihwkit.nn.functions import AnalogFunction\nfrom aihwkit.nn.modules.base import AnalogModuleBase, RPUConfigAlias\nfrom aihwkit.simulator.configs import SingleRPUConfig\nfrom aihwkit.exceptions import ModuleError\n\n\nclass AnalogLinearMapped(AnalogModuleBase, Linear):\n \"\"\"Linear layer that uses an analog tile.\n\n Linear layer that uses an analog tile during its forward, backward\n and update passes. In contrast to\n :class:`~aihwkit.bb.modules.linear.Linear` the maximal in and/or\n out dimension can be restricted, in which case the linear layer is\n split into multiple parts and computed on multiple tiles of given\n max sizes.\n\n In contrast to :class:`~aihwkit.bb.modules.linear.Linear`, the\n bias vector (if requested) is always handled in digital (floating\n point).\n\n Note:\n Mapping is controlled by the :class:`aihwkit.simulator.configs.utils.MappingParameter`.\n\n Note:\n The tensor parameters of this layer (``.weight`` and ``.bias``) are not\n guaranteed to contain the same values as the internal weights and biases\n stored in the analog tile. Please use ``set_weights`` and\n ``get_weights`` when attempting to read or modify the weight/bias. This\n read/write process can simulate the (noisy and inexact) analog writing\n and reading of the resistive elements.\n\n Args:\n in_features: input vector size (number of columns).\n out_features: output vector size (number of rows).\n rpu_config: resistive processing unit configuration.\n bias: whether to use a bias row on the analog tile or not\n realistic_read_write: whether to enable realistic read/write\n for setting initial weights and read out of weights\n weight_scaling_omega: the weight value where the max\n weight will be scaled to. 
If zero, no weight scaling will\n be performed\n \"\"\"\n # pylint: disable=abstract-method, too-many-locals, too-many-instance-attributes\n\n __constants__ = ['in_features', 'out_features', 'realistic_read_write', 'weight_scaling_omega',\n 'digital_bias', 'analog_bias', 'use_bias']\n in_features: int\n out_features: int\n realistic_read_write: bool\n weight_scaling_omega: float\n digital_bias: bool\n analog_bias: bool\n use_bias: bool\n in_sizes: List[int]\n out_sizes: List[int]\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n rpu_config: Optional[RPUConfigAlias] = None,\n realistic_read_write: bool = False,\n weight_scaling_omega: float = 0.0,\n ):\n\n # Call super() after tile creation, including ``reset_parameters``.\n Linear.__init__(self, in_features, out_features, bias=bias)\n\n # Create tiles\n if rpu_config is None:\n rpu_config = SingleRPUConfig()\n\n AnalogModuleBase.__init__(\n self,\n in_features,\n out_features,\n bias,\n realistic_read_write,\n weight_scaling_omega,\n rpu_config.mapping\n )\n if self.analog_bias:\n raise ModuleError(\"AnalogLinearMapped only supports digital bias.\")\n\n # More than one tile may need to be created. If so, divide\n # weight matrix into equal pieces along input dimension with\n # as many tiles as needed\n max_input_size = rpu_config.mapping.max_input_size\n max_output_size = rpu_config.mapping.max_output_size\n\n self.in_sizes = self.get_split_sizes(in_features, max_input_size)\n self.out_sizes = self.get_split_sizes(out_features, max_output_size)\n\n self.analog_tile_array = []\n for i, in_tile_size in enumerate(self.in_sizes):\n in_tiles = []\n for j, out_tile_size in enumerate(self.out_sizes):\n tile = rpu_config.tile_class(out_tile_size,\n in_tile_size,\n rpu_config,\n bias=self.analog_bias)\n self.register_analog_tile(tile, name=f\"{i}_{j}\")\n in_tiles.append(tile)\n self.analog_tile_array.append(in_tiles)\n\n # Set weights from the reset_parameters\n self.set_weights(self.weight, self.bias)\n\n # Unregister weight/bias as a parameter but keep for sync\n self.unregister_parameter('weight')\n\n if self.analog_bias:\n self.unregister_parameter('bias')\n\n def get_split_sizes(self, size: int, split_max_size: int) -> List[int]:\n \"\"\" Computed the split sizes.\n\n Args:\n size: number of elements of the layer in one dimension\n split_max_size: max size of the split\n\n Returns:\n List of split sizes\n \"\"\"\n if split_max_size <= 0:\n return [size]\n\n n_splits = (size + split_max_size - 1) // split_max_size\n base, extra = divmod(size, n_splits)\n return [base + (i < extra) for i in range(n_splits)]\n\n def set_weights(\n self,\n weight: Tensor,\n bias: Optional[Tensor] = None,\n force_exact: bool = False\n ) -> None:\n \"\"\"Set the weight (and bias) with given Tensors.\n\n This uses an realistic write if the property ``realistic_read_write``\n of the layer is set, unless it is overwritten by ``force_exact``. It\n uses a scaled write if ``weight_scaling_omega`` is positive (see\n :meth:`~aihwkit.simulator.tiles.base.BaseTile.set_weights_scaled`).\n\n Note:\n This is the recommended way for setting the weight/bias matrix of\n the analog tile, as it will correctly store the weights into the\n internal memory. 
Directly writing to ``self.weight`` and\n ``self.bias`` might yield wrong results as they are not always in\n sync with the analog tile Parameters, for performance reasons.\n\n Args:\n weight: weight matrix\n bias: bias vector\n force_exact: forces an exact write to the analog tiles\n\n \"\"\"\n shape = [self.out_features, self.in_features]\n weight = weight.clone().reshape(shape)\n\n realistic = self.realistic_read_write and not force_exact\n\n in_start = in_end = 0\n for in_size, in_tiles in zip(self.in_sizes, self.analog_tile_array):\n in_end += in_size\n out_start = out_end = 0\n for out_size, analog_tile in zip(self.out_sizes, in_tiles):\n out_end += out_size\n\n tile_weight = weight[out_start:out_end, in_start:in_end]\n\n if self.weight_scaling_omega > 0.0:\n analog_tile.set_weights_scaled(tile_weight, None,\n realistic=realistic,\n omega=self.weight_scaling_omega)\n else:\n analog_tile.set_weights(tile_weight, None, realistic=realistic)\n\n out_start = out_end\n in_start = in_end\n\n if self.digital_bias and bias is not None:\n with no_grad():\n self.bias.data[:] = bias[:]\n\n self._sync_weights_from_tile()\n\n def get_weights(self, force_exact: bool = False) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Get the weight (and bias) tensors.\n\n This uses an realistic read if the property ``realistic_read_write`` of\n the layer is set, unless it is overwritten by ``force_exact``. It\n scales the analog weights by the digital alpha scale if\n ``weight_scaling_omega`` is positive (see\n :meth:`~aihwkit.simulator.tiles.base.BaseTile.get_weights_scaled`).\n\n Note:\n This is the recommended way for setting the weight/bias matrix from\n the analog tile, as it will correctly fetch the weights from the\n internal memory. Accessing ``self.weight`` and ``self.bias`` might\n yield wrong results as they are not always in sync with the\n analog tile library, for performance reasons.\n\n Args:\n force_exact: forces an exact read to the analog tiles\n\n Returns:\n tuple: weight matrix, bias vector\n\n \"\"\"\n\n realistic = self.realistic_read_write and not force_exact\n\n weight_lst = []\n for in_tiles in self.analog_tile_array:\n in_tile_weight = []\n for analog_tile in in_tiles:\n if self.weight_scaling_omega > 0.0:\n tile_weight, _ = analog_tile.get_weights_scaled(realistic=realistic)\n else:\n tile_weight, _ = analog_tile.get_weights(realistic=realistic)\n in_tile_weight.append(tile_weight)\n weight_lst.append(cat(in_tile_weight, 0))\n\n weight = cat(weight_lst, 1)\n\n if self.digital_bias:\n with no_grad():\n return weight, self.bias.data.detach().cpu()\n return weight, None\n\n def reset_parameters(self) -> None:\n \"\"\"Reset the parameters (weight and bias).\"\"\"\n super().reset_parameters()\n if self.analog_tile_count():\n self.set_weights(self.weight, self.bias)\n\n def forward(self, x_input: Tensor) -> Tensor:\n \"\"\"Compute the forward pass.\"\"\"\n # pylint: disable=arguments-differ,arguments-renamed\n\n if self.analog_tile_count() == 1:\n out = AnalogFunction.apply(\n self.analog_tile_array[0][0].get_analog_ctx(), x_input,\n self.analog_tile_array[0][0].shared_weights, not self.training)\n\n if self.digital_bias:\n return out + self.bias\n return out\n\n # mapped version\n last_dim = x_input.ndim - 1\n splits = split(x_input, self.in_sizes, dim=last_dim)\n result = None # type: Tensor\n for idx, (x, in_tiles) in enumerate(zip(splits, self.analog_tile_array)):\n out_result = []\n\n for analog_tile in in_tiles:\n output = AnalogFunction.apply(\n analog_tile.get_analog_ctx(), x,\n 
analog_tile.shared_weights, not self.training)\n out_result.append(output)\n\n if idx == 0:\n result = cat(out_result, last_dim)\n else:\n result.add_(cat(out_result, last_dim))\n\n # add bias to final result\n if self.digital_bias:\n return result.add_(self.bias)\n return result\n\n def extra_repr(self) -> str:\n \"\"\"Set the extra representation of the module.\n\n Returns:\n A string with the extra representation.\n \"\"\"\n output = AnalogModuleBase.extra_repr(self)\n output += ', mapping={}'.format((len(self.in_sizes), len(self.out_sizes)))\n\n return output\n\n @classmethod\n def from_digital(\n cls,\n module: Linear,\n rpu_config: Optional[RPUConfigAlias] = None,\n realistic_read_write: bool = False,\n weight_scaling_omega: float = 0.0,\n ) -> 'AnalogLinearMapped':\n \"\"\"Return an AnalogLinearMapped layer from a torch Linear layer.\n\n Args:\n module: The torch module to convert. All layers that are\n defined in the ``conversion_map``.\n rpu_config: RPU config to apply to all converted tiles.\n Applied to all converted tiles.\n realistic_read_write: Whether to use closed-loop programming\n when setting the weights. Applied to all converted tiles.\n weight_scaling_omega: If non-zero, the analog weights will be\n scaled by ``weight_scaling_omega`` divided by the absolute\n maximum value of the original weight matrix.\n\n Note:\n Make sure that the weight max and min settings of the\n device support the desired analog weight range.\n\n Returns:\n an AnalogLinearMapped layer based on the digital Linear ``module``.\n \"\"\"\n analog_module = cls(module.in_features,\n module.out_features,\n module.bias is not None,\n rpu_config,\n realistic_read_write,\n weight_scaling_omega,\n )\n\n analog_module.set_weights(module.weight, module.bias)\n return analog_module\n", "# -*- coding: utf-8 -*-\n\n# (C) Copyright 2020, 2021 IBM. All Rights Reserved.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Autograd functions for aihwkit.\"\"\"\n\nfrom typing import Any, Optional, Tuple\n\nfrom torch import Tensor, empty_like\nfrom torch.autograd import Function\nfrom aihwkit.optim.context import AnalogContext\n\n\nclass AnalogFunctionBase(Function):\n \"\"\"Base function for analog functions.\"\"\"\n # pylint: disable=arguments-differ, protected-access, abstract-method\n\n @staticmethod\n def forward(\n ctx: Any,\n analog_ctx: AnalogContext,\n input_: Tensor,\n shared_weights: Optional[Tensor] = None,\n is_test: bool = False) -> Tensor:\n \"\"\"Execute the forward pass in the analog tile.\n\n Note: Indexed versions can used when analog_ctx.use_indexed is\n set to True.\n \"\"\"\n # Store in context for using during `backward()`.\n analog_tile = analog_ctx.analog_tile\n ctx.analog_ctx = analog_ctx\n ctx.shared_weights = None\n ctx.save_for_backward(input_)\n\n use_indexed = analog_ctx.use_indexed\n if shared_weights is not None:\n ctx.shared_weights = shared_weights\n analog_tile.ensure_shared_weights(shared_weights)\n analog_ctx.use_torch_update = True\n else:\n analog_ctx.use_torch_update = False\n\n # Invoke the forward pass in the tile instance.\n if use_indexed:\n return analog_tile.forward_indexed(input_, is_test)\n return analog_tile.forward(input_, is_test)\n\n @staticmethod\n def backward(\n ctx: Any,\n grad_output: Tensor,\n ) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]]:\n \"\"\"Execute the backward pass in the analog tile.\"\"\"\n analog_ctx = ctx.analog_ctx\n analog_tile = analog_ctx.analog_tile\n input_, = ctx.saved_tensors\n\n shared_weights_grad = None\n use_indexed = analog_ctx.use_indexed\n\n if ctx.shared_weights is not None:\n analog_tile.ensure_shared_weights(ctx.shared_weights)\n\n # Call the backward function in the tile instance.\n if use_indexed:\n grad_input = analog_tile.backward_indexed(grad_output)\n else:\n grad_input = analog_tile.backward(grad_output)\n\n if analog_ctx.use_torch_update:\n # Grad computed directly (for inference training)\n shared_weights_grad = empty_like(ctx.shared_weights)\n analog_tile.set_delta_weights(shared_weights_grad)\n if use_indexed:\n analog_tile.update_indexed(input_, grad_output)\n else:\n analog_tile.update(input_, grad_output)\n analog_tile.reset_delta_weights()\n else:\n # Store activation and errors for optimizer (for analog training)\n analog_ctx.analog_input.append(input_)\n analog_ctx.analog_grad_output.append(grad_output)\n\n return None, grad_input, shared_weights_grad, None\n\n\nclass AnalogFunction(AnalogFunctionBase):\n \"\"\"Function that delegates into a `RPU` unit.\"\"\"\n # pylint: disable=arguments-differ, abstract-method\n\n @staticmethod\n def forward(\n ctx: Any,\n analog_ctx: AnalogContext,\n input_: Tensor,\n shared_weights: Optional[Tensor] = None,\n is_test: bool = False) -> Tensor:\n \"\"\"Execute the forward pass in the analog tile.\"\"\"\n analog_ctx.use_indexed = False\n return AnalogFunctionBase.forward(\n ctx, analog_ctx, input_, shared_weights, is_test)\n\n\nclass AnalogIndexedFunction(AnalogFunctionBase):\n \"\"\"Function that delegates into a `RPU` unit to use the indexed 
forward/backward/update.\"\"\"\n # pylint: disable=arguments-differ, abstract-method\n\n @staticmethod\n def forward(\n ctx: Any,\n analog_ctx: AnalogContext,\n input_: Tensor,\n shared_weights: Optional[Tensor] = None,\n is_test: bool = False) -> Tensor:\n \"\"\"Execute the forward pass in the analog tile.\"\"\"\n analog_ctx.use_indexed = True\n return AnalogFunctionBase.forward(\n ctx, analog_ctx, input_, shared_weights, is_test)\n", "# -*- coding: utf-8 -*-\n\n# (C) Copyright 2020, 2021 IBM. All Rights Reserved.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"aihwkit example 20: MNIST training with PyTorch Distributed Data Parallel (DDP).\n\nMNIST training example based on the paper:\nhttps://www.frontiersin.org/articles/10.3389/fnins.2016.00333/full\n\nUses learning rates of η = 0.01, 0.005, and 0.0025\nfor epochs 0–10, 11–20, and 21–30, respectively.\n\"\"\"\n# pylint: disable=invalid-name\n# pylint: disable=too-many-locals\n\nimport os\nfrom time import time\n\n# Imports from PyTorch.\nimport torch\nfrom torch import nn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.optim.lr_scheduler import StepLR\n\nfrom torchvision import datasets, transforms\n\n\n# Imports from aihwkit.\nfrom aihwkit.nn import AnalogLinear, AnalogLinearMapped, AnalogSequential\nfrom aihwkit.optim import AnalogSGD\nfrom aihwkit.simulator.configs import InferenceRPUConfig\n\n# Check device\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Path where the datasets will be stored.\nPATH_DATASET = os.path.join('data', 'DATASET')\n\n# Network definition.\nINPUT_SIZE = 784\nHIDDEN_SIZES = [256, 128]\nOUTPUT_SIZE = 10\n\n# Training parameters.\nEPOCHS = 30\nBATCH_SIZE = 64\n\n\ndef init_process(rank, size, fn, backend='nccl'):\n \"\"\" Initialize the distributed environment. \"\"\"\n print(\"init process: \", rank)\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '29411'\n dist.init_process_group(backend, rank=rank, world_size=size)\n fn()\n\n\ndef cleanup():\n \"\"\" Destroy distributed processes once they are complete. 
\"\"\"\n dist.destroy_process_group()\n\n\ndef load_images():\n \"\"\"Load images for train from the torchvision datasets.\"\"\"\n rank = dist.get_rank()\n size = dist.get_world_size()\n transform = transforms.Compose([transforms.ToTensor()])\n\n # Load the images.\n train_set = datasets.MNIST(PATH_DATASET,\n download=True, train=True, transform=transform)\n\n val_set = datasets.MNIST(PATH_DATASET,\n download=True, train=False, transform=transform)\n\n train_sampler = torch.utils.data.DistributedSampler(train_set, num_replicas=size, rank=rank,\n shuffle=True, seed=42)\n\n train_data = torch.utils.data.DataLoader(train_set,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=size,\n sampler=train_sampler,\n pin_memory=True)\n\n validation_data = torch.utils.data.DataLoader(val_set,\n batch_size=BATCH_SIZE,\n shuffle=True,\n num_workers=size,\n pin_memory=True)\n\n return train_data, validation_data\n\n\ndef create_analog_network(input_size, hidden_sizes, output_size):\n \"\"\"Create the neural network using analog and digital layers.\n\n Args:\n input_size (int): size of the Tensor at the input.\n hidden_sizes (list): list of sizes of the hidden layers (2 layers).\n output_size (int): size of the Tensor at the output.\n\n Returns:\n nn.Module: created analog model\n \"\"\"\n model = AnalogSequential(\n AnalogLinear(input_size, hidden_sizes[0], True,\n rpu_config=InferenceRPUConfig()),\n nn.Sigmoid(),\n AnalogLinear(hidden_sizes[0], hidden_sizes[1], True,\n rpu_config=InferenceRPUConfig()),\n nn.Sigmoid(),\n AnalogLinearMapped(hidden_sizes[1], output_size, True,\n rpu_config=InferenceRPUConfig()),\n nn.LogSoftmax(dim=1)\n )\n\n return model\n\n\ndef create_sgd_optimizer(model):\n \"\"\"Create the analog-aware optimizer.\n\n Args:\n model (nn.Module): model to be trained.\n Returns:\n nn.Module: optimizer\n \"\"\"\n optimizer = AnalogSGD(model.parameters(), lr=0.05)\n optimizer.regroup_param_groups(model)\n\n return optimizer\n\n\ndef train(model, train_set):\n \"\"\"Train the network.\n\n Args:\n model (nn.Module): model to be trained.\n train_set (DataLoader): dataset of elements to use as input for training.\n \"\"\"\n rank = dist.get_rank()\n size = dist.get_world_size()\n device = torch.device('cuda', rank)\n\n classifier = nn.NLLLoss()\n optimizer = create_sgd_optimizer(model)\n scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n\n time_init = time()\n total_time = [torch.zeros(1, dtype=torch.float).to(device) for _ in range(size)]\n for epoch_number in range(EPOCHS):\n total_loss = torch.zeros(1, dtype=torch.float).to(device)\n total_images = torch.zeros(1, dtype=torch.int).to(device)\n for images, labels in train_set:\n images = images.to(device)\n labels = labels.to(device)\n # Flatten MNIST images into a 784 vector.\n images = images.view(images.shape[0], -1)\n\n optimizer.zero_grad()\n # Add training Tensor to the model (input).\n output = model(images)\n loss = classifier(output, labels)\n\n # Run training (backward propagation).\n loss.backward()\n\n # Optimize weights.\n optimizer.step()\n\n total_images += labels.size(0)\n total_loss += loss.item() * labels.size(0)\n\n dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)\n dist.all_reduce(total_images, op=dist.ReduceOp.SUM)\n\n if rank == 0:\n train_loss = total_loss.item() / total_images.item()\n print('Epoch {} - Training loss: {:.16f}'.format(epoch_number, train_loss))\n\n # Decay learning rate if needed.\n scheduler.step()\n\n dist.all_gather(total_time, torch.tensor(time()-time_init).to(device))\n\n if rank == 0:\n 
avg_train_time = torch.mean(torch.cat(total_time, 0))\n print('\\nAverage Training Time (s) = {}'.format(avg_train_time))\n\n\ndef test_evaluation(model, val_set):\n \"\"\"Test trained network\n\n Args:\n model (nn.Model): Trained model to be evaluated\n val_set (DataLoader): Validation set to perform the evaluation\n \"\"\"\n rank = dist.get_rank()\n size = dist.get_world_size()\n device = torch.device('cuda', rank)\n\n # Setup counter of images predicted to 0.\n predicted_ok = 0\n total_images = 0\n\n # make list to collect test ccuracies for each gpu\n acc_list = [torch.zeros(1, dtype=torch.float).to(device) for _ in range(size)]\n\n model.eval()\n\n for images, labels in val_set:\n # Predict image.\n images = images.to(device)\n labels = labels.to(device)\n\n images = images.view(images.shape[0], -1)\n pred = model(images)\n\n _, predicted = torch.max(pred.data, 1)\n total_images += labels.size(0)\n predicted_ok += (predicted == labels).sum().item()\n\n dist.all_gather(acc_list, torch.tensor(predicted_ok/total_images).to(device))\n\n if rank == 0:\n acc = torch.mean(torch.cat(acc_list, 0))\n print('\\nNumber Of Images Tested = {}'.format(total_images))\n print('Model Accuracy = {}'.format(acc))\n\n\ndef main():\n \"\"\"Train a PyTorch analog model with the MNIST dataset.\"\"\"\n rank = dist.get_rank()\n device = torch.device('cuda', rank)\n\n # Load datasets.\n train_dataset, validation_dataset = load_images()\n\n # Prepare the model.\n model = create_analog_network(INPUT_SIZE, HIDDEN_SIZES, OUTPUT_SIZE)\n\n if rank == 0:\n print(model)\n\n model.prepare_for_ddp()\n model.to(device)\n\n # enable parallel training\n model = DDP(model, device_ids=[rank], output_device=rank)\n\n # Train the model.\n train(model, train_dataset)\n\n # Evaluate the trained model.\n test_evaluation(model, validation_dataset)\n\n cleanup()\n\n\nif __name__ == '__main__':\n # Execute only if run as the entry point into the program\n world_size = 2\n print(\"Device count: \", world_size)\n processes = []\n ctx = mp.get_context(\"spawn\")\n\n for world_rank in range(world_size):\n print(\"Process: \", world_rank)\n p = ctx.Process(target=init_process, args=(world_rank, world_size, main))\n p.start()\n processes.append(p)\n\n for p in processes:\n p.join()\n" ]
[ [ "torch.split", "torch.no_grad", "torch.nn.Linear.__init__", "torch.cat" ], [ "torch.empty_like" ], [ "torch.max", "torch.cat", "torch.zeros", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.utils.data.DistributedSampler", "torch.distributed.init_process_group", "torch.nn.Sigmoid", "torch.tensor", "torch.optim.lr_scheduler.StepLR", "torch.nn.NLLLoss", "torch.nn.LogSoftmax", "torch.distributed.destroy_process_group", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "torch.multiprocessing.get_context", "torch.distributed.all_reduce" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DanielJMaher/compliance-checker
[ "944220a4a7bd0e945d7b4e468ffb524af5eca5b2" ]
[ "compliance_checker/tests/test_suite.py" ]
[ "from pkg_resources import resource_filename\nfrom compliance_checker.suite import CheckSuite\nfrom compliance_checker.base import Result, BaseCheck\nimport numpy as np\nimport unittest\nimport os\n\nstatic_files = {\n '2dim' : resource_filename('compliance_checker', 'tests/data/2dim-grid.nc'),\n 'bad_region' : resource_filename('compliance_checker', 'tests/data/bad_region.nc'),\n 'bad_data_type' : resource_filename('compliance_checker', 'tests/data/bad_data_type.nc'),\n 'test_cdl' : resource_filename('compliance_checker', 'tests/data/test_cdl.cdl'),\n 'test_cdl_nc' : resource_filename('compliance_checker', 'tests/data/test_cdl_nc_file.nc'),\n}\n\n\nclass TestSuite(unittest.TestCase):\n # @see\n # http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/\n\n def shortDescription(self):\n return None\n\n # override __str__ and __repr__ behavior to show a copy-pastable nosetest name for ion tests\n # ion.module:TestClassName.test_function_name\n def __repr__(self):\n name = self.id()\n name = name.split('.')\n if name[0] not in [\"ion\", \"pyon\"]:\n return \"%s (%s)\" % (name[-1], '.'.join(name[:-1]))\n else:\n return \"%s ( %s )\" % (name[-1], '.'.join(name[:-2]) + \":\" + '.'.join(name[-2:]))\n __str__ = __repr__\n\n def test_suite(self):\n # BWA: what's the purpose of this test? Just to see if the suite\n # runs without errors?\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['2dim'])\n cs.run(ds, 'acdd')\n\n def test_unicode_formatting(self):\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['bad_region'])\n score_groups = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in score_groups.items():\n groups, errors = rpair\n score_list, points, out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, points, out_of)\n\n def test_skip_checks(self):\n \"\"\"Tests that checks are properly skipped when specified\"\"\"\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['2dim'])\n # exclude title from the check attributes\n score_groups = cs.run(ds, ['check_high'], 'acdd')\n assert all(sg.name not in {'Conventions', 'title', 'keywords',\n 'summary'} for sg in score_groups['acdd'][0])\n\n def test_group_func(self):\n # This is checking for issue #183, where group_func results in\n # IndexError: list index out of range\n cs = CheckSuite()\n cs.load_all_available_checkers()\n ds = cs.load_dataset(static_files['bad_data_type'])\n score_groups = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in score_groups.items():\n groups, errors = rpair\n score_list, points, out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, points, out_of)\n\n def test_score_grouping(self):\n # Testing the grouping of results for output, which can fail\n # if some assumptions are not met, e.g. 
if a Result object has\n # a value attribute of unexpected type\n cs = CheckSuite()\n res = [\n Result(BaseCheck.MEDIUM, True, 'one'),\n Result(BaseCheck.MEDIUM, (1, 3), 'one'),\n Result(BaseCheck.MEDIUM, None, 'one'),\n Result(BaseCheck.MEDIUM, True, 'two'),\n Result(BaseCheck.MEDIUM, np.isnan(1), 'two') # value is type numpy.bool_\n ]\n score = cs.scores(res)\n self.assertEqual(score[0].name, 'one')\n self.assertEqual(score[0].value, (2, 4))\n self.assertEqual(score[1].name, 'two')\n self.assertEqual(score[1].value, (1, 2))\n\n def test_cdl_file(self):\n # Testing whether you can run compliance checker on a .cdl file\n cs = CheckSuite()\n cs.load_all_available_checkers()\n\n # Load the cdl file\n ds = cs.load_dataset(static_files['test_cdl'])\n vals = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in vals.items():\n groups, errors = rpair\n score_list, cdl_points, cdl_out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, cdl_points, cdl_out_of)\n ds.close()\n\n # Ok now load the nc file that it came from\n ds = cs.load_dataset(static_files['test_cdl_nc'])\n vals = cs.run(ds, 'cf')\n\n limit = 2\n for checker, rpair in vals.items():\n groups, errors = rpair\n score_list, nc_points, nc_out_of = cs.standard_output(limit, checker, groups)\n # This asserts that print is able to generate all of the unicode output\n cs.non_verbose_output_generation(score_list, groups, limit, nc_points, nc_out_of)\n ds.close()\n\n nc_file_path = static_files['test_cdl'].replace('.cdl', '.nc')\n self.addCleanup(os.remove, nc_file_path)\n\n # Ok the scores should be equal!\n self.assertEqual(nc_points, cdl_points)\n self.assertEqual(nc_out_of, cdl_out_of)\n" ]
[ [ "numpy.isnan" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gyungchan2110/ImageUtils
[ "618d032122d6eadeec4afc9fc6c6906fa71f0ff6" ]
[ "LungBoundaryCrop.py" ]
[ "# In[]\nimport cv2 \nimport numpy as np \nimport os \nfrom operator import eq\nimport random\nimport matplotlib.pyplot as plt \nfrom skimage import io\nimport shutil\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimgBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original\"\nsrcbase = \"D:/[Data]/[Lung_Segmentation]/WholeDataSetMask\"\n\n#classMaskBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180125_103950_Expand_40pixel/Masks/Mask_Rt Upper CB\"\n#lungMaskBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180324_LungMaskData/Imgs\"\nmaskdstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_LungMask\"\ncropmaskdstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_LungMask_Cropped\"\nmaskcropmaskdstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_LungMask_Cropped_Mask\"\ndstBase = \"D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180327_151800_2Classes_Original_Img2Mask_3Channel\"\n\n\n# Img_20180130_175720\n\n# Img_20180130_162001\n# Img_20180130_163512\n# Img_20180130_164744\n\n\nlowerFolders = [\"Normal\", \"Abnormal\"]\n#lowerFolders = [\"1_AS\", \"2_AR\", \"3_MS\", \"4_MR\", \"5_AS+AR\", \"6_MS_MR\"]\nsrcPaths = []\nimgPaths = []\nmaskdstPaths = []\ncropImgsdstPaths = []\nmaskcropImgsdstPaths = []\ndstPath = []\n\n\n\nfor folder in folders: \n\n if(not os.path.isdir(maskdstBase + \"/\" + folder)):\n os.mkdir(maskdstBase + \"/\" + folder)\n\n if(not os.path.isdir(cropmaskdstBase + \"/\" + folder)):\n os.mkdir(cropmaskdstBase + \"/\" + folder)\n\n if(not os.path.isdir(maskcropmaskdstBase + \"/\" + folder)):\n os.mkdir(maskcropmaskdstBase + \"/\" + folder)\n if(not os.path.isdir(dstBase + \"/\" + folder)):\n os.mkdir(dstBase + \"/\" + folder)\n\n for lowerFolder in lowerFolders:\n if(not os.path.isdir(maskdstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(maskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n if(not os.path.isdir(cropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(cropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n if(not os.path.isdir(maskcropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(maskcropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n if(not os.path.isdir(dstBase + \"/\" + folder + \"/\" + lowerFolder)):\n os.mkdir(dstBase + \"/\" + folder + \"/\" + lowerFolder)\n\n\n maskdstPaths.append(maskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n\n cropImgsdstPaths.append(cropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n maskcropImgsdstPaths.append(maskcropmaskdstBase + \"/\" + folder + \"/\" + lowerFolder)\n dstPath.append(dstBase + \"/\" + folder + \"/\" + lowerFolder)\n\n srcPaths.append(srcbase + \"/\" + lowerFolder)\n imgPaths.append(imgBase + \"/\" + folder + \"/\" + lowerFolder)\n\n\ndef run_Modyfying():\n \n for i, imgPath in enumerate(imgPaths) : \n for file in os.listdir(imgPath):\n LungBoundaryCrop(imgPath,srcPaths[i], maskdstPaths[i],cropImgsdstPaths[i],maskcropImgsdstPaths[i], file)\n break\n break\n\ndef LungBoundaryEnhancement(imgPath, maskPath, dstPath, filename):\n\n Img = 
cv2.imread(imgPath + \"/\" + filename, 0)\n Mask = cv2.imread(maskPath + \"/\" + filename, 0)\n Img = cv2.resize(Img, (1024,1024))\n Img = np.asarray(Img)\n Mask = np.asarray(Mask)\n\n Image = np.stack((Img, Img, Mask), -1)\n\n cv2.imwrite(dstPath + \"/\" + filename, Image)\n\n\ndef LungBoundaryCrop(imgPath, srcPath, maskdstPath,cropmaskdstPath, maskcropmaskdstPath, filename): \n \n \n #shutil.copyfile(srcPath + \"/\" + filename, maskdstPath + \"/\" + filename)\n \n maskImg = cv2.imread(maskdstPath + \"/\" + filename, 0)\n maskImg = np.asarray(maskImg, dtype = np.uint8)\n\n _, maskImg = cv2.threshold(maskImg, 127, 255, cv2.THRESH_BINARY)\n _, contours, _ = cv2.findContours(maskImg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n rects = []\n for cnt in contours:\n rects.append(cv2.boundingRect(cnt))\n\n tcomx = 10\n tcomy = 10 \n bcomx = 10 \n bcomy = 10\n\n top_x, top_y, bottom_x, bottom_y = 0, 0 ,0, 0\n\n rects.sort()\n\n top_x = min([x for (x, y, w, h) in rects]) - tcomx #26\n top_y = min([y for (x, y, w, h) in rects]) - tcomy #26\n bottom_x = max([x+w for (x, y, w, h) in rects]) + bcomx #234\n bottom_y = max([y+h for (x, y, w, h) in rects]) + bcomy #227\n \n #print(top_x, top_y, bottom_x, bottom_y)\n\n if(top_x <=0 ) : top_x = tcomx\n if(top_y <=0 ) : top_y = tcomy\n \n if(bottom_x >= 1024 ) : bottom_x = 1024 - tcomx\n if(bottom_y >= 1024 ) : bottom_y = 1024 - tcomy\n\n print((top_x + bottom_x)/2, (top_y + bottom_y)/2)\n center_shift_x = 512 - (int)((top_x + bottom_x)/2)\n center_shift_y = 512 - (int)((top_y + bottom_y)/2)\n\n\n # maskCrop = maskImg[top_y:bottom_y, top_x:bottom_x]\n # maskCrop = cv2.resize(maskCrop, (1024,1024))\n # cv2.imwrite(maskcropmaskdstPath + \"/\" + filename, maskCrop)\n\n Img = cv2.imread(imgPath + \"/\" + filename)\n Img = np.asarray(Img)\n Img = cv2.resize(Img, (1024,1024))\n # ImgCrop = Img[top_y*2:bottom_y*2, top_x*2:bottom_x*2, :]\n # ImgCrop = cv2.resize(ImgCrop, (1024,1024))\n # cv2.imwrite(cropmaskdstPath + \"/\" + filename, ImgCrop)\n # print(imgPath + \"/\" + filename)\n Img_Shifted = np.zeros(Img.shape)\n #Img_Shifted = Img_Shifted * 255\n Img_Shifted[:1024+center_shift_y, center_shift_x:] = Img[-center_shift_y:, :1024-center_shift_x]\n cv2.imwrite(\"D:/Temp/Shifted.png\", Img_Shifted)\n cv2.imwrite(\"D:/Temp/Original.png\", Img)\nrun_Modyfying()" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jerbaroo/bridge-sim
[ "c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af" ]
[ "bridge_sim/internal/plot/geometry/node.py" ]
[ "from typing import List\n\nimport numpy as np\n\nfrom bridge_sim.internal.plot import plt\nfrom bridge_sim.internal.plot.geometry.angles import ax_3d\nfrom bridge_sim.sim.model import Node\n\n\ndef node_scatter_3d(nodes: List[Node], new_fig: bool = True):\n # Split into separate arrays of x, y and z position, and colors.\n xs = np.array([n.x for n in nodes])\n ys = np.array([n.y for n in nodes])\n zs = np.array([n.z for n in nodes])\n\n # Setup a new 3D landscape figure.\n if new_fig:\n fig, ax = ax_3d(xs=xs, ys=zs, zs=ys)\n else:\n ax = plt.gca()\n\n ax.scatter(xs, zs, ys, marker=\"o\", s=1)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dravog7/nboost
[ "e0c086db2eaa8601c20244c81d8f5483b7491902" ]
[ "tests/unit/test_onnx_bert_rerank.py" ]
[ "from nboost.plugins.models import resolve_model\nfrom nboost import defaults\nimport unittest\nimport numpy as np\n\n\nclass TestPtBertRerankModelPlugin(unittest.TestCase):\n def setUp(self):\n self.model = resolve_model(\n model_dir='onnx-bert-base-msmarco',\n data_dir=defaults.data_dir,\n model_cls=''\n )\n self.pt_model = resolve_model(\n model_dir='pt-bert-base-uncased-msmarco',\n data_dir=defaults.data_dir,\n model_cls=''\n )\n\n def test_rank(self):\n QUERY = 'O wherefore art thou'\n ranks, scores = self.model.rank(QUERY, CHOICES)\n self.assertEqual(self.model.__class__.__name__, 'ONNXBertRerankModelPlugin')\n self.assertIsInstance(ranks, list)\n self.assertEqual(6, len(ranks))\n pt_ranks, pt_scores = self.pt_model.rank(QUERY, CHOICES)\n assert np.allclose(pt_scores, scores, rtol=1e-04, atol=1e-05)\n\n def tearDown(self) -> None:\n self.model.close()\n\n\nCHOICES = [\n 'From fairest creatures we desire increase' * 4,\n 'That thereby beautys rose might never die' * 4,\n 'But as the riper should by time decease' * 4,\n 'His tender heir might bear his memory:' * 4,\n 'But thou contracted to thine own bright eyes' * 4,\n 'Feedst thy lights flame with self-substantial fuel' * 4,\n]" ]
[ [ "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mpewsey/civpy
[ "bbf74b1c04ca9f7604831f5280cc80d796240e67", "bbf74b1c04ca9f7604831f5280cc80d796240e67" ]
[ "civpy/survey/alignment.py", "civpy/math/linalg.py" ]
[ "\"\"\"\nCopyright (c) 2019, Matt Pewsey\n\"\"\"\n\nimport attr\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .spatial_hash import SpatialHash\n\n__all__ = ['Alignment']\n\n\[email protected](hash=False)\nclass Alignment(object):\n \"\"\"\n A class representing a survey alignment.\n\n Parameters\n ----------\n name : str\n Name of alignment.\n pis : list\n A list of :class:`.PI`.\n stakes : list\n A list of :class:`.SurveyStake`.\n grid : float\n The grid size used for spatial hash generation.\n view_offset : float\n The offset beyond which points will be ignored when generating station\n coordinates from global coordinates.\n view_margin : float\n The station margin at the beginning and end of the alignment. Beyond\n this threshold, generated station coordinates from global coordinates\n will be ignored.\n\n Examples\n --------\n .. plot:: ../examples/survey/alignment_ex1.py\n :include-source:\n \"\"\"\n # Global class variables\n BISC_TOL = 1e-4 # Bisector station tolerance\n\n # Properties\n name = attr.ib()\n pis = attr.ib(default=[])\n stakes = attr.ib(default=[])\n grid = attr.ib(default=10)\n view_offset = attr.ib(default=15)\n view_margin = attr.ib(default=15)\n\n def set_stake_xy(self):\n \"\"\"\n Sets the xy coordinates for all station stakes assigned to the\n alignment.\n \"\"\"\n obj = []\n p = []\n\n for x in self.stakes:\n if x._type == 'station':\n obj.append(x)\n p.append((x.station, x.offset, x.rotation))\n\n p = np.array(p)\n c, s = np.cos(p[:,2]), np.sin(p[:,2])\n c, s = np.column_stack([c, -s]), np.column_stack([s, c])\n\n b = self.coordinates(p[:,0])\n p = self.coordinates(p[:,:2])\n p -= b\n\n c = np.einsum('ij,ij->i', p, c)\n s = np.einsum('ij,ij->i', p, s)\n p = np.column_stack([c, s])\n p += b\n\n for a, b in zip(obj, p):\n a[:2] = b\n\n def pi_coordinates(self):\n \"\"\"\n Returns an array of PI coordinates of shape (N, 3).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n return np.array(self.pis, dtype='float')\n\n def pi_radii(self):\n \"\"\"\n Returns an array of PI horizontal curve radii of shape (N,).\n \"\"\"\n return np.array([x.radius for x in self.pis], dtype='float')\n\n def azimuths(self):\n \"\"\"\n Returns an array of alignment azimuths in the shape (N,). Each element\n of the array corresponds to a PI index and represents the azimuth of\n the alignment ahead of that PI.\n \"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n elif len(self.pis) == 1:\n return np.zeros(1, dtype='float')\n\n x = self.pi_coordinates()\n dx = x[1:,:2] - x[:-1,:2]\n az = np.arctan2(dx[:,0], dx[:,1])\n az = np.append(az, az[-1])\n\n return np.asarray(az, dtype='float')\n\n def deflection_angles(self):\n \"\"\"\n Returns an array of PI deflection angles in the shape (N,). The angle\n is negative for turns to the left and positive for turns to the right.\n \"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n elif len(self.pis) == 1:\n return np.zeros(1, dtype='float')\n\n az = self.azimuths()\n da = az[1:] - az[:-1]\n i = (np.abs(da) > np.pi)\n da[i] -= 2 * np.pi * np.sign(da[i])\n da = np.insert(da, 0, 0)\n\n return np.asarray(da, dtype='float')\n\n def tangent_ordinates(self):\n \"\"\"\n Returns an array of tangent ordinates corresponding to each PI\n in the shape (N,). 
This value is the horizontal distance between\n the PI and PC and PI and PT.\n \"\"\"\n r = self.pi_radii()\n da = self.deflection_angles()\n return r * np.abs(np.tan(da/2))\n\n def curve_lengths(self):\n \"\"\"\n Returns an array of horizontal curve lengths corresponding to each PI\n in teh shape (N,). This value is the station distance between the\n PC and PT.\n \"\"\"\n r = self.pi_radii()\n da = self.deflection_angles()\n return r * np.abs(da)\n\n def middle_ordinates(self):\n \"\"\"\n Returns an array of middle ordinate distances corresponding to each PI\n in the shape (N,). This value is the horizontal distance between the\n MPC and midpoint of the chord line between the PC and PT.\n \"\"\"\n r = self.pi_radii()\n da = np.abs(self.deflection_angles())\n return r * (1 - np.cos(da/2))\n\n def external_ordinates(self):\n \"\"\"\n Returns an array of external ordinates corresponding to each PI\n in the shape (N,). This is the horizontal distance between the\n MPC and PI.\n \"\"\"\n r = self.pi_radii()\n da = self.deflection_angles()\n return r * np.abs(np.tan(da/2) * np.tan(da/4))\n\n def chord_distances(self):\n \"\"\"\n Returns an array of chord distances corresponding to each PI\n in teh shape (N,). This is the straight line horizontal distance\n between the PC and PT.\n \"\"\"\n r = self.pi_radii()\n da = np.abs(self.deflection_angles())\n return 2 * r * np.sin(da/2)\n\n def pt_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Point of Tangents (PT)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n t = self.tangent_ordinates()\n t = np.expand_dims(t, 1)\n uv = np.column_stack([np.sin(az), np.cos(az)])\n pt = pi[:,:2] + t * uv\n\n return np.asarray(pt, dtype='float')\n\n def pc_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Point of Curves (PC)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n da = self.deflection_angles()\n t = self.tangent_ordinates()\n t = np.expand_dims(t, 1)\n az -= da\n uv = np.column_stack([np.sin(az), np.cos(az)])\n pc = pi[:,:2] - t * uv\n\n return np.asarray(pc, dtype='float')\n\n def mpc_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Midpoint of Curves (MPC)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n da = self.deflection_angles()\n e = self.external_ordinates()\n az += (np.pi - da) / 2\n da = np.expand_dims(da, 1)\n e = np.expand_dims(e, 1)\n uv = np.column_stack([np.sin(az), np.cos(az)])\n mpc = pi[:,:2] + np.sign(da) * e * uv\n\n return np.asarray(mpc, dtype='float')\n\n def rp_coordinates(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Radius Points (RP)\n in the shape (N, 2).\n \"\"\"\n if not self.pis:\n return np.zeros((0, 3), dtype='float')\n\n pi = self.pi_coordinates()\n az = self.azimuths()\n da = self.deflection_angles()\n e = self.external_ordinates()\n e = np.expand_dims(e, 1)\n r = self.pi_radii()\n r = np.expand_dims(r, 1)\n az += (np.pi - da) / 2\n uv = np.column_stack([np.sin(az), np.cos(az)])\n da = np.expand_dims(da, 1)\n rp = pi[:,:2] + np.sign(da) * (e + r) * uv\n\n return np.asarray(rp, dtype='float')\n\n def pt_stations(self):\n \"\"\"\n Returns an array of (x, y) coordinates for the Point of Tangents (PT)\n in the shape (N, 2).\n 
\"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n x = self.pi_coordinates()\n tan = self.tangent_ordinates()\n dist = np.linalg.norm(x[:-1,:2] - x[1:,:2], axis=1)\n dist = np.insert(dist, 0, 0)\n dist += self.curve_lengths() - tan\n sta = np.cumsum(dist)\n sta[1:] -= np.cumsum(tan[:-1])\n\n return np.asarray(sta, dtype='float')\n\n def pc_stations(self):\n \"\"\"\n Returns an array of stations for the Point of Curves (PC) in the\n shape (N,).\n \"\"\"\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n sta = self.pt_stations() - self.curve_lengths()\n return np.asarray(sta, dtype='float')\n\n def mpc_stations(self):\n \"\"\"\n Returns an array of stations for the Midpoint of Curves (MPC)\n in the shape (N,).\n \"\"\"\n return 0.5 * (self.pt_stations() + self.pc_stations())\n\n def poc_transforms(self):\n \"\"\"\n Returns the POC transforms in the shape (N, 2, 2). These transforms\n project (x, y) global coordinates to (offset, station) station\n coordinates relative to the PI angle bisector.\n \"\"\"\n az = self.azimuths()\n da = self.deflection_angles()\n l = az - da / 2\n t = l + np.pi / 2\n t = np.column_stack([np.sin(t), np.cos(t), np.sin(l), np.cos(l)])\n\n return t.reshape(t.shape[0], 2, 2)\n\n def pot_transforms(self):\n \"\"\"\n Returns the POT transforms in the shape (N, 2, 2). These transforms\n project (x, y) global coordinates to (offset, station) station\n coordinates relative to the tangent line between PI's.\n \"\"\"\n l = self.azimuths()\n t = l + np.pi / 2\n t = np.column_stack([np.sin(t), np.cos(t), np.sin(l), np.cos(l)])\n return t.reshape(t.shape[0], 2, 2)\n\n def segment_indices(self, stations):\n \"\"\"\n Determines the segment type and PI indices corresponding to the\n specified stations. Returns an array of shape (N, 2). The first column\n of the array contains 1 if the station is located along an alignment\n tangent or 2 if the station is located on a horizontal curve or\n alignment bisector. 
The second column contains the index corresponding\n to the PI where the point is located.\n\n Parameters\n ----------\n stations : array\n An array of stations of shape (N,).\n \"\"\"\n sta = np.asarray(stations)\n pc_sta = self.pc_stations()\n pt_sta = self.pt_stations()\n s = SpatialHash(np.expand_dims(sta, 1), self.grid)\n\n # Set values beyond alignment limits\n r = np.zeros((sta.shape[0], 2), dtype='int')\n r[sta < 0] = 1, 0\n r[sta > pt_sta[-1]] = 1, pt_sta.shape[0]-1\n\n # POT segments\n ah = np.expand_dims(pc_sta[1:], 1)\n bk = np.expand_dims(pt_sta[:-1], 1)\n\n for i, (a, b) in enumerate(zip(ah, bk)):\n f = s.query_range(b, a, 0)\n r[f] = 1, i\n\n # POC segments\n f = (self.curve_lengths() == 0)\n pc_sta[f] -= Alignment.BISC_TOL\n pt_sta[f] += Alignment.BISC_TOL\n\n ah = np.expand_dims(pt_sta[1:-1], 1)\n bk = np.expand_dims(pc_sta[1:-1], 1)\n\n for i, (a, b) in enumerate(zip(ah, bk)):\n f = s.query_range(b, a, 0)\n r[f] = 2, i+1\n\n return r\n\n def _pot_coordinates(self, result, seg, sta_coords):\n \"\"\"\n Assigns the POT coordinates for :meth:`.coordinates`.\n\n Parameters\n ----------\n result : array\n The array to which the results will be added.\n seg : array\n The segment indices array.\n sta_coords : array\n An array of station coordinates of shape (N, 2).\n \"\"\"\n f = (seg[:,0] == 1)\n\n if not f.any():\n return\n\n sta = np.expand_dims(sta_coords[f,0], 1)\n off = np.expand_dims(sta_coords[f,1], 1)\n\n i = seg[f,1]\n t = self.pot_transforms()[i]\n tx, ty = t[:,0], t[:,1]\n pt_coord = self.pt_coordinates()[i]\n pt_sta = np.expand_dims(self.pt_stations()[i], 1)\n\n result[f] = tx * off + ty * (sta - pt_sta) + pt_coord\n\n def _poc_bisc_coordinates(self, result, seg, sta_coords):\n \"\"\"\n Assigns the POC bisector coordinates for :meth:`.coordinates`.\n\n Parameters\n ----------\n result : array\n The array to which the results will be added.\n seg : array\n The segment indices array.\n sta_coords : array\n An array of station coordinates of shape (N, 2).\n \"\"\"\n f = (seg[:,0] == 2) & (self.curve_lengths() == 0)[seg[:,1]]\n\n if not f.any():\n return\n\n off = np.expand_dims(sta_coords[f,1], 1)\n\n i = seg[f,1]\n tx = self.poc_transforms()[i,0]\n rp_coord = self.rp_coordinates()[i]\n\n result[f] = tx * off + rp_coord\n\n def _poc_curve_coordinates(self, result, seg, sta_coords):\n \"\"\"\n Assigns the POC curve coordinates for :meth:`.coordinates`.\n\n Parameters\n ----------\n result : array\n The array to which the results will be added.\n seg : array\n The segment indices array.\n sta_coords : array\n An array of station coordinates of shape (N, 2).\n \"\"\"\n l = self.curve_lengths()\n f = (seg[:,0] == 2) & (l != 0)[seg[:,1]]\n\n if not f.any():\n return\n\n sta = sta_coords[f,0]\n off = sta_coords[f,1]\n\n i = seg[f,1]\n tx = self.poc_transforms()[i,0]\n mpc_sta = self.mpc_stations()[i]\n rp_coord = self.rp_coordinates()[i]\n da = self.deflection_angles()[i]\n r = np.expand_dims(self.pi_radii()[i], 1)\n\n beta = da * (mpc_sta - sta) / l[i]\n c, s = np.cos(beta), np.sin(beta)\n c, s = np.column_stack([c, -s]), np.column_stack([s, c])\n\n c = np.einsum('ij,ij->i', tx, c)\n s = np.einsum('ij,ij->i', tx, s)\n\n tx = np.column_stack([c, s])\n da = np.sign(np.expand_dims(da, 1))\n off = np.expand_dims(off, 1)\n\n result[f] = tx * (off - da * r) + rp_coord\n\n def coordinates(self, sta_coords):\n \"\"\"\n Returns the (x, y) or (x, y, z) global coordinates corresponding\n to the input station coordinates. 
Result is in the shape of (N, 2)\n of (N, 3).\n\n Parameters\n ----------\n sta_coords : array\n An array of (station), (station, offset), or (station, offset, z)\n coordinates of the shape (N,), (N, 2) or (N, 3).\n \"\"\"\n sta_coords = np.asarray(sta_coords)\n\n # If shape is (N,), add zero offsets\n if len(sta_coords.shape) == 1:\n sta_coords = np.column_stack([sta_coords, np.zeros(sta_coords.shape[0])])\n\n result = np.zeros((sta_coords.shape[0], 2), dtype='float')\n seg = self.segment_indices(sta_coords[:,0])\n\n self._pot_coordinates(result, seg, sta_coords)\n self._poc_bisc_coordinates(result, seg, sta_coords)\n self._poc_curve_coordinates(result, seg, sta_coords)\n\n # Add z coordinate to result if available\n if sta_coords.shape[1] == 3:\n result = np.column_stack([result, sta_coords[:,2]])\n\n return np.asarray(result, dtype='float')\n\n def _pot_station_coordinates(self, result, spatial_hash, coords):\n \"\"\"\n Adds the POT station coordinates within the view.\n\n Parameters\n ----------\n result : dict\n The dictionary to which the results will be added.\n spatial_hash : array\n The spatial hash.\n coords : array\n An array of coordinates of shape (N, 2) or (N, 3).\n \"\"\"\n t = self.pot_transforms()\n pt_sta = self.pt_stations()\n pt_coord = self.pt_coordinates()\n\n bk = self.pt_coordinates()[:-1]\n ah = self.pc_coordinates()[1:]\n\n if t.shape[0] > 0:\n bk[0] -= self.view_margin * t[0, 1]\n ah[-1] += self.view_margin * t[-1, 1]\n\n for i, (a, b) in enumerate(zip(ah, bk)):\n f = spatial_hash.query_range(b, a, self.view_offset)\n\n if f.shape[0] == 0:\n continue\n\n delta = coords[f,:2] - pt_coord[i]\n sta = np.dot(delta, t[i,1]) + pt_sta[i]\n off = np.dot(delta, t[i,0])\n\n if coords.shape[1] == 3:\n p = np.column_stack([sta, off, coords[f,2]])\n else:\n p = np.column_stack([sta, off])\n\n for n, m in enumerate(f):\n if m not in result:\n result[m] = []\n result[m].append(p[n])\n\n def _poc_station_coordinates(self, result, spatial_hash, coords):\n \"\"\"\n Adds the POC station coordinates within the view.\n\n Parameters\n ----------\n result : dict\n The dictionary to which the results will be added.\n spatial_hash : array\n The spatial hash.\n coords : array\n An array of coordinates of shape (N, 2) or (N, 3).\n \"\"\"\n l = self.curve_lengths()\n t = self.poc_transforms()\n da = self.deflection_angles()\n pc_sta = self.pc_stations()\n pt_sta = self.pt_stations()\n rp_coord = self.rp_coordinates()\n pt_coord = self.pt_coordinates()\n\n for i in range(1, len(self.pis)-1):\n r = self.pis[i].radius\n ro = r + self.view_offset\n ri = max(r - self.view_offset, 0)\n f = spatial_hash.query_point(rp_coord[i], ro, ri)\n\n if f.shape[0] == 0:\n continue\n\n if l[i] == 0:\n # Angle bisector\n delta = coords[f,:2] - pt_coord[i]\n sta = np.dot(delta, t[i,1]) + pt_sta[i]\n off = np.dot(delta, t[i,0])\n\n g = ((np.abs(off) <= self.view_offset)\n & (sta >= pt_sta[i] - Alignment.BISC_TOL)\n & (sta <= pt_sta[i] + Alignment.BISC_TOL))\n else:\n # Horizontal curve\n delta = pt_coord[i] - rp_coord[i]\n delta = np.arctan2(delta[0], delta[1])\n p = coords[f,:2] - rp_coord[i]\n delta -= np.arctan2(p[:,0], p[:,1])\n\n sta = pt_sta[i] - (l[i] / da[i]) * delta\n off = np.sign(da[i]) * (r - np.linalg.norm(p, axis=1))\n\n g = (sta >= pc_sta[i]) & (sta <= pt_sta[i])\n\n if coords.shape[1] == 3:\n p = np.column_stack([sta, off, coords[f,2]])[g]\n else:\n p = np.column_stack([sta, off])[g]\n\n for n, m in enumerate(f[g]):\n if m not in result:\n result[m] = []\n result[m].append(p[n])\n\n def 
station_coordinates(self, coordinates):\n \"\"\"\n Finds the (station, offset) or (station, offset, z) coordinates\n for the input global coordinates. Returns a dictionary of point\n indices with arrays of shape (N, 2) or (N, 3). If a point index\n is not in the dictionary, then no points are located along\n the alignment within the view threshold.\n\n Parameters\n ----------\n coordinates : array\n An array of (x, y) or (x, y, z) global coordinates in the shape\n (N, 2) or (N, 3).\n \"\"\"\n coordinates = np.asarray(coordinates)\n s = SpatialHash(coordinates[:,:2], self.grid)\n result = {}\n\n self._pot_station_coordinates(result, s, coordinates)\n self._poc_station_coordinates(result, s, coordinates)\n\n for k, x in result.items():\n result[k] = np.array(x, dtype='float')\n\n return result\n\n def plot_plan(self, ax=None, step=1, symbols={}):\n \"\"\"\n Plots a the plan view for the alignment.\n\n Parameters\n ----------\n ax : :class:`matplotlib.axes.Axes`\n The axex to which to add the plot. If None, a new figure and axes\n will be created.\n step : float\n The step interval to use for plotting points along horizontal\n curves.\n symbols : dict\n A dictionary of symbols to use for the plot. The following keys\n are used:\n\n * `pi`: PI point symbol, default is 'r.'\n * `rp`: RP point symbol, default is 'c.'\n * `pc`: PC point symbol, default is 'b.'\n * `pt`: PT point symbol, default is 'b.'\n * `alignment`: Alignment lines, default is 'b-'\n * `stakes`: Stake symbols, default is 'rx'\n\n Examples\n --------\n .. plot:: ../examples/survey/alignment_ex1.py\n :include-source:\n \"\"\"\n if ax is None:\n x = self.pi_coordinates()[:,:2]\n mx = x.max(axis=0)\n c = 0.5 * (mx + x.min(axis=0))\n r = 1.1 * (np.max(mx - c) + self.view_offset + self.view_margin)\n xlim, ylim = np.column_stack([c - r, c + r])\n\n fig = plt.figure()\n ax = fig.add_subplot(111,\n title=self.name,\n xlim=xlim,\n ylim=ylim,\n xlabel='X',\n ylabel='Y',\n aspect='equal'\n )\n ax.grid('major', alpha=0.2)\n\n sym = dict(\n pi='r.',\n rp='c.',\n pc='b.',\n pt='b.',\n alignment='b-',\n stakes='rx'\n )\n sym.update(symbols)\n\n pt = self.pt_coordinates()\n pc = self.pc_coordinates()\n\n if sym['alignment'] is not None:\n for a, b in zip(pt[:-1], pc[1:]):\n x = np.array([a, b])\n ax.plot(x[:,0], x[:,1], sym['alignment'])\n\n for a, b in zip(self.pt_stations(), self.pc_stations()):\n if a != b:\n n = int(np.ceil((a - b) / step))\n sta = np.linspace(b, a, n)\n x = self.coordinates(sta)\n ax.plot(x[:,0], x[:,1], sym['alignment'])\n\n if sym['pi'] is not None:\n x = self.pi_coordinates()\n ax.plot(x[:,0], x[:,1], sym['pi'])\n\n if sym['rp'] is not None:\n x = self.rp_coordinates()\n ax.plot(x[:,0], x[:,1], sym['rp'])\n\n if sym['pt'] is not None:\n ax.plot(pt[:,0], pt[:,1], sym['pt'])\n\n if sym['pc'] is not None:\n ax.plot(pc[:,0], pc[:,1], sym['pc'])\n\n if sym['stakes'] is not None and len(self.stakes) > 0:\n self.set_stake_xy()\n x = np.array(self.stakes)\n ax.plot(x[:,0], x[:,1], sym['stakes'])\n\n return ax\n", "\"\"\"\nCopyright (c) 2019, Matt Pewsey\n\"\"\"\n\nimport numpy as np\nfrom math import cos, sin\n\n__all__ = [\n 'projection_angles',\n 'rotation_matrix2',\n 'rotation_matrix3',\n 'rotate2',\n 'rotate3',\n]\n\n\ndef projection_angles(name):\n \"\"\"\n Returns the rotation angles for the specified projection.\n\n Parameters\n ----------\n name : {'xy', 'xz', 'yz', 'yx', 'zx', 'zy'}\n The name of the projection.\n \"\"\"\n if name == 'xy':\n return 0, 0, 0\n elif name == 'xz':\n return -np.pi/2, 0, 0\n elif name == 
'yz':\n return -np.pi/2, 0, -np.pi/2\n elif name == 'yx':\n return 0, np.pi, np.pi/2\n elif name == 'zx':\n return np.pi/2, np.pi/2, 0\n elif name == 'zy':\n return np.pi, np.pi/2, np.pi\n else:\n raise ValueError('Invalid projection name: {!r}.'.format(name))\n\n\ndef rotation_matrix2(angle):\n \"\"\"\n Returns the 2D rotation matrix.\n\n Parameters\n ----------\n angle : float\n The counter clockwise rotation angle in radians.\n \"\"\"\n c, s = cos(angle), sin(angle)\n return np.array([[c, -s], [s, c]])\n\n\ndef rotation_matrix3(angle_x=0, angle_y=0, angle_z=0):\n \"\"\"\n Returns the 3D rotation matrix.\n\n Parameters\n ----------\n angle : float\n \"\"\"\n if angle_x != 0:\n c, s = cos(angle_x), sin(angle_x)\n r = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])\n else:\n r = np.identity(3)\n\n if angle_y != 0:\n c, s = cos(angle_y), sin(angle_y)\n r = r.dot(np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]))\n\n if angle_z != 0:\n c, s = cos(angle_z), sin(angle_z)\n r = r.dot(np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]))\n\n return r\n\n\ndef rotate2(x, angle, origin=(0, 0)):\n \"\"\"\n Rotates the input 2D vectors by the specified angle.\n\n Parameters\n ----------\n x : array\n One or multiple vectors to rotate.\n angle : float\n The counter clockwise rotation angle in radians.\n origin : array\n The point about which the rotation will be performed.\n \"\"\"\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix2(angle)\n return x.dot(r.T) + origin\n\n\ndef rotate3(x, angle_x=0, angle_y=0, angle_z=0, origin=(0, 0, 0)):\n \"\"\"\n Rotates the input 3D vectors by the specified angles.\n\n Parameters\n ----------\n x : array\n One or multiple vectors to rotate.\n angle_x, angle_y, angle_z : float\n The counter clockwise rotation angles about the x, y, and z axes\n in radians.\n origin : array\n The point about which the rotation will be performed.\n \"\"\"\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix3(angle_x, angle_y, angle_z)\n return x.dot(r.T) + origin\n" ]
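A reading note on the rotation helpers in the record above: rotation_matrix2 builds the standard counter-clockwise planar rotation matrix, and rotate2 applies it about an arbitrary origin by shifting to the origin, rotating, and shifting back. The snippet below is a minimal standalone sketch of that same arithmetic, not code taken from the repository; the sample point, angle, and origin are invented for illustration.

import numpy as np

def rotate2_sketch(x, angle, origin=(0.0, 0.0)):
    # Shift to the rotation origin, apply the CCW rotation matrix, shift back.
    c, s = np.cos(angle), np.sin(angle)
    r = np.array([[c, -s], [s, c]])
    origin = np.asarray(origin)
    return (np.asarray(x) - origin).dot(r.T) + origin

# Rotating (2, 1) by 90 degrees CCW about (1, 1) lands on (1, 2).
print(rotate2_sketch([2.0, 1.0], np.pi / 2, origin=(1.0, 1.0)))  # ~[1., 2.]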
[ [ "numpy.dot", "numpy.expand_dims", "numpy.einsum", "numpy.linspace", "numpy.asarray", "numpy.cumsum", "numpy.arctan2", "numpy.max", "numpy.sin", "numpy.ceil", "numpy.insert", "numpy.column_stack", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.tan", "numpy.append", "numpy.array", "numpy.abs", "numpy.linalg.norm", "numpy.cos", "numpy.sign" ], [ "numpy.asarray", "numpy.array", "numpy.identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
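Between records, a short aid for the alignment sources above: on a tangent (POT) segment, _pot_coordinates converts a (station, offset) pair to global (x, y) as tx * off + ty * (sta - pt_sta) + pt_coord, i.e. start at the PT point, move (sta - pt_sta) along the unit tangent ty, then move off along the unit normal tx. The sketch below replays that formula in isolation; the heading, PT station, PT coordinates, and the offset sign convention are assumptions made only for this example and are not taken from the dataset.

import numpy as np

# Hypothetical tangent segment: PT at station 100.0 located at (50.0, 20.0),
# heading 30 degrees from the x-axis.
heading = np.radians(30.0)
ty = np.array([np.cos(heading), np.sin(heading)])  # unit vector along increasing station
tx = np.array([ty[1], -ty[0]])                     # unit normal used for the offset (sign assumed)
pt_sta = 100.0
pt_coord = np.array([50.0, 20.0])

def tangent_xy(sta, off):
    # Same arithmetic as: result[f] = tx * off + ty * (sta - pt_sta) + pt_coord
    return tx * off + ty * (sta - pt_sta) + pt_coord

print(tangent_xy(110.0, 0.0))  # 10 units past the PT along the tangent
print(tangent_xy(110.0, 5.0))  # the same point pushed 5 units to the offset side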
lfchener/dgl
[ "77f4287a4118db64c46f4f413a426e1419a09d53", "77f4287a4118db64c46f4f413a426e1419a09d53", "77f4287a4118db64c46f4f413a426e1419a09d53", "77f4287a4118db64c46f4f413a426e1419a09d53", "77f4287a4118db64c46f4f413a426e1419a09d53", "77f4287a4118db64c46f4f413a426e1419a09d53" ]
[ "examples/pytorch/rgcn-hetero-ogbn-mag/model.py", "examples/pytorch/ogb/deepwalk/model.py", "python/dgl/nn/pytorch/conv/densechebconv.py", "python/dgl/heterograph_index.py", "python/dgl/nn/tensorflow/conv/edgeconv.py", "examples/pytorch/ogb/ogbn-products/graphsage/main.py" ]
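Before the raw sources for the files listed above, a hedged orientation example: the RGCN model in the first file wraps dglnn.GraphConv modules in a dglnn.HeteroGraphConv, which runs one convolution per relation and merges the per-relation results by destination node type. The sketch below uses a tiny made-up heterograph (the node/edge type names, sizes, and feature dimensions are all invented here) and sets allow_zero_in_degree=True so isolated destination nodes do not raise; it illustrates the general DGL pattern, not an excerpt from the files listed above.

import dgl
import dgl.nn.pytorch as dglnn
import torch

# Toy heterograph: 3 'user' nodes, 2 'game' nodes, two relation types.
g = dgl.heterograph({
    ('user', 'follows', 'user'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
    ('user', 'plays', 'game'): (torch.tensor([0, 2]), torch.tensor([0, 1])),
})

# One GraphConv per relation; HeteroGraphConv sums results per destination type.
conv = dglnn.HeteroGraphConv({
    'follows': dglnn.GraphConv(8, 4, allow_zero_in_degree=True),
    'plays': dglnn.GraphConv(8, 4, allow_zero_in_degree=True),
}, aggregate='sum')

feats = {
    'user': torch.randn(g.num_nodes('user'), 8),
    'game': torch.randn(g.num_nodes('game'), 8),
}
out = conv(g, feats)  # dict keyed by destination node type
print(out['user'].shape, out['game'].shape)  # torch.Size([3, 4]) torch.Size([2, 4])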
[ "from typing import Callable, Dict, List, Union\n\nimport dgl\nimport dgl.nn.pytorch as dglnn\nimport torch\nimport torch.nn as nn\n\n\nclass RelGraphEmbedding(nn.Module):\n def __init__(\n self,\n hg: dgl.DGLHeteroGraph,\n embedding_size: int,\n num_nodes: Dict[str, int],\n node_feats: Dict[str, torch.Tensor],\n node_feats_projection: bool = False,\n ):\n super().__init__()\n self._hg = hg\n self._node_feats = node_feats\n self._node_feats_projection = node_feats_projection\n self.node_embeddings = nn.ModuleDict()\n\n if node_feats_projection:\n self.embeddings = nn.ParameterDict()\n\n for ntype in hg.ntypes:\n if node_feats[ntype] is None:\n node_embedding = nn.Embedding(\n num_nodes[ntype], embedding_size, sparse=True)\n nn.init.uniform_(node_embedding.weight, -1, 1)\n\n self.node_embeddings[ntype] = node_embedding\n elif node_feats[ntype] is not None and node_feats_projection:\n input_embedding_size = node_feats[ntype].shape[-1]\n embedding = nn.Parameter(torch.Tensor(\n input_embedding_size, embedding_size))\n nn.init.xavier_uniform_(embedding)\n\n self.embeddings[ntype] = embedding\n\n def forward(\n self,\n in_nodes: Dict[str, torch.Tensor] = None,\n device: torch.device = None,\n ) -> Dict[str, torch.Tensor]:\n if in_nodes is not None:\n ntypes = [ntype for ntype in in_nodes.keys()]\n nids = [nid for nid in in_nodes.values()]\n else:\n ntypes = self._hg.ntypes\n nids = [self._hg.nodes(ntype) for ntype in ntypes]\n\n x = {}\n\n for ntype, nid in zip(ntypes, nids):\n if self._node_feats[ntype] is None:\n x[ntype] = self.node_embeddings[ntype](nid)\n else:\n if device is not None:\n self._node_feats[ntype] = self._node_feats[ntype].to(\n device)\n\n if self._node_feats_projection:\n x[ntype] = self._node_feats[ntype][nid] @ self.embeddings[ntype]\n else:\n x[ntype] = self._node_feats[ntype][nid]\n\n return x\n\n\nclass RelGraphConvLayer(nn.Module):\n def __init__(\n self,\n in_feats: int,\n out_feats: int,\n rel_names: List[str],\n num_bases: int,\n norm: str = 'right',\n weight: bool = True,\n bias: bool = True,\n activation: Callable[[torch.Tensor], torch.Tensor] = None,\n dropout: float = None,\n self_loop: bool = False,\n ):\n super().__init__()\n self._rel_names = rel_names\n self._num_rels = len(rel_names)\n self._conv = dglnn.HeteroGraphConv({rel: dglnn.GraphConv(\n in_feats, out_feats, norm=norm, weight=False, bias=False) for rel in rel_names})\n self._use_weight = weight\n self._use_basis = num_bases < self._num_rels and weight\n self._use_bias = bias\n self._activation = activation\n self._dropout = nn.Dropout(dropout) if dropout is not None else None\n self._use_self_loop = self_loop\n\n if weight:\n if self._use_basis:\n self.basis = dglnn.WeightBasis(\n (in_feats, out_feats), num_bases, self._num_rels)\n else:\n self.weight = nn.Parameter(torch.Tensor(\n self._num_rels, in_feats, out_feats))\n nn.init.xavier_uniform_(\n self.weight, gain=nn.init.calculate_gain('relu'))\n\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_feats))\n nn.init.zeros_(self.bias)\n\n if self_loop:\n self.self_loop_weight = nn.Parameter(\n torch.Tensor(in_feats, out_feats))\n nn.init.xavier_uniform_(\n self.self_loop_weight, gain=nn.init.calculate_gain('relu'))\n\n def _apply_layers(\n self,\n ntype: str,\n inputs: torch.Tensor,\n inputs_dst: torch.Tensor = None,\n ) -> torch.Tensor:\n x = inputs\n\n if inputs_dst is not None:\n x += torch.matmul(inputs_dst[ntype], self.self_loop_weight)\n\n if self._use_bias:\n x += self.bias\n\n if self._activation is not None:\n x = 
self._activation(x)\n\n if self._dropout is not None:\n x = self._dropout(x)\n\n return x\n\n def forward(\n self,\n hg: dgl.DGLHeteroGraph,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n hg = hg.local_var()\n\n if self._use_weight:\n weight = self.basis() if self._use_basis else self.weight\n weight_dict = {self._rel_names[i]: {'weight': w.squeeze(\n dim=0)} for i, w in enumerate(torch.split(weight, 1, dim=0))}\n else:\n weight_dict = {}\n\n if self._use_self_loop:\n if hg.is_block:\n inputs_dst = {ntype: h[:hg.num_dst_nodes(\n ntype)] for ntype, h in inputs.items()}\n else:\n inputs_dst = inputs\n else:\n inputs_dst = None\n\n x = self._conv(hg, inputs, mod_kwargs=weight_dict)\n x = {ntype: self._apply_layers(ntype, h, inputs_dst)\n for ntype, h in x.items()}\n\n return x\n\n\nclass EntityClassify(nn.Module):\n def __init__(\n self,\n hg: dgl.DGLHeteroGraph,\n in_feats: int,\n hidden_feats: int,\n out_feats: int,\n num_bases: int,\n num_layers: int,\n norm: str = 'right',\n layer_norm: bool = False,\n input_dropout: float = 0,\n dropout: float = 0,\n activation: Callable[[torch.Tensor], torch.Tensor] = None,\n self_loop: bool = False,\n ):\n super().__init__()\n self._hidden_feats = hidden_feats\n self._out_feats = out_feats\n self._num_layers = num_layers\n self._input_dropout = nn.Dropout(input_dropout)\n self._dropout = nn.Dropout(dropout)\n self._activation = activation\n self._rel_names = sorted(list(set(hg.etypes)))\n self._num_rels = len(self._rel_names)\n\n if num_bases < 0 or num_bases > self._num_rels:\n self._num_bases = self._num_rels\n else:\n self._num_bases = num_bases\n\n self._layers = nn.ModuleList()\n\n self._layers.append(RelGraphConvLayer(\n in_feats,\n hidden_feats,\n self._rel_names,\n self._num_bases,\n norm=norm,\n self_loop=self_loop,\n ))\n\n for _ in range(1, num_layers - 1):\n self._layers.append(RelGraphConvLayer(\n hidden_feats,\n hidden_feats,\n self._rel_names,\n self._num_bases,\n norm=norm,\n self_loop=self_loop,\n ))\n\n self._layers.append(RelGraphConvLayer(\n hidden_feats,\n out_feats,\n self._rel_names,\n self._num_bases,\n norm=norm,\n self_loop=self_loop,\n ))\n\n if layer_norm:\n self._layer_norms = nn.ModuleList()\n\n for _ in range(num_layers - 1):\n self._layer_norms.append(nn.LayerNorm(hidden_feats))\n else:\n self._layer_norms = None\n\n def _apply_layers(\n self,\n layer_idx: int,\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n x = inputs\n\n for ntype, h in x.items():\n if self._layer_norms is not None:\n h = self._layer_norms[layer_idx](h)\n\n if self._activation is not None:\n h = self._activation(h)\n\n x[ntype] = self._dropout(h)\n\n return x\n\n def forward(\n self,\n hg: Union[dgl.DGLHeteroGraph, List[dgl.DGLHeteroGraph]],\n inputs: Dict[str, torch.Tensor],\n ) -> Dict[str, torch.Tensor]:\n x = {ntype: self._input_dropout(h) for ntype, h in inputs.items()}\n\n if isinstance(hg, list):\n for i, (layer, block) in enumerate(zip(self._layers, hg)):\n x = layer(block, x)\n\n if i < self._num_layers - 1:\n x = self._apply_layers(i, x)\n else:\n for i, layer in enumerate(self._layers):\n x = layer(hg, x)\n\n if i < self._num_layers - 1:\n x = self._apply_layers(i, x)\n\n return x\n\n def inference(\n self,\n hg: dgl.DGLHeteroGraph,\n batch_size: int,\n num_workers: int,\n embedding_layer: nn.Module,\n device: torch.device,\n ) -> Dict[str, torch.Tensor]:\n for i, layer in enumerate(self._layers):\n sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n dataloader = 
dgl.dataloading.NodeDataLoader(\n hg,\n {ntype: hg.nodes(ntype) for ntype in hg.ntypes},\n sampler,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=num_workers,\n )\n\n if i < self._num_layers - 1:\n y = {ntype: torch.zeros(hg.num_nodes(\n ntype), self._hidden_feats, device=device) for ntype in hg.ntypes}\n else:\n y = {ntype: torch.zeros(hg.num_nodes(\n ntype), self._out_feats, device=device) for ntype in hg.ntypes}\n\n for in_nodes, out_nodes, blocks in dataloader:\n in_nodes = {rel: nid.to(device)\n for rel, nid in in_nodes.items()}\n out_nodes = {rel: nid.to(device)\n for rel, nid in out_nodes.items()}\n block = blocks[0].to(device)\n\n if i == 0:\n h = embedding_layer(in_nodes=in_nodes, device=device)\n else:\n h = {ntype: x[ntype][in_nodes[ntype]]\n for ntype in hg.ntypes}\n\n h = layer(block, h)\n\n if i < self._num_layers - 1:\n h = self._apply_layers(i, h)\n\n for ntype in h:\n y[ntype][out_nodes[ntype]] = h[ntype]\n\n x = y\n\n return x\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport random\nimport numpy as np\nimport dgl.multiprocessing as mp\nfrom dgl.multiprocessing import Queue\n\n\ndef init_emb2pos_index(walk_length, window_size, batch_size):\n ''' select embedding of positive nodes from a batch of node embeddings\n \n Return\n ------\n index_emb_posu torch.LongTensor : the indices of u_embeddings\n index_emb_posv torch.LongTensor : the indices of v_embeddings\n\n Usage\n -----\n # emb_u.shape: [batch_size * walk_length, dim]\n batch_emb2posu = torch.index_select(emb_u, 0, index_emb_posu)\n '''\n idx_list_u = []\n idx_list_v = []\n for b in range(batch_size):\n for i in range(walk_length):\n for j in range(i-window_size, i):\n if j >= 0:\n idx_list_u.append(j + b * walk_length)\n idx_list_v.append(i + b * walk_length)\n for j in range(i + 1, i + 1 + window_size):\n if j < walk_length:\n idx_list_u.append(j + b * walk_length)\n idx_list_v.append(i + b * walk_length)\n\n # [num_pos * batch_size]\n index_emb_posu = torch.LongTensor(idx_list_u)\n index_emb_posv = torch.LongTensor(idx_list_v)\n\n return index_emb_posu, index_emb_posv\n\ndef init_emb2neg_index(walk_length, window_size, negative, batch_size):\n '''select embedding of negative nodes from a batch of node embeddings \n for fast negative sampling\n \n Return\n ------\n index_emb_negu torch.LongTensor : the indices of u_embeddings\n index_emb_negv torch.LongTensor : the indices of v_embeddings\n\n Usage\n -----\n # emb_u.shape: [batch_size * walk_length, dim]\n batch_emb2negu = torch.index_select(emb_u, 0, index_emb_negu)\n '''\n idx_list_u = []\n for b in range(batch_size):\n for i in range(walk_length):\n for j in range(i-window_size, i):\n if j >= 0:\n idx_list_u += [i + b * walk_length] * negative\n for j in range(i+1, i+1+window_size):\n if j < walk_length:\n idx_list_u += [i + b * walk_length] * negative\n \n idx_list_v = list(range(batch_size * walk_length))\\\n * negative * window_size * 2\n random.shuffle(idx_list_v)\n idx_list_v = idx_list_v[:len(idx_list_u)]\n\n # [bs * walk_length * negative]\n index_emb_negu = torch.LongTensor(idx_list_u)\n index_emb_negv = torch.LongTensor(idx_list_v)\n\n return index_emb_negu, index_emb_negv\n\ndef init_weight(walk_length, window_size, batch_size):\n ''' init context weight '''\n weight = []\n for b in range(batch_size):\n for i in range(walk_length):\n for j in range(i-window_size, i):\n if j >= 0:\n weight.append(1. 
- float(i - j - 1)/float(window_size))\n for j in range(i + 1, i + 1 + window_size):\n if j < walk_length:\n weight.append(1. - float(j - i - 1)/float(window_size))\n\n # [num_pos * batch_size]\n return torch.Tensor(weight).unsqueeze(1)\n\ndef init_empty_grad(emb_dimension, walk_length, batch_size):\n \"\"\" initialize gradient matrix \"\"\"\n grad_u = torch.zeros((batch_size * walk_length, emb_dimension))\n grad_v = torch.zeros((batch_size * walk_length, emb_dimension))\n\n return grad_u, grad_v\n\ndef adam(grad, state_sum, nodes, lr, device, only_gpu):\n \"\"\" calculate gradients according to adam \"\"\"\n grad_sum = (grad * grad).mean(1)\n if not only_gpu:\n grad_sum = grad_sum.cpu()\n state_sum.index_add_(0, nodes, grad_sum) # cpu\n std = state_sum[nodes].to(device) # gpu\n std_values = std.sqrt_().add_(1e-10).unsqueeze(1)\n grad = (lr * grad / std_values) # gpu\n\n return grad\n\ndef async_update(num_threads, model, queue):\n \"\"\" asynchronous embedding update \"\"\"\n torch.set_num_threads(num_threads)\n while True:\n (grad_u, grad_v, grad_v_neg, nodes, neg_nodes) = queue.get()\n if grad_u is None:\n return\n with torch.no_grad():\n model.u_embeddings.weight.data.index_add_(0, nodes.view(-1), grad_u)\n model.v_embeddings.weight.data.index_add_(0, nodes.view(-1), grad_v)\n if neg_nodes is not None:\n model.v_embeddings.weight.data.index_add_(0, neg_nodes.view(-1), grad_v_neg)\n\nclass SkipGramModel(nn.Module):\n \"\"\" Negative sampling based skip-gram \"\"\"\n def __init__(self, \n emb_size, \n emb_dimension,\n walk_length,\n window_size,\n batch_size,\n only_cpu,\n only_gpu,\n mix,\n neg_weight,\n negative,\n lr,\n lap_norm,\n fast_neg,\n record_loss,\n norm,\n use_context_weight,\n async_update,\n num_threads,\n ):\n \"\"\" initialize embedding on CPU \n\n Paremeters\n ----------\n emb_size int : number of nodes\n emb_dimension int : embedding dimension\n walk_length int : number of nodes in a sequence\n window_size int : context window size\n batch_size int : number of node sequences in each batch\n only_cpu bool : training with CPU\n only_gpu bool : training with GPU\n mix bool : mixed training with CPU and GPU\n negative int : negative samples for each positve node pair\n neg_weight float : negative weight\n lr float : initial learning rate\n lap_norm float : weight of laplacian normalization\n fast_neg bool : do negative sampling inside a batch\n record_loss bool : print the loss during training\n norm bool : do normalizatin on the embedding after training\n use_context_weight : give different weights to the nodes in a context window\n async_update : asynchronous training\n \"\"\"\n super(SkipGramModel, self).__init__()\n self.emb_size = emb_size\n self.emb_dimension = emb_dimension\n self.walk_length = walk_length\n self.window_size = window_size\n self.batch_size = batch_size\n self.only_cpu = only_cpu\n self.only_gpu = only_gpu\n self.mixed_train = mix\n self.neg_weight = neg_weight\n self.negative = negative\n self.lr = lr\n self.lap_norm = lap_norm\n self.fast_neg = fast_neg\n self.record_loss = record_loss\n self.norm = norm\n self.use_context_weight = use_context_weight\n self.async_update = async_update\n self.num_threads = num_threads\n \n # initialize the device as cpu\n self.device = torch.device(\"cpu\")\n\n # content embedding\n self.u_embeddings = nn.Embedding(\n self.emb_size, self.emb_dimension, sparse=True)\n # context embedding\n self.v_embeddings = nn.Embedding(\n self.emb_size, self.emb_dimension, sparse=True)\n # initialze embedding\n initrange = 1.0 / 
self.emb_dimension\n init.uniform_(self.u_embeddings.weight.data, -initrange, initrange)\n init.constant_(self.v_embeddings.weight.data, 0)\n\n # lookup_table is used for fast sigmoid computing\n self.lookup_table = torch.sigmoid(torch.arange(-6.01, 6.01, 0.01))\n self.lookup_table[0] = 0.\n self.lookup_table[-1] = 1.\n if self.record_loss:\n self.logsigmoid_table = torch.log(torch.sigmoid(torch.arange(-6.01, 6.01, 0.01)))\n self.loss = []\n\n # indexes to select positive/negative node pairs from batch_walks\n self.index_emb_posu, self.index_emb_posv = init_emb2pos_index(\n self.walk_length,\n self.window_size,\n self.batch_size)\n self.index_emb_negu, self.index_emb_negv = init_emb2neg_index(\n self.walk_length,\n self.window_size,\n self.negative,\n self.batch_size)\n\n if self.use_context_weight:\n self.context_weight = init_weight(\n self.walk_length,\n self.window_size,\n self.batch_size)\n\n # adam\n self.state_sum_u = torch.zeros(self.emb_size)\n self.state_sum_v = torch.zeros(self.emb_size)\n\n # gradients of nodes in batch_walks\n self.grad_u, self.grad_v = init_empty_grad(\n self.emb_dimension,\n self.walk_length,\n self.batch_size)\n\n def create_async_update(self):\n \"\"\" Set up the async update subprocess.\n \"\"\"\n self.async_q = Queue(1)\n self.async_p = mp.Process(target=async_update, args=(self.num_threads, self, self.async_q))\n self.async_p.start()\n\n def finish_async_update(self):\n \"\"\" Notify the async update subprocess to quit.\n \"\"\"\n self.async_q.put((None, None, None, None, None))\n self.async_p.join()\n\n def share_memory(self):\n \"\"\" share the parameters across subprocesses \"\"\"\n self.u_embeddings.weight.share_memory_()\n self.v_embeddings.weight.share_memory_()\n self.state_sum_u.share_memory_()\n self.state_sum_v.share_memory_()\n\n def set_device(self, gpu_id):\n \"\"\" set gpu device \"\"\"\n self.device = torch.device(\"cuda:%d\" % gpu_id)\n print(\"The device is\", self.device)\n self.lookup_table = self.lookup_table.to(self.device)\n if self.record_loss:\n self.logsigmoid_table = self.logsigmoid_table.to(self.device)\n self.index_emb_posu = self.index_emb_posu.to(self.device)\n self.index_emb_posv = self.index_emb_posv.to(self.device)\n self.index_emb_negu = self.index_emb_negu.to(self.device)\n self.index_emb_negv = self.index_emb_negv.to(self.device)\n self.grad_u = self.grad_u.to(self.device)\n self.grad_v = self.grad_v.to(self.device)\n if self.use_context_weight:\n self.context_weight = self.context_weight.to(self.device)\n\n def all_to_device(self, gpu_id):\n \"\"\" move all of the parameters to a single GPU \"\"\"\n self.device = torch.device(\"cuda:%d\" % gpu_id)\n self.set_device(gpu_id)\n self.u_embeddings = self.u_embeddings.cuda(gpu_id)\n self.v_embeddings = self.v_embeddings.cuda(gpu_id)\n self.state_sum_u = self.state_sum_u.to(self.device)\n self.state_sum_v = self.state_sum_v.to(self.device)\n\n def fast_sigmoid(self, score):\n \"\"\" do fast sigmoid by looking up in a pre-defined table \"\"\"\n idx = torch.floor((score + 6.01) / 0.01).long()\n return self.lookup_table[idx]\n\n def fast_logsigmoid(self, score):\n \"\"\" do fast logsigmoid by looking up in a pre-defined table \"\"\"\n idx = torch.floor((score + 6.01) / 0.01).long()\n return self.logsigmoid_table[idx]\n\n def fast_learn(self, batch_walks, neg_nodes=None):\n \"\"\" Learn a batch of random walks in a fast way. It has the following features:\n 1. It calculating the gradients directly without the forward operation.\n 2. 
It does sigmoid by a looking up table.\n\n Specifically, for each positive/negative node pair (i,j), the updating procedure is as following:\n score = self.fast_sigmoid(u_embedding[i].dot(v_embedding[j]))\n # label = 1 for positive samples; label = 0 for negative samples.\n u_embedding[i] += (label - score) * v_embedding[j]\n v_embedding[i] += (label - score) * u_embedding[j]\n\n Parameters\n ----------\n batch_walks list : a list of node sequnces\n lr float : current learning rate\n neg_nodes torch.LongTensor : a long tensor of sampled true negative nodes. If neg_nodes is None,\n then do negative sampling randomly from the nodes in batch_walks as an alternative.\n\n Usage example\n -------------\n batch_walks = [torch.LongTensor([1,2,3,4]), \n torch.LongTensor([2,3,4,2])])\n lr = 0.01\n neg_nodes = None\n \"\"\"\n lr = self.lr\n\n # [batch_size, walk_length]\n if isinstance(batch_walks, list):\n nodes = torch.stack(batch_walks)\n elif isinstance(batch_walks, torch.LongTensor):\n nodes = batch_walks\n if self.only_gpu:\n nodes = nodes.to(self.device)\n if neg_nodes is not None:\n neg_nodes = neg_nodes.to(self.device)\n emb_u = self.u_embeddings(nodes).view(-1, self.emb_dimension).to(self.device)\n emb_v = self.v_embeddings(nodes).view(-1, self.emb_dimension).to(self.device)\n\n ## Postive\n bs = len(batch_walks)\n if bs < self.batch_size:\n index_emb_posu, index_emb_posv = init_emb2pos_index(\n self.walk_length, \n self.window_size, \n bs)\n index_emb_posu = index_emb_posu.to(self.device)\n index_emb_posv = index_emb_posv.to(self.device)\n else:\n index_emb_posu = self.index_emb_posu\n index_emb_posv = self.index_emb_posv\n\n # num_pos: the number of positive node pairs generated by a single walk sequence\n # [batch_size * num_pos, dim]\n emb_pos_u = torch.index_select(emb_u, 0, index_emb_posu)\n emb_pos_v = torch.index_select(emb_v, 0, index_emb_posv)\n\n pos_score = torch.sum(torch.mul(emb_pos_u, emb_pos_v), dim=1)\n pos_score = torch.clamp(pos_score, max=6, min=-6)\n # [batch_size * num_pos, 1]\n score = (1 - self.fast_sigmoid(pos_score)).unsqueeze(1)\n if self.record_loss:\n self.loss.append(torch.mean(self.fast_logsigmoid(pos_score)).item())\n\n # [batch_size * num_pos, dim]\n if self.lap_norm > 0:\n grad_u_pos = score * emb_pos_v + self.lap_norm * (emb_pos_v - emb_pos_u)\n grad_v_pos = score * emb_pos_u + self.lap_norm * (emb_pos_u - emb_pos_v)\n else:\n grad_u_pos = score * emb_pos_v\n grad_v_pos = score * emb_pos_u\n\n if self.use_context_weight:\n if bs < self.batch_size:\n context_weight = init_weight(\n self.walk_length,\n self.window_size,\n bs).to(self.device)\n else:\n context_weight = self.context_weight\n grad_u_pos *= context_weight\n grad_v_pos *= context_weight\n\n # [batch_size * walk_length, dim]\n if bs < self.batch_size:\n grad_u, grad_v = init_empty_grad(\n self.emb_dimension, \n self.walk_length, \n bs)\n grad_u = grad_u.to(self.device)\n grad_v = grad_v.to(self.device)\n else:\n self.grad_u = self.grad_u.to(self.device)\n self.grad_u.zero_()\n self.grad_v = self.grad_v.to(self.device)\n self.grad_v.zero_()\n grad_u = self.grad_u\n grad_v = self.grad_v\n grad_u.index_add_(0, index_emb_posu, grad_u_pos)\n grad_v.index_add_(0, index_emb_posv, grad_v_pos)\n\n ## Negative\n if bs < self.batch_size:\n index_emb_negu, index_emb_negv = init_emb2neg_index(\n self.walk_length, self.window_size, self.negative, bs)\n index_emb_negu = index_emb_negu.to(self.device)\n index_emb_negv = index_emb_negv.to(self.device)\n else:\n index_emb_negu = self.index_emb_negu\n index_emb_negv 
= self.index_emb_negv\n emb_neg_u = torch.index_select(emb_u, 0, index_emb_negu)\n \n if neg_nodes is None:\n emb_neg_v = torch.index_select(emb_v, 0, index_emb_negv)\n else:\n emb_neg_v = self.v_embeddings.weight[neg_nodes].to(self.device)\n\n # [batch_size * walk_length * negative, dim]\n neg_score = torch.sum(torch.mul(emb_neg_u, emb_neg_v), dim=1)\n neg_score = torch.clamp(neg_score, max=6, min=-6)\n # [batch_size * walk_length * negative, 1]\n score = - self.fast_sigmoid(neg_score).unsqueeze(1)\n if self.record_loss:\n self.loss.append(self.negative * self.neg_weight * torch.mean(self.fast_logsigmoid(-neg_score)).item())\n\n grad_u_neg = self.neg_weight * score * emb_neg_v\n grad_v_neg = self.neg_weight * score * emb_neg_u\n\n grad_u.index_add_(0, index_emb_negu, grad_u_neg)\n if neg_nodes is None:\n grad_v.index_add_(0, index_emb_negv, grad_v_neg)\n\n ## Update\n nodes = nodes.view(-1)\n\n # use adam optimizer\n grad_u = adam(grad_u, self.state_sum_u, nodes, lr, self.device, self.only_gpu)\n grad_v = adam(grad_v, self.state_sum_v, nodes, lr, self.device, self.only_gpu)\n if neg_nodes is not None:\n grad_v_neg = adam(grad_v_neg, self.state_sum_v, neg_nodes, lr, self.device, self.only_gpu)\n\n if self.mixed_train:\n grad_u = grad_u.cpu()\n grad_v = grad_v.cpu()\n if neg_nodes is not None:\n grad_v_neg = grad_v_neg.cpu()\n else:\n grad_v_neg = None\n\n if self.async_update:\n grad_u.share_memory_()\n grad_v.share_memory_()\n nodes.share_memory_()\n if neg_nodes is not None:\n neg_nodes.share_memory_()\n grad_v_neg.share_memory_()\n self.async_q.put((grad_u, grad_v, grad_v_neg, nodes, neg_nodes))\n \n if not self.async_update:\n self.u_embeddings.weight.data.index_add_(0, nodes.view(-1), grad_u)\n self.v_embeddings.weight.data.index_add_(0, nodes.view(-1), grad_v) \n if neg_nodes is not None:\n self.v_embeddings.weight.data.index_add_(0, neg_nodes.view(-1), grad_v_neg)\n return\n\n def forward(self, pos_u, pos_v, neg_v):\n ''' Do forward and backward. It is designed for future use. '''\n emb_u = self.u_embeddings(pos_u)\n emb_v = self.v_embeddings(pos_v)\n emb_neg_v = self.v_embeddings(neg_v)\n\n score = torch.sum(torch.mul(emb_u, emb_v), dim=1)\n score = torch.clamp(score, max=6, min=-6)\n score = -F.logsigmoid(score)\n\n neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()\n neg_score = torch.clamp(neg_score, max=6, min=-6)\n neg_score = -torch.sum(F.logsigmoid(-neg_score), dim=1)\n\n #return torch.mean(score + neg_score)\n return torch.sum(score), torch.sum(neg_score)\n\n def save_embedding(self, dataset, file_name):\n \"\"\" Write embedding to local file. 
Only used when node ids are numbers.\n\n Parameter\n ---------\n dataset DeepwalkDataset : the dataset\n file_name str : the file name\n \"\"\"\n embedding = self.u_embeddings.weight.cpu().data.numpy()\n if self.norm:\n embedding /= np.sqrt(np.sum(embedding * embedding, 1)).reshape(-1, 1)\n np.save(file_name, embedding)\n\n def save_embedding_pt(self, dataset, file_name):\n \"\"\" For ogb leaderboard.\n \"\"\"\n try:\n max_node_id = max(dataset.node2id.keys())\n if max_node_id + 1 != self.emb_size:\n print(\"WARNING: The node ids are not serial.\")\n\n embedding = torch.zeros(max_node_id + 1, self.emb_dimension)\n index = torch.LongTensor(list(map(lambda id: dataset.id2node[id], list(range(self.emb_size)))))\n embedding.index_add_(0, index, self.u_embeddings.weight.cpu().data)\n\n if self.norm:\n embedding /= torch.sqrt(torch.sum(embedding.mul(embedding), 1) + 1e-6).unsqueeze(1)\n torch.save(embedding, file_name)\n except:\n self.save_embedding_pt_dgl_graph(dataset, file_name)\n\n def save_embedding_pt_dgl_graph(self, dataset, file_name):\n \"\"\" For ogb leaderboard \"\"\"\n embedding = torch.zeros_like(self.u_embeddings.weight.cpu().data)\n valid_seeds = torch.LongTensor(dataset.valid_seeds)\n valid_embedding = self.u_embeddings.weight.cpu().data.index_select(0, \n valid_seeds)\n embedding.index_add_(0, valid_seeds, valid_embedding)\n\n if self.norm:\n embedding /= torch.sqrt(torch.sum(embedding.mul(embedding), 1) + 1e-6).unsqueeze(1)\n\n torch.save(embedding, file_name)\n\n def save_embedding_txt(self, dataset, file_name):\n \"\"\" Write embedding to local file. For future use.\n\n Parameter\n ---------\n dataset DeepwalkDataset : the dataset\n file_name str : the file name\n \"\"\"\n embedding = self.u_embeddings.weight.cpu().data.numpy()\n if self.norm:\n embedding /= np.sqrt(np.sum(embedding * embedding, 1)).reshape(-1, 1)\n with open(file_name, 'w') as f:\n f.write('%d %d\\n' % (self.emb_size, self.emb_dimension))\n for wid in range(self.emb_size):\n e = ' '.join(map(lambda x: str(x), embedding[wid]))\n f.write('%s %s\\n' % (str(dataset.id2node[wid]), e))\n", "\"\"\"Torch Module for DenseChebConv\"\"\"\n# pylint: disable= no-member, arguments-differ, invalid-name\nimport torch as th\nfrom torch import nn\nfrom torch.nn import init\n\n\nclass DenseChebConv(nn.Module):\n r\"\"\"\n\n Description\n -----------\n Chebyshev Spectral Graph Convolution layer from paper `Convolutional\n Neural Networks on Graphs with Fast Localized Spectral Filtering\n <https://arxiv.org/pdf/1606.09375.pdf>`__.\n\n We recommend to use this module when applying ChebConv on dense graphs.\n\n Parameters\n ----------\n in_feats: int\n Dimension of input features :math:`h_i^{(l)}`.\n out_feats: int\n Dimension of output features :math:`h_i^{(l+1)}`.\n k : int\n Chebyshev filter size.\n activation : function, optional\n Activation function, default is ReLu.\n bias : bool, optional\n If True, adds a learnable bias to the output. Default: ``True``.\n\n Example\n -------\n >>> import dgl\n >>> import numpy as np\n >>> import torch as th\n >>> from dgl.nn import DenseChebConv\n >>>\n >>> feat = th.ones(6, 10)\n >>> adj = th.tensor([[0., 0., 1., 0., 0., 0.],\n ... [1., 0., 0., 0., 0., 0.],\n ... [0., 1., 0., 0., 0., 0.],\n ... [0., 0., 1., 0., 0., 1.],\n ... [0., 0., 0., 1., 0., 0.],\n ... 
[0., 0., 0., 0., 0., 0.]])\n >>> conv = DenseChebConv(10, 2, 2)\n >>> res = conv(adj, feat)\n >>> res\n tensor([[-3.3516, -2.4797],\n [-3.3516, -2.4797],\n [-3.3516, -2.4797],\n [-4.5192, -3.0835],\n [-2.5259, -2.0527],\n [-0.5327, -1.0219]], grad_fn=<AddBackward0>)\n\n See also\n --------\n `ChebConv <https://docs.dgl.ai/api/python/nn.pytorch.html#chebconv>`__\n \"\"\"\n def __init__(self,\n in_feats,\n out_feats,\n k,\n bias=True):\n super(DenseChebConv, self).__init__()\n self._in_feats = in_feats\n self._out_feats = out_feats\n self._k = k\n self.W = nn.Parameter(th.Tensor(k, in_feats, out_feats))\n if bias:\n self.bias = nn.Parameter(th.Tensor(out_feats))\n else:\n self.register_buffer('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\"Reinitialize learnable parameters.\"\"\"\n if self.bias is not None:\n init.zeros_(self.bias)\n for i in range(self._k):\n init.xavier_normal_(self.W[i], init.calculate_gain('relu'))\n\n def forward(self, adj, feat, lambda_max=None):\n r\"\"\"\n\n Description\n -----------\n Compute (Dense) Chebyshev Spectral Graph Convolution layer.\n\n Parameters\n ----------\n adj : torch.Tensor\n The adjacency matrix of the graph to apply Graph Convolution on,\n should be of shape :math:`(N, N)`, where a row represents the destination\n and a column represents the source.\n feat : torch.Tensor\n The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`\n is size of input feature, :math:`N` is the number of nodes.\n lambda_max : float or None, optional\n A float value indicates the largest eigenvalue of given graph.\n Default: None.\n\n Returns\n -------\n torch.Tensor\n The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`\n is size of output feature.\n \"\"\"\n A = adj.to(feat)\n num_nodes = A.shape[0]\n\n in_degree = 1 / A.sum(dim=1).clamp(min=1).sqrt()\n D_invsqrt = th.diag(in_degree)\n I = th.eye(num_nodes).to(A)\n L = I - D_invsqrt @ A @ D_invsqrt\n\n if lambda_max is None:\n lambda_ = th.eig(L)[0][:, 0]\n lambda_max = lambda_.max()\n\n L_hat = 2 * L / lambda_max - I\n Z = [th.eye(num_nodes).to(A)]\n for i in range(1, self._k):\n if i == 1:\n Z.append(L_hat)\n else:\n Z.append(2 * L_hat @ Z[-1] - Z[-2])\n\n Zs = th.stack(Z, 0) # (k, n, n)\n\n Zh = (Zs @ feat.unsqueeze(0) @ self.W)\n Zh = Zh.sum(0)\n\n if self.bias is not None:\n Zh = Zh + self.bias\n return Zh\n", "\"\"\"Module for heterogeneous graph index class definition.\"\"\"\nfrom __future__ import absolute_import\n\nimport itertools\nimport numpy as np\nimport scipy\n\nfrom ._ffi.object import register_object, ObjectBase\nfrom ._ffi.function import _init_api\nfrom .base import DGLError, dgl_warning\nfrom .graph_index import from_coo\nfrom . import backend as F\nfrom . 
import utils\n\n@register_object('graph.HeteroGraph')\nclass HeteroGraphIndex(ObjectBase):\n \"\"\"HeteroGraph index object.\n\n Note\n ----\n Do not create GraphIndex directly.\n \"\"\"\n def __new__(cls):\n obj = ObjectBase.__new__(cls)\n obj._cache = {}\n return obj\n\n def __getstate__(self):\n \"\"\"Issue: https://github.com/pytorch/pytorch/issues/32351\n Need to set the tensor created in the __getstate__ function\n as object attribute to avoid potential bugs\n \"\"\"\n self._pk_state = _CAPI_DGLHeteroPickle(self)\n return self._pk_state\n\n def __setstate__(self, state):\n self._cache = {}\n\n # Pickle compatibility check\n # TODO: we should store a storage version number in later releases.\n if isinstance(state, HeteroPickleStates):\n # post-0.4.3\n self.__init_handle_by_constructor__(_CAPI_DGLHeteroUnpickle, state)\n elif isinstance(state, tuple) and len(state) == 3:\n # pre-0.4.2\n metagraph, number_of_nodes, edges = state\n\n self._cache = {}\n # loop over etypes and recover unit graphs\n rel_graphs = []\n for i, edges_per_type in enumerate(edges):\n src_ntype, dst_ntype = metagraph.find_edge(i)\n num_src = number_of_nodes[src_ntype]\n num_dst = number_of_nodes[dst_ntype]\n src_id, dst_id, _ = edges_per_type\n rel_graphs.append(create_unitgraph_from_coo(\n 1 if src_ntype == dst_ntype else 2, num_src, num_dst, src_id, dst_id,\n ['coo', 'csr', ' csc']))\n self.__init_handle_by_constructor__(\n _CAPI_DGLHeteroCreateHeteroGraph, metagraph, rel_graphs)\n\n @property\n def metagraph(self):\n \"\"\"Meta graph\n\n Returns\n -------\n GraphIndex\n The meta graph.\n \"\"\"\n return _CAPI_DGLHeteroGetMetaGraph(self)\n\n def number_of_ntypes(self):\n \"\"\"Return number of node types.\"\"\"\n return self.metagraph.number_of_nodes()\n\n def number_of_etypes(self):\n \"\"\"Return number of edge types.\"\"\"\n return self.metagraph.number_of_edges()\n\n def get_relation_graph(self, etype):\n \"\"\"Get the unitgraph graph of the given edge/relation type.\n\n Parameters\n ----------\n etype : int\n The edge/relation type.\n\n Returns\n -------\n HeteroGraphIndex\n The unitgraph graph.\n \"\"\"\n return _CAPI_DGLHeteroGetRelationGraph(self, int(etype))\n\n def flatten_relations(self, etypes):\n \"\"\"Convert the list of requested unitgraph graphs into a single unitgraph\n graph.\n\n Parameters\n ----------\n etypes : list[int]\n The edge/relation types.\n\n Returns\n -------\n FlattenedHeteroGraph\n A flattened heterograph object\n \"\"\"\n return _CAPI_DGLHeteroGetFlattenedGraph(self, etypes)\n\n def add_nodes(self, ntype, num):\n \"\"\"Add nodes.\n\n Parameters\n ----------\n ntype : int\n Node type\n num : int\n Number of nodes to be added.\n \"\"\"\n _CAPI_DGLHeteroAddVertices(self, int(ntype), int(num))\n self.clear_cache()\n\n def add_edge(self, etype, u, v):\n \"\"\"Add one edge.\n\n Parameters\n ----------\n etype : int\n Edge type\n u : int\n The src node.\n v : int\n The dst node.\n \"\"\"\n _CAPI_DGLHeteroAddEdge(self, int(etype), int(u), int(v))\n self.clear_cache()\n\n def add_edges(self, etype, u, v):\n \"\"\"Add many edges.\n\n Parameters\n ----------\n etype : int\n Edge type\n u : utils.Index\n The src nodes.\n v : utils.Index\n The dst nodes.\n \"\"\"\n _CAPI_DGLHeteroAddEdges(self, int(etype), u.todgltensor(), v.todgltensor())\n self.clear_cache()\n\n def clear(self):\n \"\"\"Clear the graph.\"\"\"\n _CAPI_DGLHeteroClear(self)\n self._cache.clear()\n\n @property\n def dtype(self):\n \"\"\"Return the data type of this graph index.\n\n Returns\n -------\n DGLDataType\n The data 
type of the graph.\n \"\"\"\n return _CAPI_DGLHeteroDataType(self)\n\n @property\n def ctx(self):\n \"\"\"Return the context of this graph index.\n\n Returns\n -------\n DGLContext\n The context of the graph.\n \"\"\"\n return _CAPI_DGLHeteroContext(self)\n\n def bits_needed(self, etype):\n \"\"\"Return the number of integer bits needed to represent the unitgraph graph.\n\n Parameters\n ----------\n etype : int\n The edge type.\n\n Returns\n -------\n int\n The number of bits needed.\n \"\"\"\n stype, dtype = self.metagraph.find_edge(etype)\n if (self.number_of_edges(etype) >= 0x80000000 or\n self.number_of_nodes(stype) >= 0x80000000 or\n self.number_of_nodes(dtype) >= 0x80000000):\n return 64\n else:\n return 32\n\n def asbits(self, bits):\n \"\"\"Transform the graph to a new one with the given number of bits storage.\n\n NOTE: this method only works for immutable graph index\n\n Parameters\n ----------\n bits : int\n The number of integer bits (32 or 64)\n\n Returns\n -------\n HeteroGraphIndex\n The graph index stored using the given number of bits.\n \"\"\"\n return _CAPI_DGLHeteroAsNumBits(self, int(bits))\n\n def copy_to(self, ctx):\n \"\"\"Copy this immutable graph index to the given device context.\n\n NOTE: this method only works for immutable graph index\n\n Parameters\n ----------\n ctx : DGLContext\n The target device context.\n\n Returns\n -------\n HeteroGraphIndex\n The graph index on the given device context.\n \"\"\"\n return _CAPI_DGLHeteroCopyTo(self, ctx.device_type, ctx.device_id)\n\n def shared_memory(self, name, ntypes=None, etypes=None, formats=('coo', 'csr', 'csc')):\n \"\"\"Return a copy of this graph in shared memory\n\n Parameters\n ----------\n name : str\n The name of the shared memory.\n ntypes : list of str\n Name of node types\n etypes : list of str\n Name of edge types\n format : list of str\n Desired formats to be materialized.\n\n Returns\n -------\n HeteroGraphIndex\n The graph index in shared memory\n \"\"\"\n assert len(name) > 0, \"The name of shared memory cannot be empty\"\n assert len(formats) > 0\n for fmt in formats:\n assert fmt in (\"coo\", \"csr\", \"csc\")\n ntypes = [] if ntypes is None else ntypes\n etypes = [] if etypes is None else etypes\n return _CAPI_DGLHeteroCopyToSharedMem(self, name, ntypes, etypes, formats)\n\n def is_multigraph(self):\n \"\"\"Return whether the graph is a multigraph\n The time cost will be O(E)\n\n Returns\n -------\n bool\n True if it is a multigraph, False otherwise.\n \"\"\"\n return bool(_CAPI_DGLHeteroIsMultigraph(self))\n\n def is_readonly(self):\n \"\"\"Return whether the graph index is read-only.\n\n Returns\n -------\n bool\n True if it is a read-only graph, False otherwise.\n \"\"\"\n return bool(_CAPI_DGLHeteroIsReadonly(self))\n\n def number_of_nodes(self, ntype):\n \"\"\"Return the number of nodes.\n\n Parameters\n ----------\n ntype : int\n Node type\n\n Returns\n -------\n int\n The number of nodes\n \"\"\"\n return _CAPI_DGLHeteroNumVertices(self, int(ntype))\n\n def number_of_edges(self, etype):\n \"\"\"Return the number of edges.\n\n Parameters\n ----------\n etype : int\n Edge type\n\n Returns\n -------\n int\n The number of edges\n \"\"\"\n return _CAPI_DGLHeteroNumEdges(self, int(etype))\n\n def has_nodes(self, ntype, vids):\n \"\"\"Return true if the nodes exist.\n\n Parameters\n ----------\n ntype : int\n Node type\n vid : Tensor\n Node IDs\n\n Returns\n -------\n Tensor\n 0-1 array indicating existence\n \"\"\"\n return F.from_dgl_nd(_CAPI_DGLHeteroHasVertices(\n self, int(ntype), 
F.to_dgl_nd(vids)))\n\n def has_edges_between(self, etype, u, v):\n \"\"\"Return true if the edge exists.\n\n Parameters\n ----------\n etype : int\n Edge type\n u : Tensor\n Src node Ids.\n v : Tensor\n Dst node Ids.\n\n Returns\n -------\n Tensor\n 0-1 array indicating existence\n \"\"\"\n return F.from_dgl_nd(_CAPI_DGLHeteroHasEdgesBetween(\n self, int(etype), F.to_dgl_nd(u), F.to_dgl_nd(v)))\n\n def predecessors(self, etype, v):\n \"\"\"Return the predecessors of the node.\n\n Assume that node_type(v) == dst_type(etype). Thus, the ntype argument is omitted.\n\n Parameters\n ----------\n etype : int\n Edge type\n v : int\n The node.\n\n Returns\n -------\n Tensor\n Array of predecessors\n \"\"\"\n return F.from_dgl_nd(_CAPI_DGLHeteroPredecessors(\n self, int(etype), int(v)))\n\n def successors(self, etype, v):\n \"\"\"Return the successors of the node.\n\n Assume that node_type(v) == src_type(etype). Thus, the ntype argument is omitted.\n\n Parameters\n ----------\n etype : int\n Edge type\n v : int\n The node.\n\n Returns\n -------\n Tensor\n Array of successors\n \"\"\"\n return F.from_dgl_nd(_CAPI_DGLHeteroSuccessors(\n self, int(etype), int(v)))\n\n def edge_ids_all(self, etype, u, v):\n \"\"\"Return a triplet of arrays that contains the edge IDs.\n\n Parameters\n ----------\n etype : int\n Edge type\n u : Tensor\n The src nodes.\n v : Tensor\n The dst nodes.\n\n Returns\n -------\n Tensor\n The src nodes.\n Tensor\n The dst nodes.\n Tensor\n The edge ids.\n \"\"\"\n edge_array = _CAPI_DGLHeteroEdgeIdsAll(\n self, int(etype), F.to_dgl_nd(u), F.to_dgl_nd(v))\n\n src = F.from_dgl_nd(edge_array(0))\n dst = F.from_dgl_nd(edge_array(1))\n eid = F.from_dgl_nd(edge_array(2))\n\n return src, dst, eid\n\n def edge_ids_one(self, etype, u, v):\n \"\"\"Return an arrays of edge IDs.\n\n Parameters\n ----------\n etype : int\n Edge type\n u : Tensor\n The src nodes.\n v : Tensor\n The dst nodes.\n\n Returns\n -------\n Tensor\n The edge ids.\n \"\"\"\n eid = F.from_dgl_nd(_CAPI_DGLHeteroEdgeIdsOne(\n self, int(etype), F.to_dgl_nd(u), F.to_dgl_nd(v)))\n return eid\n\n def find_edges(self, etype, eid):\n \"\"\"Return a triplet of arrays that contains the edge IDs.\n\n Parameters\n ----------\n etype : int\n Edge type\n eid : Tensor\n Edge ids.\n\n Returns\n -------\n Tensor\n The src nodes.\n Tensor\n The dst nodes.\n Tensor\n The edge ids.\n \"\"\"\n edge_array = _CAPI_DGLHeteroFindEdges(\n self, int(etype), F.to_dgl_nd(eid))\n\n src = F.from_dgl_nd(edge_array(0))\n dst = F.from_dgl_nd(edge_array(1))\n eid = F.from_dgl_nd(edge_array(2))\n\n return src, dst, eid\n\n def in_edges(self, etype, v):\n \"\"\"Return the in edges of the node(s).\n\n Assume that node_type(v) == dst_type(etype). Thus, the ntype argument is omitted.\n\n Parameters\n ----------\n etype : int\n Edge type\n v : Tensor\n Node IDs.\n\n Returns\n -------\n Tensor\n The src nodes.\n Tensor\n The dst nodes.\n Tensor\n The edge ids.\n \"\"\"\n edge_array = _CAPI_DGLHeteroInEdges_2(self, int(etype), F.to_dgl_nd(v))\n src = F.from_dgl_nd(edge_array(0))\n dst = F.from_dgl_nd(edge_array(1))\n eid = F.from_dgl_nd(edge_array(2))\n return src, dst, eid\n\n def out_edges(self, etype, v):\n \"\"\"Return the out edges of the node(s).\n\n Assume that node_type(v) == src_type(etype). 
Thus, the ntype argument is omitted.\n\n Parameters\n ----------\n etype : int\n Edge type\n v : Tensor\n Node IDs.\n\n Returns\n -------\n Tensor\n The src nodes.\n Tensor\n The dst nodes.\n Tensor\n The edge ids.\n \"\"\"\n edge_array = _CAPI_DGLHeteroOutEdges_2(self, int(etype), F.to_dgl_nd(v))\n src = F.from_dgl_nd(edge_array(0))\n dst = F.from_dgl_nd(edge_array(1))\n eid = F.from_dgl_nd(edge_array(2))\n return src, dst, eid\n\n def edges(self, etype, order=None):\n \"\"\"Return all the edges\n\n Parameters\n ----------\n etype : int\n Edge type\n order : string\n The order of the returned edges. Currently support:\n\n - 'srcdst' : sorted by their src and dst ids.\n - 'eid' : sorted by edge Ids.\n - None : the arbitrary order.\n\n Returns\n -------\n Tensor\n The src nodes.\n Tensor\n The dst nodes.\n Tensor\n The edge ids.\n \"\"\"\n if order is None:\n order = \"\"\n elif order not in ['srcdst', 'eid']:\n raise DGLError(\"Expect order to be one of None, 'srcdst', 'eid', \"\n \"got {}\".format(order))\n edge_array = _CAPI_DGLHeteroEdges(self, int(etype), order)\n src = F.from_dgl_nd(edge_array(0))\n dst = F.from_dgl_nd(edge_array(1))\n eid = F.from_dgl_nd(edge_array(2))\n return src, dst, eid\n\n def in_degrees(self, etype, v):\n \"\"\"Return the in degrees of the nodes.\n\n Assume that node_type(v) == dst_type(etype). Thus, the ntype argument is omitted.\n\n Parameters\n ----------\n etype : int\n Edge type\n v : Tensor\n The nodes.\n\n Returns\n -------\n Tensor\n The in degree array.\n \"\"\"\n return F.from_dgl_nd(_CAPI_DGLHeteroInDegrees(\n self, int(etype), F.to_dgl_nd(v)))\n\n def out_degrees(self, etype, v):\n \"\"\"Return the out degrees of the nodes.\n\n Assume that node_type(v) == src_type(etype). Thus, the ntype argument is omitted.\n\n Parameters\n ----------\n etype : int\n Edge type\n v : Tensor\n The nodes.\n\n Returns\n -------\n Tensor\n The out degree array.\n \"\"\"\n return F.from_dgl_nd(_CAPI_DGLHeteroOutDegrees(\n self, int(etype), F.to_dgl_nd(v)))\n\n def adjacency_matrix(self, etype, transpose, ctx):\n \"\"\"Return the adjacency matrix representation of this graph.\n\n By default, a row of returned adjacency matrix represents the source\n of an edge and the column represents the destination.\n\n When transpose is True, a row represents the destination and a column represents\n the source.\n\n Parameters\n ----------\n etype : int\n Edge type\n transpose : bool\n A flag to transpose the returned adjacency matrix.\n ctx : context\n The context of the returned matrix.\n\n Returns\n -------\n SparseTensor\n The adjacency matrix.\n Tensor\n A index for data shuffling due to sparse format change. Return None\n if shuffle is not required.\n \"\"\"\n if not isinstance(transpose, bool):\n raise DGLError('Expect bool value for \"transpose\" arg,'\n ' but got %s.' 
% (type(transpose)))\n fmt = F.get_preferred_sparse_format()\n rst = _CAPI_DGLHeteroGetAdj(self, int(etype), transpose, fmt)\n # convert to framework-specific sparse matrix\n srctype, dsttype = self.metagraph.find_edge(etype)\n nrows = self.number_of_nodes(dsttype) if transpose else self.number_of_nodes(srctype)\n ncols = self.number_of_nodes(srctype) if transpose else self.number_of_nodes(dsttype)\n nnz = self.number_of_edges(etype)\n if fmt == \"csr\":\n indptr = F.copy_to(F.from_dgl_nd(rst(0)), ctx)\n indices = F.copy_to(F.from_dgl_nd(rst(1)), ctx)\n shuffle = F.copy_to(F.from_dgl_nd(rst(2)), ctx)\n dat = F.ones(nnz, dtype=F.float32, ctx=ctx) # FIXME(minjie): data type\n spmat = F.sparse_matrix(dat, ('csr', indices, indptr), (nrows, ncols))[0]\n return spmat, shuffle\n elif fmt == \"coo\":\n idx = F.copy_to(F.from_dgl_nd(rst(0)), ctx)\n idx = F.reshape(idx, (2, nnz))\n dat = F.ones((nnz,), dtype=F.float32, ctx=ctx)\n adj, shuffle_idx = F.sparse_matrix(\n dat, ('coo', idx), (nrows, ncols))\n return adj, shuffle_idx\n else:\n raise Exception(\"unknown format\")\n\n def adjacency_matrix_tensors(self, etype, transpose, fmt):\n \"\"\"Return the adjacency matrix as a triplet of tensors.\n\n By default, a row of returned adjacency matrix represents the source\n of an edge and the column represents the destination.\n\n When transpose is True, a row represents the destination and a column represents\n the source.\n\n Parameters\n ----------\n etype : int\n Edge type\n transpose : bool\n A flag to transpose the returned adjacency matrix.\n fmt : str\n Indicates the format of returned adjacency matrix.\n\n Returns\n -------\n tuple[int, int, Tensor, Tensor] or tuple[int, int, Tensor, Tensor, Tensor]\n The number of rows and columns, followed by the adjacency matrix tensors\n whose data type and device are the same as those of the graph.\n\n If :attr:`fmt` is ``'coo'``, then the triplet will be\n the row array and column array of the COO representation.\n\n If :attr:`fmt` is ``'csr'``, then the triplet will be\n the index pointer array (``indptr``), indices array, and data array\n of the CSR representation. The data array will contain the edge ID for\n each entry of the adjacency matrix. If the data array is empty, then it is\n equivalent to a consecutive array from zero to the number of edges minus one.\n \"\"\"\n if not isinstance(transpose, bool):\n raise DGLError('Expect bool value for \"transpose\" arg,'\n ' but got %s.' 
% (type(transpose)))\n\n rst = _CAPI_DGLHeteroGetAdj(self, int(etype), transpose, fmt)\n srctype, dsttype = self.metagraph.find_edge(etype)\n nrows = self.number_of_nodes(dsttype) if transpose else self.number_of_nodes(srctype)\n ncols = self.number_of_nodes(srctype) if transpose else self.number_of_nodes(dsttype)\n nnz = self.number_of_edges(etype)\n if fmt == \"csr\":\n indptr = F.from_dgl_nd(rst(0))\n indices = F.from_dgl_nd(rst(1))\n data = F.from_dgl_nd(rst(2))\n return nrows, ncols, indptr, indices, data\n elif fmt == 'coo':\n idx = F.from_dgl_nd(rst(0))\n row, col = F.reshape(idx, (2, nnz))\n return nrows, ncols, row, col\n else:\n raise ValueError(\"unknown format\")\n\n def adjacency_matrix_scipy(self, etype, transpose, fmt, return_edge_ids=None):\n \"\"\"Return the scipy adjacency matrix representation of this graph.\n\n By default, a row of returned adjacency matrix represents the destination\n of an edge and the column represents the source.\n\n When transpose is True, a row represents the source and a column represents\n a destination.\n\n Parameters\n ----------\n etype : int\n Edge type\n transpose : bool\n A flag to transpose the returned adjacency matrix.\n fmt : str\n Indicates the format of returned adjacency matrix.\n return_edge_ids : bool\n Indicates whether to return edge IDs or 1 as elements.\n\n Returns\n -------\n scipy.sparse.spmatrix\n The scipy representation of adjacency matrix.\n \"\"\"\n if return_edge_ids is None:\n dgl_warning(\n \"Adjacency matrix by default currently returns edge IDs.\"\n \" As a result there is one 0 entry which is not eliminated.\"\n \" In the next release it will return 1s by default,\"\n \" and 0 will be eliminated otherwise.\",\n FutureWarning)\n return_edge_ids = True\n\n if fmt == 'csr':\n nrows, ncols, indptr, indices, data = \\\n self.adjacency_matrix_tensors(etype, transpose, fmt)\n indptr = F.asnumpy(indptr)\n indices = F.asnumpy(indices)\n data = F.asnumpy(data)\n\n # Check if edge ID is omitted\n if return_edge_ids and data.shape[0] == 0:\n data = np.arange(self.number_of_edges(etype))\n else:\n data = np.ones_like(indices)\n\n return scipy.sparse.csr_matrix((data, indices, indptr), shape=(nrows, ncols))\n elif fmt == 'coo':\n nrows, ncols, row, col = \\\n self.adjacency_matrix_tensors(etype, transpose, fmt)\n row = F.asnumpy(row)\n col = F.asnumpy(col)\n data = np.arange(self.number_of_edges(etype)) if return_edge_ids \\\n else np.ones_like(row)\n return scipy.sparse.coo_matrix((data, (row, col)), shape=(nrows, ncols))\n else:\n raise ValueError(\"unknown format\")\n\n def incidence_matrix(self, etype, typestr, ctx):\n \"\"\"Return the incidence matrix representation of this graph.\n\n An incidence matrix is an n x m sparse matrix, where n is\n the number of nodes and m is the number of edges. 
Each nnz\n value indicating whether the edge is incident to the node\n or not.\n\n There are three types of an incidence matrix `I`:\n * \"in\":\n - I[v, e] = 1 if e is the in-edge of v (or v is the dst node of e);\n - I[v, e] = 0 otherwise.\n * \"out\":\n - I[v, e] = 1 if e is the out-edge of v (or v is the src node of e);\n - I[v, e] = 0 otherwise.\n * \"both\":\n - I[v, e] = 1 if e is the in-edge of v;\n - I[v, e] = -1 if e is the out-edge of v;\n - I[v, e] = 0 otherwise (including self-loop).\n\n Parameters\n ----------\n etype : int\n Edge type\n typestr : str\n Can be either \"in\", \"out\" or \"both\"\n ctx : context\n The context of returned incidence matrix.\n\n Returns\n -------\n SparseTensor\n The incidence matrix.\n utils.Index\n A index for data shuffling due to sparse format change. Return None\n if shuffle is not required.\n \"\"\"\n src, dst, eid = self.edges(etype)\n srctype, dsttype = self.metagraph.find_edge(etype)\n\n m = self.number_of_edges(etype)\n if typestr == 'in':\n n = self.number_of_nodes(dsttype)\n row = F.unsqueeze(dst, 0)\n col = F.unsqueeze(eid, 0)\n idx = F.copy_to(F.cat([row, col], dim=0), ctx)\n # FIXME(minjie): data type\n dat = F.ones((m,), dtype=F.float32, ctx=ctx)\n inc, shuffle_idx = F.sparse_matrix(dat, ('coo', idx), (n, m))\n elif typestr == 'out':\n n = self.number_of_nodes(srctype)\n row = F.unsqueeze(src, 0)\n col = F.unsqueeze(eid, 0)\n idx = F.copy_to(F.cat([row, col], dim=0), ctx)\n # FIXME(minjie): data type\n dat = F.ones((m,), dtype=F.float32, ctx=ctx)\n inc, shuffle_idx = F.sparse_matrix(dat, ('coo', idx), (n, m))\n elif typestr == 'both':\n assert srctype == dsttype, \\\n \"'both' is supported only if source and destination type are the same\"\n n = self.number_of_nodes(srctype)\n # first remove entries for self loops\n mask = F.logical_not(F.equal(src, dst))\n src = F.boolean_mask(src, mask)\n dst = F.boolean_mask(dst, mask)\n eid = F.boolean_mask(eid, mask)\n n_entries = F.shape(src)[0]\n # create index\n row = F.unsqueeze(F.cat([src, dst], dim=0), 0)\n col = F.unsqueeze(F.cat([eid, eid], dim=0), 0)\n idx = F.copy_to(F.cat([row, col], dim=0), ctx)\n # FIXME(minjie): data type\n x = -F.ones((n_entries,), dtype=F.float32, ctx=ctx)\n y = F.ones((n_entries,), dtype=F.float32, ctx=ctx)\n dat = F.cat([x, y], dim=0)\n inc, shuffle_idx = F.sparse_matrix(dat, ('coo', idx), (n, m))\n else:\n raise DGLError('Invalid incidence matrix type: %s' % str(typestr))\n return inc, shuffle_idx\n\n def node_subgraph(self, induced_nodes, relabel_nodes):\n \"\"\"Return the induced node subgraph.\n\n Parameters\n ----------\n induced_nodes : list of utils.Index\n Induced nodes. The length should be equal to the number of\n node types in this heterograph.\n relabel_nodes : bool\n If True, the extracted subgraph will only have the nodes in the specified node set\n and it will relabel the nodes in order.\n\n Returns\n -------\n SubgraphIndex\n The subgraph index.\n \"\"\"\n vids = [F.to_dgl_nd(nodes) for nodes in induced_nodes]\n return _CAPI_DGLHeteroVertexSubgraph(self, vids, relabel_nodes)\n\n def edge_subgraph(self, induced_edges, preserve_nodes):\n \"\"\"Return the induced edge subgraph.\n\n Parameters\n ----------\n induced_edges : list of utils.Index\n Induced edges. 
The length should be equal to the number of\n edge types in this heterograph.\n preserve_nodes : bool\n Indicates whether to preserve all nodes or not.\n If true, keep the nodes which have no edge connected in the subgraph;\n If false, all nodes without edge connected to it would be removed.\n\n Returns\n -------\n SubgraphIndex\n The subgraph index.\n \"\"\"\n eids = [F.to_dgl_nd(edges) for edges in induced_edges]\n return _CAPI_DGLHeteroEdgeSubgraph(self, eids, preserve_nodes)\n\n def get_unitgraph(self, etype, ctx):\n \"\"\"Create a unitgraph graph from given edge type and copy to the given device\n context.\n\n Note: this internal function is for DGL scheduler use only\n\n Parameters\n ----------\n etype : int\n If the graph index is a Bipartite graph index, this argument must be None.\n Otherwise, it represents the edge type.\n ctx : DGLContext\n The context of the returned graph.\n\n Returns\n -------\n HeteroGraphIndex\n \"\"\"\n g = self.get_relation_graph(etype)\n return g.copy_to(ctx).asbits(self.bits_needed(etype or 0))\n\n def get_csr_shuffle_order(self, etype):\n \"\"\"Return the edge shuffling order when a coo graph is converted to csr format\n\n Parameters\n ----------\n etype : int\n The edge type\n\n Returns\n -------\n tuple of two utils.Index\n The first element of the tuple is the shuffle order for outward graph\n The second element of the tuple is the shuffle order for inward graph\n \"\"\"\n csr = _CAPI_DGLHeteroGetAdj(self, int(etype), False, \"csr\")\n order = csr(2)\n rev_csr = _CAPI_DGLHeteroGetAdj(self, int(etype), True, \"csr\")\n rev_order = rev_csr(2)\n return utils.toindex(order, self.dtype), utils.toindex(rev_order, self.dtype)\n\n def formats(self, formats=None):\n \"\"\"Get a graph index with the specified sparse format(s) or query\n for the usage status of sparse formats\n\n If the graph has multiple edge types, they will have the same\n sparse format.\n\n Parameters\n ----------\n formats : str or list of str or None\n\n * If formats is None, return the usage status of sparse formats\n * Otherwise, it can be ``'coo'``/``'csr'``/``'csc'`` or a sublist of\n them, specifying the sparse formats to use.\n\n Returns\n -------\n dict or GraphIndex\n\n * If formats is None, the result will be a dict recording the usage\n status of sparse formats.\n * Otherwise, a GraphIndex will be returned, which is a clone of the\n original graph with the specified sparse format(s) ``formats``.\n\n \"\"\"\n formats_allowed = _CAPI_DGLHeteroGetAllowedFormats(self)\n formats_created = _CAPI_DGLHeteroGetCreatedFormats(self)\n created = []\n not_created = []\n if formats is None:\n for fmt in ['coo', 'csr', 'csc']:\n if fmt in formats_allowed:\n if fmt in formats_created:\n created.append(fmt)\n else:\n not_created.append(fmt)\n return {\n 'created': created,\n 'not created': not_created\n }\n else:\n if isinstance(formats, str):\n formats = [formats]\n return _CAPI_DGLHeteroGetFormatGraph(self, formats)\n\n def create_formats_(self):\n \"\"\"Create all sparse matrices allowed for the graph.\"\"\"\n return _CAPI_DGLHeteroCreateFormat(self)\n\n def reverse(self):\n \"\"\"Reverse the heterogeneous graph adjacency\n\n The node types and edge types are not changed.\n\n Returns\n -------\n A new graph index.\n \"\"\"\n return _CAPI_DGLHeteroReverse(self)\n\n@register_object('graph.HeteroSubgraph')\nclass HeteroSubgraphIndex(ObjectBase):\n \"\"\"Hetero-subgraph data structure\"\"\"\n @property\n def graph(self):\n \"\"\"The subgraph structure\n\n Returns\n -------\n HeteroGraphIndex\n 
The subgraph\n \"\"\"\n return _CAPI_DGLHeteroSubgraphGetGraph(self)\n\n @property\n def induced_nodes(self):\n \"\"\"Induced nodes for each node type. The return list\n length should be equal to the number of node types.\n\n Returns\n -------\n list of utils.Index\n Induced nodes\n \"\"\"\n ret = _CAPI_DGLHeteroSubgraphGetInducedVertices(self)\n return [F.from_dgl_nd(v) for v in ret]\n\n @property\n def induced_edges(self):\n \"\"\"Induced edges for each edge type. The return list\n length should be equal to the number of edge types.\n\n Returns\n -------\n list of utils.Index\n Induced edges\n \"\"\"\n ret = _CAPI_DGLHeteroSubgraphGetInducedEdges(self)\n return [F.from_dgl_nd(v) for v in ret]\n\n\n#################################################################\n# Creators\n#################################################################\n\ndef create_metagraph_index(ntypes, canonical_etypes):\n \"\"\"Return a GraphIndex instance for a metagraph given the node types and canonical\n edge types.\n\n This function will reorder the node types and canonical edge types.\n\n Parameters\n ----------\n ntypes : Iterable[str]\n The node types.\n canonical_etypes : Iterable[tuple[str, str, str]]\n The canonical edge types.\n\n Returns\n -------\n GraphIndex\n The index object for metagraph.\n list[str]\n The reordered node types for each node in the metagraph.\n list[str]\n The reordered edge types for each edge in the metagraph.\n list[tuple[str, str, str]]\n The reordered canonical edge types for each edge in the metagraph.\n \"\"\"\n # Sort the ntypes and relation tuples to have a deterministic order for the same set\n # of type names.\n ntypes = list(sorted(ntypes))\n relations = list(sorted(canonical_etypes))\n ntype_dict = {ntype: i for i, ntype in enumerate(ntypes)}\n meta_edges_src = []\n meta_edges_dst = []\n etypes = []\n for srctype, etype, dsttype in relations:\n meta_edges_src.append(ntype_dict[srctype])\n meta_edges_dst.append(ntype_dict[dsttype])\n etypes.append(etype)\n # metagraph is DGLGraph, currently still using int64 as index dtype\n metagraph = from_coo(len(ntypes), meta_edges_src, meta_edges_dst, True)\n return metagraph, ntypes, etypes, relations\n\ndef create_unitgraph_from_coo(num_ntypes, num_src, num_dst, row, col,\n formats, row_sorted=False, col_sorted=False):\n \"\"\"Create a unitgraph graph index from COO format\n\n Parameters\n ----------\n num_ntypes : int\n Number of node types (must be 1 or 2).\n num_src : int\n Number of nodes in the src type.\n num_dst : int\n Number of nodes in the dst type.\n row : utils.Index\n Row index.\n col : utils.Index\n Col index.\n formats : list of str.\n Restrict the storage formats allowed for the unit graph.\n row_sorted : bool, optional\n Whether or not the rows of the COO are in ascending order.\n col_sorted : bool, optional\n Whether or not the columns of the COO are in ascending order within\n each row. 
This only has an effect when ``row_sorted`` is True.\n\n Returns\n -------\n HeteroGraphIndex\n \"\"\"\n if isinstance(formats, str):\n formats = [formats]\n return _CAPI_DGLHeteroCreateUnitGraphFromCOO(\n int(num_ntypes), int(num_src), int(num_dst),\n F.to_dgl_nd(row), F.to_dgl_nd(col),\n formats, row_sorted, col_sorted)\n\ndef create_unitgraph_from_csr(num_ntypes, num_src, num_dst, indptr, indices, edge_ids,\n formats, transpose=False):\n \"\"\"Create a unitgraph graph index from CSR format\n\n Parameters\n ----------\n num_ntypes : int\n Number of node types (must be 1 or 2).\n num_src : int\n Number of nodes in the src type.\n num_dst : int\n Number of nodes in the dst type.\n indptr : utils.Index\n CSR indptr.\n indices : utils.Index\n CSR indices.\n edge_ids : utils.Index\n Edge shuffle id.\n formats : str\n Restrict the storage formats allowed for the unit graph.\n transpose : bool, optional\n If True, treats the input matrix as CSC.\n\n Returns\n -------\n HeteroGraphIndex\n \"\"\"\n if isinstance(formats, str):\n formats = [formats]\n return _CAPI_DGLHeteroCreateUnitGraphFromCSR(\n int(num_ntypes), int(num_src), int(num_dst),\n F.to_dgl_nd(indptr), F.to_dgl_nd(indices), F.to_dgl_nd(edge_ids),\n formats, transpose)\n\ndef create_heterograph_from_relations(metagraph, rel_graphs, num_nodes_per_type):\n \"\"\"Create a heterograph from metagraph and graphs of every relation.\n\n Parameters\n ----------\n metagraph : GraphIndex\n Meta-graph.\n rel_graphs : list of HeteroGraphIndex\n Bipartite graph of each relation.\n num_nodes_per_type : utils.Index, optional\n Number of nodes per node type\n\n Returns\n -------\n HeteroGraphIndex\n \"\"\"\n if num_nodes_per_type is None:\n return _CAPI_DGLHeteroCreateHeteroGraph(metagraph, rel_graphs)\n else:\n return _CAPI_DGLHeteroCreateHeteroGraphWithNumNodes(\n metagraph, rel_graphs, num_nodes_per_type.todgltensor())\n\ndef create_heterograph_from_shared_memory(name):\n \"\"\"Create a heterograph from shared memory with the given name.\n\n Paramaters\n ----------\n name : str\n The name of the share memory\n\n Returns\n -------\n HeteroGraphIndex (in shared memory)\n ntypes : list of str\n Names of node types\n etypes : list of str\n Names of edge types\n \"\"\"\n g, ntypes, etypes = _CAPI_DGLHeteroCreateFromSharedMem(name)\n return g, list(ntypes), list(etypes)\n\ndef joint_union(metagraph, gidx_list):\n \"\"\"Return a joint union of the input heterographs.\n\n Parameters\n ----------\n metagraph : GraphIndex\n Meta-graph.\n gidx_list : list of HeteroGraphIndex\n Heterographs to be joint_unioned.\n\n Returns\n -------\n HeteroGraphIndex\n joint_unioned Heterograph.\n \"\"\"\n return _CAPI_DGLHeteroJointUnion(metagraph, gidx_list)\n\ndef disjoint_union(metagraph, graphs):\n \"\"\"Return a disjoint union of the input heterographs.\n\n Parameters\n ----------\n metagraph : GraphIndex\n Meta-graph.\n graphs : list of HeteroGraphIndex\n Heterographs to be batched.\n\n Returns\n -------\n HeteroGraphIndex\n Batched Heterograph.\n \"\"\"\n return _CAPI_DGLHeteroDisjointUnion_v2(metagraph, graphs)\n\ndef disjoint_partition(graph, bnn_all_types, bne_all_types):\n \"\"\"Partition the graph disjointly.\n\n Parameters\n ----------\n graph : HeteroGraphIndex\n The graph to be partitioned.\n bnn_all_types : list of list of int\n bnn_all_types[t] gives the number of nodes with t-th type in the batch.\n bne_all_types : list of list of int\n bne_all_types[t] gives the number of edges with t-th type in the batch.\n\n Returns\n --------\n list of HeteroGraphIndex\n 
Heterographs unbatched.\n \"\"\"\n bnn_all_types = utils.toindex(list(itertools.chain.from_iterable(bnn_all_types)))\n bne_all_types = utils.toindex(list(itertools.chain.from_iterable(bne_all_types)))\n return _CAPI_DGLHeteroDisjointPartitionBySizes_v2(\n graph, bnn_all_types.todgltensor(), bne_all_types.todgltensor())\n\ndef slice_gidx(graph, num_nodes, start_nid, num_edges, start_eid):\n \"\"\"Slice a chunk of the graph.\n\n Parameters\n ----------\n graph : HeteroGraphIndex\n The batched graph to slice.\n num_nodes : utils.Index\n Number of nodes per node type in the result graph.\n start_nid : utils.Index\n Start node ID per node type in the result graph.\n num_edges : utils.Index\n Number of edges per edge type in the result graph.\n start_eid : utils.Index\n Start edge ID per edge type in the result graph.\n\n Returns\n -------\n HeteroGraphIndex\n The sliced graph.\n \"\"\"\n return _CAPI_DGLHeteroSlice(\n graph, num_nodes.todgltensor(), start_nid.todgltensor(),\n num_edges.todgltensor(), start_eid.todgltensor())\n\n#################################################################\n# Data structure used by C APIs\n#################################################################\n\n@register_object(\"graph.FlattenedHeteroGraph\")\nclass FlattenedHeteroGraph(ObjectBase):\n \"\"\"FlattenedHeteroGraph object class in C++ backend.\"\"\"\n\n@register_object(\"graph.HeteroPickleStates\")\nclass HeteroPickleStates(ObjectBase):\n \"\"\"Pickle states object class in C++ backend.\"\"\"\n @property\n def version(self):\n \"\"\"Version number\n\n Returns\n -------\n int\n version number\n \"\"\"\n return _CAPI_DGLHeteroPickleStatesGetVersion(self)\n\n @property\n def meta(self):\n \"\"\"Meta info\n\n Returns\n -------\n bytearray\n Serialized meta info\n \"\"\"\n return bytearray(_CAPI_DGLHeteroPickleStatesGetMeta(self))\n\n @property\n def arrays(self):\n \"\"\"Arrays representing the graph structure (COO or CSR)\n\n Returns\n -------\n list of dgl.ndarray.NDArray\n Arrays\n \"\"\"\n num_arr = _CAPI_DGLHeteroPickleStatesGetArraysNum(self)\n arr_func = _CAPI_DGLHeteroPickleStatesGetArrays(self)\n return [arr_func(i) for i in range(num_arr)]\n\n def __getstate__(self):\n \"\"\"Issue: https://github.com/pytorch/pytorch/issues/32351\n Need to set the tensor created in the __getstate__ function\n as object attribute to avoid potential bugs\n \"\"\"\n self._pk_arrays = [F.zerocopy_from_dgl_ndarray(arr) for arr in self.arrays]\n return self.version, self.meta, self._pk_arrays\n\n def __setstate__(self, state):\n if isinstance(state[0], int):\n _, meta, arrays = state\n arrays = [F.zerocopy_to_dgl_ndarray(arr) for arr in arrays]\n self.__init_handle_by_constructor__(\n _CAPI_DGLCreateHeteroPickleStates, meta, arrays)\n else:\n metagraph, num_nodes_per_type, adjs = state\n num_nodes_per_type = F.zerocopy_to_dgl_ndarray(num_nodes_per_type)\n self.__init_handle_by_constructor__(\n _CAPI_DGLCreateHeteroPickleStatesOld, metagraph, num_nodes_per_type, adjs)\n\n_init_api(\"dgl.heterograph_index\")\n", "\"\"\"Tensorflow modules for EdgeConv Layer\"\"\"\n# pylint: disable= no-member, arguments-differ, invalid-name\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\nfrom .... import function as fn\nfrom ....base import DGLError\nfrom ....utils import expand_as_pair\n\n\nclass EdgeConv(layers.Layer):\n r\"\"\"\n Description\n -----------\n EdgeConv layer.\n Introduced in \"`Dynamic Graph CNN for Learning on Point Clouds\n <https://arxiv.org/pdf/1801.07829>`__\". Can be described as follows:\n .. 
math::\n h_i^{(l+1)} = \\max_{j \\in \\mathcal{N}(i)} (\n \\Theta \\cdot (h_j^{(l)} - h_i^{(l)}) + \\Phi \\cdot h_i^{(l)})\n where :math:`\\mathcal{N}(i)` is the neighbor of :math:`i`.\n :math:`\\Theta` and :math:`\\Phi` are linear layers.\n .. note::\n The original formulation includes a ReLU inside the maximum operator.\n This is equivalent to first applying a maximum operator then applying\n the ReLU.\n Parameters\n ----------\n in_feat : int\n Input feature size; i.e, the number of dimensions of :math:`h_j^{(l)}`.\n out_feat : int\n Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.\n batch_norm : bool\n Whether to include batch normalization on messages. Default: ``False``.\n allow_zero_in_degree : bool, optional\n If there are 0-in-degree nodes in the graph, output for those nodes will be invalid\n since no message will be passed to those nodes. This is harmful for some applications\n causing silent performance regression. This module will raise a DGLError if it detects\n 0-in-degree nodes in input graph. By setting ``True``, it will suppress the check\n and let the users handle it by themselves. Default: ``False``.\n Note\n ----\n Zero in-degree nodes will lead to invalid output value. This is because no message\n will be passed to those nodes, the aggregation function will be appied on empty input.\n A common practice to avoid this is to add a self-loop for each node in the graph if\n it is homogeneous, which can be achieved by:\n >>> g = ... # a DGLGraph\n >>> g = dgl.add_self_loop(g)\n Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graph\n since the edge type can not be decided for self_loop edges. Set ``allow_zero_in_degree``\n to ``True`` for those cases to unblock the code and handle zere-in-degree nodes manually.\n A common practise to handle this is to filter out the nodes with zere-in-degree when use\n after conv.\n \"\"\"\n def __init__(self,\n out_feats,\n batch_norm=False,\n allow_zero_in_degree=False):\n super(EdgeConv, self).__init__()\n self.batch_norm = batch_norm\n self._allow_zero_in_degree = allow_zero_in_degree\n\n self.theta = layers.Dense(out_feats)\n self.phi = layers.Dense(out_feats)\n if batch_norm:\n self.bn = layers.BatchNormalization()\n\n def set_allow_zero_in_degree(self, set_value):\n r\"\"\"\n Description\n -----------\n Set allow_zero_in_degree flag.\n Parameters\n ----------\n set_value : bool\n The value to be set to the flag.\n \"\"\"\n self._allow_zero_in_degree = set_value\n\n def call(self, g, feat):\n \"\"\"\n Description\n -----------\n Forward computation\n Parameters\n ----------\n g : DGLGraph\n The graph.\n feat : tf.Tensor or pair of tf.Tensor\n :math:`(N, D)` where :math:`N` is the number of nodes and\n :math:`D` is the number of feature dimensions.\n If a pair of tensors is given, the graph must be a uni-bipartite graph\n with only one edge type, and the two tensors must have the same\n dimensionality on all except the first axis.\n Returns\n -------\n tf.Tensor or pair of tf.Tensor\n New node features.\n Raises\n ------\n DGLError\n If there are 0-in-degree nodes in the input graph, it will raise DGLError\n since no message will be passed to those nodes. 
This will cause invalid output.\n The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.\n \"\"\"\n with g.local_scope():\n if not self._allow_zero_in_degree:\n if tf.math.count_nonzero(g.in_degrees() == 0) > 0:\n raise DGLError('There are 0-in-degree nodes in the graph, '\n 'output for those nodes will be invalid. '\n 'This is harmful for some applications, '\n 'causing silent performance regression. '\n 'Adding self-loop on the input graph by '\n 'calling `g = dgl.add_self_loop(g)` will resolve '\n 'the issue. Setting ``allow_zero_in_degree`` '\n 'to be `True` when constructing this module will '\n 'suppress the check and let the code run.')\n h_src, h_dst = expand_as_pair(feat, g)\n g.srcdata['x'] = h_src\n g.dstdata['x'] = h_dst\n g.apply_edges(fn.v_sub_u('x', 'x', 'theta'))\n g.edata['theta'] = self.theta(g.edata['theta'])\n g.dstdata['phi'] = self.phi(g.dstdata['x'])\n if not self.batch_norm:\n g.update_all(fn.e_add_v('theta', 'phi', 'e'), fn.max('e', 'x'))\n else:\n g.apply_edges(fn.e_add_v('theta', 'phi', 'e'))\n # for more comments on why global batch norm instead\n # of batch norm within EdgeConv go to\n # https://github.com/dmlc/dgl/blob/master/python/dgl/nn/pytorch/conv/edgeconv.py\n g.edata['e'] = self.bn(g.edata['e'])\n g.update_all(fn.copy_e('e', 'e'), fn.max('e', 'x'))\n return g.dstdata['x']\n", "import dgl\nimport numpy as np\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport dgl.nn.pytorch as dglnn\nimport time\nimport argparse\nimport tqdm\nfrom ogb.nodeproppred import DglNodePropPredDataset\n\nclass SAGE(nn.Module):\n def __init__(self,\n in_feats,\n n_hidden,\n n_classes,\n n_layers,\n activation,\n dropout):\n super().__init__()\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.n_classes = n_classes\n self.layers = nn.ModuleList()\n self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))\n for i in range(1, n_layers - 1):\n self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))\n self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))\n self.dropout = nn.Dropout(dropout)\n self.activation = activation\n\n def forward(self, blocks, x):\n h = x\n for l, (layer, block) in enumerate(zip(self.layers, blocks)):\n # We need to first copy the representation of nodes on the RHS from the\n # appropriate nodes on the LHS.\n # Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst\n # would be (num_nodes_RHS, D)\n h_dst = h[:block.num_dst_nodes()]\n # Then we compute the updated representation on the RHS.\n # The shape of h now becomes (num_nodes_RHS, D)\n h = layer(block, (h, h_dst))\n if l != len(self.layers) - 1:\n h = self.activation(h)\n h = self.dropout(h)\n return h\n\n def inference(self, g, x, device):\n \"\"\"\n Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).\n g : the entire graph.\n x : the input of entire node set.\n The inference code is written in a fashion that it could handle any number of nodes and\n layers.\n \"\"\"\n # During inference with sampling, multi-layer blocks are very inefficient because\n # lots of computations in the first few layers are repeated.\n # Therefore, we compute the representation of all nodes layer by layer. 
The nodes\n # on each layer are of course splitted in batches.\n # TODO: can we standardize this?\n for l, layer in enumerate(self.layers):\n y = th.zeros(g.num_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes).to(device)\n\n sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n dataloader = dgl.dataloading.NodeDataLoader(\n g,\n th.arange(g.num_nodes()),\n sampler,\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=args.num_workers)\n\n for input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):\n block = blocks[0].int().to(device)\n\n h = x[input_nodes]\n h_dst = h[:block.num_dst_nodes()]\n h = layer(block, (h, h_dst))\n if l != len(self.layers) - 1:\n h = self.activation(h)\n h = self.dropout(h)\n\n y[output_nodes] = h\n\n x = y\n return y\n\ndef compute_acc(pred, labels):\n \"\"\"\n Compute the accuracy of prediction given the labels.\n \"\"\"\n return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)\n\ndef evaluate(model, g, nfeat, labels, val_nid, test_nid, device):\n \"\"\"\n Evaluate the model on the validation set specified by ``val_mask``.\n g : The entire graph.\n inputs : The features of all the nodes.\n labels : The labels of all the nodes.\n val_mask : A 0-1 mask indicating which nodes do we actually compute the accuracy for.\n device : The GPU device to evaluate on.\n \"\"\"\n model.eval()\n with th.no_grad():\n pred = model.inference(g, nfeat, device)\n model.train()\n return compute_acc(pred[val_nid], labels[val_nid]), compute_acc(pred[test_nid], labels[test_nid]), pred\n\ndef load_subtensor(nfeat, labels, seeds, input_nodes):\n \"\"\"\n Extracts features and labels for a set of nodes.\n \"\"\"\n batch_inputs = nfeat[input_nodes]\n batch_labels = labels[seeds]\n return batch_inputs, batch_labels\n\n#### Entry point\ndef run(args, device, data):\n # Unpack data\n train_nid, val_nid, test_nid, in_feats, labels, n_classes, nfeat, g = data\n\n # Create PyTorch DataLoader for constructing blocks\n sampler = dgl.dataloading.MultiLayerNeighborSampler(\n [int(fanout) for fanout in args.fan_out.split(',')])\n dataloader = dgl.dataloading.NodeDataLoader(\n g,\n train_nid,\n sampler,\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=False,\n num_workers=args.num_workers)\n\n # Define model and optimizer\n model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu, args.dropout)\n model = model.to(device)\n loss_fcn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\n # Training loop\n avg = 0\n iter_tput = []\n best_eval_acc = 0\n best_test_acc = 0\n for epoch in range(args.num_epochs):\n tic = time.time()\n\n # Loop over the dataloader to sample the computation dependency graph as a list of\n # blocks.\n for step, (input_nodes, seeds, blocks) in enumerate(dataloader):\n tic_step = time.time()\n\n # copy block to gpu\n blocks = [blk.int().to(device) for blk in blocks]\n\n # Load the input features as well as output labels\n batch_inputs, batch_labels = load_subtensor(nfeat, labels, seeds, input_nodes)\n\n # Compute loss and prediction\n batch_pred = model(blocks, batch_inputs)\n loss = loss_fcn(batch_pred, batch_labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n iter_tput.append(len(seeds) / (time.time() - tic_step))\n if step % args.log_every == 0:\n acc = compute_acc(batch_pred, batch_labels)\n gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0\n print('Epoch {:05d} | Step 
{:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'.format(\n epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))\n\n toc = time.time()\n print('Epoch Time(s): {:.4f}'.format(toc - tic))\n if epoch >= 5:\n avg += toc - tic\n if epoch % args.eval_every == 0 and epoch != 0:\n eval_acc, test_acc, pred = evaluate(model, g, nfeat, labels, val_nid, test_nid, device)\n if args.save_pred:\n np.savetxt(args.save_pred + '%02d' % epoch, pred.argmax(1).cpu().numpy(), '%d')\n print('Eval Acc {:.4f}'.format(eval_acc))\n if eval_acc > best_eval_acc:\n best_eval_acc = eval_acc\n best_test_acc = test_acc\n print('Best Eval Acc {:.4f} Test Acc {:.4f}'.format(best_eval_acc, best_test_acc))\n\n print('Avg epoch time: {}'.format(avg / (epoch - 4)))\n return best_test_acc\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(\"multi-gpu training\")\n argparser.add_argument('--gpu', type=int, default=0,\n help=\"GPU device ID. Use -1 for CPU training\")\n argparser.add_argument('--num-epochs', type=int, default=20)\n argparser.add_argument('--num-hidden', type=int, default=256)\n argparser.add_argument('--num-layers', type=int, default=3)\n argparser.add_argument('--fan-out', type=str, default='5,10,15')\n argparser.add_argument('--batch-size', type=int, default=1000)\n argparser.add_argument('--val-batch-size', type=int, default=10000)\n argparser.add_argument('--log-every', type=int, default=20)\n argparser.add_argument('--eval-every', type=int, default=1)\n argparser.add_argument('--lr', type=float, default=0.003)\n argparser.add_argument('--dropout', type=float, default=0.5)\n argparser.add_argument('--num-workers', type=int, default=4,\n help=\"Number of sampling processes. Use 0 for no extra process.\")\n argparser.add_argument('--save-pred', type=str, default='')\n argparser.add_argument('--wd', type=float, default=0)\n args = argparser.parse_args()\n \n if args.gpu >= 0:\n device = th.device('cuda:%d' % args.gpu)\n else:\n device = th.device('cpu')\n\n # load ogbn-products data\n data = DglNodePropPredDataset(name='ogbn-products')\n splitted_idx = data.get_idx_split()\n train_idx, val_idx, test_idx = splitted_idx['train'], splitted_idx['valid'], splitted_idx['test']\n graph, labels = data[0]\n nfeat = graph.ndata.pop('feat').to(device)\n labels = labels[:, 0].to(device)\n\n in_feats = nfeat.shape[1]\n n_classes = (labels.max() + 1).item()\n # Create csr/coo/csc formats before launching sampling processes\n # This avoids creating certain formats in each data loader process, which saves momory and CPU.\n graph.create_formats_()\n # Pack data\n data = train_idx, val_idx, test_idx, in_feats, labels, n_classes, nfeat, graph\n\n # Run 10 times\n test_accs = []\n for i in range(10):\n test_accs.append(run(args, device, data).cpu().numpy())\n print('Average test accuracy:', np.mean(test_accs), '±', np.std(test_accs))\n" ]
[ [ "torch.nn.init.calculate_gain", "torch.nn.Dropout", "torch.nn.init.uniform_", "torch.Tensor", "torch.split", "torch.nn.ParameterDict", "torch.nn.ModuleDict", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.matmul", "torch.nn.init.xavier_uniform_", "torch.nn.init.zeros_" ], [ "torch.nn.init.uniform_", "torch.zeros", "torch.sum", "torch.nn.Embedding", "torch.set_num_threads", "torch.no_grad", "torch.device", "torch.save", "numpy.save", "torch.mul", "torch.arange", "torch.index_select", "torch.LongTensor", "torch.floor", "torch.nn.init.constant_", "torch.nn.functional.logsigmoid", "torch.stack", "numpy.sum", "torch.Tensor", "torch.clamp" ], [ "torch.nn.init.calculate_gain", "torch.Tensor", "torch.eye", "torch.eig", "torch.nn.init.zeros_", "torch.stack", "torch.diag" ], [ "scipy.sparse.coo_matrix", "numpy.ones_like", "scipy.sparse.csr_matrix" ], [ "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.BatchNormalization" ], [ "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.nn.ModuleList", "torch.cuda.max_memory_allocated", "numpy.std", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "torch.device", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vishalbelsare/GNN_tf_2.x
[ "4b6429ed58f2c0922257600a9287d5cc5a10395b" ]
[ "graph_class.py" ]
[ "# coding=utf-8\n\nimport os\nimport shutil\n\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.sparse import coo_matrix\n\n\n#######################################################################################################################\n## GRAPH OBJECT CLASS #################################################################################################\n#######################################################################################################################\nclass GraphObject:\n ## CONSTRUCTORS METHODS ###########################################################################################\n def __init__(self, arcs, nodes, targets,\n problem_based: str = 'n',\n set_mask=None,\n output_mask=None,\n sample_weights=1,\n NodeGraph=None,\n ArcNode=None,\n aggregation_mode: str = 'average'):\n \"\"\" CONSTRUCTOR METHOD\n\n :param arcs: Ordered Arcs Matrix where arcs[i] = [ID Node From | ID NodeTo | Arc Label].\n :param nodes: Ordered Nodes Matrix where nodes[i] = [Node Label].\n :param targets: Targets Array with shape (Num of targeted example [nodes or arcs], dim_target example).\n :param problem_based: (str) define the problem on which graph is used: 'a' arcs-based, 'g' graph-based, 'n' node-based.\n :param set_mask: Array of {0,1} to define arcs/nodes belonging to a set, when dataset == single GraphObject.\n :param output_mask: Array of {0,1} to define the sub-set of arcs/nodes whose target is known.\n :param sample_weights: target sample weight for loss computation. It can be int, float or numpy.array of ints or floats\n > If int, all targets are weighted as sample_weights * ones.\n > If numpy.array, len(sample_weights) and targets.shape[0] must agree.\n :param NodeGraph: Matrix (nodes.shape[0],{Num graphs or 1}) used only when problem_based=='g'.\n :param ArcNode: Matrix of shape (num_of_arcs, num_of_nodes) s.t. A[i,j]=value if arc[i,2]==node[j].\n :param aggregation_mode: (str) It defines the aggregation mode for the incoming message of a node using ArcNode and Adjacency:\n > 'average': elem(matrix)={0-1} -> matmul(m,A) gives the average of incoming messages, s.t. sum(A[:,i])=1;\n > 'normalized': elem(matrix)={0-1} -> matmul(m,A) gives the normalized message wrt the total number of g.nodes;\n > 'sum': elem(matrix)={0,1} -> matmul(m,A) gives the total sum of incoming messages. 
In this case Adjacency\n \"\"\"\n self.dtype = tf.keras.backend.floatx()\n\n # store arcs, nodes, targets\n self.arcs = arcs.astype(self.dtype)\n self.nodes = nodes.astype(self.dtype)\n self.targets = targets.astype(self.dtype)\n self.sample_weights = sample_weights * np.ones(self.targets.shape[0])\n\n # store dimensions\n self.DIM_NODE_LABEL = nodes.shape[1]\n self.DIM_ARC_LABEL = (arcs.shape[1] - 2) # first two columns contain nodes indices\n self.DIM_TARGET = targets.shape[1]\n\n # setting the problem type: node, arcs or graph based + check existence of passed parameters in keys\n lenMask = {'n': nodes.shape[0], 'a': arcs.shape[0], 'g': nodes.shape[0]}\n\n # build set_mask, for a dataset composed of only a single graph: its nodes have to be divided in Tr, Va and Te\n self.set_mask = np.ones(lenMask[problem_based], dtype=bool) if set_mask is None else set_mask.astype(bool)\n # build output_mask\n self.output_mask = np.ones(len(self.set_mask), dtype=bool) if output_mask is None else output_mask.astype(bool)\n\n # check lengths: output_mask must be as long as set_mask\n if len(self.set_mask) != len(self.output_mask): raise ValueError('Error - len(<set_mask>) != len(<output_mask>)')\n\n # nodes and arcs aggregation\n if aggregation_mode not in ['average', 'normalized', 'sum']: raise ValueError(\"ERROR: Unknown aggregation mode\")\n self.aggregation_mode = aggregation_mode\n\n # build ArcNode matrix or acquire it from input\n self.ArcNode = self.buildArcNode() if ArcNode is None else ArcNode.astype(self.dtype)\n\n # build Adjancency Matrix. Note that it can be an Aggregated Version of the 'normal' Adjacency Matrix (with only 0 and 1)\n self.Adjacency = self.buildAdiacency()\n\n # build node_graph conversion matrix\n self.NodeGraph = self.buildNodeGraph(problem_based) if NodeGraph is None else NodeGraph.astype(self.dtype)\n\n # -----------------------------------------------------------------------------------------------------------------\n def copy(self):\n \"\"\" COPY METHOD\n\n :return: a Deep Copy of the GraphObject instance.\n \"\"\"\n return GraphObject(arcs=self.getArcs(), nodes=self.getNodes(), targets=self.getTargets(), set_mask=self.getSetMask(),\n output_mask=self.getOutputMask(), sample_weights=self.getSampleWeights(), NodeGraph=self.getNodeGraph(),\n aggregation_mode=self.aggregation_mode)\n\n # -----------------------------------------------------------------------------------------------------------------\n def buildAdiacency(self):\n \"\"\" Build 'Aggregated' Adjacency Matrix ADJ, s.t. 
ADJ[i,j]=value if edge (i,j) exists in graph edges set.\n value is set by self.aggregation_mode: 'sum':1, 'normalized':1/self.nodes.shape[0], 'average':1/number_of_neighbors \"\"\"\n values = self.getArcNode().data\n indices = self.arcs[:, :2].astype(int)\n return coo_matrix((values, (indices[:, 0], indices[:, 1])), shape=(self.nodes.shape[0], self.nodes.shape[0]), dtype=self.dtype)\n\n # -----------------------------------------------------------------------------------------------------------------\n def buildArcNode(self):\n \"\"\" Build ArcNode Matrix A of shape (number_of_arcs, number_of_nodes) where A[i,j]=value if arc[i,2]==node[j].\n Compute the matmul(m:=message,A) to get the incoming message on each node.\n :return: sparse ArcNode Matrix, for memory efficiency.\n :raise: Error if <aggregation_mode> is not in ['average','sum','normalized'].\n \"\"\"\n\n col = self.arcs[:, 1] # column indices of A are located in the second column of the arcs tensor\n row = np.arange(0, len(col)) # arc id (from 0 to number of arcs)\n\n # sum node aggregation - incoming message as sum of neighbors states and labels\n values_vector = np.ones(len(col))\n\n # normalized node aggregation - incoming message as sum of neighbors states and labels divided by the number of nodes in the graph\n if self.aggregation_mode == 'normalized':\n values_vector = values_vector * float(1 / len(col))\n\n # average node aggregation - incoming message as average of neighbors states and labels\n elif self.aggregation_mode == 'average':\n val, col_index, destination_node_counts = np.unique(col, return_inverse=True, return_counts=True)\n values_vector = values_vector / destination_node_counts[col_index]\n\n # isolated nodes correction: if nodes[i] is isolated, then ArcNode[:,i]=0, to maintain nodes ordering\n return coo_matrix((values_vector, (row, col)), shape=(self.arcs.shape[0], self.nodes.shape[0]), dtype=self.dtype)\n\n # -----------------------------------------------------------------------------------------------------------------\n def setAggregation(self, aggregation_mode: str):\n \"\"\" Set ArcNode values for the specified :param aggregation_mode: \"\"\"\n if aggregation_mode not in ['average', 'normalized', 'sum']: raise ValueError(\"ERROR: Unknown aggregation mode\")\n self.aggregation_mode = aggregation_mode\n self.ArcNode = self.buildArcNode()\n self.Adjacency = self.buildAdiacency()\n\n # -----------------------------------------------------------------------------------------------------------------\n def buildNodeGraph(self, problem_based: str):\n \"\"\" Build Node-Graph Aggregation Matrix, to transform a node-based problem in a graph-based one.\n nodegraph != None only if problem_based == 'g': It has dimensions (nodes.shape[0], 1) for a single graph, \n or (nodes.shape[0], Num graphs) for a graph containing 2+ graphs, built by merging the single graphs into a bigger one,\n such that after the node-graph aggregation process gnn can compute (Num graphs, targets.shape[1]) as output.\n It's normalized wrt the number of nodes whose output is computed, i.e. 
the number of ones in output_mask.\n :return: nodegraph matrix if :param problem_based: is 'g' else None, as nodegraph is used in graph-based problems.\n \"\"\"\n nodegraph = None\n if problem_based == 'g':\n nodes_output_coefficient = self.nodes.shape[0]\n nodegraph = np.ones((nodes_output_coefficient, 1), dtype=np.float32) * 1 / nodes_output_coefficient\n return nodegraph\n\n # -----------------------------------------------------------------------------------------------------------------\n def save(self, graph_folder_path: str) -> None:\n \"\"\" save graph in folder. All attributes are saved in numpy .npy files.\n\n :param graph_folder_path: (str) folder path in which graph is saved.\n \"\"\"\n GraphObject.save_graph(graph_folder_path, self)\n\n # -----------------------------------------------------------------------------------------------------------------\n def savetxt(self, graph_folder_path: str, format: str = '%.10g') -> None:\n \"\"\" save graph in folder. All attributes are saved in textual .txt files.\n\n :param graph_folder_path: (str) folder path in which graph is saved.\n \"\"\"\n GraphObject.save_txt(graph_folder_path, self, format)\n\n ## GETTERS ########################################################################################################\n def getArcs(self):\n return self.arcs.copy()\n\n def getNodes(self):\n return self.nodes.copy()\n\n def getTargets(self):\n return self.targets.copy()\n\n def getSetMask(self):\n return self.set_mask.copy()\n\n def getOutputMask(self):\n return self.output_mask.copy()\n\n def getAdjacency(self):\n return self.Adjacency.copy()\n\n def getArcNode(self):\n return self.ArcNode.copy()\n\n def getNodeGraph(self):\n return None if self.NodeGraph is None else self.NodeGraph.copy()\n\n def getSampleWeights(self):\n return self.sample_weights.copy()\n\n ## CLASS METHODs ##################################################################################################\n @classmethod\n def save_graph(self, graph_folder_path: str, g):\n \"\"\" Save a graph to a directory, creating txt files referring to all attributes of graph g\n Note that graph_folder_path will contain ONLY a single graph g. If folder is not empty, it is removed and re-made\n Remind that dataset folder contains one folder for each graph.\n\n :param graph_folder_path: new directory for saving the graph. \n :param g: graph of type GraphObject to be saved.\n \"\"\"\n # check folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n if os.path.exists(graph_folder_path): shutil.rmtree(graph_folder_path)\n os.makedirs(graph_folder_path)\n\n # save everything\n np.save(graph_folder_path + 'arcs.npy', g.arcs)\n np.save(graph_folder_path + 'nodes.npy', g.nodes)\n np.save(graph_folder_path + 'targets.npy', g.targets)\n if not all(g.set_mask): np.save(graph_folder_path + 'set_mask.npy', g.set_mask)\n if not all(g.output_mask): np.save(graph_folder_path + 'output_mask.npy', g.output_mask)\n if np.any(g.sample_weights != 1): np.save(graph_folder_path + 'sample_weights.npy', g.sample_weights)\n if g.NodeGraph is not None and g.targets.shape[0] > 1: np.save(graph_folder_path + 'NodeGraph.npy', g.NodeGraph)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def save_txt(self, graph_folder_path: str, g, format: str = '%.10g'):\n \"\"\" Save a graph to a directory, creating txt files referring to all attributes of graph g\n Note that graph_folder_path will contain ONLY a single graph g. 
If folder is not empty, it is removed and re-made.\n Remind that dataset folder contains one folder for each graph.\n\n :param graph_folder_path: new directory for saving the graph.\n :param g: graph of type GraphObject to be saved.\n :param format: param passed to np.savetxt().\n \"\"\"\n # check folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n if os.path.exists(graph_folder_path): shutil.rmtree(graph_folder_path)\n os.makedirs(graph_folder_path)\n\n # save everything\n np.savetxt(graph_folder_path + 'arcs.txt', g.arcs, fmt=format)\n np.savetxt(graph_folder_path + 'nodes.txt', g.nodes, fmt=format)\n np.savetxt(graph_folder_path + 'targets.txt', g.targets, fmt=format)\n if not all(g.set_mask): np.savetxt(graph_folder_path + 'set_mask.txt', g.set_mask, fmt=format)\n if not all(g.output_mask): np.savetxt(graph_folder_path + 'output_mask.txt', g.output_mask, fmt=format)\n if np.any(g.sample_weights != 1): np.savetxt(graph_folder_path + 'sample_weights.txt', g.sample_weights, fmt=format)\n if g.NodeGraph is not None and g.targets.shape[0] > 1: np.savetxt(graph_folder_path + 'NodeGraph.txt', g.NodeGraph, fmt=format)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def load(self, graph_folder_path: str, problem_based: str, aggregation_mode: str):\n \"\"\" Load a graph from a directory which contains at least 3 numpy files referring to nodes, arcs and targets\n\n :param graph_folder_path: directory containing at least 3 files: 'nodes.npy', 'arcs.npy' and 'targets.npy'\n > other possible files: 'NodeGraph.npy','output_mask.npy' and 'set_mask.npy'. No other files required!\n :param aggregation_mode: node aggregation mode: 'average','sum','normalized'. Go to BuildArcNode for details\n :param problem_based: (str) : 'n'-nodeBased; 'a'-arcBased; 'g'-graphBased\n > NOTE For graph_based problems, file 'NodeGraph.npy' must be present in folder\n :return: GraphObject described by files in <graph_folder_path> folder\n \"\"\"\n # load all the files inside <graph_folder_path> folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n files = os.listdir(graph_folder_path)\n keys = [i.rsplit('.')[0] for i in files] + ['problem_based', 'aggregation_mode']\n vals = [np.load(graph_folder_path + i) for i in files] + [problem_based, aggregation_mode]\n\n # create a dictionary with parameters and values to be passed to constructor and return GraphObject\n params = dict(zip(keys, vals))\n return self(**params)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def load_txt(self, graph_folder_path: str, problem_based: str, aggregation_mode: str):\n \"\"\" Load a graph from a directory which contains at least 3 txt files referring to nodes, arcs and targets\n\n :param graph_folder_path: directory containing at least 3 files: 'nodes.txt', 'arcs.txt' and 'targets.txt'\n > other possible files: 'NodeGraph.txt','output_mask.txt' and 'set_mask.txt'. No other files required!\n :param problem_based: (str) : 'n'-nodeBased; 'a'-arcBased; 'g'-graphBased\n > NOTE For graph_based problems, file 'NodeGraph.txt' must to be present in folder\n :param aggregation_mode: node aggregation mode: 'average','sum','normalized'. 
Go to BuildArcNode for details\n :return: GraphObject described by files in <graph_folder_path> folder\n \"\"\"\n # load all the files inside <graph_folder_path> folder\n if graph_folder_path[-1] != '/': graph_folder_path += '/'\n files = os.listdir(graph_folder_path)\n keys = [i.rsplit('.')[0] for i in files] + ['problem_based', 'aggregation_mode']\n vals = [np.loadtxt(graph_folder_path + i, ndmin=2) for i in files] + [problem_based, aggregation_mode]\n\n # create a dictionary with parameters and values to be passed to constructor and return GraphObject\n params = dict(zip(keys, vals))\n return self(**params)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def merge(self, glist, problem_based: str, aggregation_mode: str):\n \"\"\" Method to merge graphs: it takes in input a list of graphs and returns them as a single graph\n\n :param glist: list of GraphObjects\n > NOTE if problem_based=='g', new NodeGraph will have dimension (Num nodes, Num graphs) else None\n :param aggregation_mode: str, node aggregation mode for new GraphObject, go to buildArcNode for details\n :return: a new GraphObject containing all the information (nodes, arcs, targets, etc) in glist\n \"\"\"\n # check glist parameter: others parameter are in constructor\n if not (type(glist) == list and all(isinstance(x, (GraphObject, str)) for x in glist)):\n raise TypeError('type of param <glist> must be list of str \\'path-like\\' or GraphObjects')\n\n nodes, nodes_lens, arcs, targets, set_mask, output_mask, sample_weights, nodegraph_list = zip(*[(i.getNodes(), i.nodes.shape[0],\n i.getArcs(), i.getTargets(),\n i.getSetMask(), i.getOutputMask(),\n i.getSampleWeights(), i.getNodeGraph())\n for i in glist])\n\n # get single matrices for new graph\n for i, elem in enumerate(arcs): elem[:, :2] += sum(nodes_lens[:i])\n arcs = np.concatenate(arcs, axis=0)\n nodes = np.concatenate(nodes, axis=0)\n targets = np.concatenate(targets, axis=0)\n set_mask = np.concatenate(set_mask, axis=0)\n output_mask = np.concatenate(output_mask, axis=0)\n sample_weights = np.concatenate(sample_weights, axis=0)\n\n nodegraph = None\n if problem_based == 'g':\n from scipy.linalg import block_diag\n nodegraph = block_diag(*nodegraph_list)\n\n # resulting GraphObject\n return self(arcs=arcs, nodes=nodes, targets=targets, problem_based=problem_based, set_mask=set_mask, output_mask=output_mask,\n sample_weights=sample_weights, NodeGraph=nodegraph, aggregation_mode=aggregation_mode)\n\n @classmethod\n def fromGraphTensor(self, g, problem_based: str):\n nodegraph = None\n if problem_based == 'g': nodegraph = g.NodeGraph.numpy()\n return self(arcs=g.arcs.numpy(), nodes=g.nodes.numpy(), targets=g.targets.numpy(),\n set_mask=g.set_mask.numpy(), output_mask=g.output_mask.numpy(), sample_weights=g.sample_weights.numpy(),\n NodeGraph=nodegraph, aggregation_mode=g.aggregation_mode, problem_based=problem_based)\n\n\nclass GraphTensor:\n def __init__(self, nodes, arcs, targets, set_mask, output_mask, sample_weights, Adjacency, ArcNode, NodeGraph, aggregation_mode):\n dtype = tf.keras.backend.floatx()\n\n self.nodes = tf.constant(nodes, dtype=dtype)\n self.arcs = tf.constant(arcs, dtype=dtype)\n self.targets = tf.constant(targets, dtype=dtype)\n self.sample_weights = tf.constant(sample_weights, dtype=dtype)\n self.set_mask = tf.constant(set_mask, dtype=bool)\n self.output_mask = tf.constant(output_mask, dtype=bool)\n self.aggregation_mode = aggregation_mode\n self.NodeGraph = None\n 
if NodeGraph is not None: self.NodeGraph = tf.constant(NodeGraph, dtype=dtype)\n # Adjacency and ArcNode in GraphTensor MUST BE already transposed!\n self.Adjacency = tf.sparse.SparseTensor.from_value(Adjacency)\n self.ArcNode = tf.sparse.SparseTensor.from_value(ArcNode)\n\n # -----------------------------------------------------------------------------------------------------------------\n def copy(self):\n return GraphTensor(nodes=self.nodes, arcs=self.arcs, targets=self.targets, set_mask=self.set_mask, output_mask=self.output_mask,\n sample_weights=self.sample_weights, Adjacency=self.Adjacency, ArcNode=self.ArcNode, NodeGraph=self.NodeGraph,\n aggregation_mode=self.aggregation_mode)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def fromGraphObject(self, g: GraphObject):\n \"\"\" Create GraphTensor from GraphObject. Note that Adjacency and ArcNode are transposed so that GraphTensor.ArcNode and\n GraphTensor.Adjacency are ready for sparse_dense_matmul in Loop operations.\n \"\"\"\n return self(nodes=g.nodes, arcs=g.arcs, targets=g.targets, set_mask=g.set_mask, output_mask=g.output_mask,\n sample_weights=g.sample_weights, NodeGraph=g.NodeGraph, Adjacency=self.COO2SparseTransposedTensor(g.Adjacency),\n ArcNode=self.COO2SparseTransposedTensor(g.ArcNode), aggregation_mode=g.aggregation_mode)\n\n # -----------------------------------------------------------------------------------------------------------------\n @staticmethod\n def COO2SparseTransposedTensor(coo_matrix) -> tf.Tensor:\n \"\"\" Get the transposed sparse tensor from a sparse coo_matrix matrix \"\"\"\n # SparseTensor is created and then reordered to be correctly computable. NOTE: reorder() recommended by TF2.0+\n indices = list(zip(coo_matrix.col, coo_matrix.row))\n sparse_tensor = tf.SparseTensor(indices, values=coo_matrix.data, dense_shape=[coo_matrix.shape[1], coo_matrix.shape[0]])\n sparse_tensor = tf.sparse.reorder(sparse_tensor)\n sparse_tensor = tf.cast(sparse_tensor, dtype=tf.keras.backend.floatx())\n return sparse_tensor\n" ]
[ [ "tensorflow.keras.backend.floatx", "scipy.sparse.coo_matrix", "tensorflow.constant", "scipy.linalg.block_diag", "numpy.unique", "numpy.save", "tensorflow.SparseTensor", "numpy.concatenate", "numpy.ones", "tensorflow.sparse.reorder", "numpy.any", "numpy.savetxt", "numpy.load", "tensorflow.sparse.SparseTensor.from_value", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] } ]
sarkarpr/azure-python-labs
[ "10ad5d69175cec7fc8ff465368e9867440d034f3", "0ceae2d814e1f4af836fb117077f26339c4df026" ]
[ "2019/6-azureml-movie-recommendation/reco_utils/dataset/spark_splitters.py", "9-azure-machine-learning/utils.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport numpy as np\n\nfrom pyspark.sql import Window\nfrom pyspark.sql.functions import col, row_number, broadcast, rand\n\nfrom reco_utils.common.constants import (\n DEFAULT_ITEM_COL,\n DEFAULT_USER_COL,\n DEFAULT_TIMESTAMP_COL,\n DEFAULT_RATING_COL,\n)\nfrom reco_utils.dataset.split_utils import process_split_ratio, min_rating_filter_spark\n\n\ndef spark_random_split(data, ratio=0.75, seed=42):\n \"\"\"Spark random splitter\n Randomly split the data into several splits.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two halfs and the ratio argument indicates the ratio of \n training data set; if it is a list of float numbers, the splitter splits \n data into several portions corresponding to the split ratios. If a list \n is provided and the ratios are not summed to 1, they will be normalized.\n seed (int): Seed.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n multi_split, ratio = process_split_ratio(ratio)\n\n if multi_split:\n return data.randomSplit(ratio, seed=seed)\n else:\n return data.randomSplit([ratio, 1 - ratio], seed=seed)\n\n\ndef spark_chrono_split(\n data,\n ratio=0.75,\n min_rating=1,\n filter_by=\"user\",\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n col_timestamp=DEFAULT_TIMESTAMP_COL,\n):\n \"\"\"Spark chronological splitter\n This function splits data in a chronological manner. That is, for each user / item, the\n split function takes proportions of ratings which is specified by the split ratio(s).\n The split is stratified.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two sets and the ratio argument indicates the ratio of\n training data set; if it is a list of float numbers, the splitter splits \n data into several portions corresponding to the split ratios. 
If a list is \n provided and the ratios are not summed to 1, they will be normalized.\n seed (int): Seed.\n min_rating (int): minimum number of ratings for user or item.\n filter_by (str): either \"user\" or \"item\", depending on which of the two is to filter\n with min_rating.\n col_user (str): column name of user IDs.\n col_item (str): column name of item IDs.\n col_timestamp (str): column name of timestamps.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n if not (filter_by == \"user\" or filter_by == \"item\"):\n raise ValueError(\"filter_by should be either 'user' or 'item'.\")\n\n if min_rating < 1:\n raise ValueError(\"min_rating should be integer and larger than or equal to 1.\")\n\n multi_split, ratio = process_split_ratio(ratio)\n\n split_by_column = col_user if filter_by == \"user\" else col_item\n\n if min_rating > 1:\n data = min_rating_filter_spark(\n data,\n min_rating=min_rating,\n filter_by=filter_by,\n col_user=col_user,\n col_item=col_item,\n )\n\n ratio = ratio if multi_split else [ratio, 1 - ratio]\n ratio_index = np.cumsum(ratio)\n\n window_spec = Window.partitionBy(split_by_column).orderBy(col(col_timestamp))\n\n rating_grouped = (\n data.groupBy(split_by_column)\n .agg({col_timestamp: \"count\"})\n .withColumnRenamed(\"count(\" + col_timestamp + \")\", \"count\")\n )\n rating_all = data.join(broadcast(rating_grouped), on=split_by_column)\n\n rating_rank = rating_all.withColumn(\n \"rank\", row_number().over(window_spec) / col(\"count\")\n )\n\n splits = []\n for i, _ in enumerate(ratio_index):\n if i == 0:\n rating_split = rating_rank.filter(col(\"rank\") <= ratio_index[i])\n else:\n rating_split = rating_rank.filter(\n (col(\"rank\") <= ratio_index[i]) & (col(\"rank\") > ratio_index[i - 1])\n )\n\n splits.append(rating_split)\n\n return splits\n\n\ndef spark_stratified_split(\n data,\n ratio=0.75,\n min_rating=1,\n filter_by=\"user\",\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n col_rating=DEFAULT_RATING_COL,\n seed=42,\n):\n \"\"\"Spark stratified splitter\n For each user / item, the split function takes proportions of ratings which is\n specified by the split ratio(s). The split is stratified.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two halfs and the ratio argument indicates the ratio of\n training data set; if it is a list of float numbers, the splitter splits\n data into several portions corresponding to the split ratios. 
If a list is\n provided and the ratios are not summed to 1, they will be normalized.\n Earlier indexed splits will have earlier times\n (e.g the latest time per user or item in split[0] <= the earliest time per user or item in split[1])\n seed (int): Seed.\n min_rating (int): minimum number of ratings for user or item.\n filter_by (str): either \"user\" or \"item\", depending on which of the two is to filter\n with min_rating.\n col_user (str): column name of user IDs.\n col_item (str): column name of item IDs.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n if not (filter_by == \"user\" or filter_by == \"item\"):\n raise ValueError(\"filter_by should be either 'user' or 'item'.\")\n\n if min_rating < 1:\n raise ValueError(\"min_rating should be integer and larger than or equal to 1.\")\n\n multi_split, ratio = process_split_ratio(ratio)\n\n split_by_column = col_user if filter_by == \"user\" else col_item\n\n if min_rating > 1:\n data = min_rating_filter_spark(\n data,\n min_rating=min_rating,\n filter_by=filter_by,\n col_user=col_user,\n col_item=col_item,\n )\n\n ratio = ratio if multi_split else [ratio, 1 - ratio]\n ratio_index = np.cumsum(ratio)\n\n window_spec = Window.partitionBy(split_by_column).orderBy(rand(seed=seed))\n\n rating_grouped = (\n data.groupBy(split_by_column)\n .agg({col_rating: \"count\"})\n .withColumnRenamed(\"count(\" + col_rating + \")\", \"count\")\n )\n rating_all = data.join(broadcast(rating_grouped), on=split_by_column)\n\n rating_rank = rating_all.withColumn(\n \"rank\", row_number().over(window_spec) / col(\"count\")\n )\n\n splits = []\n for i, _ in enumerate(ratio_index):\n if i == 0:\n rating_split = rating_rank.filter(col(\"rank\") <= ratio_index[i])\n else:\n rating_split = rating_rank.filter(\n (col(\"rank\") <= ratio_index[i]) & (col(\"rank\") > ratio_index[i - 1])\n )\n\n splits.append(rating_split)\n\n return splits\n\n\ndef spark_timestamp_split(\n data,\n ratio=0.75,\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n col_timestamp=DEFAULT_TIMESTAMP_COL,\n):\n \"\"\"Spark timestamp based splitter\n The splitter splits the data into sets by timestamps without stratification on either\n user or item.\n The ratios are applied on the timestamp column which is divided accordingly into\n several partitions.\n\n Args:\n data (spark.DataFrame): Spark DataFrame to be split.\n ratio (float or list): Ratio for splitting data. If it is a single float number\n it splits data into two sets and the ratio argument indicates the ratio of\n training data set; if it is a list of float numbers, the splitter splits\n data into several portions corresponding to the split ratios. If a list is\n provided and the ratios are not summed to 1, they will be normalized.\n Earlier indexed splits will have earlier times\n (e.g the latest time in split[0] <= the earliest time in split[1])\n col_user (str): column name of user IDs.\n col_item (str): column name of item IDs.\n col_timestamp (str): column name of timestamps. 
Float number represented in\n seconds since Epoch.\n\n Returns:\n list: Splits of the input data as spark.DataFrame.\n \"\"\"\n multi_split, ratio = process_split_ratio(ratio)\n\n ratio = ratio if multi_split else [ratio, 1 - ratio]\n ratio_index = np.cumsum(ratio)\n\n window_spec = Window.orderBy(col(col_timestamp))\n rating = data.withColumn(\"rank\", row_number().over(window_spec))\n\n data_count = rating.count()\n rating_rank = rating.withColumn(\"rank\", row_number().over(window_spec) / data_count)\n\n splits = []\n for i, _ in enumerate(ratio_index):\n if i == 0:\n rating_split = rating_rank.filter(col(\"rank\") <= ratio_index[i]).drop(\n \"rank\"\n )\n else:\n rating_split = rating_rank.filter(\n (col(\"rank\") <= ratio_index[i]) & (col(\"rank\") > ratio_index[i - 1])\n ).drop(\"rank\")\n\n splits.append(rating_split)\n\n return splits\n", "import os\nimport gzip\nimport struct\n\nimport urllib\nfrom urllib import request\nimport numpy as np\n\n# load compressed MNIST gz files and return numpy arrays\ndef load_data(filename, label = False):\n with gzip.open(filename) as gz:\n magic_number = struct.unpack('I', gz.read(4))\n n_items = struct.unpack('>I', gz.read(4))\n if not label:\n n_rows = struct.unpack('>I', gz.read(4))[0]\n n_cols = struct.unpack('>I', gz.read(4))[0]\n res = np.frombuffer(gz.read(n_items[0] * n_rows * n_cols), dtype = np.uint8)\n res = res.reshape(n_items[0], n_rows * n_cols)\n else:\n res = np.frombuffer(gz.read(n_items[0]), dtype = np.uint8)\n res = res.reshape(n_items[0], 1)\n return res\n\n# one-hot encode a 1-D array\ndef one_hot_encode(array, num_of_classes):\n return np.eye(num_of_classes)[array.reshape(-1)]\n\ndef prepare_data(dataset, data_folder):\n data_folder = os.path.join(data_folder, dataset)\n print('making data directory ' + data_folder + '...')\n os.makedirs(data_folder, exist_ok = True)\n\n def download_data(url, filename):\n if not os.path.isfile(filename):\n print('downloading ' + url)\n urllib.request.urlretrieve(url, filename = filename)\n else:\n print(filename + ' exists, using it')\n\n print('downloading training data ...')\n download_data('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', './data/mnist/train-images.gz')\n download_data('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', './data/mnist/train-labels.gz')\n print('done.')\n print('downloading testing data ...')\n download_data('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', './data/mnist/test-images.gz')\n download_data('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', './data/mnist/test-labels.gz')\n print('done.')\n\n print('Prepared training dataset is stored here:', data_folder)\n\n X_train = load_data(os.path.join(data_folder, 'train-images.gz'), False) / 255.0\n X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0\n\n y_train = load_data(os.path.join(data_folder, 'train-labels.gz'), True).reshape(-1)\n y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)\n\n print(X_train.shape, y_train.shape, X_test.shape, y_test.shape, sep = '\\n')\n\n return X_train, X_test, y_train, y_test" ]
[ [ "numpy.cumsum" ], [ "numpy.eye" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tkuri/noise2noise
[ "a293f4952d3d1c997f4eb298a20fd5fac50a2dd3", "a293f4952d3d1c997f4eb298a20fd5fac50a2dd3" ]
[ "test_model.py", "plot_history.py" ]
[ "import argparse\nimport numpy as np\nfrom pathlib import Path\nimport cv2\nfrom model import get_model\n# from noise_model import get_noise_model\n\nMAX_8BIT = 255.\nMAX_16BIT = 65535.\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"Test trained model\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--image_dir\", type=str, required=True,\n help=\"test image dir\")\n parser.add_argument(\"--model\", type=str, default=\"srresnet\",\n help=\"model architecture ('srresnet' or 'unet')\")\n parser.add_argument(\"--weight_file\", type=str, required=True,\n help=\"trained weight file\")\n # parser.add_argument(\"--test_noise_model\", type=str, default=\"gaussian,25,25\",\n # help=\"noise model for test images\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"if set, save resulting images otherwise show result using imshow\")\n parser.add_argument('--uint16', action='store_true', help='16bit process.')\n args = parser.parse_args()\n return args\n\n\ndef get_image(image, args):\n if args.uint16:\n image = np.clip(image, 0, MAX_8BIT)\n image = image.astype(dtype=np.uint8)\n else:\n image = np.clip(image*MAX_8BIT, 0, MAX_16BIT)\n image = image.astype(dtype=np.uint16)\n return image\n\n\ndef main():\n args = get_args()\n image_dir = args.image_dir\n weight_file = args.weight_file\n # val_noise_model = get_noise_model(args.test_noise_model)\n model = get_model(args.model)\n model.load_weights(weight_file)\n\n if args.output_dir:\n output_dir = Path(args.output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n\n print(image_dir)\n# image_paths = list(Path(image_dir).glob(\"*.*\"))\n image_paths = list(Path(image_dir).glob(\"*.png\"))\n# print(image_paths)\n\n for image_path in image_paths:\n image = cv2.imread(str(image_path),-1)\n\n if args.uint16:\n image = image / MAX_8BIT # Normalize 0~255 if input is 16bit\n\n h, w, _ = image.shape\n image = image[:(h // 16) * 16, :(w // 16) * 16] # for stride (maximum 16)\n h, w, _ = image.shape\n \n pred = model.predict(np.expand_dims(image, 0))\n out_image = get_image(pred[0], args)\n\n# out_image = np.zeros((h, w * 3, 3), dtype=np.uint8)\n# noise_image = val_noise_model(image)\n# pred = model.predict(np.expand_dims(noise_image, 0))\n# denoised_image = get_image(pred[0])\n# out_image[:, :w] = image\n# out_image[:, w:w * 2] = noise_image\n# out_image[:, w * 2:] = denoised_image\n\n\n if args.output_dir:\n cv2.imwrite(str(output_dir.joinpath(image_path.name))[:-4] + \".png\", out_image)\n else:\n cv2.imshow(\"result\", out_image)\n key = cv2.waitKey(-1)\n # \"q\": quit\n if key == 113:\n return 0\n return 0\n\n\nif __name__ == '__main__':\n main()\n", "import argparse\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"This script plots training history\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--input1\", type=str, required=True,\n help=\"path to input checkout directory 1 (must include history.npz)\")\n parser.add_argument(\"--input2\", type=str, default=None,\n help=\"path to input checkout directory 2 (must include history.npz) \"\n \"if you want to compare it with input1\")\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = get_args()\n input_paths = [Path(args.input1).joinpath(\"history.npz\")]\n\n if args.input2:\n input_paths.append(Path(args.input2).joinpath(\"history.npz\"))\n\n # datum = 
[(np.array(np.load(str(input_path))[\"history\"], ndmin=1)[0], input_path.parent.name)\n datum = [(np.array(np.load(str(input_path), allow_pickle=True)[\"history\"], ndmin=1)[0], input_path.parent.name)\n for input_path in input_paths]\n metrics = [\"val_loss\", \"val_PSNR\"]\n\n for metric in metrics:\n for data, setting_name in datum:\n plt.plot(data[metric], label=setting_name)\n plt.xlabel(\"epochs\")\n plt.ylabel(metric)\n plt.legend()\n plt.savefig(metric + \".png\")\n plt.cla()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.expand_dims", "numpy.clip" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.cla", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
quickgrid/paper-implementations
[ "90de1e93cc664e8f5e1e49c57c030f3d9d14fdf9", "90de1e93cc664e8f5e1e49c57c030f3d9d14fdf9" ]
[ "pytorch/gaugan/gaugan.py", "pytorch/vision_transformer/vit.py" ]
[ "\"\"\"Pytorch GauGAN implementation.\n\nEither segmentation one hot mask or rgb mask can be passed to discriminator with little modification.\n\nTodo\n - Modify to try to generate and match mask also as loss.\n - Try discriminator with either segmentation image or label.\n - Use multiscale feature from discriminator to calculate loss.\n - Test conv bias, norm affine and other parameter effect on result.\n\nReferences\n - https://arxiv.org/abs/1903.07291\n - https://keras.io/examples/generative/gaugan/\n - https://github.com/quickgrid/AI-Resources/blob/master/resources/ai-notes/gaugan-series.md\n - https://github.com/NVlabs/SPADE\n\"\"\"\n\nimport os\nimport pathlib\nfrom datetime import datetime\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nfrom torchvision.models.feature_extraction import get_graph_node_names, create_feature_extractor\nfrom torchvision.transforms import transforms\nfrom torchvision import models\nfrom torch.nn.utils.spectral_norm import spectral_norm\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.nn import functional\nfrom torch.backends import cudnn\nfrom PIL import Image\nfrom tqdm import tqdm\n\n\nclass LayerDebugger(nn.Module):\n def __init__(self) -> None:\n super(LayerDebugger, self).__init__()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n print(x.shape)\n return x\n\n\nclass ImageEncoder(nn.Module):\n def __init__(\n self,\n img_size: int,\n latent_dim: int,\n enable_dropout: bool = False,\n dropout_rate: float = 0.5,\n apply_spectral_norm: bool = False,\n ) -> None:\n super(ImageEncoder, self).__init__()\n\n dropout_layer = list()\n if enable_dropout:\n dropout_layer = [nn.Dropout(p=dropout_rate)]\n\n def _get_conv_layer(_in_channels: int, _out_channels: int, apply_bias: bool = False) -> list:\n conv_layer = nn.Conv2d(\n in_channels=_in_channels, out_channels=_out_channels,\n kernel_size=(3, 3), padding=(1, 1), stride=(2, 2), bias=apply_bias,\n )\n if apply_spectral_norm:\n conv_layer = spectral_norm(conv_layer)\n return [conv_layer]\n return [conv_layer]\n\n def _get_block(\n _in_channels: int,\n _out_channels: int,\n apply_norm: bool = True,\n ) -> list:\n norm_layer = list()\n if apply_norm:\n norm_layer = [nn.InstanceNorm2d(num_features=_out_channels, affine=False)]\n\n return [\n *_get_conv_layer(_in_channels=_in_channels, _out_channels=_out_channels),\n *norm_layer,\n nn.LeakyReLU(negative_slope=0.2),\n *dropout_layer,\n ]\n\n channel_in = [3, 64, 128, 256, 512, 512]\n channel_out = [64, 128, 256, 512, 512, 512]\n linear_features = 8192\n\n conv_layers = list()\n for idx, (in_channels, out_channels) in enumerate(zip(channel_in, channel_out)):\n if idx != 0:\n conv_layers.extend(_get_block(_in_channels=in_channels, _out_channels=out_channels))\n else:\n conv_layers.extend(_get_block(_in_channels=in_channels, _out_channels=out_channels, apply_norm=False))\n\n self.encoder_layers = nn.Sequential(\n *conv_layers,\n nn.Flatten(),\n nn.Linear(\n in_features=((img_size // (2 ** len(channel_out))) ** 2) * channel_out[-1],\n out_features=linear_features\n ),\n )\n\n self.mean_out = nn.Linear(in_features=linear_features, out_features=latent_dim)\n self.variance_out = nn.Linear(in_features=linear_features, out_features=latent_dim)\n\n def forward(self, img: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n x = self.encoder_layers(img)\n mean = self.mean_out(x)\n var = self.variance_out(x)\n return mean, var\n\n\nclass 
Discriminator(nn.Module):\n \"\"\"Conv parameters are manually calculated using formula in pytorch Conv2D docs to keep output shape same.\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n device: torch.device,\n enable_dropout: bool = False,\n dropout_rate: float = 0.5,\n apply_spectral_norm: bool = False,\n ) -> None:\n super(Discriminator, self).__init__()\n\n dropout_layer = list()\n if enable_dropout:\n dropout_layer = [nn.Dropout(p=dropout_rate)]\n\n def _get_conv_layer(\n _in_channels: int,\n _out_channels: int,\n _stride: int,\n _padding: int,\n _dilation: int,\n apply_bias: bool = False\n ) -> list:\n conv_layer = nn.Conv2d(\n in_channels=_in_channels, out_channels=_out_channels,\n kernel_size=(4, 4),\n padding=(_padding, _padding),\n stride=(_stride, _stride),\n dilation=(_dilation, _dilation),\n device=device,\n bias=apply_bias,\n )\n if apply_spectral_norm:\n conv_layer = spectral_norm(conv_layer)\n return [conv_layer]\n return [conv_layer]\n\n def _get_block(\n _in_channels: int,\n _out_channels: int,\n _stride: int,\n _padding: int,\n _dilation: int,\n apply_norm: bool = True,\n ) -> nn.Sequential:\n norm_layer = list()\n if apply_norm:\n # norm_layer = [nn.BatchNorm2d(num_features=_out_channels, device=device)]\n norm_layer = [nn.InstanceNorm2d(num_features=_out_channels, affine=False, device=device)]\n\n return nn.Sequential(\n *_get_conv_layer(\n _in_channels=_in_channels, _out_channels=_out_channels,\n _stride=_stride, _padding=_padding, _dilation=_dilation,\n ),\n *norm_layer,\n nn.LeakyReLU(negative_slope=0.2),\n *dropout_layer,\n )\n\n channel_in = [3 * 2, 64, 128, 256]\n # channel_in = [3 + num_classes, 64, 128, 256]\n channel_out = [64, 128, 256, 512]\n stride = [2, 2, 2, 1]\n padding = [3, 3, 3, 3]\n dilation = [2, 2, 2, 2]\n\n self.disc_multiscale_features = list()\n for idx, (in_channels, out_channels, stride, padding, dilation) in enumerate(zip(\n channel_in, channel_out, stride, padding, dilation\n )):\n if idx != 0:\n self.disc_multiscale_features.append(\n _get_block(\n _in_channels=in_channels,\n _out_channels=out_channels,\n _stride=stride,\n _padding=padding,\n _dilation=dilation,\n )\n )\n else:\n self.disc_multiscale_features.append(\n _get_block(\n _in_channels=in_channels,\n _out_channels=out_channels,\n _stride=stride,\n _padding=padding,\n _dilation=dilation,\n apply_norm=False,\n )\n )\n\n self.disc_out_layer = nn.Conv2d(\n in_channels=512, out_channels=1, kernel_size=(4, 4), padding=(3, 3), stride=(2, 2), dilation=(2, 2)\n )\n\n def forward(self, img1: torch.Tensor, img2: torch.Tensor) -> Tuple[torch.Tensor, list]:\n x = torch.cat([img1, img2], dim=1)\n multiscale_features = list()\n for layer in self.disc_multiscale_features:\n x = layer(x)\n multiscale_features.append(x)\n x = self.disc_out_layer(x)\n return x, multiscale_features\n\n\nclass SPADE(nn.Module):\n def __init__(\n self,\n out_channels: int,\n num_classes: int,\n ) -> None:\n super(SPADE, self).__init__()\n\n embed_dim = 128\n self.normalizer = nn.InstanceNorm2d(num_features=embed_dim, affine=False)\n\n self.embedding_conv = nn.Sequential(\n nn.Conv2d(in_channels=num_classes, out_channels=embed_dim, kernel_size=(3, 3), padding=(1, 1)),\n nn.ReLU(),\n )\n self.gamma_conv = nn.Conv2d(\n in_channels=embed_dim, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1)\n )\n\n self.beta_conv = nn.Conv2d(\n in_channels=embed_dim, out_channels=out_channels, kernel_size=(3, 3), padding=(1, 1)\n )\n\n def forward(self, packed_tensor: torch.Tensor) -> torch.Tensor:\n prev_input, 
onehot_mask = packed_tensor\n normalized = self.normalizer(prev_input)\n mask = functional.interpolate(onehot_mask.float(), size=prev_input.shape[2:], mode='nearest')\n x = self.embedding_conv(mask)\n gamma = self.gamma_conv(x)\n beta = self.beta_conv(x)\n output = gamma * normalized + beta\n return output\n\n\nclass SPADEResBlock(nn.Module):\n def __init__(\n self,\n in_filters: int,\n out_filters: int,\n num_classes: int,\n apply_spectral_norm: bool = False,\n ) -> None:\n super(SPADEResBlock, self).__init__()\n self.learned_skip = (in_filters != out_filters)\n min_filters = min(in_filters, out_filters)\n\n def _get_conv_layer(in_channels: int, out_channels: int, apply_bias: bool = True) -> list:\n conv_layer = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=(3, 3),\n padding=(1, 1),\n bias=apply_bias,\n )\n if apply_spectral_norm:\n conv_layer = spectral_norm(conv_layer)\n return [conv_layer]\n return [conv_layer]\n\n self.spade_res_block_1 = nn.Sequential(\n SPADE(num_classes=num_classes, out_channels=in_filters),\n nn.LeakyReLU(negative_slope=0.2),\n *_get_conv_layer(in_channels=in_filters, out_channels=min_filters),\n )\n\n self.spade_res_block_2 = nn.Sequential(\n SPADE(out_channels=min_filters, num_classes=num_classes),\n nn.LeakyReLU(negative_slope=0.2),\n *_get_conv_layer(in_channels=min_filters, out_channels=out_filters),\n )\n\n self.learned_skip_path = nn.Sequential(\n SPADE(out_channels=in_filters, num_classes=num_classes),\n nn.LeakyReLU(negative_slope=0.2),\n *_get_conv_layer(in_channels=in_filters, out_channels=out_filters, apply_bias=True),\n )\n\n def forward(self, packed_tensor: torch.Tensor) -> torch.Tensor:\n x, onehot_mask = packed_tensor\n x_skip = x\n x = self.spade_res_block_1((x, onehot_mask))\n x = self.spade_res_block_2((x, onehot_mask))\n if self.learned_skip:\n x_skip = self.learned_skip_path((x_skip, onehot_mask))\n x = x + x_skip\n return x\n\n\nclass GaussianSampler(nn.Module):\n def __init__(\n self,\n batch_size: int,\n latent_dim: int,\n device: torch.device,\n ) -> None:\n super(GaussianSampler, self).__init__()\n self.batch_size = batch_size\n self.latent_dim = latent_dim\n self.device = device\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n mean, variance = x\n epsilon = torch.normal(mean=0.0, std=1.0, size=(self.batch_size, self.latent_dim), device=self.device)\n noise_input = mean + torch.exp(0.5 * variance) * epsilon\n return noise_input\n\n\nclass Generator(nn.Module):\n def __init__(\n self,\n latent_dim: int,\n num_classes: int,\n device: torch.device,\n ) -> None:\n super(Generator, self).__init__()\n\n def _get_res_block(_in_filters: int, _out_filters: int) -> nn.Sequential:\n return nn.Sequential(\n SPADEResBlock(\n in_filters=_in_filters, out_filters=_out_filters, num_classes=num_classes\n ).to(device=device),\n nn.Upsample(scale_factor=(2, 2)),\n )\n\n self.initial_shape = 1024\n\n filter_list = [self.initial_shape, 1024, 512, 256, 128, 128, 64]\n self.filter_list_len = len(filter_list)\n\n self.generator_middle_layers = list()\n for i in range(self.filter_list_len - 1):\n self.generator_middle_layers.append(\n _get_res_block(_in_filters=filter_list[i], _out_filters=filter_list[i+1])\n )\n\n self.generator_input_layers = nn.Sequential(\n nn.Linear(in_features=latent_dim, out_features=128 * 128),\n )\n\n # Change conv layer stride for custom image size.\n self.generator_output_layers = nn.Sequential(\n nn.LeakyReLU(negative_slope=0.2),\n nn.Conv2d(in_channels=filter_list[-1], out_channels=3, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n nn.Tanh(),\n )\n\n def forward(self, latent_vector: torch.Tensor, onehot_mask: torch.Tensor) -> torch.Tensor:\n x = self.generator_input_layers(latent_vector)\n x = x.view(-1, self.initial_shape, 4, 4)\n for mid_layer in self.generator_middle_layers:\n x = mid_layer((x, onehot_mask))\n x = self.generator_output_layers(x)\n return x\n\n\nclass VggLoss(nn.Module):\n \"\"\"Use vgg intermediate layers to calculate perceptual loss.\n \"\"\"\n\n def __init__(\n self,\n device: torch.device,\n debug: bool = False,\n ) -> None:\n super(VggLoss, self).__init__()\n model = models.vgg19(pretrained=True).to(device=device)\n model.eval()\n for param in model.parameters():\n param.requires_grad = False\n\n if debug:\n print(model.features)\n train_nodes, eval_nodes = get_graph_node_names(model)\n print('train_nodes')\n print(train_nodes)\n print('eval_nodes')\n print(eval_nodes)\n\n return_nodes = {\n 'features.1': 'out_0',\n 'features.6': 'out_1',\n 'features.11': 'out_2',\n 'features.20': 'out_3',\n 'features.29': 'out_4',\n }\n self.feature_count = len(return_nodes)\n\n self.feature_extractor = create_feature_extractor(\n model,\n return_nodes=return_nodes\n )\n\n self.layer_weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> float:\n x_out = self.feature_extractor(x)\n y_out = self.feature_extractor(y)\n loss = 0.0\n for i in range(self.feature_count):\n loss += self.layer_weights[i] * functional.l1_loss(x_out[f'out_{i}'], y_out[f'out_{i}'])\n return loss\n\n\nclass GauganDataset(Dataset):\n \"\"\"Real images should be jpg and Segmentation image should be in png format.\n \"\"\"\n\n def __init__(\n self,\n root_dir: str,\n image_size: int,\n image_channels: int,\n num_classes: int,\n ) -> None:\n super(GauganDataset, self).__init__()\n\n self.num_classes = num_classes + 1\n\n self.root_dir = root_dir\n self.image_labels_files_list = list()\n for root, dirs, files in os.walk(root_dir):\n for names in files:\n if names.endswith('.jpg'):\n base_name = names.split('.')[0]\n self.image_labels_files_list.append(\n (\n os.path.join(root, f'{base_name}.jpg'),\n os.path.join(root, f'{base_name}.png'),\n )\n )\n\n self.image_files_list_len = len(self.image_labels_files_list)\n\n self.img_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.5 for _ in range(image_channels)],\n std=[0.5 for _ in range(image_channels)],\n )\n ])\n\n self.segmentation_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.5 for _ in range(image_channels)],\n std=[0.5 for _ in range(image_channels)],\n )\n ])\n\n self.segmentation_label_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.PILToTensor(),\n ])\n\n def __len__(self) -> int:\n return self.image_files_list_len\n\n def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n image_path, segmentation_path = self.image_labels_files_list[idx]\n\n image = Image.open(image_path)\n image = image.convert('RGB')\n image = self.img_transform(image)\n\n segmentation_image_original = Image.open(segmentation_path)\n\n segmentation_image = segmentation_image_original.convert('RGB')\n segmentation_image = self.segmentation_transform(segmentation_image)\n\n segmentation_label = segmentation_image_original.convert('P')\n segmentation_label = 
self.segmentation_label_transform(segmentation_label)\n segmentation_label = functional.one_hot(segmentation_label.long(), num_classes=self.num_classes)\n segmentation_label = torch.permute(segmentation_label.squeeze(), (2, 0, 1))\n\n return image, segmentation_image, segmentation_label\n\n\ndef feature_matching_loss(real_preds: torch.Tensor, fake_preds: torch.Tensor) -> float:\n pred_count_weight = 1 / len(real_preds)\n _feature_matching_loss = 0.0\n for real_features, fake_features in zip(real_preds, fake_preds):\n _feature_matching_loss += functional.l1_loss(real_features, fake_features) * pred_count_weight\n return _feature_matching_loss\n\n\nclass Trainer:\n def __init__(\n self,\n num_classes: int,\n root_dir='',\n device: str = None,\n checkpoint_path: str = None,\n save_checkpoint_every: int = 20,\n num_workers: int = 0,\n batch_size: int = 3,\n image_size: int = 256,\n image_channels: int = 3,\n num_epochs: int = 10000,\n latent_dim: int = 256,\n gen_learning_rate: float = 0.0001,\n disc_learning_rate: float = 0.0004,\n disc_iterations: int = 1,\n debug: bool = False,\n ) -> None:\n\n torch.autograd.set_detect_anomaly(False)\n torch.autograd.profiler.emit_nvtx(enabled=False)\n torch.autograd.profiler.profile(enabled=False)\n cudnn.benchmark = True\n\n if debug:\n torch.autograd.set_detect_anomaly(True)\n torch.autograd.profiler.emit_nvtx(enabled=True)\n torch.autograd.profiler.profile(enabled=True)\n cudnn.benchmark = False\n\n self.device = torch.device(device) if device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.num_epochs = num_epochs\n self.batch_size = batch_size\n self.save_every = save_checkpoint_every\n self.disc_iterations = disc_iterations\n\n gan_dataset = GauganDataset(\n root_dir=root_dir,\n image_size=image_size,\n image_channels=image_channels,\n num_classes=num_classes,\n )\n self.train_loader = DataLoader(\n gan_dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n drop_last=True,\n )\n\n self.vgg_model = VggLoss(device=self.device)\n\n self.image_encoder = ImageEncoder(img_size=image_size, latent_dim=latent_dim)\n self.noise_sampler = GaussianSampler(batch_size=batch_size, latent_dim=latent_dim, device=self.device)\n self.generator = Generator(latent_dim=latent_dim, num_classes=num_classes + 1, device=self.device)\n self.discriminator = Discriminator(num_classes=num_classes + 1, device=self.device)\n\n self.image_encoder.to(device=self.device)\n self.noise_sampler.to(device=self.device)\n self.generator.to(device=self.device)\n self.discriminator.to(device=self.device)\n\n def _initialize_weights(model, mean=0.0, std=0.02):\n for m in model.modules():\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n # nn.init.normal_(m.weight.data, mean=mean, std=std)\n nn.init.xavier_normal_(m.weight.data)\n # nn.init.kaiming_normal_(m.weight.data)\n\n _initialize_weights(self.image_encoder)\n _initialize_weights(self.generator)\n _initialize_weights(self.discriminator)\n\n encoder_generator_parameters = list(self.generator.parameters()) + list(self.image_encoder.parameters())\n self.gen_optimizer = optim.Adam(\n params=encoder_generator_parameters, lr=gen_learning_rate, betas=(0.0, 0.999)\n )\n\n self.disc_optimizer = optim.Adam(\n params=self.discriminator.parameters(), lr=disc_learning_rate, betas=(0.0, 0.999)\n )\n\n self.fixed_noise = torch.randn((batch_size, latent_dim), device=self.device)\n\n current_datetime = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n self.writer_real = 
SummaryWriter(f'logs/real/{current_datetime}/')\n self.writer_fake = SummaryWriter(f'logs/fake/{current_datetime}/')\n self.step = 0\n\n self.start_epoch = 0\n pathlib.Path('checkpoints').mkdir(parents=True, exist_ok=True)\n if checkpoint_path is not None:\n self.load_checkpoint(checkpoint_path=checkpoint_path)\n\n def load_checkpoint(self, checkpoint_path: str) -> None:\n checkpoint = torch.load(checkpoint_path)\n self.generator.load_state_dict(checkpoint['generator_state_dict'])\n self.discriminator.load_state_dict(checkpoint['discriminator_state_dict'])\n self.gen_optimizer.load_state_dict(checkpoint['generator_optimizer_state_dict'])\n self.disc_optimizer.load_state_dict(checkpoint['discriminator_optimizer_state_dict'])\n self.start_epoch = checkpoint['epoch']\n\n def train(self) -> None:\n for epoch in range(self.start_epoch, self.num_epochs):\n with tqdm(self.train_loader) as tqdm_train_loader:\n for batch_idx, (real_image, segmentation_image, segmentation_label) in enumerate(tqdm_train_loader):\n real_image = real_image.to(self.device)\n segmentation_image = segmentation_image.to(self.device)\n segmentation_label = segmentation_label.to(self.device)\n\n # Train discriminator.\n for i in range(self.disc_iterations):\n mean, var = self.image_encoder(real_image)\n latent_vector = self.noise_sampler((mean, var))\n generated_image = self.generator(latent_vector, segmentation_label)\n\n fake_pred, fake_pred_multiscale_features = self.discriminator(\n segmentation_image, generated_image\n )\n real_pred, real_pred_multiscale_features = self.discriminator(\n segmentation_image, real_image\n )\n\n fake_pred = fake_pred.reshape(-1)\n real_pred = real_pred.reshape(-1)\n\n loss_real = -torch.mean(\n torch.min(real_pred - 1, torch.zeros_like(real_pred, requires_grad=False))\n )\n loss_fake = -torch.mean(\n torch.min(-fake_pred.detach() - 1, torch.zeros_like(fake_pred, requires_grad=False))\n )\n discriminator_loss = (loss_fake + loss_real) * 0.5\n\n self.disc_optimizer.zero_grad(set_to_none=True)\n discriminator_loss.backward()\n self.disc_optimizer.step()\n\n # Train generator\n fake_pred, fake_pred_multiscale_features = self.discriminator(segmentation_image, generated_image)\n real_pred, real_pred_multiscale_features = self.discriminator(segmentation_image, real_image)\n fake_pred = fake_pred.reshape(-1)\n\n loss_gen = -torch.mean(fake_pred)\n loss_kldiv = -0.5 * torch.sum(1 + var - mean.pow(2) - var.exp())\n loss_vgg = self.vgg_model(real_image, generated_image)\n loss_features = feature_matching_loss(\n real_pred_multiscale_features,\n fake_pred_multiscale_features,\n )\n\n # generator_loss = loss_gen + 0.1 * loss_kldiv + 0.1 * loss_vgg + 10 * loss_features\n generator_loss = loss_gen + 0.1 * loss_kldiv + 10 * loss_vgg + 10 * loss_features\n\n self.gen_optimizer.zero_grad(set_to_none=True)\n generator_loss.backward()\n self.gen_optimizer.step()\n\n tqdm_train_loader.set_description(\n f'LOSS, disc: {discriminator_loss:.2f}, '\n f'generator: {generator_loss:.2f}, '\n f'gan: {loss_gen:.2f}, '\n f'kl: {loss_kldiv:.2f}, '\n f'vgg: {loss_vgg:.2f}, '\n f'features: {loss_features:.2f}'\n )\n\n if batch_idx % self.save_every == self.save_every - 1:\n self.generator.eval()\n self.discriminator.eval()\n\n with torch.no_grad():\n fake = self.generator(self.fixed_noise, segmentation_label)\n img_grid_real = torchvision.utils.make_grid(real_image[:self.batch_size], normalize=True)\n img_grid_fake = torchvision.utils.make_grid(fake[:self.batch_size], normalize=True)\n self.writer_real.add_image(\"Real\", 
img_grid_real, global_step=self.step)\n self.writer_fake.add_image(\"Fake\", img_grid_fake, global_step=self.step)\n self.step += 1\n\n torch.save({\n 'epoch': epoch,\n 'generator_state_dict': self.generator.state_dict(),\n 'discriminator_state_dict': self.discriminator.state_dict(),\n 'generator_optimizer_state_dict': self.gen_optimizer.state_dict(),\n 'discriminator_optimizer_state_dict': self.disc_optimizer.state_dict(),\n }, f'checkpoints/checkpoint_{epoch}.pt')\n\n self.discriminator.train()\n self.generator.train()\n\n\nif __name__ == '__main__':\n trainer = Trainer(\n root_dir=r'C:\\staging\\gaugan_data\\base',\n num_classes=12,\n # checkpoint_path='checkpoints/checkpoint_19.pt'\n )\n trainer.train()\n", "\"\"\"Implementation of Vision Transformer (ViT) in pytorch.\n\nTODO\n - Remove `drop_last` for datasets.\n - Add accuracy logic print, loss data in tensorboard.\n - Replace with einops.\n - Format output better.\n\nReferences\n - https://keras.io/examples/vision/image_classification_with_vision_transformer/\n - https://github.com/lucidrains/vit-pytorch/blob/main/vit_pytorch/vit.py\n - https://github.com/jeonsworld/ViT-pytorch/blob/main/models/modeling.py\n - https://docs.microsoft.com/en-us/windows/ai/windows-ml/tutorials/pytorch-analysis-train-model\n\"\"\"\n\nimport os\nimport pathlib\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\n\n\nclass VisionTransformerDataset(Dataset):\n def __init__(\n self,\n root_dir: str,\n image_size: int,\n image_channels: int,\n ) -> None:\n super(VisionTransformerDataset, self).__init__()\n class_list = os.listdir(root_dir)\n\n self.transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.5 for _ in range(image_channels)],\n std=[0.5 for _ in range(image_channels)],\n )\n ])\n\n self.image_labels_files_list = list()\n for idx, class_name_folder in enumerate(class_list):\n class_path = os.path.join(root_dir, class_name_folder)\n files_list = os.listdir(class_path)\n for image_file in files_list:\n self.image_labels_files_list.append(\n (\n os.path.join(class_path, image_file),\n idx,\n )\n )\n\n self.image_files_list_len = len(self.image_labels_files_list)\n\n def __len__(self) -> int:\n return self.image_files_list_len\n\n def __getitem__(self, idx: int) -> Tuple[Image.Image, int]:\n image_path, class_label = self.image_labels_files_list[idx]\n image = Image.open(image_path)\n image = image.convert('RGB')\n image = self.transform(image)\n return image, class_label\n\n\nclass TransformerEncoderModel(nn.Module):\n def __init__(\n self,\n num_heads: int,\n embedding_dim: int,\n mlp_hidden_dim: int = 2048,\n mlp_dropout: float = 0.0,\n ) -> None:\n super(TransformerEncoderModel, self).__init__()\n self.multihead_attn = nn.MultiheadAttention(embed_dim=embedding_dim, num_heads=num_heads, dropout=0.1)\n # self.normalizer = nn.LayerNorm(embedding_dim)\n self.normalizer = nn.InstanceNorm1d(embedding_dim)\n self.mlp = nn.Sequential(\n nn.Linear(in_features=embedding_dim, out_features=mlp_hidden_dim),\n nn.GELU(),\n nn.Dropout(p=mlp_dropout),\n nn.Linear(in_features=mlp_hidden_dim, out_features=embedding_dim),\n nn.GELU(),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x_skip = x\n x = self.normalizer(x)\n 
attn_output, attn_output_weights = self.multihead_attn(x, x, x)\n x_skip = attn_output + x_skip\n x = self.normalizer(x_skip)\n x = self.mlp(x) + x_skip\n return x\n\n\nclass PatchPositionEncoder(nn.Module):\n def __init__(\n self,\n num_patches: int,\n embedding_dim: int,\n ) -> None:\n super(PatchPositionEncoder, self).__init__()\n self.patch_embeddings = nn.LazyLinear(out_features=embedding_dim)\n self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, embedding_dim))\n self.class_token = nn.Parameter(torch.randn(1, 1, embedding_dim))\n\n def forward(self, patch: torch.Tensor) -> torch.Tensor:\n class_token = self.class_token.expand(patch.shape[0], -1, -1)\n class_token_patch_embedding = torch.cat([class_token, self.patch_embeddings(patch)], dim=1)\n encoding = class_token_patch_embedding + self.position_embeddings\n return encoding\n\n\nclass ClassificationMLP(nn.Module):\n def __init__(\n self,\n num_classes: int,\n embedding_dim: int,\n patch_count: int,\n ) -> None:\n super(ClassificationMLP, self).__init__()\n\n self.class_mapping_layer = nn.Sequential(\n nn.Flatten(),\n nn.Linear(in_features=embedding_dim * (patch_count + 1), out_features=num_classes),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.class_mapping_layer(x)\n return x\n\n\nclass VisionTransformerModel(nn.Module):\n def __init__(\n self,\n num_heads: int,\n embedding_dim: int,\n transformer_layers_count: int,\n num_patches: int,\n patch_count: int,\n patch_size: int,\n ) -> None:\n super(VisionTransformerModel, self).__init__()\n self.patch_count = patch_count\n self.patch_size = patch_size\n\n patch_encoder = PatchPositionEncoder(\n num_patches=num_patches,\n embedding_dim=embedding_dim,\n )\n\n vision_transformer_modules = [patch_encoder]\n for _ in range(transformer_layers_count):\n vision_transformer_modules.append(\n TransformerEncoderModel(num_heads=num_heads, embedding_dim=embedding_dim),\n )\n\n self.vision_transformer_layers = nn.Sequential(*vision_transformer_modules)\n\n def forward(self, img: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n # (N C IMG_H IMG_W) -> (N C PATCH_COUNT_H PATCH_COUNT_W PATCH_SIZE_H PATCH_SIZE_W)\n # (64 3 72 72) -> (64 3 12 12 6 6) -> (64 12 12 6 6 3) -> (64 144 108)\n patches = img.unfold(2, self.patch_size, self.patch_size).unfold(3, self.patch_size, self.patch_size)\n patches = patches.permute(0, 2, 3, 4, 5, 1)\n patches = patches.reshape(img.shape[0], self.patch_count, -1) # TODO: torch.view\n\n x = self.vision_transformer_layers(patches)\n return x\n\n\nclass Trainer:\n def __init__(\n self,\n dataset_path: str,\n validation_dataset_path: str = None,\n checkpoint_path: str = None,\n device: str = None,\n train_split_percentage: float = 0.6,\n num_epochs: int = 1000,\n batch_size: int = 64,\n save_every: int = 20,\n num_workers: int = 4,\n image_size: int = 72,\n patch_size: int = 6,\n learning_rate: float = 0.001,\n patch_embedding_dim: int = 48,\n transformer_layers_count: int = 5,\n num_heads: int = 4,\n image_channels: int = 3,\n ) -> None:\n self.device = device if device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.start_epoch = 0\n self.num_epochs = num_epochs\n self.batch_size = batch_size\n self.save_every = save_every\n self.patch_size = patch_size\n\n patch_count = (image_size // patch_size) ** 2\n class_names = os.listdir(dataset_path)\n num_classes = len(class_names)\n\n def _dataset_split():\n _vit_dataset = VisionTransformerDataset(\n root_dir=dataset_path,\n 
image_channels=image_channels,\n image_size=image_size,\n )\n if validation_dataset_path is None:\n _train_size = int(train_split_percentage * len(_vit_dataset))\n _test_size = len(_vit_dataset) - _train_size\n _train_dataset, _validation_dataset = torch.utils.data.random_split(\n _vit_dataset,\n [_train_size, _test_size]\n )\n return _train_dataset, _validation_dataset\n else:\n _validation_dataset = VisionTransformerDataset(\n root_dir=validation_dataset_path,\n image_channels=image_channels,\n image_size=image_size,\n )\n return _vit_dataset, _validation_dataset\n\n train_dataset, validation_dataset = _dataset_split()\n\n self.train_loader = DataLoader(\n dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=True,\n drop_last=True, # Unable to train without this why?\n )\n self.validation_loader = DataLoader(\n dataset=validation_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n pin_memory=True,\n drop_last=True,\n )\n\n self.vit_model = VisionTransformerModel(\n num_heads=num_heads,\n embedding_dim=patch_embedding_dim,\n transformer_layers_count=transformer_layers_count,\n num_patches=patch_count,\n patch_size=patch_size,\n patch_count=patch_count,\n )\n self.mlp_head = ClassificationMLP(\n num_classes=num_classes,\n embedding_dim=patch_embedding_dim,\n patch_count=patch_count,\n )\n\n self.vit_model.to(self.device)\n self.mlp_head.to(self.device)\n\n self.optim = torch.optim.Adam(params=self.vit_model.parameters(), lr=learning_rate)\n self.loss_fn = nn.CrossEntropyLoss()\n\n self.writer_predictions = SummaryWriter(f\"logs/predictions\")\n self.step = 0\n\n self.nrows = 4\n self.ncols = 4\n\n self.start_epoch = 0\n pathlib.Path('checkpoints').mkdir(parents=True, exist_ok=True)\n if checkpoint_path is not None:\n self.load_checkpoint(checkpoint_path=checkpoint_path)\n\n def load_checkpoint(self, checkpoint_path: str) -> None:\n checkpoint = torch.load(checkpoint_path)\n self.vit_model.load_state_dict(checkpoint['vit_model_state_dict'])\n self.mlp_head.load_state_dict(checkpoint['mlp_head_state_dict'])\n self.optim.load_state_dict(checkpoint['optimizer_state_dict'])\n self.start_epoch = checkpoint['epoch']\n\n def train(self) -> None:\n best_accuracy = 0.0\n for epoch in range(self.start_epoch, self.num_epochs):\n # Training loop.\n running_train_loss = 0.0\n training_correct_preds = 0\n training_total_data = 0\n with tqdm(self.train_loader) as tqdm_train_loader:\n tqdm_train_loader.set_description(f'TRAIN EPOCH: {epoch} ')\n for idx, (img, labels) in enumerate(tqdm_train_loader):\n img = img.to(self.device)\n labels = labels.to(self.device)\n\n vit_output = self.vit_model(img)\n predicted_labels = self.mlp_head(vit_output)\n\n self.optim.zero_grad()\n loss = self.loss_fn(predicted_labels, labels)\n loss.backward()\n self.optim.step()\n\n running_train_loss += loss.item()\n training_total_data += img.shape[0]\n training_correct_preds += (torch.argmax(predicted_labels, dim=1) == labels).sum().item()\n\n # Validation loop.\n running_validation_loss = 0.0\n validation_correct_preds = 0\n validation_total_data = 0\n with torch.no_grad():\n self.vit_model.eval()\n self.mlp_head.eval()\n with tqdm(self.validation_loader) as tqdm_validation_loader:\n tqdm_validation_loader.set_description(f'VALID EPOCH: {epoch} ')\n for idx, (img, labels) in enumerate(tqdm_validation_loader):\n img = img.to(self.device)\n labels = labels.to(self.device)\n\n vit_output = self.vit_model(img)\n predicted_labels = 
self.mlp_head(vit_output)\n\n validation_loss = self.loss_fn(predicted_labels, labels)\n\n running_validation_loss += validation_loss.item()\n validation_total_data += img.shape[0]\n validation_correct_preds += (torch.argmax(predicted_labels, dim=1) == labels).sum().item()\n\n if idx % self.save_every == self.save_every - 1:\n fig, ax = plt.subplots(nrows=self.nrows, ncols=self.ncols)\n plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n k = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n ax[i, j].imshow(\n (img[k].permute(1, 2, 0) * 127.5 + 128).clamp(0, 255).to(\n torch.uint8).detach().cpu().numpy()\n )\n ax[i, j].text(\n 0.0, -2.0,\n f'GT:{labels[k]}, Prd:{torch.argmax(predicted_labels[k])}',\n fontsize=12\n )\n k += 1\n\n self.writer_predictions.add_figure(\n 'Validation Real vs Pred', figure=fig, global_step=self.step\n )\n self.step += 1\n\n self.vit_model.train()\n self.mlp_head.train()\n\n train_loss_value = running_train_loss / len(self.train_loader)\n training_accuracy = (100.0 * training_correct_preds / training_total_data)\n validation_loss_value = running_validation_loss / len(self.validation_loader)\n validation_accuracy = (100.0 * validation_correct_preds / validation_total_data)\n\n print(\n f\"TRAINING LOSS: {train_loss_value:.3f}, \"\n f\"TRAINING ACCURACY: {training_accuracy:.3f}, \"\n f\"VALIDATION LOSS: {validation_loss_value:.3f}, \"\n f\"VALIDATION ACCURACY: {validation_accuracy:.3f}\"\n )\n\n if validation_accuracy > best_accuracy:\n best_accuracy = validation_accuracy\n torch.save({\n 'epoch': epoch,\n 'vit_model_state_dict': self.vit_model.state_dict(),\n 'mlp_head_state_dict': self.mlp_head.state_dict(),\n 'optimizer_state_dict': self.optim.state_dict(),\n }, f'checkpoints/checkpoint_{epoch}.pt')\n\n\nif __name__ == '__main__':\n trainer = Trainer(\n dataset_path=r'C:\\portable\\staging\\classification_data',\n # checkpoint_path='checkpoints/checkpoint_4.pt',\n )\n trainer.train()\n" ]
[ [ "torch.mean", "torch.autograd.set_detect_anomaly", "torch.cat", "torch.load", "torch.nn.functional.l1_loss", "torch.utils.data.DataLoader", "torch.autograd.profiler.emit_nvtx", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.device", "torch.nn.Dropout", "torch.randn", "torch.normal", "torch.optim.Adam", "torch.nn.utils.spectral_norm.spectral_norm", "torch.nn.Conv2d", "torch.nn.init.xavier_normal_", "torch.zeros_like", "torch.exp", "torch.nn.Linear", "torch.nn.InstanceNorm2d", "torch.nn.LeakyReLU", "torch.nn.Flatten", "torch.nn.Tanh", "torch.nn.Upsample", "torch.nn.ReLU", "torch.autograd.profiler.profile" ], [ "torch.load", "torch.zeros", "torch.nn.LazyLinear", "torch.utils.data.DataLoader", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.nn.MultiheadAttention", "torch.nn.InstanceNorm1d", "torch.randn", "matplotlib.pyplot.gcf", "torch.nn.Sequential", "torch.nn.Linear", "torch.utils.data.random_split", "torch.nn.GELU", "torch.nn.Flatten", "matplotlib.pyplot.subplots", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vimalromeo/pandas
[ "9444dce96954c546333d5aecc92a06c3bfd19aa5", "7c14e4f14aff216be558bf5d4d2d00b4838c2360", "9444dce96954c546333d5aecc92a06c3bfd19aa5", "9444dce96954c546333d5aecc92a06c3bfd19aa5", "7c14e4f14aff216be558bf5d4d2d00b4838c2360" ]
[ "pandas/tests/io/parser/dialect.py", "pandas/tests/io/json/test_compression.py", "pandas/tests/indexes/timedeltas/test_indexing.py", "pandas/tests/sparse/frame/test_indexing.py", "pandas/tests/util/test_testing.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nTests that dialects are properly handled during parsing\nfor all of the parsers defined in parsers.py\n\"\"\"\n\nimport csv\n\nfrom pandas import DataFrame\nfrom pandas.compat import StringIO\nfrom pandas.errors import ParserWarning\n\nimport pandas.util.testing as tm\n\n\nclass DialectTests(object):\n\n def test_dialect(self):\n data = \"\"\"\\\nlabel1,label2,label3\nindex1,\"a,c,e\nindex2,b,d,f\n\"\"\"\n\n dia = csv.excel()\n dia.quoting = csv.QUOTE_NONE\n with tm.assert_produces_warning(ParserWarning):\n df = self.read_csv(StringIO(data), dialect=dia)\n\n data = '''\\\nlabel1,label2,label3\nindex1,a,c,e\nindex2,b,d,f\n'''\n exp = self.read_csv(StringIO(data))\n exp.replace('a', '\"a', inplace=True)\n tm.assert_frame_equal(df, exp)\n\n def test_dialect_str(self):\n data = \"\"\"\\\nfruit:vegetable\napple:brocolli\npear:tomato\n\"\"\"\n exp = DataFrame({\n 'fruit': ['apple', 'pear'],\n 'vegetable': ['brocolli', 'tomato']\n })\n csv.register_dialect('mydialect', delimiter=':')\n with tm.assert_produces_warning(ParserWarning):\n df = self.read_csv(StringIO(data), dialect='mydialect')\n\n tm.assert_frame_equal(df, exp)\n csv.unregister_dialect('mydialect')\n\n def test_invalid_dialect(self):\n class InvalidDialect(object):\n pass\n\n data = 'a\\n1'\n msg = 'Invalid dialect'\n\n with tm.assert_raises_regex(ValueError, msg):\n self.read_csv(StringIO(data), dialect=InvalidDialect)\n\n def test_dialect_conflict(self):\n data = 'a,b\\n1,2'\n dialect = 'excel'\n exp = DataFrame({'a': [1], 'b': [2]})\n\n with tm.assert_produces_warning(None):\n df = self.read_csv(StringIO(data), delimiter=',', dialect=dialect)\n tm.assert_frame_equal(df, exp)\n\n with tm.assert_produces_warning(ParserWarning):\n df = self.read_csv(StringIO(data), delimiter='.', dialect=dialect)\n tm.assert_frame_equal(df, exp)\n", "import pytest\n\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom pandas.util.testing import assert_frame_equal, assert_raises_regex\n\n\ndef test_compression_roundtrip(compression_no_zip):\n df = pd.DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with tm.ensure_clean() as path:\n df.to_json(path, compression=compression_no_zip)\n assert_frame_equal(df, pd.read_json(path,\n compression=compression_no_zip))\n\n # explicitly ensure file was compressed.\n with tm.decompress_file(path, compression_no_zip) as fh:\n result = fh.read().decode('utf8')\n assert_frame_equal(df, pd.read_json(result))\n\n\ndef test_compress_zip_value_error():\n df = pd.DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with tm.ensure_clean() as path:\n import zipfile\n pytest.raises(zipfile.BadZipfile, df.to_json, path, compression=\"zip\")\n\n\ndef test_read_zipped_json():\n uncompressed_path = tm.get_data_path(\"tsframe_v012.json\")\n uncompressed_df = pd.read_json(uncompressed_path)\n\n compressed_path = tm.get_data_path(\"tsframe_v012.json.zip\")\n compressed_df = pd.read_json(compressed_path, compression='zip')\n\n assert_frame_equal(uncompressed_df, compressed_df)\n\n\ndef test_with_s3_url(compression_no_zip):\n boto3 = pytest.importorskip('boto3')\n pytest.importorskip('s3fs')\n moto = pytest.importorskip('moto')\n\n df = pd.read_json('{\"a\": [1, 2, 3], \"b\": [4, 5, 6]}')\n with moto.mock_s3():\n conn = boto3.resource(\"s3\", region_name=\"us-east-1\")\n bucket = conn.create_bucket(Bucket=\"pandas-test\")\n\n with 
tm.ensure_clean() as path:\n df.to_json(path, compression=compression_no_zip)\n with open(path, 'rb') as f:\n bucket.put_object(Key='test-1', Body=f)\n\n roundtripped_df = pd.read_json('s3://pandas-test/test-1',\n compression=compression_no_zip)\n assert_frame_equal(df, roundtripped_df)\n\n\ndef test_lines_with_compression(compression_no_zip):\n\n with tm.ensure_clean() as path:\n df = pd.read_json('{\"a\": [1, 2, 3], \"b\": [4, 5, 6]}')\n df.to_json(path, orient='records', lines=True,\n compression=compression_no_zip)\n roundtripped_df = pd.read_json(path, lines=True,\n compression=compression_no_zip)\n assert_frame_equal(df, roundtripped_df)\n\n\ndef test_chunksize_with_compression(compression_no_zip):\n\n with tm.ensure_clean() as path:\n df = pd.read_json('{\"a\": [\"foo\", \"bar\", \"baz\"], \"b\": [4, 5, 6]}')\n df.to_json(path, orient='records', lines=True,\n compression=compression_no_zip)\n\n res = pd.read_json(path, lines=True, chunksize=1,\n compression=compression_no_zip)\n roundtripped_df = pd.concat(res)\n assert_frame_equal(df, roundtripped_df)\n\n\ndef test_write_unsupported_compression_type():\n df = pd.read_json('{\"a\": [1, 2, 3], \"b\": [4, 5, 6]}')\n with tm.ensure_clean() as path:\n msg = \"Unrecognized compression type: unsupported\"\n assert_raises_regex(ValueError, msg, df.to_json,\n path, compression=\"unsupported\")\n\n\ndef test_read_unsupported_compression_type():\n with tm.ensure_clean() as path:\n msg = \"Unrecognized compression type: unsupported\"\n assert_raises_regex(ValueError, msg, pd.read_json,\n path, compression=\"unsupported\")\n", "from datetime import timedelta\n\nimport pytest\nimport numpy as np\n\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta\n\n\nclass TestGetItem(object):\n def test_getitem(self):\n idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx')\n\n for idx in [idx1]:\n result = idx[0]\n assert result == Timedelta('1 day')\n\n result = idx[0:5]\n expected = timedelta_range('1 day', '5 day', freq='D',\n name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[0:10:2]\n expected = timedelta_range('1 day', '9 day', freq='2D',\n name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[-20:-5:3]\n expected = timedelta_range('12 day', '24 day', freq='3D',\n name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[4::-1]\n expected = TimedeltaIndex(['5 day', '4 day', '3 day',\n '2 day', '1 day'],\n freq='-1D', name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n\nclass TestWhere(object):\n # placeholder for symmetry with DatetimeIndex and PeriodIndex tests\n pass\n\n\nclass TestTake(object):\n def test_take(self):\n # GH 10295\n idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx')\n\n for idx in [idx1]:\n result = idx.take([0])\n assert result == Timedelta('1 day')\n\n result = idx.take([-1])\n assert result == Timedelta('31 day')\n\n result = idx.take([0, 1, 2])\n expected = timedelta_range('1 day', '3 day', freq='D',\n name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([0, 2, 4])\n expected = timedelta_range('1 day', '5 day', freq='2D',\n name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([7, 4, 1])\n expected = 
timedelta_range('8 day', '2 day', freq='-3D',\n name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([3, 2, 5])\n expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n result = idx.take([-3, 2, 5])\n expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n def test_take_invalid_kwargs(self):\n idx = timedelta_range('1 day', '31 day', freq='D', name='idx')\n indices = [1, 6, 5, 9, 10, 13, 15, 3]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assert_raises_regex(TypeError, msg, idx.take,\n indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, mode='clip')\n\n # TODO: This method came from test_timedelta; de-dup with version above\n def test_take2(self):\n tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']\n idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')\n expected = TimedeltaIndex(tds, freq=None, name='idx')\n\n taken1 = idx.take([2, 4, 10])\n taken2 = idx[[2, 4, 10]]\n\n for taken in [taken1, taken2]:\n tm.assert_index_equal(taken, expected)\n assert isinstance(taken, TimedeltaIndex)\n assert taken.freq is None\n assert taken.name == expected.name\n\n def test_take_fill_value(self):\n # GH 12631\n idx = TimedeltaIndex(['1 days', '2 days', '3 days'],\n name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = TimedeltaIndex(['2 days', '1 days', '3 days'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = TimedeltaIndex(['2 days', '1 days', 'NaT'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = TimedeltaIndex(['2 days', '1 days', '3 days'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n\nclass TestTimedeltaIndex(object):\n\n def test_insert(self):\n\n idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')\n\n result = idx.insert(2, timedelta(days=5))\n exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')\n tm.assert_index_equal(result, exp)\n\n # insertion of non-datetime should coerce to object index\n result = idx.insert(1, 'inserted')\n expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),\n Timedelta('2day')], name='idx')\n assert not isinstance(result, TimedeltaIndex)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n\n idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')\n\n # preserve freq\n expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',\n '1day 00:00:03'],\n name='idx', freq='s')\n expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',\n '1day 00:00:03', '1day 00:00:04'],\n name='idx', freq='s')\n\n # 
reset freq to None\n expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',\n '1day 00:00:02', '1day 00:00:03'],\n name='idx', freq=None)\n expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',\n '1day 00:00:03', '1day 00:00:05'],\n name='idx', freq=None)\n\n cases = [(0, Timedelta('1day'), expected_0),\n (-3, Timedelta('1day'), expected_0),\n (3, Timedelta('1day 00:00:04'), expected_3),\n (1, Timedelta('1day 00:00:01'), expected_1_nofreq),\n (3, Timedelta('1day 00:00:05'), expected_3_nofreq)]\n\n for n, d, expected in cases:\n result = idx.insert(n, d)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n # GH 18295 (test missing)\n expected = TimedeltaIndex(['1day', pd.NaT, '2day', '3day'])\n for na in (np.nan, pd.NaT, None):\n result = timedelta_range('1day', '3day').insert(1, na)\n tm.assert_index_equal(result, expected)\n\n def test_delete(self):\n idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')\n\n # prserve freq\n expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',\n name='idx')\n expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',\n name='idx')\n\n # reset freq to None\n expected_1 = TimedeltaIndex(\n ['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')\n\n cases = {0: expected_0,\n -5: expected_0,\n -1: expected_4,\n 4: expected_4,\n 1: expected_1}\n for n, expected in compat.iteritems(cases):\n result = idx.delete(n)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n with pytest.raises((IndexError, ValueError)):\n # either depeidnig on numpy version\n result = idx.delete(5)\n\n def test_delete_slice(self):\n idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')\n\n # prserve freq\n expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',\n name='idx')\n expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',\n name='idx')\n\n # reset freq to None\n expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',\n '7 d', '8 d', '9 d', '10d'],\n freq=None, name='idx')\n\n cases = {(0, 1, 2): expected_0_2,\n (7, 8, 9): expected_7_9,\n (3, 4, 5): expected_3_5}\n for n, expected in compat.iteritems(cases):\n result = idx.delete(n)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n result = idx.delete(slice(n[0], n[-1] + 1))\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n def test_get_loc(self):\n idx = pd.to_timedelta(['0 days', '1 days', '2 days'])\n\n for method in [None, 'pad', 'backfill', 'nearest']:\n assert idx.get_loc(idx[1], method) == 1\n assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1\n assert idx.get_loc(str(idx[1]), method) == 1\n\n assert idx.get_loc(idx[1], 'pad',\n tolerance=Timedelta(0)) == 1\n assert idx.get_loc(idx[1], 'pad',\n tolerance=np.timedelta64(0, 's')) == 1\n assert idx.get_loc(idx[1], 'pad',\n tolerance=timedelta(0)) == 1\n\n with tm.assert_raises_regex(ValueError,\n 'unit abbreviation w/o a number'):\n idx.get_loc(idx[1], method='nearest', tolerance='foo')\n\n with pytest.raises(\n ValueError,\n match='tolerance size must match'):\n idx.get_loc(idx[1], method='nearest',\n tolerance=[Timedelta(0).to_timedelta64(),\n Timedelta(0).to_timedelta64()])\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n assert idx.get_loc('1 day 1 hour', 
method) == loc\n\n # GH 16909\n assert idx.get_loc(idx[1].to_timedelta64()) == 1\n\n # GH 16896\n assert idx.get_loc('0 days') == 0\n\n def test_get_loc_nat(self):\n tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])\n\n assert tidx.get_loc(pd.NaT) == 1\n assert tidx.get_loc(None) == 1\n assert tidx.get_loc(float('nan')) == 1\n assert tidx.get_loc(np.nan) == 1\n\n def test_get_indexer(self):\n idx = pd.to_timedelta(['0 days', '1 days', '2 days'])\n tm.assert_numpy_array_equal(idx.get_indexer(idx),\n np.array([0, 1, 2], dtype=np.intp))\n\n target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),\n np.array([-1, 0, 1], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),\n np.array([0, 1, 2], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),\n np.array([0, 1, 1], dtype=np.intp))\n\n res = idx.get_indexer(target, 'nearest',\n tolerance=Timedelta('1 hour'))\n tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))\n", "import pytest\nimport numpy as np\nfrom pandas import SparseDataFrame, DataFrame\nfrom pandas.util import testing as tm\n\n\npytestmark = pytest.mark.skip(\"Wrong SparseBlock initialization (GH 17386)\")\n\n\[email protected]('data', [\n [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]],\n [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [np.nan, np.nan]],\n [\n [1.0, 1.0 + 1.0j],\n [2.0 + 2.0j, 2.0],\n [3.0, 3.0 + 3.0j],\n [4.0 + 4.0j, 4.0],\n [np.nan, np.nan]\n ]\n])\[email protected](reason='Wrong SparseBlock initialization '\n '(GH 17386)')\ndef test_where_with_numeric_data(data):\n # GH 17386\n lower_bound = 1.5\n\n sparse = SparseDataFrame(data)\n result = sparse.where(sparse > lower_bound)\n\n dense = DataFrame(data)\n dense_expected = dense.where(dense > lower_bound)\n sparse_expected = SparseDataFrame(dense_expected)\n\n tm.assert_frame_equal(result, dense_expected)\n tm.assert_sp_frame_equal(result, sparse_expected)\n\n\[email protected]('data', [\n [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]],\n [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [np.nan, np.nan]],\n [\n [1.0, 1.0 + 1.0j],\n [2.0 + 2.0j, 2.0],\n [3.0, 3.0 + 3.0j],\n [4.0 + 4.0j, 4.0],\n [np.nan, np.nan]\n ]\n])\[email protected]('other', [\n True,\n -100,\n 0.1,\n 100.0 + 100.0j\n])\[email protected](reason='Wrong SparseBlock initialization '\n '(GH 17386)')\ndef test_where_with_numeric_data_and_other(data, other):\n # GH 17386\n lower_bound = 1.5\n\n sparse = SparseDataFrame(data)\n result = sparse.where(sparse > lower_bound, other)\n\n dense = DataFrame(data)\n dense_expected = dense.where(dense > lower_bound, other)\n sparse_expected = SparseDataFrame(dense_expected,\n default_fill_value=other)\n\n tm.assert_frame_equal(result, dense_expected)\n tm.assert_sp_frame_equal(result, sparse_expected)\n\n\[email protected](reason='Wrong SparseBlock initialization '\n '(GH 17386)')\ndef test_where_with_bool_data():\n # GH 17386\n data = [[False, False], [True, True], [False, False]]\n cond = True\n\n sparse = SparseDataFrame(data)\n result = sparse.where(sparse == cond)\n\n dense = DataFrame(data)\n dense_expected = dense.where(dense == cond)\n sparse_expected = SparseDataFrame(dense_expected)\n\n tm.assert_frame_equal(result, dense_expected)\n tm.assert_sp_frame_equal(result, sparse_expected)\n\n\[email protected]('other', [\n True,\n 0,\n 0.1,\n 100.0 + 100.0j\n])\[email protected](reason='Wrong SparseBlock initialization '\n '(GH 17386)')\ndef 
test_where_with_bool_data_and_other(other):\n # GH 17386\n data = [[False, False], [True, True], [False, False]]\n cond = True\n\n sparse = SparseDataFrame(data)\n result = sparse.where(sparse == cond, other)\n\n dense = DataFrame(data)\n dense_expected = dense.where(dense == cond, other)\n sparse_expected = SparseDataFrame(dense_expected,\n default_fill_value=other)\n\n tm.assert_frame_equal(result, dense_expected)\n tm.assert_sp_frame_equal(result, sparse_expected)\n", "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport pytest\nimport numpy as np\nimport sys\nfrom pandas import Series, DataFrame\nimport pandas.util.testing as tm\nimport pandas.util._test_decorators as td\nfrom pandas.util.testing import (assert_almost_equal, raise_with_traceback,\n assert_index_equal, assert_series_equal,\n assert_frame_equal, assert_numpy_array_equal,\n RNGContext)\n\n\nclass TestAssertAlmostEqual(object):\n\n def _assert_almost_equal_both(self, a, b, **kwargs):\n assert_almost_equal(a, b, **kwargs)\n assert_almost_equal(b, a, **kwargs)\n\n def _assert_not_almost_equal_both(self, a, b, **kwargs):\n pytest.raises(AssertionError, assert_almost_equal, a, b, **kwargs)\n pytest.raises(AssertionError, assert_almost_equal, b, a, **kwargs)\n\n def test_assert_almost_equal_numbers(self):\n self._assert_almost_equal_both(1.1, 1.1)\n self._assert_almost_equal_both(1.1, 1.100001)\n self._assert_almost_equal_both(np.int16(1), 1.000001)\n self._assert_almost_equal_both(np.float64(1.1), 1.1)\n self._assert_almost_equal_both(np.uint32(5), 5)\n\n self._assert_not_almost_equal_both(1.1, 1)\n self._assert_not_almost_equal_both(1.1, True)\n self._assert_not_almost_equal_both(1, 2)\n self._assert_not_almost_equal_both(1.0001, np.int16(1))\n\n def test_assert_almost_equal_numbers_with_zeros(self):\n self._assert_almost_equal_both(0, 0)\n self._assert_almost_equal_both(0, 0.0)\n self._assert_almost_equal_both(0, np.float64(0))\n self._assert_almost_equal_both(0.000001, 0)\n\n self._assert_not_almost_equal_both(0.001, 0)\n self._assert_not_almost_equal_both(1, 0)\n\n def test_assert_almost_equal_numbers_with_mixed(self):\n self._assert_not_almost_equal_both(1, 'abc')\n self._assert_not_almost_equal_both(1, [1, ])\n self._assert_not_almost_equal_both(1, object())\n\n @pytest.mark.parametrize(\n \"left_dtype\",\n ['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object'])\n @pytest.mark.parametrize(\n \"right_dtype\",\n ['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object'])\n def test_assert_almost_equal_edge_case_ndarrays(\n self, left_dtype, right_dtype):\n\n # empty compare\n self._assert_almost_equal_both(np.array([], dtype=left_dtype),\n np.array([], dtype=right_dtype),\n check_dtype=False)\n\n def test_assert_almost_equal_dicts(self):\n self._assert_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 2})\n\n self._assert_not_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 3})\n self._assert_not_almost_equal_both({'a': 1, 'b': 2},\n {'a': 1, 'b': 2, 'c': 3})\n self._assert_not_almost_equal_both({'a': 1}, 1)\n self._assert_not_almost_equal_both({'a': 1}, 'abc')\n self._assert_not_almost_equal_both({'a': 1}, [1, ])\n\n def test_assert_almost_equal_dict_like_object(self):\n class DictLikeObj(object):\n\n def keys(self):\n return ('a', )\n\n def __getitem__(self, item):\n if item == 'a':\n return 1\n\n self._assert_almost_equal_both({'a': 1}, DictLikeObj(),\n check_dtype=False)\n\n self._assert_not_almost_equal_both({'a': 2}, DictLikeObj(),\n check_dtype=False)\n\n def test_assert_almost_equal_strings(self):\n 
self._assert_almost_equal_both('abc', 'abc')\n\n self._assert_not_almost_equal_both('abc', 'abcd')\n self._assert_not_almost_equal_both('abc', 'abd')\n self._assert_not_almost_equal_both('abc', 1)\n self._assert_not_almost_equal_both('abc', [1, ])\n\n def test_assert_almost_equal_iterables(self):\n self._assert_almost_equal_both([1, 2, 3], [1, 2, 3])\n self._assert_almost_equal_both(np.array([1, 2, 3]),\n np.array([1, 2, 3]))\n\n # class / dtype are different\n self._assert_not_almost_equal_both(np.array([1, 2, 3]), [1, 2, 3])\n self._assert_not_almost_equal_both(np.array([1, 2, 3]),\n np.array([1., 2., 3.]))\n\n # Can't compare generators\n self._assert_not_almost_equal_both(iter([1, 2, 3]), [1, 2, 3])\n\n self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 4])\n self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 3, 4])\n self._assert_not_almost_equal_both([1, 2, 3], 1)\n\n def test_assert_almost_equal_null(self):\n self._assert_almost_equal_both(None, None)\n\n self._assert_not_almost_equal_both(None, np.NaN)\n self._assert_not_almost_equal_both(None, 0)\n self._assert_not_almost_equal_both(np.NaN, 0)\n\n def test_assert_almost_equal_inf(self):\n self._assert_almost_equal_both(np.inf, np.inf)\n self._assert_almost_equal_both(np.inf, float(\"inf\"))\n self._assert_not_almost_equal_both(np.inf, 0)\n self._assert_almost_equal_both(np.array([np.inf, np.nan, -np.inf]),\n np.array([np.inf, np.nan, -np.inf]))\n self._assert_almost_equal_both(np.array([np.inf, None, -np.inf],\n dtype=np.object_),\n np.array([np.inf, np.nan, -np.inf],\n dtype=np.object_))\n\n def test_assert_almost_equal_pandas(self):\n tm.assert_almost_equal(pd.Index([1., 1.1]),\n pd.Index([1., 1.100001]))\n tm.assert_almost_equal(pd.Series([1., 1.1]),\n pd.Series([1., 1.100001]))\n tm.assert_almost_equal(pd.DataFrame({'a': [1., 1.1]}),\n pd.DataFrame({'a': [1., 1.100001]}))\n\n def test_assert_almost_equal_object(self):\n a = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')]\n b = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')]\n self._assert_almost_equal_both(a, b)\n\n\nclass TestUtilTesting(object):\n\n def test_raise_with_traceback(self):\n with tm.assert_raises_regex(LookupError, \"error_text\"):\n try:\n raise ValueError(\"THIS IS AN ERROR\")\n except ValueError as e:\n e = LookupError(\"error_text\")\n raise_with_traceback(e)\n with tm.assert_raises_regex(LookupError, \"error_text\"):\n try:\n raise ValueError(\"This is another error\")\n except ValueError:\n e = LookupError(\"error_text\")\n _, _, traceback = sys.exc_info()\n raise_with_traceback(e, traceback)\n\n\nclass TestAssertNumpyArrayEqual(object):\n\n @td.skip_if_windows\n def test_numpy_array_equal_message(self):\n\n expected = \"\"\"numpy array are different\n\nnumpy array shapes are different\n\\\\[left\\\\]: \\\\(2,\\\\)\n\\\\[right\\\\]: \\\\(3,\\\\)\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]))\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]))\n\n # scalar comparison\n expected = \"\"\"Expected type \"\"\"\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(1, 2)\n expected = \"\"\"expected 2\\\\.00000 but got 1\\\\.00000, with decimal 5\"\"\"\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(1, 2)\n\n # array / scalar array comparison\n expected = \"\"\"numpy array are different\n\nnumpy array classes are 
different\n\\\\[left\\\\]: ndarray\n\\\\[right\\\\]: int\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n # numpy_array_equal only accepts np.ndarray\n assert_numpy_array_equal(np.array([1]), 1)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(np.array([1]), 1)\n\n # scalar / array comparison\n expected = \"\"\"numpy array are different\n\nnumpy array classes are different\n\\\\[left\\\\]: int\n\\\\[right\\\\]: ndarray\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(1, np.array([1]))\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(1, np.array([1]))\n\n expected = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(66\\\\.66667 %\\\\)\n\\\\[left\\\\]: \\\\[nan, 2\\\\.0, 3\\\\.0\\\\]\n\\\\[right\\\\]: \\\\[1\\\\.0, nan, 3\\\\.0\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(np.array([np.nan, 2, 3]),\n np.array([1, np.nan, 3]))\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(np.array([np.nan, 2, 3]),\n np.array([1, np.nan, 3]))\n\n expected = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[1, 2\\\\]\n\\\\[right\\\\]: \\\\[1, 3\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(np.array([1, 2]), np.array([1, 3]))\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(np.array([1, 2]), np.array([1, 3]))\n\n expected = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[1\\\\.1, 2\\\\.000001\\\\]\n\\\\[right\\\\]: \\\\[1\\\\.1, 2.0\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(\n np.array([1.1, 2.000001]), np.array([1.1, 2.0]))\n\n # must pass\n assert_almost_equal(np.array([1.1, 2.000001]), np.array([1.1, 2.0]))\n\n expected = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(16\\\\.66667 %\\\\)\n\\\\[left\\\\]: \\\\[\\\\[1, 2\\\\], \\\\[3, 4\\\\], \\\\[5, 6\\\\]\\\\]\n\\\\[right\\\\]: \\\\[\\\\[1, 3\\\\], \\\\[3, 4\\\\], \\\\[5, 6\\\\]\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(np.array([[1, 2], [3, 4], [5, 6]]),\n np.array([[1, 3], [3, 4], [5, 6]]))\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(np.array([[1, 2], [3, 4], [5, 6]]),\n np.array([[1, 3], [3, 4], [5, 6]]))\n\n expected = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(25\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[\\\\[1, 2\\\\], \\\\[3, 4\\\\]\\\\]\n\\\\[right\\\\]: \\\\[\\\\[1, 3\\\\], \\\\[3, 4\\\\]\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(np.array([[1, 2], [3, 4]]),\n np.array([[1, 3], [3, 4]]))\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(np.array([[1, 2], [3, 4]]),\n np.array([[1, 3], [3, 4]]))\n\n # allow to overwrite message\n expected = \"\"\"Index are different\n\nIndex shapes are different\n\\\\[left\\\\]: \\\\(2,\\\\)\n\\\\[right\\\\]: \\\\(3,\\\\)\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]),\n obj='Index')\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]),\n obj='Index')\n\n @td.skip_if_windows\n 
def test_numpy_array_equal_object_message(self):\n\n a = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')])\n b = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')])\n\n expected = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\\\]\n\\\\[right\\\\]: \\\\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(a, b)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal(a, b)\n\n def test_numpy_array_equal_copy_flag(self):\n a = np.array([1, 2, 3])\n b = a.copy()\n c = a.view()\n expected = r'array\\(\\[1, 2, 3\\]\\) is not array\\(\\[1, 2, 3\\]\\)'\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(a, b, check_same='same')\n expected = r'array\\(\\[1, 2, 3\\]\\) is array\\(\\[1, 2, 3\\]\\)'\n with tm.assert_raises_regex(AssertionError, expected):\n assert_numpy_array_equal(a, c, check_same='copy')\n\n def test_assert_almost_equal_iterable_message(self):\n\n expected = \"\"\"Iterable are different\n\nIterable length are different\n\\\\[left\\\\]: 2\n\\\\[right\\\\]: 3\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal([1, 2], [3, 4, 5])\n\n expected = \"\"\"Iterable are different\n\nIterable values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[1, 2\\\\]\n\\\\[right\\\\]: \\\\[1, 3\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_almost_equal([1, 2], [1, 3])\n\n\nclass TestAssertIndexEqual(object):\n\n def test_index_equal_message(self):\n\n expected = \"\"\"Index are different\n\nIndex levels are different\n\\\\[left\\\\]: 1, Int64Index\\\\(\\\\[1, 2, 3\\\\], dtype='int64'\\\\)\n\\\\[right\\\\]: 2, MultiIndex\\\\(levels=\\\\[\\\\[u?'A', u?'B'\\\\], \\\\[1, 2, 3, 4\\\\]\\\\],\n labels=\\\\[\\\\[0, 0, 1, 1\\\\], \\\\[0, 1, 2, 3\\\\]\\\\]\\\\)\"\"\"\n\n idx1 = pd.Index([1, 2, 3])\n idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),\n ('B', 3), ('B', 4)])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, exact=False)\n\n expected = \"\"\"MultiIndex level \\\\[1\\\\] are different\n\nMultiIndex level \\\\[1\\\\] values are different \\\\(25\\\\.0 %\\\\)\n\\\\[left\\\\]: Int64Index\\\\(\\\\[2, 2, 3, 4\\\\], dtype='int64'\\\\)\n\\\\[right\\\\]: Int64Index\\\\(\\\\[1, 2, 3, 4\\\\], dtype='int64'\\\\)\"\"\"\n\n idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2),\n ('B', 3), ('B', 4)])\n idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),\n ('B', 3), ('B', 4)])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, check_exact=False)\n\n expected = \"\"\"Index are different\n\nIndex length are different\n\\\\[left\\\\]: 3, Int64Index\\\\(\\\\[1, 2, 3\\\\], dtype='int64'\\\\)\n\\\\[right\\\\]: 4, Int64Index\\\\(\\\\[1, 2, 3, 4\\\\], dtype='int64'\\\\)\"\"\"\n\n idx1 = pd.Index([1, 2, 3])\n idx2 = pd.Index([1, 2, 3, 4])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, check_exact=False)\n\n expected = \"\"\"Index are different\n\nIndex classes are different\n\\\\[left\\\\]: Int64Index\\\\(\\\\[1, 2, 3\\\\], dtype='int64'\\\\)\n\\\\[right\\\\]: 
Float64Index\\\\(\\\\[1\\\\.0, 2\\\\.0, 3\\\\.0\\\\], dtype='float64'\\\\)\"\"\"\n\n idx1 = pd.Index([1, 2, 3])\n idx2 = pd.Index([1, 2, 3.0])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, exact=True)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, exact=True, check_exact=False)\n\n expected = \"\"\"Index are different\n\nIndex values are different \\\\(33\\\\.33333 %\\\\)\n\\\\[left\\\\]: Float64Index\\\\(\\\\[1.0, 2.0, 3.0], dtype='float64'\\\\)\n\\\\[right\\\\]: Float64Index\\\\(\\\\[1.0, 2.0, 3.0000000001\\\\], dtype='float64'\\\\)\"\"\"\n\n idx1 = pd.Index([1, 2, 3.])\n idx2 = pd.Index([1, 2, 3.0000000001])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n\n # must success\n assert_index_equal(idx1, idx2, check_exact=False)\n\n expected = \"\"\"Index are different\n\nIndex values are different \\\\(33\\\\.33333 %\\\\)\n\\\\[left\\\\]: Float64Index\\\\(\\\\[1.0, 2.0, 3.0], dtype='float64'\\\\)\n\\\\[right\\\\]: Float64Index\\\\(\\\\[1.0, 2.0, 3.0001\\\\], dtype='float64'\\\\)\"\"\"\n\n idx1 = pd.Index([1, 2, 3.])\n idx2 = pd.Index([1, 2, 3.0001])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, check_exact=False)\n # must success\n assert_index_equal(idx1, idx2, check_exact=False,\n check_less_precise=True)\n\n expected = \"\"\"Index are different\n\nIndex values are different \\\\(33\\\\.33333 %\\\\)\n\\\\[left\\\\]: Int64Index\\\\(\\\\[1, 2, 3\\\\], dtype='int64'\\\\)\n\\\\[right\\\\]: Int64Index\\\\(\\\\[1, 2, 4\\\\], dtype='int64'\\\\)\"\"\"\n\n idx1 = pd.Index([1, 2, 3])\n idx2 = pd.Index([1, 2, 4])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, check_less_precise=True)\n\n expected = \"\"\"MultiIndex level \\\\[1\\\\] are different\n\nMultiIndex level \\\\[1\\\\] values are different \\\\(25\\\\.0 %\\\\)\n\\\\[left\\\\]: Int64Index\\\\(\\\\[2, 2, 3, 4\\\\], dtype='int64'\\\\)\n\\\\[right\\\\]: Int64Index\\\\(\\\\[1, 2, 3, 4\\\\], dtype='int64'\\\\)\"\"\"\n\n idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2),\n ('B', 3), ('B', 4)])\n idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),\n ('B', 3), ('B', 4)])\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2, check_exact=False)\n\n def test_index_equal_metadata_message(self):\n\n expected = \"\"\"Index are different\n\nAttribute \"names\" are different\n\\\\[left\\\\]: \\\\[None\\\\]\n\\\\[right\\\\]: \\\\[u?'x'\\\\]\"\"\"\n\n idx1 = pd.Index([1, 2, 3])\n idx2 = pd.Index([1, 2, 3], name='x')\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n\n # same name, should pass\n assert_index_equal(pd.Index([1, 2, 3], name=np.nan),\n pd.Index([1, 2, 3], name=np.nan))\n assert_index_equal(pd.Index([1, 2, 3], name=pd.NaT),\n pd.Index([1, 2, 3], name=pd.NaT))\n\n expected = \"\"\"Index are different\n\nAttribute \"names\" are different\n\\\\[left\\\\]: \\\\[nan\\\\]\n\\\\[right\\\\]: \\\\[NaT\\\\]\"\"\"\n\n idx1 = pd.Index([1, 2, 3], name=np.nan)\n idx2 = pd.Index([1, 2, 3], name=pd.NaT)\n with tm.assert_raises_regex(AssertionError, expected):\n assert_index_equal(idx1, idx2)\n\n\nclass 
TestAssertSeriesEqual(object):\n\n def _assert_equal(self, x, y, **kwargs):\n assert_series_equal(x, y, **kwargs)\n assert_series_equal(y, x, **kwargs)\n\n def _assert_not_equal(self, a, b, **kwargs):\n pytest.raises(AssertionError, assert_series_equal, a, b, **kwargs)\n pytest.raises(AssertionError, assert_series_equal, b, a, **kwargs)\n\n def test_equal(self):\n self._assert_equal(Series(range(3)), Series(range(3)))\n self._assert_equal(Series(list('abc')), Series(list('abc')))\n\n def test_not_equal(self):\n self._assert_not_equal(Series(range(3)), Series(range(3)) + 1)\n self._assert_not_equal(Series(list('abc')), Series(list('xyz')))\n self._assert_not_equal(Series(range(3)), Series(range(4)))\n self._assert_not_equal(\n Series(range(3)), Series(\n range(3), dtype='float64'))\n self._assert_not_equal(\n Series(range(3)), Series(\n range(3), index=[1, 2, 4]))\n\n # ATM meta data is not checked in assert_series_equal\n # self._assert_not_equal(Series(range(3)),Series(range(3),name='foo'),check_names=True)\n\n def test_less_precise(self):\n s1 = Series([0.12345], dtype='float64')\n s2 = Series([0.12346], dtype='float64')\n\n pytest.raises(AssertionError, assert_series_equal, s1, s2)\n self._assert_equal(s1, s2, check_less_precise=True)\n for i in range(4):\n self._assert_equal(s1, s2, check_less_precise=i)\n pytest.raises(AssertionError, assert_series_equal, s1, s2, 10)\n\n s1 = Series([0.12345], dtype='float32')\n s2 = Series([0.12346], dtype='float32')\n\n pytest.raises(AssertionError, assert_series_equal, s1, s2)\n self._assert_equal(s1, s2, check_less_precise=True)\n for i in range(4):\n self._assert_equal(s1, s2, check_less_precise=i)\n pytest.raises(AssertionError, assert_series_equal, s1, s2, 10)\n\n # even less than less precise\n s1 = Series([0.1235], dtype='float32')\n s2 = Series([0.1236], dtype='float32')\n\n pytest.raises(AssertionError, assert_series_equal, s1, s2)\n pytest.raises(AssertionError, assert_series_equal, s1, s2, True)\n\n def test_index_dtype(self):\n df1 = DataFrame.from_records(\n {'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])\n df2 = DataFrame.from_records(\n {'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])\n self._assert_not_equal(df1.c, df2.c, check_index_type=True)\n\n def test_multiindex_dtype(self):\n df1 = DataFrame.from_records(\n {'a': [1, 2], 'b': [2.1, 1.5],\n 'c': ['l1', 'l2']}, index=['a', 'b'])\n df2 = DataFrame.from_records(\n {'a': [1.0, 2.0], 'b': [2.1, 1.5],\n 'c': ['l1', 'l2']}, index=['a', 'b'])\n self._assert_not_equal(df1.c, df2.c, check_index_type=True)\n\n def test_series_equal_message(self):\n\n expected = \"\"\"Series are different\n\nSeries length are different\n\\\\[left\\\\]: 3, RangeIndex\\\\(start=0, stop=3, step=1\\\\)\n\\\\[right\\\\]: 4, RangeIndex\\\\(start=0, stop=4, step=1\\\\)\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3, 4]))\n\n expected = \"\"\"Series are different\n\nSeries values are different \\\\(33\\\\.33333 %\\\\)\n\\\\[left\\\\]: \\\\[1, 2, 3\\\\]\n\\\\[right\\\\]: \\\\[1, 2, 4\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]))\n with tm.assert_raises_regex(AssertionError, expected):\n assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]),\n check_less_precise=True)\n\n\nclass TestAssertFrameEqual(object):\n\n def _assert_equal(self, x, y, **kwargs):\n assert_frame_equal(x, y, **kwargs)\n assert_frame_equal(y, x, **kwargs)\n\n def 
_assert_not_equal(self, a, b, **kwargs):\n pytest.raises(AssertionError, assert_frame_equal, a, b, **kwargs)\n pytest.raises(AssertionError, assert_frame_equal, b, a, **kwargs)\n\n def test_equal_with_different_row_order(self):\n # check_like=True ignores row-column orderings\n df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['a', 'b', 'c'])\n df2 = pd.DataFrame({'A': [3, 2, 1], 'B': [6, 5, 4]},\n index=['c', 'b', 'a'])\n\n self._assert_equal(df1, df2, check_like=True)\n self._assert_not_equal(df1, df2)\n\n def test_not_equal_with_different_shape(self):\n self._assert_not_equal(pd.DataFrame({'A': [1, 2, 3]}),\n pd.DataFrame({'A': [1, 2, 3, 4]}))\n\n def test_index_dtype(self):\n df1 = DataFrame.from_records(\n {'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])\n df2 = DataFrame.from_records(\n {'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])\n self._assert_not_equal(df1, df2, check_index_type=True)\n\n def test_multiindex_dtype(self):\n df1 = DataFrame.from_records(\n {'a': [1, 2], 'b': [2.1, 1.5],\n 'c': ['l1', 'l2']}, index=['a', 'b'])\n df2 = DataFrame.from_records(\n {'a': [1.0, 2.0], 'b': [2.1, 1.5],\n 'c': ['l1', 'l2']}, index=['a', 'b'])\n self._assert_not_equal(df1, df2, check_index_type=True)\n\n def test_empty_dtypes(self):\n df1 = pd.DataFrame(columns=[\"col1\", \"col2\"])\n df1[\"col1\"] = df1[\"col1\"].astype('int64')\n df2 = pd.DataFrame(columns=[\"col1\", \"col2\"])\n self._assert_equal(df1, df2, check_dtype=False)\n self._assert_not_equal(df1, df2, check_dtype=True)\n\n def test_frame_equal_message(self):\n\n expected = \"\"\"DataFrame are different\n\nDataFrame shape mismatch\n\\\\[left\\\\]: \\\\(3, 2\\\\)\n\\\\[right\\\\]: \\\\(3, 1\\\\)\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),\n pd.DataFrame({'A': [1, 2, 3]}))\n\n expected = \"\"\"DataFrame\\\\.index are different\n\nDataFrame\\\\.index values are different \\\\(33\\\\.33333 %\\\\)\n\\\\[left\\\\]: Index\\\\(\\\\[u?'a', u?'b', u?'c'\\\\], dtype='object'\\\\)\n\\\\[right\\\\]: Index\\\\(\\\\[u?'a', u?'b', u?'d'\\\\], dtype='object'\\\\)\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['a', 'b', 'c']),\n pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['a', 'b', 'd']))\n\n expected = \"\"\"DataFrame\\\\.columns are different\n\nDataFrame\\\\.columns values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: Index\\\\(\\\\[u?'A', u?'B'\\\\], dtype='object'\\\\)\n\\\\[right\\\\]: Index\\\\(\\\\[u?'A', u?'b'\\\\], dtype='object'\\\\)\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['a', 'b', 'c']),\n pd.DataFrame({'A': [1, 2, 3], 'b': [4, 5, 6]},\n index=['a', 'b', 'c']))\n\n expected = \"\"\"DataFrame\\\\.iloc\\\\[:, 1\\\\] are different\n\nDataFrame\\\\.iloc\\\\[:, 1\\\\] values are different \\\\(33\\\\.33333 %\\\\)\n\\\\[left\\\\]: \\\\[4, 5, 6\\\\]\n\\\\[right\\\\]: \\\\[4, 5, 7\\\\]\"\"\"\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),\n pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}))\n\n with tm.assert_raises_regex(AssertionError, expected):\n assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),\n pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}),\n by_blocks=True)\n\n\nclass TestAssertCategoricalEqual(object):\n\n def 
test_categorical_equal_message(self):\n\n expected = \"\"\"Categorical\\\\.categories are different\n\nCategorical\\\\.categories values are different \\\\(25\\\\.0 %\\\\)\n\\\\[left\\\\]: Int64Index\\\\(\\\\[1, 2, 3, 4\\\\], dtype='int64'\\\\)\n\\\\[right\\\\]: Int64Index\\\\(\\\\[1, 2, 3, 5\\\\], dtype='int64'\\\\)\"\"\"\n\n a = pd.Categorical([1, 2, 3, 4])\n b = pd.Categorical([1, 2, 3, 5])\n with tm.assert_raises_regex(AssertionError, expected):\n tm.assert_categorical_equal(a, b)\n\n expected = \"\"\"Categorical\\\\.codes are different\n\nCategorical\\\\.codes values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[0, 1, 3, 2\\\\]\n\\\\[right\\\\]: \\\\[0, 1, 2, 3\\\\]\"\"\"\n\n a = pd.Categorical([1, 2, 4, 3], categories=[1, 2, 3, 4])\n b = pd.Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])\n with tm.assert_raises_regex(AssertionError, expected):\n tm.assert_categorical_equal(a, b)\n\n expected = \"\"\"Categorical are different\n\nAttribute \"ordered\" are different\n\\\\[left\\\\]: False\n\\\\[right\\\\]: True\"\"\"\n\n a = pd.Categorical([1, 2, 3, 4], ordered=False)\n b = pd.Categorical([1, 2, 3, 4], ordered=True)\n with tm.assert_raises_regex(AssertionError, expected):\n tm.assert_categorical_equal(a, b)\n\n\nclass TestRNGContext(object):\n\n def test_RNGContext(self):\n expected0 = 1.764052345967664\n expected1 = 1.6243453636632417\n\n with RNGContext(0):\n with RNGContext(1):\n assert np.random.randn() == expected1\n assert np.random.randn() == expected0\n\n\nclass TestLocale(object):\n\n def test_locale(self):\n if sys.platform == 'win32':\n pytest.skip(\n \"skipping on win platforms as locale not available\")\n\n # GH9744\n locales = tm.get_locales()\n assert len(locales) >= 1\n" ]
[ [ "pandas.util.testing.assert_raises_regex", "pandas.compat.StringIO", "pandas.util.testing.assert_produces_warning", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal" ], [ "pandas.concat", "pandas.util.testing.ensure_clean", "pandas.util.testing.assert_raises_regex", "pandas.util.testing.get_data_path", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "pandas.read_json", "pandas.util.testing.decompress_file" ], [ "pandas.timedelta_range", "pandas.TimedeltaIndex", "pandas.util.testing.assert_raises_regex", "pandas.Timedelta", "numpy.timedelta64", "pandas.util.testing.assert_index_equal", "pandas.compat.iteritems", "pandas.to_timedelta", "numpy.array" ], [ "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_sp_frame_equal", "pandas.SparseDataFrame", "pandas.DataFrame" ], [ "pandas.Series", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "numpy.random.randn", "pandas.DataFrame.from_records", "pandas.util.testing.get_locales", "pandas.util.testing.assert_numpy_array_equal", "numpy.uint32", "pandas.util.testing.assert_categorical_equal", "pandas.Index", "pandas.util.testing.assert_series_equal", "pandas.Categorical", "pandas.util.testing.assert_almost_equal", "pandas.util.testing.raise_with_traceback", "numpy.array", "pandas.util.testing.RNGContext", "pandas.util.testing.assert_raises_regex", "numpy.int16", "numpy.float64", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "0.25" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luijkr/pandas
[ "bc29dfb5bf4c82f8d616857c2316fc8f17d8f2a5" ]
[ "pandas/tests/window/test_rolling.py" ]
[ "from datetime import datetime, timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import UnsupportedFunctionCall\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n MultiIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n period_range,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nfrom pandas.api.indexers import BaseIndexer\nfrom pandas.core.window import Rolling\n\n\ndef test_doc_string():\n\n df = DataFrame({\"B\": [0, 1, 2, np.nan, 4]})\n df\n df.rolling(2).sum()\n df.rolling(2, min_periods=1).sum()\n\n\ndef test_constructor(frame_or_series):\n # GH 12669\n\n c = frame_or_series(range(5)).rolling\n\n # valid\n c(0)\n c(window=2)\n c(window=2, min_periods=1)\n c(window=2, min_periods=1, center=True)\n c(window=2, min_periods=1, center=False)\n\n # GH 13383\n\n msg = \"window must be an integer 0 or greater\"\n\n with pytest.raises(ValueError, match=msg):\n c(-1)\n\n\[email protected](\"w\", [2.0, \"foo\", np.array([2])])\ndef test_invalid_constructor(frame_or_series, w):\n # not valid\n\n c = frame_or_series(range(5)).rolling\n\n msg = (\n \"window must be an integer|\"\n \"passed window foo is not compatible with a datetimelike index\"\n )\n with pytest.raises(ValueError, match=msg):\n c(window=w)\n\n msg = \"min_periods must be an integer\"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=w)\n\n msg = \"center must be a boolean\"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=1, center=w)\n\n\[email protected](\"window\", [timedelta(days=3), Timedelta(days=3)])\ndef test_constructor_with_timedelta_window(window):\n # GH 15440\n n = 10\n df = DataFrame(\n {\"value\": np.arange(n)}, index=date_range(\"2015-12-24\", periods=n, freq=\"D\")\n )\n expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))\n\n result = df.rolling(window=window).sum()\n expected = DataFrame(\n {\"value\": expected_data},\n index=date_range(\"2015-12-24\", periods=n, freq=\"D\"),\n )\n tm.assert_frame_equal(result, expected)\n expected = df.rolling(\"3D\").sum()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"window\", [timedelta(days=3), Timedelta(days=3), \"3D\"])\ndef test_constructor_timedelta_window_and_minperiods(window, raw):\n # GH 15305\n n = 10\n df = DataFrame(\n {\"value\": np.arange(n)}, index=date_range(\"2017-08-08\", periods=n, freq=\"D\")\n )\n expected = DataFrame(\n {\"value\": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))},\n index=date_range(\"2017-08-08\", periods=n, freq=\"D\"),\n )\n result_roll_sum = df.rolling(window=window, min_periods=2).sum()\n result_roll_generic = df.rolling(window=window, min_periods=2).apply(sum, raw=raw)\n tm.assert_frame_equal(result_roll_sum, expected)\n tm.assert_frame_equal(result_roll_generic, expected)\n\n\[email protected](\"method\", [\"std\", \"mean\", \"sum\", \"max\", \"min\", \"var\"])\ndef test_numpy_compat(method):\n # see gh-12811\n r = Rolling(Series([2, 4, 6]), window=2)\n\n msg = \"numpy operations are not valid with window objects\"\n\n with pytest.raises(UnsupportedFunctionCall, match=msg):\n getattr(r, method)(1, 2, 3)\n with pytest.raises(UnsupportedFunctionCall, match=msg):\n getattr(r, method)(dtype=np.float64)\n\n\ndef test_closed_fixed(closed, arithmetic_win_operators):\n # GH 34315\n func_name = arithmetic_win_operators\n df_fixed = DataFrame({\"A\": [0, 1, 2, 3, 4]})\n df_time = DataFrame({\"A\": [0, 1, 2, 3, 4]}, index=date_range(\"2020\", periods=5))\n\n result = getattr(df_fixed.rolling(2, closed=closed, 
min_periods=1), func_name)()\n expected = getattr(df_time.rolling(\"2D\", closed=closed), func_name)().reset_index(\n drop=True\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_closed_fixed_binary_col():\n # GH 34315\n data = [0, 1, 1, 0, 0, 1, 0, 1]\n df = DataFrame(\n {\"binary_col\": data},\n index=date_range(start=\"2020-01-01\", freq=\"min\", periods=len(data)),\n )\n\n rolling = df.rolling(window=len(df), closed=\"left\", min_periods=1)\n result = rolling.mean()\n expected = DataFrame(\n [np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571],\n columns=[\"binary_col\"],\n index=date_range(start=\"2020-01-01\", freq=\"min\", periods=len(data)),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"closed\", [\"neither\", \"left\"])\ndef test_closed_empty(closed, arithmetic_win_operators):\n # GH 26005\n func_name = arithmetic_win_operators\n ser = Series(data=np.arange(5), index=date_range(\"2000\", periods=5, freq=\"2D\"))\n roll = ser.rolling(\"1D\", closed=closed)\n\n result = getattr(roll, func_name)()\n expected = Series([np.nan] * 5, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_closed_one_entry(func):\n # GH24718\n ser = Series(data=[2], index=date_range(\"2000\", periods=1))\n result = getattr(ser.rolling(\"10D\", closed=\"left\"), func)()\n tm.assert_series_equal(result, Series([np.nan], index=ser.index))\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_closed_one_entry_groupby(func):\n # GH24718\n ser = DataFrame(\n data={\"A\": [1, 1, 2], \"B\": [3, 2, 1]}, index=date_range(\"2000\", periods=3)\n )\n result = getattr(\n ser.groupby(\"A\", sort=False)[\"B\"].rolling(\"10D\", closed=\"left\"), func\n )()\n exp_idx = MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], names=(\"A\", None))\n expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"input_dtype\", [\"int\", \"float\"])\[email protected](\n \"func,closed,expected\",\n [\n (\"min\", \"right\", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"min\", \"both\", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"min\", \"neither\", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"min\", \"left\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"max\", \"right\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n (\"max\", \"both\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n (\"max\", \"neither\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n (\"max\", \"left\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n ],\n)\ndef test_closed_min_max_datetime(input_dtype, func, closed, expected):\n # see gh-21704\n ser = Series(\n data=np.arange(10).astype(input_dtype), index=date_range(\"2000\", periods=10)\n )\n\n result = getattr(ser.rolling(\"3D\", closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_closed_uneven():\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n\n # uneven\n ser = ser.drop(index=ser.index[[1, 5]])\n result = ser.rolling(\"3D\", closed=\"left\").min()\n expected = Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"func,closed,expected\",\n [\n (\"min\", \"right\", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n (\"min\", \"both\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),\n (\"min\", \"neither\", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n (\"min\", \"left\", [np.nan, np.nan, 
0, 0, 1, 2, 3, 4, 5, np.nan]),\n (\"max\", \"right\", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),\n (\"max\", \"both\", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),\n (\"max\", \"neither\", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),\n (\"max\", \"left\", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]),\n ],\n)\ndef test_closed_min_max_minp(func, closed, expected):\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n ser[ser.index[-3:]] = np.nan\n result = getattr(ser.rolling(\"3D\", min_periods=2, closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"closed,expected\",\n [\n (\"right\", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]),\n (\"both\", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n (\"neither\", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n (\"left\", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]),\n ],\n)\ndef test_closed_median_quantile(closed, expected):\n # GH 26005\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n roll = ser.rolling(\"3D\", closed=closed)\n expected = Series(expected, index=ser.index)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.5)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"roller\", [\"1s\", 1])\ndef tests_empty_df_rolling(roller):\n # GH 15819 Verifies that datetime and integer rolling windows can be\n # applied to empty DataFrames\n expected = DataFrame()\n result = DataFrame().rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n # Verifies that datetime and integer rolling windows can be applied to\n # empty DataFrames with datetime index\n expected = DataFrame(index=DatetimeIndex([]))\n result = DataFrame(index=DatetimeIndex([])).rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_window_median_quantile():\n # GH 26005\n expected = Series([np.nan, np.nan, np.nan])\n roll = Series(np.arange(3)).rolling(0)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero():\n # https://github.com/pandas-dev/pandas/pull/18921\n # minp=0\n x = Series([np.nan])\n result = x.rolling(1, min_periods=0).sum()\n expected = Series([0.0])\n tm.assert_series_equal(result, expected)\n\n # minp=1\n result = x.rolling(1, min_periods=1).sum()\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero_variable():\n # https://github.com/pandas-dev/pandas/pull/18921\n x = Series(\n [np.nan] * 4,\n index=DatetimeIndex([\"2017-01-01\", \"2017-01-04\", \"2017-01-06\", \"2017-01-07\"]),\n )\n result = x.rolling(Timedelta(\"2d\"), min_periods=0).sum()\n expected = Series(0.0, index=x.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_multi_index_names():\n\n # GH 16789, 16825\n cols = MultiIndex.from_product([[\"A\", \"B\"], [\"C\", \"D\", \"E\"]], names=[\"1\", \"2\"])\n df = DataFrame(np.ones((10, 6)), columns=cols)\n result = df.rolling(3).cov()\n\n tm.assert_index_equal(result.columns, df.columns)\n assert result.index.names == [None, \"1\", \"2\"]\n\n\ndef test_rolling_axis_sum(axis_frame):\n # see gh-23372.\n df = DataFrame(np.ones((10, 20)))\n axis = df._get_axis_number(axis_frame)\n\n if axis == 0:\n expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)})\n else:\n # axis == 1\n expected = DataFrame([[np.nan] 
* 2 + [3.0] * 18] * 10)\n\n result = df.rolling(3, axis=axis_frame).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_count(axis_frame):\n # see gh-26055\n df = DataFrame({\"x\": range(3), \"y\": range(3)})\n\n axis = df._get_axis_number(axis_frame)\n\n if axis in [0, \"index\"]:\n expected = DataFrame({\"x\": [1.0, 2.0, 2.0], \"y\": [1.0, 2.0, 2.0]})\n else:\n expected = DataFrame({\"x\": [1.0, 1.0, 1.0], \"y\": [2.0, 2.0, 2.0]})\n\n result = df.rolling(2, axis=axis_frame, min_periods=0).count()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_readonly_array():\n # GH-27766\n arr = np.array([1, 3, np.nan, 3, 5])\n arr.setflags(write=False)\n result = Series(arr).rolling(2).mean()\n expected = Series([np.nan, 2, np.nan, np.nan, 4])\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_datetime(axis_frame, tz_naive_fixture):\n # GH-28192\n tz = tz_naive_fixture\n df = DataFrame(\n {i: [1] * 2 for i in date_range(\"2019-8-01\", \"2019-08-03\", freq=\"D\", tz=tz)}\n )\n if axis_frame in [0, \"index\"]:\n result = df.T.rolling(\"2D\", axis=axis_frame).sum().T\n else:\n result = df.rolling(\"2D\", axis=axis_frame).sum()\n expected = DataFrame(\n {\n **{\n i: [1.0] * 2\n for i in date_range(\"2019-8-01\", periods=1, freq=\"D\", tz=tz)\n },\n **{\n i: [2.0] * 2\n for i in date_range(\"2019-8-02\", \"2019-8-03\", freq=\"D\", tz=tz)\n },\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_window_as_string():\n # see gh-22590\n date_today = datetime.now()\n days = date_range(date_today, date_today + timedelta(365), freq=\"D\")\n\n npr = np.random.RandomState(seed=421)\n\n data = npr.randint(1, high=100, size=len(days))\n df = DataFrame({\"DateCol\": days, \"metric\": data})\n\n df.set_index(\"DateCol\", inplace=True)\n result = df.rolling(window=\"21D\", min_periods=2, closed=\"left\")[\"metric\"].agg(\"max\")\n\n expData = (\n [np.nan] * 2\n + [88.0] * 16\n + [97.0] * 9\n + [98.0]\n + [99.0] * 21\n + [95.0] * 16\n + [93.0] * 5\n + [89.0] * 5\n + [96.0] * 21\n + [94.0] * 14\n + [90.0] * 13\n + [88.0] * 2\n + [90.0] * 9\n + [96.0] * 21\n + [95.0] * 6\n + [91.0]\n + [87.0] * 6\n + [92.0] * 21\n + [83.0] * 2\n + [86.0] * 10\n + [87.0] * 5\n + [98.0] * 21\n + [97.0] * 14\n + [93.0] * 7\n + [87.0] * 4\n + [86.0] * 4\n + [95.0] * 21\n + [85.0] * 14\n + [83.0] * 2\n + [76.0] * 5\n + [81.0] * 2\n + [98.0] * 21\n + [95.0] * 14\n + [91.0] * 7\n + [86.0]\n + [93.0] * 3\n + [95.0] * 20\n )\n\n expected = Series(\n expData, index=days.rename(\"DateCol\")._with_freq(None), name=\"metric\"\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_min_periods1():\n # GH#6795\n df = DataFrame([0, 1, 2, 1, 0], columns=[\"a\"])\n result = df[\"a\"].rolling(3, center=True, min_periods=1).max()\n expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name=\"a\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_count_with_min_periods(frame_or_series):\n # GH 26996\n result = frame_or_series(range(5)).rolling(3, min_periods=3).count()\n expected = frame_or_series([np.nan, np.nan, 3.0, 3.0, 3.0])\n tm.assert_equal(result, expected)\n\n\ndef test_rolling_count_default_min_periods_with_null_values(frame_or_series):\n # GH 26996\n values = [1, 2, 3, np.nan, 4, 5, 6]\n expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]\n\n # GH 31302\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = frame_or_series(values).rolling(3).count()\n expected = frame_or_series(expected_counts)\n tm.assert_equal(result, expected)\n\n\[email 
protected](\n \"df,expected,window,min_periods\",\n [\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n None,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n 2,\n 1,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n 2,\n 2,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n 1,\n 1,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n 1,\n 0,\n ),\n (DataFrame({\"A\": [1], \"B\": [4]}), [], 2, None),\n (DataFrame({\"A\": [1], \"B\": [4]}), [], 2, 1),\n (DataFrame(), [({}, [])], 2, None),\n (\n DataFrame({\"A\": [1, np.nan, 3], \"B\": [np.nan, 5, 6]}),\n [\n ({\"A\": [1.0], \"B\": [np.nan]}, [0]),\n ({\"A\": [1, np.nan], \"B\": [np.nan, 5]}, [0, 1]),\n ({\"A\": [1, np.nan, 3], \"B\": [np.nan, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n 2,\n ),\n ],\n)\ndef test_iter_rolling_dataframe(df, expected, window, min_periods):\n # GH 11704\n expected = [DataFrame(values, index=index) for (values, index) in expected]\n\n for (expected, actual) in zip(\n expected, df.rolling(window, min_periods=min_periods)\n ):\n tm.assert_frame_equal(actual, expected)\n\n\[email protected](\n \"expected,window\",\n [\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n \"2D\",\n ),\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}, [0, 1, 2]),\n ],\n \"3D\",\n ),\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n \"1D\",\n ),\n ],\n)\ndef test_iter_rolling_on_dataframe(expected, window):\n # GH 11704\n df = DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5],\n \"B\": [4, 5, 6, 7, 8],\n \"C\": date_range(start=\"2016-01-01\", periods=5, freq=\"D\"),\n }\n )\n\n expected = [DataFrame(values, index=index) for (values, index) in expected]\n for (expected, actual) in zip(expected, df.rolling(window, on=\"C\")):\n tm.assert_frame_equal(actual, expected)\n\n\[email protected](\n \"ser,expected,window, min_periods\",\n [\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n None,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n 1,\n ),\n (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])], 2, 1),\n (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])], 2, 2),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 0),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 1),\n (Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2, 0),\n (Series([], dtype=\"int64\"), [], 2, 1),\n ],\n)\ndef test_iter_rolling_series(ser, expected, window, min_periods):\n # GH 11704\n expected = [Series(values, index=index) for (values, index) in expected]\n\n for (expected, actual) in zip(\n expected, ser.rolling(window, 
min_periods=min_periods)\n ):\n tm.assert_series_equal(actual, expected)\n\n\[email protected](\n \"expected,expected_index,window\",\n [\n (\n [[0], [1], [2], [3], [4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-02\", periods=1, freq=\"D\"),\n date_range(\"2020-01-03\", periods=1, freq=\"D\"),\n date_range(\"2020-01-04\", periods=1, freq=\"D\"),\n date_range(\"2020-01-05\", periods=1, freq=\"D\"),\n ],\n \"1D\",\n ),\n (\n [[0], [0, 1], [1, 2], [2, 3], [3, 4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-01\", periods=2, freq=\"D\"),\n date_range(\"2020-01-02\", periods=2, freq=\"D\"),\n date_range(\"2020-01-03\", periods=2, freq=\"D\"),\n date_range(\"2020-01-04\", periods=2, freq=\"D\"),\n ],\n \"2D\",\n ),\n (\n [[0], [0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-01\", periods=2, freq=\"D\"),\n date_range(\"2020-01-01\", periods=3, freq=\"D\"),\n date_range(\"2020-01-02\", periods=3, freq=\"D\"),\n date_range(\"2020-01-03\", periods=3, freq=\"D\"),\n ],\n \"3D\",\n ),\n ],\n)\ndef test_iter_rolling_datetime(expected, expected_index, window):\n # GH 11704\n ser = Series(range(5), index=date_range(start=\"2020-01-01\", periods=5, freq=\"D\"))\n\n expected = [\n Series(values, index=idx) for (values, idx) in zip(expected, expected_index)\n ]\n\n for (expected, actual) in zip(expected, ser.rolling(window)):\n tm.assert_series_equal(actual, expected)\n\n\[email protected](\n \"grouping,_index\",\n [\n (\n {\"level\": 0},\n MultiIndex.from_tuples(\n [(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None]\n ),\n ),\n (\n {\"by\": \"X\"},\n MultiIndex.from_tuples(\n [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=[\"X\", None]\n ),\n ),\n ],\n)\ndef test_rolling_positional_argument(grouping, _index, raw):\n # GH 34605\n\n def scaled_sum(*args):\n if len(args) < 2:\n raise ValueError(\"The function needs two arguments\")\n array, scale = args\n return array.sum() / scale\n\n df = DataFrame(data={\"X\": range(5)}, index=[0, 0, 1, 1, 1])\n\n expected = DataFrame(data={\"X\": [0.0, 0.5, 1.0, 1.5, 2.0]}, index=_index)\n result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"add\", [0.0, 2.0])\ndef test_rolling_numerical_accuracy_kahan_mean(add):\n # GH: 36031 implementing kahan summation\n df = DataFrame(\n {\"A\": [3002399751580331.0 + add, -0.0, -0.0]},\n index=[\n Timestamp(\"19700101 09:00:00\"),\n Timestamp(\"19700101 09:00:03\"),\n Timestamp(\"19700101 09:00:06\"),\n ],\n )\n result = (\n df.resample(\"1s\").ffill().rolling(\"3s\", closed=\"left\", min_periods=3).mean()\n )\n dates = date_range(\"19700101 09:00:00\", periods=7, freq=\"S\")\n expected = DataFrame(\n {\n \"A\": [\n np.nan,\n np.nan,\n np.nan,\n 3002399751580330.5,\n 2001599834386887.25,\n 1000799917193443.625,\n 0.0,\n ]\n },\n index=dates,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_kahan_sum():\n # GH: 13254\n df = DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=[\"x\"])\n result = df[\"x\"].rolling(3).sum()\n expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name=\"x\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_jump():\n # GH: 32761\n index = date_range(start=\"2020-01-01\", end=\"2020-01-02\", freq=\"60s\").append(\n DatetimeIndex([\"2020-01-03\"])\n )\n data = 
np.random.rand(len(index))\n\n df = DataFrame({\"data\": data}, index=index)\n result = df.rolling(\"60s\").mean()\n tm.assert_frame_equal(result, df[[\"data\"]])\n\n\ndef test_rolling_numerical_accuracy_small_values():\n # GH: 10319\n s = Series(\n data=[0.00012456, 0.0003, -0.0, -0.0],\n index=date_range(\"1999-02-03\", \"1999-02-06\"),\n )\n result = s.rolling(1).mean()\n tm.assert_series_equal(result, s)\n\n\ndef test_rolling_numerical_too_large_numbers():\n # GH: 11645\n dates = date_range(\"2015-01-01\", periods=10, freq=\"D\")\n ds = Series(data=range(10), index=dates, dtype=np.float64)\n ds[2] = -9e33\n result = ds.rolling(5).mean()\n expected = Series(\n [np.nan, np.nan, np.nan, np.nan, -1.8e33, -1.8e33, -1.8e33, 5.0, 6.0, 7.0],\n index=dates,\n )\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"func\", \"value\"),\n [(\"sum\", 2.0), (\"max\", 1.0), (\"min\", 1.0), (\"mean\", 1.0), (\"median\", 1.0)],\n)\ndef test_rolling_mixed_dtypes_axis_1(func, value):\n # GH: 20649\n df = DataFrame(1, index=[1, 2], columns=[\"a\", \"b\", \"c\"])\n df[\"c\"] = 1.0\n result = getattr(df.rolling(window=2, min_periods=1, axis=1), func)()\n expected = DataFrame(\n {\"a\": [1.0, 1.0], \"b\": [value, value], \"c\": [value, value]}, index=[1, 2]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_one_with_nan():\n # GH: 35596\n df = DataFrame(\n [\n [0, 1, 2, 4, np.nan, np.nan, np.nan],\n [0, 1, 2, np.nan, np.nan, np.nan, np.nan],\n [0, 2, 2, np.nan, 2, np.nan, 1],\n ]\n )\n result = df.rolling(window=7, min_periods=1, axis=\"columns\").sum()\n expected = DataFrame(\n [\n [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0],\n [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0],\n [0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0],\n ]\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"value\",\n [\"test\", to_datetime(\"2019-12-31\"), to_timedelta(\"1 days 06:05:01.00003\")],\n)\ndef test_rolling_axis_1_non_numeric_dtypes(value):\n # GH: 20649\n df = DataFrame({\"a\": [1, 2]})\n df[\"b\"] = value\n result = df.rolling(window=2, min_periods=1, axis=1).sum()\n expected = DataFrame({\"a\": [1.0, 2.0]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_on_df_transposed():\n # GH: 32724\n df = DataFrame({\"A\": [1, None], \"B\": [4, 5], \"C\": [7, 8]})\n expected = DataFrame({\"A\": [1.0, np.nan], \"B\": [5.0, 5.0], \"C\": [11.0, 13.0]})\n result = df.rolling(min_periods=1, window=2, axis=1).sum()\n tm.assert_frame_equal(result, expected)\n\n result = df.T.rolling(min_periods=1, window=2).sum().T\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n (\"index\", \"window\"),\n [\n (\n period_range(start=\"2020-01-01 08:00\", end=\"2020-01-01 08:08\", freq=\"T\"),\n \"2T\",\n ),\n (\n period_range(start=\"2020-01-01 08:00\", end=\"2020-01-01 12:00\", freq=\"30T\"),\n \"1h\",\n ),\n ],\n)\[email protected](\n (\"func\", \"values\"),\n [\n (\"min\", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"max\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"sum\", [np.nan, 0, 1, 3, 5, 7, 9, 11, 13]),\n ],\n)\ndef test_rolling_period_index(index, window, func, values):\n # GH: 34225\n ds = Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)\n result = getattr(ds.rolling(window, closed=\"left\"), func)()\n expected = Series(values, index=index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_sem(frame_or_series):\n # GH: 26476\n obj = frame_or_series([0, 1, 2])\n result = obj.rolling(2, min_periods=1).sem()\n if isinstance(result, DataFrame):\n result = 
Series(result[0].values)\n expected = Series([np.nan] + [0.707107] * 2)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"func\", \"third_value\", \"values\"),\n [\n (\"var\", 1, [5e33, 0, 0.5, 0.5, 2, 0]),\n (\"std\", 1, [7.071068e16, 0, 0.7071068, 0.7071068, 1.414214, 0]),\n (\"var\", 2, [5e33, 0.5, 0, 0.5, 2, 0]),\n (\"std\", 2, [7.071068e16, 0.7071068, 0, 0.7071068, 1.414214, 0]),\n ],\n)\ndef test_rolling_var_numerical_issues(func, third_value, values):\n # GH: 37051\n ds = Series([99999999999999999, 1, third_value, 2, 3, 1, 1])\n result = getattr(ds.rolling(2), func)()\n expected = Series([np.nan] + values)\n tm.assert_series_equal(result, expected)\n\n\ndef test_timeoffset_as_window_parameter_for_corr():\n # GH: 28266\n exp = DataFrame(\n {\n \"B\": [\n np.nan,\n np.nan,\n 0.9999999999999998,\n -1.0,\n 1.0,\n -0.3273268353539892,\n 0.9999999999999998,\n 1.0,\n 0.9999999999999998,\n 1.0,\n ],\n \"A\": [\n np.nan,\n np.nan,\n -1.0,\n 1.0000000000000002,\n -0.3273268353539892,\n 0.9999999999999966,\n 1.0,\n 1.0000000000000002,\n 1.0,\n 1.0000000000000002,\n ],\n },\n index=MultiIndex.from_tuples(\n [\n (Timestamp(\"20130101 09:00:00\"), \"B\"),\n (Timestamp(\"20130101 09:00:00\"), \"A\"),\n (Timestamp(\"20130102 09:00:02\"), \"B\"),\n (Timestamp(\"20130102 09:00:02\"), \"A\"),\n (Timestamp(\"20130103 09:00:03\"), \"B\"),\n (Timestamp(\"20130103 09:00:03\"), \"A\"),\n (Timestamp(\"20130105 09:00:05\"), \"B\"),\n (Timestamp(\"20130105 09:00:05\"), \"A\"),\n (Timestamp(\"20130106 09:00:06\"), \"B\"),\n (Timestamp(\"20130106 09:00:06\"), \"A\"),\n ]\n ),\n )\n\n df = DataFrame(\n {\"B\": [0, 1, 2, 4, 3], \"A\": [7, 4, 6, 9, 3]},\n index=[\n Timestamp(\"20130101 09:00:00\"),\n Timestamp(\"20130102 09:00:02\"),\n Timestamp(\"20130103 09:00:03\"),\n Timestamp(\"20130105 09:00:05\"),\n Timestamp(\"20130106 09:00:06\"),\n ],\n )\n\n res = df.rolling(window=\"3d\").corr()\n\n tm.assert_frame_equal(exp, res)\n\n\[email protected](\"method\", [\"var\", \"sum\", \"mean\", \"skew\", \"kurt\", \"min\", \"max\"])\ndef test_rolling_decreasing_indices(method):\n \"\"\"\n Make sure that decreasing indices give the same results as increasing indices.\n\n GH 36933\n \"\"\"\n df = DataFrame({\"values\": np.arange(-15, 10) ** 2})\n df_reverse = DataFrame({\"values\": df[\"values\"][::-1]}, index=df.index[::-1])\n\n increasing = getattr(df.rolling(window=5), method)()\n decreasing = getattr(df_reverse.rolling(window=5), method)()\n\n assert np.abs(decreasing.values[::-1][:-4] - increasing.values[4:]).max() < 1e-12\n\n\[email protected](\n \"method,expected\",\n [\n (\n \"var\",\n [\n float(\"nan\"),\n 43.0,\n float(\"nan\"),\n 136.333333,\n 43.5,\n 94.966667,\n 182.0,\n 318.0,\n ],\n ),\n (\"mean\", [float(\"nan\"), 7.5, float(\"nan\"), 21.5, 6.0, 9.166667, 13.0, 17.5]),\n (\"sum\", [float(\"nan\"), 30.0, float(\"nan\"), 86.0, 30.0, 55.0, 91.0, 140.0]),\n (\n \"skew\",\n [\n float(\"nan\"),\n 0.709296,\n float(\"nan\"),\n 0.407073,\n 0.984656,\n 0.919184,\n 0.874674,\n 0.842418,\n ],\n ),\n (\n \"kurt\",\n [\n float(\"nan\"),\n -0.5916711736073559,\n float(\"nan\"),\n -1.0028993131317954,\n -0.06103844629409494,\n -0.254143227116194,\n -0.37362637362637585,\n -0.45439658241367054,\n ],\n ),\n ],\n)\ndef test_rolling_non_monotonic(method, expected):\n \"\"\"\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. 
Output of sum/mean has\n manually been verified.\n\n GH 36933.\n \"\"\"\n # Based on an example found in computation.rst\n use_expanding = [True, False, True, False, True, True, True, True]\n df = DataFrame({\"values\": np.arange(len(use_expanding)) ** 2})\n\n class CustomIndexer(BaseIndexer):\n def get_window_bounds(self, num_values, min_periods, center, closed):\n start = np.empty(num_values, dtype=np.int64)\n end = np.empty(num_values, dtype=np.int64)\n for i in range(num_values):\n if self.use_expanding[i]:\n start[i] = 0\n end[i] = i + 1\n else:\n start[i] = i\n end[i] = i + self.window_size\n return start, end\n\n indexer = CustomIndexer(window_size=4, use_expanding=use_expanding)\n\n result = getattr(df.rolling(indexer), method)()\n expected = DataFrame({\"values\": expected})\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n (\"index\", \"window\"),\n [([0, 1, 2, 3, 4], 2), (date_range(\"2001-01-01\", freq=\"D\", periods=5), \"2D\")],\n)\ndef test_rolling_corr_timedelta_index(index, window):\n # GH: 31286\n x = Series([1, 2, 3, 4, 5], index=index)\n y = x.copy()\n x[0:2] = 0.0\n result = x.rolling(window).corr(y)\n expected = Series([np.nan, np.nan, 1, 1, 1], index=index)\n tm.assert_almost_equal(result, expected)\n\n\ndef test_groupby_rolling_nan_included():\n # GH 35542\n data = {\"group\": [\"g1\", np.nan, \"g1\", \"g2\", np.nan], \"B\": [0, 1, 2, 3, 4]}\n df = DataFrame(data)\n result = df.groupby(\"group\", dropna=False).rolling(1, min_periods=1).mean()\n expected = DataFrame(\n {\"B\": [0.0, 2.0, 3.0, 1.0, 4.0]},\n # GH-38057 from_tuples puts the NaNs in the codes, result expects them\n # to be in the levels, at the moment\n # index=MultiIndex.from_tuples(\n # [(\"g1\", 0), (\"g1\", 2), (\"g2\", 3), (np.nan, 1), (np.nan, 4)],\n # names=[\"group\", None],\n # ),\n index=MultiIndex(\n [[\"g1\", \"g2\", np.nan], [0, 1, 2, 3, 4]],\n [[0, 0, 1, 2, 2], [0, 2, 3, 1, 4]],\n names=[\"group\", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"method\", [\"skew\", \"kurt\"])\ndef test_rolling_skew_kurt_numerical_stability(method):\n # GH: 6929\n s = Series(np.random.rand(10))\n expected = getattr(s.rolling(3), method)()\n s = s + 50000\n result = getattr(s.rolling(3), method)()\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"method\", \"values\"),\n [\n (\"skew\", [2.0, 0.854563, 0.0, 1.999984]),\n (\"kurt\", [4.0, -1.289256, -1.2, 3.999946]),\n ],\n)\ndef test_rolling_skew_kurt_large_value_range(method, values):\n # GH: 37557\n s = Series([3000000, 1, 1, 2, 3, 4, 999])\n result = getattr(s.rolling(4), method)()\n expected = Series([np.nan] * 3 + values)\n tm.assert_series_equal(result, expected)\n" ]
[ [ "pandas._testing.assert_almost_equal", "pandas.to_datetime", "pandas.Series", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas._testing.assert_frame_equal", "numpy.arange", "pandas.DatetimeIndex", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas._testing.assert_produces_warning", "pandas.MultiIndex", "pandas.Timedelta", "pandas.MultiIndex.from_product", "numpy.random.rand", "pandas.date_range", "numpy.array", "numpy.random.RandomState", "pandas._testing.assert_equal", "numpy.abs", "pandas.period_range", "pandas.MultiIndex.from_arrays", "numpy.ones", "pandas.to_timedelta", "pandas.Timestamp", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tyler-e-marshall/prymetime
[ "c1daa783c8091adbc5900a51d98522b1269d0107" ]
[ "PRYMETIME/nucmer4.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nTitle: Sending Contigs to Nucmer\nCreated on Tue Aug 13 2019\n\n@author: Eric\n@email: [email protected]\n\"\"\"\nimport glob, os\nimport pandas as pd\nfrom Bio import SeqIO\nfrom pymummer import nucmer\nfrom pathlib import Path\n\npath_to_file = \"pilon.fasta\"\npath = Path(path_to_file)\n\nshort_contigs = []\ncontigs = []\n\nif path.is_file():\n\n for x in SeqIO.parse(open(\"pilon.fasta\"),'fasta'):\n\n if len(x.seq) < 50000:\n short_contigs.append(x)\n SeqIO.write(x, \"%(x)s.fasta\" % {'x':x.id}, 'fasta')\n\n else:\n contigs.append(x)\n #print(\"long\", x.id)\n\n for pathname in glob.glob(\"*.fasta\"):\n basename = os.path.basename(pathname)\n\n for x in short_contigs:\n\n if x.id in basename :\n runner = nucmer.Runner(basename, basename, \"%(x)s_out.coords\" % {'x':x.id},\n maxmatch=True, simplify=False, mincluster=2000, min_id=99, min_length=2000, coords_header=True)\n\n runner.run()\n\n# The below lines are for saving fasta files of the contigs if desired\n#SeqIO.write(short_contigs , \"short_contigs.fasta\", \"fasta\")\n#SeqIO.write(lin_contigs , \"lin_contigs.fasta\", \"fasta\")\n\n# The below lines are for visually checking which files are repetitive or not\n'''\nfor pathname in glob.glob(\"*.coords\"):\n\n basename = os.path.basename(pathname)\n name = basename.split(\".\")\n\n df = pd.read_csv(basename)\n\n print(df)\n\n if len(df.index) > 1 :\n\n print(name[0], \"morethan 1\")\n'''\n\ncir_path = \"cir_contigs.fasta\"\npath_cir = Path(cir_path)\n\nif path_cir.is_file():\n\n cir_rep_contigs = [x for x in SeqIO.parse(open(\"cir_contigs.fasta\"), 'fasta')]\n\n for x in short_contigs:\n if len(pd.read_csv(\"%(x)s_out.coords\" % {'x': x.id}).index) > 4 :\n cir_rep_contigs.append(x)\n else:\n #print(x.id)\n contigs.append(x)\n\n for x in cir_rep_contigs :\n SeqIO.write(cir_rep_contigs, \"cir_rep_contigs.fasta\", \"fasta\")\n\nSeqIO.write(contigs, \"polished_contigs.fasta\", \"fasta\")\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
pfistfl/openml-defaults
[ "0678167f807512bd0c957f82a83ff8181461090c", "0678167f807512bd0c957f82a83ff8181461090c", "0678167f807512bd0c957f82a83ff8181461090c" ]
[ "examples/legacy/evaluate_defaults_live.py", "openmldefaults/utils/io.py", "tests/test_pareto.py" ]
[ "import argparse\nimport copy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport openmldefaults\nimport os\nimport pandas as pd\n\n\n# sshfs [email protected]:/rigel/home/jv2657/experiments ~/habanero_experiments\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_path', type=str,\n default=os.path.expanduser('~') + '/data/openml-defaults/surrogate__adaboost__predictive_accuracy__c8.arff')\n parser.add_argument('--resized_grid_size', type=int, default=8)\n parser.add_argument('--input_file', type=str, default=os.path.expanduser('~') + '/habanero_experiments/openml-defaults/20180826/surrogate__adaboost__predictive_accuracy__c8.arff/live_random_search/results.csv')\n return parser.parse_args()\n\n\ndef plot(df, y_label, output_file):\n sns_plot = sns.boxplot(x='n_defaults', y='evaluation', hue='strategy_type', data=df, palette=\"Set3\")\n fig = sns_plot.get_figure()\n fig.savefig(output_file)\n plt.clf()\n print(openmldefaults.utils.get_time(), 'saved to', output_file)\n\n\ndef count_results(df):\n print(df.groupby([\"strategy_type\", \"n_defaults\"]).agg(\"count\"))\n\n\ndef normalize_scores(df, task_minscore, task_maxscore):\n def normalize(row):\n eval = row['evaluation']\n min_score = task_minscore[row['task_id']]\n max_score = task_maxscore[row['task_id']]\n if min_score != max_score:\n return (eval - min_score) / (max_score - min_score)\n else:\n return min_score\n\n df = copy.deepcopy(df)\n df['evaluation'] = df.apply(lambda row: normalize(row), axis=1)\n return df\n\n\ndef run():\n args = parse_args()\n if not os.path.isfile(args.input_file):\n raise ValueError('Could not locate input file: %s' % args.input_file)\n\n dataset_name = os.path.basename(args.dataset_path)\n output_dir = os.path.dirname(args.input_file)\n df = pd.read_csv(filepath_or_buffer=args.input_file, sep=',')\n meta_data = openmldefaults.utils.get_dataset_metadata(args.dataset_path)\n\n df['strategy_type'] = df['strategy_name'].apply(lambda x: x.split('__')[0])\n df['n_defaults'] = df['strategy_name'].apply(lambda x: int(x.split('__')[1]))\n df = df.groupby(['strategy_name', 'task_id', 'strategy_type', 'n_defaults']).mean().reset_index()\n # removed unnamed columns\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\n\n df = df.loc[df['configuration_specification'] == args.resized_grid_size]\n\n # print statistics\n count_results(df)\n\n # normalize\n task_minscores = dict()\n task_maxscores = dict()\n for task_id in getattr(df, 'task_id').unique():\n df_task = df.loc[df['task_id'] == task_id]\n task_min = df_task.evaluation.min()\n task_max = df_task.evaluation.max()\n\n task_minscores[task_id] = task_min\n task_maxscores[task_id] = task_max\n\n outputfile_vanilla = os.path.join(output_dir, \"%s_live.png\" % dataset_name)\n plot(df, meta_data['scoring'], outputfile_vanilla)\n\n df_normalized = normalize_scores(df, task_minscores, task_maxscores)\n outputfile_normalized = os.path.join(output_dir, \"%s_live__normalized.png\" % dataset_name)\n plot(df_normalized, meta_data['scoring'], outputfile_normalized)\n\n\nif __name__ == '__main__':\n run()\n", "import arff\nimport ConfigSpace\nimport json\nimport numpy as np\nimport openmldefaults\nimport pandas as pd\n\nfrom typing import List\n\n\ndef get_setup_dirname(resized_grid_size, num_defaults):\n return 'c%d_d%d' % (resized_grid_size, num_defaults)\n\n\ndef print_columns(df, params):\n for param in params:\n unique = np.array(df[param].unique())\n print(openmldefaults.utils.get_time(), '%s (%s) unique values: %s 
(%d)' % (param, df[param].dtype, unique,\n len(unique)))\n\n\ndef get_meta_data_config_space(meta_data):\n cs_fn = getattr(openmldefaults.config_spaces,\n 'get_%s_%s_search_space' % (meta_data['classifier'], meta_data['config_space']))\n return cs_fn()\n\n\ndef get_component_mapping(config_space: ConfigSpace.ConfigurationSpace):\n \"\"\"\n Each hyperparameter has both a name and a meta-field, containing an component prefix.\n This function returns a mapping from the concatenated component prefix and hyperparameter\n name to the hyperparameter name (by which it can be obtained from the config space)\n \"\"\"\n result = dict()\n for param in config_space.get_hyperparameters():\n component_name = param.meta['component'] + '__' + param.name\n result[component_name] = param.name\n return result\n\n\ndef cast_columns_of_dataframe(df: pd.DataFrame, params: List, config_space: ConfigSpace.ConfigurationSpace):\n for param in params:\n hyperparameter = config_space.get_hyperparameter(param)\n\n if isinstance(hyperparameter, ConfigSpace.UniformIntegerHyperparameter) or \\\n (isinstance(hyperparameter, ConfigSpace.Constant) and isinstance(hyperparameter.value, int)) or \\\n (isinstance(hyperparameter, ConfigSpace.UnParametrizedHyperparameter) and isinstance(hyperparameter.value, int)):\n # limitation of pandas: can't mix nan and integer\n df[param] = df[param].dropna().apply(lambda x: str(int(x)))\n return df\n\n\ndef get_dataset_metadata(dataset_path):\n with open(dataset_path) as fp:\n first_line = fp.readline()\n if first_line[0] != '%':\n raise ValueError('arff data file should start with comment for meta-data')\n meta_data = json.loads(first_line[1:])\n return meta_data\n\n\ndef load_dataset(dataset_path, params, resized_grid_size, flip_performances, condition_on=None):\n if dataset_path.endswith('.feather'):\n import feather\n df = feather.read_dataframe(dataset_path)\n elif dataset_path.endswith('.arff'):\n with open(dataset_path, 'r') as fp:\n dataset = arff.load(fp)\n # see if there is meta_data\n fp.seek(0)\n try:\n first_line = fp.readline()\n meta_data = json.loads(first_line[1:])\n except json.decoder.JSONDecodeError:\n meta_data = None\n columns = [column_name for column_name, colum_type in dataset['attributes']]\n df = pd.DataFrame(data=dataset['data'], columns=columns)\n if meta_data is not None:\n config_space = get_meta_data_config_space(meta_data)\n df = cast_columns_of_dataframe(df, params, config_space)\n else:\n raise ValueError()\n print(openmldefaults.utils.get_time(), 'Original data frame dimensions:', df.shape)\n\n for param in params:\n if param not in df.columns.values:\n raise ValueError('Param column not found. 
Columns %s, illegal: %s' % (df.columns.values, param))\n\n if resized_grid_size is not None:\n df = openmldefaults.utils.reshape_configs(df, params, resized_grid_size)\n\n print_columns(df, params)\n\n # remove values that are not according to the condition\n if condition_on is not None:\n for column, value in condition_on.items():\n df = df.loc[df[column] == value]\n\n # always set the index\n df = df.set_index(params)\n\n num_obs, num_tasks = df.shape\n if flip_performances:\n for i in range(num_obs):\n for j in range(num_tasks):\n df.iloc[i, j] = -1 * df.iloc[i, j]\n\n return df\n", "import openmldefaults\nimport pandas as pd\nimport unittest\n\n\nclass TestParetoFunctions(unittest.TestCase):\n\n def test_simple_cull(self):\n frame = pd.DataFrame(data=[[0.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 2.0, 3.0],\n [2.0, 3.0, 2.0, 1.0],\n [3.0, 4.0, 1.0, 1.0]],\n columns=['idx', 'task 1', 'task 2', 'task 3'],\n dtype=float).set_index('idx')\n\n pareto_points, dominated_points = openmldefaults.utils.simple_cull(frame, openmldefaults.utils.dominates)\n reconstructed = pareto_points.append(dominated_points).sort_index().astype(float)\n pd.testing.assert_frame_equal(frame, reconstructed)\n\n def test_simple_cull_multi_level_index(self):\n frame = pd.DataFrame(data=[[0.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 1.0, 1.0, 2.0, 3.0],\n [1.0, 0.0, 3.0, 2.0, 1.0],\n [1.0, 1.0, 4.0, 1.0, 1.0]],\n columns=['idx0', 'idx1', 'task 1', 'task 2', 'task 3'],\n dtype=float).set_index(['idx0', 'idx1'])\n\n pareto_points, dominated_points = openmldefaults.utils.simple_cull(frame, openmldefaults.utils.dominates)\n reconstructed = pareto_points.append(dominated_points).sort_index().astype(float)\n pd.testing.assert_frame_equal(frame, reconstructed)\n" ]
[ [ "matplotlib.pyplot.clf", "pandas.read_csv" ], [ "pandas.DataFrame" ], [ "pandas.testing.assert_frame_equal", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
TileDB-Inc/TileDB-CLI
[ "e18e148fe5c6044b87d28595f5370eecac0b3c8f" ]
[ "tiledb_cli/tests/test_convert_from.py" ]
[ "import tiledb\nfrom tiledb_cli.root import root\nfrom tiledb_cli.convert_from import parse_kwargs\n\nfrom click.testing import CliRunner\nimport os\nimport numpy as np\nimport pandas as pd\nimport pytest\n\n\[email protected](autouse=True, scope=\"session\")\ndef create_test_simple_csv(temp_rootdir):\n \"\"\"\n Create a simple dense test array.\n \"\"\"\n path = os.path.abspath(os.path.join(temp_rootdir, \"simple.csv\"))\n\n with open(path, mode=\"w\") as csv_input:\n csv_input.write(\n (\n \"a,b,c,date\\n\"\n '1,\"text\",3.4,Mar/02/2021\\n'\n '2,\"hello\",1.234,Apr/07/2021\\n'\n '3,\"goodbye\",111.232,Dec/17/2021\\n'\n '4,\"world\",123123.12,Jul/21/2021\\n'\n '10,\"raisins\",14.232,Nov/09/2021\\n'\n )\n )\n\n expected_output = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 10],\n \"b\": [\"text\", \"hello\", \"goodbye\", \"world\", \"raisins\"],\n \"c\": [3.400, 1.234, 111.232, 123123.120, 14.232],\n \"date\": [\n \"Mar/02/2021\",\n \"Apr/07/2021\",\n \"Dec/17/2021\",\n \"Jul/21/2021\",\n \"Nov/09/2021\",\n ],\n }\n )\n\n return (\"simple\", expected_output)\n\n\nclass TestCSV:\n def test_parse_kwargs(self):\n kwargs = parse_kwargs(\n [\n \"--bool\",\n \"True\",\n \"--str\",\n \"helloworld\",\n \"--num\",\n \"2\",\n \"--numisstr\",\n '\"2\"',\n \"--boolisstr\",\n '\"False\"',\n \"--dictints\",\n \"hello:1;world:2\",\n \"--dictstrs\",\n 'hello:world;\"1\":\"2\"',\n \"--dictbools\",\n \"good:True;bye:False\",\n \"--dictmix\",\n 'bool:False;str:\"1\";int:2;3:\"three\";list:hey,\"hi\",True,1',\n \"--listabc\",\n \"a,b,c\",\n \"--list123\",\n \"1,2,3\",\n \"--listbool\",\n \"True,True,False\",\n \"--listmix\",\n 'False,\"1\",2',\n ]\n )\n\n assert kwargs[\"bool\"] == True\n assert kwargs[\"str\"] == \"helloworld\"\n assert kwargs[\"num\"] == 2\n assert kwargs[\"numisstr\"] == \"2\"\n assert kwargs[\"boolisstr\"] == \"False\"\n assert kwargs[\"dictints\"] == {\"hello\": 1, \"world\": 2}\n assert kwargs[\"dictstrs\"] == {\"hello\": \"world\", \"1\": \"2\"}\n assert kwargs[\"dictbools\"] == {\"good\": True, \"bye\": False}\n assert kwargs[\"dictmix\"] == {\n \"bool\": False,\n \"str\": \"1\",\n \"int\": 2,\n 3: \"three\",\n \"list\": [\"hey\", \"hi\", True, 1],\n }\n assert kwargs[\"listabc\"] == [\"a\", \"b\", \"c\"]\n assert kwargs[\"list123\"] == [1, 2, 3]\n assert kwargs[\"listbool\"] == [True, True, False]\n assert kwargs[\"listmix\"] == [False, \"1\", 2]\n\n def test_no_options(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri]\n \"\"\"\n test_name, expected_output = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert pd.DataFrame.equals(array.df[:], expected_output)\n\n def test_sparse(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --sparse True\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_sparse.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--sparse\", \"True\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.sparse == True\n\n def test_dense(self, runner, temp_rootdir, 
create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --sparse False\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_dense.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--sparse\", \"False\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.sparse == False\n\n def test_duplicates(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --allows-duplicates (False|True)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n\n uri = os.path.join(temp_rootdir, \"test_no_duplicates.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--allows-duplicates\",\n \"False\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.allows_duplicates == False\n\n uri = os.path.join(temp_rootdir, \"test_allows_duplicates.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--allows-duplicates\",\n \"True\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.allows_duplicates == True\n\n def test_capacity(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --capacity <int>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_capacity.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--capacity\", \"123456\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.capacity == 123456\n\n def test_cell_order(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --cell-order (row-major|col-major|global)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_cell_order.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--cell-order\", \"global\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.cell_order == \"global\"\n\n def test_full_domain(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --full-domain (True|False)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_full_domain.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--full-domain\", \"True\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n dim = array.schema.domain.dim(\"__tiledb_rows\")\n assert dim.domain[0] == np.iinfo(np.uint64).min\n assert dim.domain[1] == np.iinfo(np.uint64).max - dim.tile\n\n def test_date_spec(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --date-spec <column>:<datetime format spec>,...\n \"\"\"\n 
test_name, expected_output = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_date_spec.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--date-spec\", \"date:%b/%d/%Y\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert pd.DataFrame.equals(\n array.query([\"date\"]).df[:],\n pd.DataFrame(pd.to_datetime(expected_output[\"date\"])),\n )\n\n def test_mode_schema_only(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --mode (ingest|schema_only|append)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_mode_schema_only.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--mode\",\n \"schema_only\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.query(use_arrow=False).df[0].empty\n\n def test_row_start_idx(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --row-start-idx <int>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_row_start_idx.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"False\",\n \"--row-start-idx\",\n \"5\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.df[:].index.to_numpy()[0] == 5\n assert array.df[:].index.to_numpy()[-1] == 9\n\n def test_cell_order(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --cell-order (row-major|col-major|global)\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_cell_order.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--cell-order\", \"col-major\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.cell_order == \"col-major\"\n\n def test_tile_int(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --tile <int>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_tile_int.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--tile\", \"2\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.domain.dim(\"__tiledb_rows\").tile == 2\n\n def test_tile_with_attr(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --tile <attr>:<int>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_tile_with_attr.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--tile\", \"__tiledb_rows:2\"],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert 
array.schema.domain.dim(\"__tiledb_rows\").tile == 2\n\n def test_timestamp(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --timestamp <int>\n \"\"\"\n test_name, expected_output = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_timestamp.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--mode\",\n \"ingest\",\n \"--timestamp\",\n \"1\",\n ],\n )\n\n assert result.exit_code == 0\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--sparse\",\n \"True\",\n \"--mode\",\n \"append\",\n \"--timestamp\",\n \"2\",\n ],\n )\n\n assert result.exit_code == 0\n\n with tiledb.open(uri, timestamp=1) as array:\n assert pd.DataFrame.equals(\n array.df[:].loc[:, array.df[:].columns != \"__tiledb_rows\"],\n expected_output,\n )\n\n with tiledb.open(uri, timestamp=2) as array:\n assert pd.DataFrame.equals(\n array.df[:].loc[:, array.df[:].columns != \"__tiledb_rows\"],\n expected_output.append(expected_output, ignore_index=True),\n )\n\n def test_attr_filters(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --attr-filters <filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_attr_filters.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--attr-filters\", \"GzipFilter=9\"],\n )\n\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.attr(\"a\").filters.nfilters == 1\n assert array.schema.attr(\"a\").filters[0] == tiledb.GzipFilter(9)\n\n assert array.schema.attr(\"b\").filters.nfilters == 1\n assert array.schema.attr(\"b\").filters[0] == tiledb.GzipFilter(9)\n\n assert array.schema.attr(\"c\").filters.nfilters == 1\n assert array.schema.attr(\"c\").filters[0] == tiledb.GzipFilter(9)\n\n assert array.schema.attr(\"date\").filters.nfilters == 1\n assert array.schema.attr(\"date\").filters[0] == tiledb.GzipFilter(9)\n\n def test_attr_filters_multi(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --attr-filters <attr name>:<filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_attr_filters_multi.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--attr-filters\",\n (\n \"a:LZ4Filter=10,BitShuffleFilter;\"\n \"b:DoubleDeltaFilter,PositiveDeltaFilter=3\"\n ),\n ],\n )\n\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.attr(\"a\").filters.nfilters == 2\n assert array.schema.attr(\"a\").filters[0] == tiledb.LZ4Filter(10)\n assert array.schema.attr(\"a\").filters[1] == tiledb.BitShuffleFilter()\n\n assert array.schema.attr(\"b\").filters.nfilters == 2\n assert array.schema.attr(\"b\").filters[0] == tiledb.DoubleDeltaFilter()\n assert array.schema.attr(\"b\").filters[1] == tiledb.PositiveDeltaFilter(3)\n\n assert array.schema.attr(\"c\").filters.nfilters == 0\n\n assert array.schema.attr(\"date\").filters.nfilters == 0\n\n def 
test_coords_filters(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --coords-filters <filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_coords_filters.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--coords-filters\",\n \"GzipFilter=9\",\n ],\n )\n\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.coords_filters.nfilters == 1\n assert array.schema.coords_filters[0] == tiledb.GzipFilter(9)\n\n def test_dim_filters(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --dim-filters <filter name>,<filter name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_dim_filters.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--dim-filters\", \"GzipFilter=9\"],\n )\n print(result.stdout)\n assert result.exit_code == 0\n\n with tiledb.open(uri) as array:\n assert array.schema.domain.dim(0).filters.nfilters == 1\n assert array.schema.domain.dim(0).filters[0] == tiledb.GzipFilter(9)\n\n def test_sep(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --sep <str>\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_sep.tdb\")\n\n result = runner.invoke(\n root, [\"convert-from\", \"csv\", input_path, uri, \"--sep\", \" \"]\n )\n\n assert result.exit_code == 0\n with tiledb.open(uri) as array:\n assert len(array.df[:].columns) == 1\n\n def test_header_and_names(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --header 0 --names <column name>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_names.tdb\")\n\n result = runner.invoke(\n root,\n [\n \"convert-from\",\n \"csv\",\n input_path,\n uri,\n \"--header\",\n \"0\",\n \"--names\",\n \"d,c,b,a\",\n ],\n )\n\n assert result.exit_code == 0\n with tiledb.open(uri) as array:\n assert array.df[:].columns[0] == \"d\"\n assert array.df[:].columns[1] == \"c\"\n assert array.df[:].columns[2] == \"b\"\n assert array.df[:].columns[3] == \"a\"\n\n @pytest.mark.skip(\"does not work on windows?\")\n def test_skiprows(self, runner, temp_rootdir, create_test_simple_csv):\n \"\"\"\n Test for command\n\n tiledb convert_from [csv_file] [uri] --skiprows <int>,...\n \"\"\"\n test_name, _ = create_test_simple_csv\n input_path = os.path.join(temp_rootdir, f\"{test_name}.csv\")\n uri = os.path.join(temp_rootdir, \"test_skiprows.tdb\")\n\n result = runner.invoke(\n root,\n [\"convert-from\", \"csv\", input_path, uri, \"--skiprows\", \"0,1\"],\n )\n\n assert result.exit_code == 0\n with tiledb.open(uri) as array:\n assert len(array.df[:]) == 3\n" ]
[ [ "pandas.DataFrame.equals", "pandas.to_datetime", "numpy.iinfo", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
hankyul2/EfficientNetV2-pytorch
[ "bce59dae3ce69e3e7e8aa99e4f32214b015dd1f8" ]
[ "efficientnetv2/efficientnet_v2.py" ]
[ "import copy\nfrom functools import partial\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn\n\nfrom efficientnetv2 import get_efficientnet_v2_structure\nfrom efficientnetv2 import load_from_zoo\n\n\nclass ConvBNAct(nn.Sequential):\n \"\"\"Convolution-Normalization-Activation Module\"\"\"\n def __init__(self, in_channel, out_channel, kernel_size, stride, groups, norm_layer, act, conv_layer=nn.Conv2d):\n super(ConvBNAct, self).__init__(\n conv_layer(in_channel, out_channel, kernel_size, stride=stride, padding=(kernel_size-1)//2, groups=groups, bias=False),\n norm_layer(out_channel),\n act()\n )\n\n\nclass SEUnit(nn.Module):\n \"\"\"Squeeze-Excitation Unit\n\n paper: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper\n\n \"\"\"\n def __init__(self, in_channel, reduction_ratio=4, act1=partial(nn.SiLU, inplace=True), act2=nn.Sigmoid):\n super(SEUnit, self).__init__()\n hidden_dim = in_channel // reduction_ratio\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc1 = nn.Conv2d(in_channel, hidden_dim, (1, 1), bias=True)\n self.fc2 = nn.Conv2d(hidden_dim, in_channel, (1, 1), bias=True)\n self.act1 = act1()\n self.act2 = act2()\n\n def forward(self, x):\n return x * self.act2(self.fc2(self.act1(self.fc1(self.avg_pool(x)))))\n\n\nclass StochasticDepth(nn.Module):\n \"\"\"StochasticDepth\n\n paper: https://link.springer.com/chapter/10.1007/978-3-319-46493-0_39\n\n :arg\n - prob: Probability of dying\n - mode: \"row\" or \"all\". \"row\" means that each row survives with different probability\n \"\"\"\n def __init__(self, prob, mode):\n super(StochasticDepth, self).__init__()\n self.prob = prob\n self.survival = 1.0 - prob\n self.mode = mode\n\n def forward(self, x):\n if self.prob == 0.0 or not self.training:\n return x\n else:\n shape = [x.size(0)] + [1] * (x.ndim - 1) if self.mode == 'row' else [1]\n return x * torch.empty(shape).bernoulli_(self.survival).div_(self.survival).to(x.device)\n\n\nclass MBConvConfig:\n \"\"\"EfficientNet Building block configuration\"\"\"\n def __init__(self, expand_ratio: float, kernel: int, stride: int, in_ch: int, out_ch: int, layers: int,\n use_se: bool, fused: bool, act=nn.SiLU, norm_layer=nn.BatchNorm2d):\n self.expand_ratio = expand_ratio\n self.kernel = kernel\n self.stride = stride\n self.in_ch = in_ch\n self.out_ch = out_ch\n self.num_layers = layers\n self.act = act\n self.norm_layer = norm_layer\n self.use_se = use_se\n self.fused = fused\n\n @staticmethod\n def adjust_channels(channel, factor, divisible=8):\n new_channel = channel * factor\n divisible_channel = max(divisible, (int(new_channel + divisible / 2) // divisible) * divisible)\n divisible_channel += divisible if divisible_channel < 0.9 * new_channel else 0\n return divisible_channel\n\n\nclass MBConv(nn.Module):\n \"\"\"EfficientNet main building blocks\n\n :arg\n - c: MBConvConfig instance\n - sd_prob: stochastic path probability\n \"\"\"\n def __init__(self, c, sd_prob=0.0):\n super(MBConv, self).__init__()\n inter_channel = c.adjust_channels(c.in_ch, c.expand_ratio)\n block = []\n\n if c.expand_ratio == 1:\n block.append(('fused', ConvBNAct(c.in_ch, inter_channel, c.kernel, c.stride, 1, c.norm_layer, c.act)))\n elif c.fused:\n block.append(('fused', ConvBNAct(c.in_ch, inter_channel, c.kernel, c.stride, 1, c.norm_layer, c.act)))\n block.append(('fused_point_wise', ConvBNAct(inter_channel, c.out_ch, 1, 1, 1, c.norm_layer, nn.Identity)))\n else:\n block.append(('linear_bottleneck', ConvBNAct(c.in_ch, 
inter_channel, 1, 1, 1, c.norm_layer, c.act)))\n block.append(('depth_wise', ConvBNAct(inter_channel, inter_channel, c.kernel, c.stride, inter_channel, c.norm_layer, c.act)))\n block.append(('se', SEUnit(inter_channel, 4 * c.expand_ratio)))\n block.append(('point_wise', ConvBNAct(inter_channel, c.out_ch, 1, 1, 1, c.norm_layer, nn.Identity)))\n\n self.block = nn.Sequential(OrderedDict(block))\n self.use_skip_connection = c.stride == 1 and c.in_ch == c.out_ch\n self.stochastic_path = StochasticDepth(sd_prob, \"row\")\n\n def forward(self, x):\n out = self.block(x)\n if self.use_skip_connection:\n out = x + self.stochastic_path(out)\n return out\n\n\nclass EfficientNetV2(nn.Module):\n \"\"\"Pytorch Implementation of EfficientNetV2\n\n paper: https://arxiv.org/abs/2104.00298\n\n - reference 1 (pytorch): https://github.com/d-li14/efficientnetv2.pytorch/blob/main/effnetv2.py\n - reference 2 (official): https://github.com/google/automl/blob/master/efficientnetv2/effnetv2_configs.py\n\n :arg\n - layer_infos: list of MBConvConfig\n - out_channels: bottleneck channel\n - nlcass: number of class\n - dropout: dropout probability before classifier layer\n - stochastic depth: stochastic depth probability\n \"\"\"\n def __init__(self, layer_infos, out_channels=1280, nclass=0, dropout=0.2, stochastic_depth=0.0,\n block=MBConv, act_layer=nn.SiLU, norm_layer=nn.BatchNorm2d):\n super(EfficientNetV2, self).__init__()\n self.layer_infos = layer_infos\n self.norm_layer = norm_layer\n self.act = act_layer\n\n self.in_channel = layer_infos[0].in_ch\n self.final_stage_channel = layer_infos[-1].out_ch\n self.out_channels = out_channels\n\n self.cur_block = 0\n self.num_block = sum(stage.num_layers for stage in layer_infos)\n self.stochastic_depth = stochastic_depth\n\n self.stem = ConvBNAct(3, self.in_channel, 3, 2, 1, self.norm_layer, self.act)\n self.blocks = nn.Sequential(*self.make_stages(layer_infos, block))\n self.head = nn.Sequential(OrderedDict([\n ('bottleneck', ConvBNAct(self.final_stage_channel, out_channels, 1, 1, 1, self.norm_layer, self.act)),\n ('avgpool', nn.AdaptiveAvgPool2d((1, 1))),\n ('flatten', nn.Flatten()),\n ('dropout', nn.Dropout(p=dropout, inplace=True)),\n ('classifier', nn.Linear(out_channels, nclass) if nclass else nn.Identity())\n ]))\n\n def make_stages(self, layer_infos, block):\n return [layer for layer_info in layer_infos for layer in self.make_layers(copy.copy(layer_info), block)]\n\n def make_layers(self, layer_info, block):\n layers = []\n for i in range(layer_info.num_layers):\n layers.append(block(layer_info, sd_prob=self.get_sd_prob()))\n layer_info.in_ch = layer_info.out_ch\n layer_info.stride = 1\n return layers\n\n def get_sd_prob(self):\n sd_prob = self.stochastic_depth * (self.cur_block / self.num_block)\n self.cur_block += 1\n return sd_prob\n\n def forward(self, x):\n return self.head(self.blocks(self.stem(x)))\n\n def change_dropout_rate(self, p):\n self.head[-2] = nn.Dropout(p=p, inplace=True)\n\n\ndef efficientnet_v2_init(model):\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0.0, std=0.01)\n nn.init.zeros_(m.bias)\n\n\ndef get_efficientnet_v2(model_name, pretrained, nclass=0, dropout=0.1, stochastic_depth=0.2, **kwargs):\n residual_config = [MBConvConfig(*layer_config) for layer_config 
in get_efficientnet_v2_structure(model_name)]\n model = EfficientNetV2(residual_config, 1280, nclass, dropout=dropout, stochastic_depth=stochastic_depth, block=MBConv, act_layer=nn.SiLU)\n efficientnet_v2_init(model)\n\n if pretrained:\n load_from_zoo(model, model_name)\n\n return model" ]
[ [ "torch.nn.Dropout", "torch.empty", "torch.nn.Conv2d", "torch.nn.Flatten", "torch.nn.init.ones_", "torch.nn.Linear", "torch.nn.Identity", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.normal_", "torch.nn.init.zeros_", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nlp-tlp/quickgraph
[ "34d888b055a78939095005f9cef363c0430664be" ]
[ "server_cluster/app.py" ]
[ "'''\n API for rank order clustering documents.\n'''\n\nimport itertools\nimport pathlib\nfrom collections import Counter, defaultdict\nfrom enum import Enum\nfrom typing import List, Optional\n\nimport numpy as np\nimport uvicorn\nfrom fastapi import Body, FastAPI, HTTPException\nfrom loguru import logger\nfrom nltk import FreqDist\nfrom pydantic import BaseModel, Field\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.cluster import AgglomerativeClustering, KMeans\nfrom sklearn.decomposition import LatentDirichletAllocation\n\nlog_path = pathlib.Path(__file__).parent.resolve()\n\n\nlogger.add(\n f\"{log_path}/api.log\", rotation=\"10 MB\")\n\napp = FastAPI()\n\n# Load SBERT model\nlogger.info(f'Loading model')\nmodel_checkpoint = 'all-distilroberta-v1'\nmodel = SentenceTransformer(model_checkpoint)\nlogger.info(f'{model_checkpoint} loaded')\n\n\[email protected](\"/ping\")\ndef ping_pong():\n ''' Checks API service '''\n return {\"message\": \"pong\"}\n\n\nclass Data(BaseModel):\n corpus: List[str]\n\n\[email protected](\"/rank_cluster\")\ndef rank_cluster(data: Data):\n '''\n\n '''\n\n logger.info(\n \"Performing rank order clustering with SentBERT and Agglomerative clustering\")\n logger.info(f'Corpus size: {len(data.corpus)}')\n\n # Embed sentences\n logger.info(f'Corpus embedding started')\n corpus_embeddings = model.encode(\n data.corpus, batch_size=64) # show_progress_bar=False, convert_to_tensor=True\n logger.info(f'Corpus embedding finished')\n\n logger.info(f'Clustering started')\n logger.info('Transforming embedding for agglomerative clustering')\n # Normalize the embeddings to unit length\n corpus_embeddings = corpus_embeddings / \\\n np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)\n\n # , affinity='cosine', linkage='average', distance_threshold=0.4)\n clustering_model = AgglomerativeClustering(\n n_clusters=None, distance_threshold=1.5)\n\n clustering_model.fit(corpus_embeddings)\n logger.info('fitted cluster model')\n\n cluster_assignment = clustering_model.labels_\n # logger.debug(cluster_assignment)\n logger.info(f'Clustering finished')\n\n clustered_corpus = []\n for sentence_id, cluster_id in enumerate(cluster_assignment):\n # print(sentence_id, cluster_id)\n clustered_corpus.append({\"id\": int(sentence_id), \"cluster\": int(\n cluster_id), \"sentence\": data.corpus[sentence_id]})\n\n # Get human-interpretable label for cluster\n groups = defaultdict(list)\n\n # Group clusters into arrays\n for obj in clustered_corpus:\n groups[obj[\"cluster\"]].append(obj)\n\n # Find topn terms in clusters\n cluster_terms = {}\n for cluster in groups.values():\n cluster_number = cluster[0]['cluster']\n\n cluster_tokens = list(itertools.chain(\n *[text['sentence'].split() for text in cluster]))\n\n token_freq_dist = FreqDist(cluster_tokens)\n top_n_terms = token_freq_dist.most_common(5)\n top_n_term_string = \"|\".join([term for term, _ in top_n_terms])\n cluster_terms[cluster_number] = top_n_term_string\n\n # Get cluster counts / distribution\n cluster_distribution = Counter(\n sentence['cluster'] for sentence in clustered_corpus)\n # print(cluster_distribution)\n\n cluster_details = [{\"cluster_number\": cluster_no, 'count': cluster_distribution[cluster_no],\n 'top_n_terms': cluster_terms[cluster_no]} for cluster_no in cluster_distribution.keys()]\n\n cluster_details_sorted = sorted(\n cluster_details, key=lambda d: d['cluster_number'])\n\n return {'clustered_corpus': clustered_corpus, 'cluster_details': cluster_details_sorted}\n\n\nif __name__ == 
'__main__':\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n" ]
[ [ "sklearn.cluster.AgglomerativeClustering", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
ritchie46/flopy
[ "8e7284dcb3aaf5c12293d442248c2c2d9959f835", "8e7284dcb3aaf5c12293d442248c2c2d9959f835", "8e7284dcb3aaf5c12293d442248c2c2d9959f835", "8e7284dcb3aaf5c12293d442248c2c2d9959f835" ]
[ "flopy/modflow/mfdrn.py", "flopy/mf6/utils/binaryfile_utils.py", "flopy/modflow/mfswt.py", "flopy/mf6/data/mfdataarray.py" ]
[ "\"\"\"\r\nmfdrn module. Contains the ModflowDrn class. Note that the user can access\r\nthe ModflowDrn class as `flopy.modflow.ModflowDrn`.\r\n\r\nAdditional information for this MODFLOW package can be found at the `Online\r\nMODFLOW Guide\r\n<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?drn.htm>`_.\r\n\r\n\"\"\"\r\nimport sys\r\nimport numpy as np\r\nfrom ..pakbase import Package\r\nfrom ..utils.util_list import MfList\r\nfrom ..utils.recarray_utils import create_empty_recarray\r\n\r\n\r\nclass ModflowDrn(Package):\r\n \"\"\"\r\n MODFLOW Drain Package Class.\r\n\r\n Parameters\r\n ----------\r\n model : model object\r\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to which\r\n this package will be added.\r\n ipakcb : int\r\n A flag that is used to determine if cell-by-cell budget data should be\r\n saved. If ipakcb is non-zero cell-by-cell budget data will be saved.\r\n (default is None).\r\n stress_period_data : list of boundaries, recarrays, or dictionary of\r\n boundaries.\r\n Each drain cell is defined through definition of\r\n layer(int), row(int), column(int), elevation(float),\r\n conductance(float).\r\n The simplest form is a dictionary with a lists of boundaries for each\r\n stress period, where each list of boundaries itself is a list of\r\n boundaries. Indices of the dictionary are the numbers of the stress\r\n period. This gives the form of::\r\n\r\n stress_period_data =\r\n {0: [\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n ],\r\n 1: [\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n ], ...\r\n kper:\r\n [\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n [lay, row, col, stage, cond],\r\n ]\r\n }\r\n\r\n Note that if no values are specified for a certain stress period, then\r\n the list of boundaries for the previous stress period for which values\r\n were defined is used. Full details of all options to specify\r\n stress_period_data can be found in the flopy3boundaries Notebook in\r\n the basic subdirectory of the examples directory.\r\n dtype : dtype definition\r\n if data type is different from default\r\n options : list of strings\r\n Package options. (default is None).\r\n extension : string\r\n Filename extension (default is 'drn')\r\n unitnumber : int\r\n File unit number (default is None).\r\n filenames : str or list of str\r\n Filenames to use for the package and the output files. If\r\n filenames=None the package name will be created using the model name\r\n and package extension and the cbc output name will be created using\r\n the model name and .cbc extension (for example, modflowtest.cbc),\r\n if ipakcbc is a number greater than zero. If a single string is passed\r\n the package will be set to the string and cbc output names will be\r\n created using the model name and .cbc extension, if ipakcbc is a\r\n number greater than zero. 
To define the names for all package files\r\n (input and output) the length of the list of strings should be 2.\r\n Default is None.\r\n\r\n Attributes\r\n ----------\r\n\r\n Methods\r\n -------\r\n\r\n See Also\r\n --------\r\n\r\n Notes\r\n -----\r\n Parameters are not supported in FloPy.\r\n If \"RETURNFLOW\" in passed in options, the drain return package (DRT) activated, which expects\r\n a different (longer) dtype for stress_period_data\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> ml = flopy.modflow.Modflow()\r\n >>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all\r\n >>> #stress periods\r\n >>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec)\r\n\r\n \"\"\"\r\n\r\n def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None,\r\n extension='drn', unitnumber=None, options=None,\r\n filenames=None, **kwargs):\r\n\r\n # set default unit number of one is not specified\r\n if unitnumber is None:\r\n unitnumber = ModflowDrn.defaultunit()\r\n\r\n # set filenames\r\n if filenames is None:\r\n filenames = [None, None]\r\n elif isinstance(filenames, str):\r\n filenames = [filenames, None]\r\n elif isinstance(filenames, list):\r\n if len(filenames) < 2:\r\n filenames.append(None)\r\n\r\n # update external file information with cbc output, if necessary\r\n if ipakcb is not None:\r\n fname = filenames[1]\r\n model.add_output_file(ipakcb, fname=fname,\r\n package=ModflowDrn.ftype())\r\n else:\r\n ipakcb = 0\r\n\r\n if options is None:\r\n options = []\r\n self.is_drt = False\r\n for opt in options:\r\n if opt.upper() == \"RETURNFLOW\":\r\n self.is_drt = True\r\n break\r\n if self.is_drt:\r\n name = [\"DRT\"]\r\n else:\r\n name = [ModflowDrn.ftype()]\r\n units = [unitnumber]\r\n extra = ['']\r\n\r\n # set package name\r\n fname = [filenames[0]]\r\n\r\n # Call ancestor's init to set self.parent, extension, name and unit number\r\n Package.__init__(self, model, extension=extension, name=name,\r\n unit_number=units, extra=extra, filenames=fname)\r\n\r\n self.heading = '# {} package for '.format(self.name[0]) + \\\r\n ' {}, '.format(model.version_types[model.version]) + \\\r\n 'generated by Flopy.'\r\n self.url = 'drn.htm'\r\n\r\n self.ipakcb = ipakcb\r\n\r\n self.np = 0\r\n\r\n self.options = options\r\n if dtype is not None:\r\n self.dtype = dtype\r\n else:\r\n self.dtype = self.get_default_dtype(\r\n structured=self.parent.structured, is_drt=self.is_drt)\r\n self.stress_period_data = MfList(self, stress_period_data)\r\n self.parent.add_package(self)\r\n\r\n @staticmethod\r\n def get_default_dtype(structured=True, is_drt=False):\r\n if structured:\r\n if not is_drt:\r\n dtype = np.dtype([(\"k\", np.int), (\"i\", np.int),\r\n (\"j\", np.int), (\"elev\", np.float32),\r\n (\"cond\", np.float32)])\r\n else:\r\n dtype = np.dtype([(\"k\", np.int), (\"i\", np.int),\r\n (\"j\", np.int), (\"elev\", np.float32),\r\n (\"cond\", np.float32), (\"layr\", np.int),\r\n (\"rowr\", np.int), (\"colr\", np.int),\r\n (\"rfprop\", np.float32)])\r\n else:\r\n dtype = np.dtype([(\"node\", np.int), (\"elev\", np.float32),\r\n (\"cond\", np.float32)])\r\n return dtype\r\n\r\n def ncells(self):\r\n # Returns the maximum number of cells that have drains (developed for MT3DMS SSM package)\r\n # print 'Function must be implemented properly for drn package'\r\n return self.stress_period_data.mxact\r\n\r\n def write_file(self, check=True):\r\n \"\"\"\r\n Write the package file.\r\n\r\n Parameters\r\n ----------\r\n check : boolean\r\n Check package data for common 
errors. (default True)\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n \"\"\"\r\n if check: # allows turning off package checks when writing files at model level\r\n self.check(f='{}.chk'.format(self.name[0]),\r\n verbose=self.parent.verbose, level=1)\r\n f_drn = open(self.fn_path, 'w')\r\n f_drn.write('{0}\\n'.format(self.heading))\r\n # f_drn.write('%10i%10i\\n' % (self.mxactd, self.idrncb))\r\n line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact,\r\n self.ipakcb)\r\n\r\n if self.is_drt:\r\n line += \"{0:10d}{0:10d}\".format(0)\r\n for opt in self.options:\r\n line += ' ' + str(opt)\r\n line += '\\n'\r\n f_drn.write(line)\r\n self.stress_period_data.write_transient(f_drn)\r\n f_drn.close()\r\n\r\n def add_record(self, kper, index, values):\r\n try:\r\n self.stress_period_data.add_record(kper, index, values)\r\n except Exception as e:\r\n raise Exception(\"mfdrn error adding record to list: \" + str(e))\r\n\r\n @staticmethod\r\n def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False):\r\n # get an empty recarray that corresponds to dtype\r\n dtype = ModflowDrn.get_default_dtype(structured=structured,\r\n is_drt=is_drt)\r\n if aux_names is not None:\r\n dtype = Package.add_to_dtype(dtype, aux_names, np.float32)\r\n return create_empty_recarray(ncells, dtype, default_value=-1.0E+10)\r\n\r\n @staticmethod\r\n def get_sfac_columns():\r\n return ['cond']\r\n\r\n @staticmethod\r\n def load(f, model, nper=None, ext_unit_dict=None, check=True):\r\n \"\"\"\r\n Load an existing package.\r\n\r\n Parameters\r\n ----------\r\n f : filename or file handle\r\n File to load.\r\n model : model object\r\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to\r\n which this package will be added.\r\n ext_unit_dict : dictionary, optional\r\n If the arrays in the file are specified using EXTERNAL,\r\n or older style array control records, then `f` should be a file\r\n handle. In this case ext_unit_dict is required, which can be\r\n constructed using the function\r\n :class:`flopy.utils.mfreadnam.parsenamefile`.\r\n check : boolean\r\n Check package data for common errors. (default True)\r\n\r\n Returns\r\n -------\r\n drn : ModflowDrn object\r\n ModflowDrn object.\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> m = flopy.modflow.Modflow()\r\n >>> drn = flopy.modflow.ModflowDrn.load('test.drn', m)\r\n\r\n \"\"\"\r\n\r\n if model.verbose:\r\n sys.stdout.write('loading drn package file...\\n')\r\n\r\n return Package.load(f, model, ModflowDrn, nper=nper, check=check,\r\n ext_unit_dict=ext_unit_dict)\r\n\r\n @staticmethod\r\n def ftype():\r\n return 'DRN'\r\n\r\n @staticmethod\r\n def defaultunit():\r\n return 21\r\n", "import os\r\nimport numpy as np\r\nfrom ...utils import binaryfile as bf\r\n\r\n\r\nclass MFOutput:\r\n \"\"\"\r\n Wrapper class for Binary Arrays. This class enables directly getting slices\r\n from the binary output. It is intended to be called from the __getitem__\r\n method of the SimulationDict() class. Implemented to conserve memory.\r\n\r\n Parameters\r\n ----------\r\n path: binary file path location\r\n mfdict: SimulationDict() object\r\n key: OrderedDictionary key ex. 
('flow15','CBC','FLOW RIGHT FACE')\r\n\r\n Returns\r\n -------\r\n Xarray of [n,n,n,n] dimension\r\n\r\n Usage:\r\n -----\r\n >>> val = MFOutput(mfdict, path, key)\r\n >>> return val.data\r\n\r\n User interaction:\r\n -----------------\r\n >>> data[('flow15','CBC','FLOW RIGHT FACE')][:,0,1,:]\r\n or\r\n >>> data[('flow15','CBC','FLOW RIGHT FACE')]\r\n \"\"\"\r\n def __init__(self, mfdict, path, key):\r\n self.mfdict = mfdict\r\n data = MFOutputRequester(mfdict, path, key)\r\n try:\r\n self.data = data.querybinarydata\r\n except AttributeError:\r\n self.data = np.array([[[[]]]])\r\n\r\n def __iter__(self):\r\n yield self.data\r\n\r\n def __getitem__(self, index):\r\n self.data = self.data[index]\r\n return self.data\r\n\r\n\r\nclass MFOutputRequester:\r\n \"\"\"\r\n MFOutputRequest class is a helper function to enable the user to query\r\n binary data from the SimulationDict() object on the fly without\r\n actually storing it in the SimulationDict() object.\r\n\r\n Parameters:\r\n ----------\r\n mfdict: OrderedDict\r\n local instance of the SimulationDict() object\r\n path:\r\n pointer to the MFSimulationPath object\r\n key: tuple\r\n user requested data key\r\n\r\n Methods:\r\n -------\r\n MFOutputRequester.querybinarydata\r\n returns: Xarray object\r\n\r\n Examples:\r\n --------\r\n >>> data = MFOutputRequester(mfdict, path, key)\r\n >>> data.querybinarydata\r\n \"\"\"\r\n\r\n def __init__(self, mfdict, path, key):\r\n self.path = path\r\n self.mfdict = mfdict\r\n self.dataDict = {}\r\n # get the binary file locations, create a dictionary key to look them\r\n # up from, store in self.dataDict\r\n self._getbinaryfilepaths()\r\n\r\n # check if supplied key exists, and model grid type\r\n if key in self.dataDict:\r\n if (key[0], 'disv', 'dimensions', 'nvert') in self.mfdict:\r\n self.querybinarydata = \\\r\n self._querybinarydata_vertices(self.mfdict, key)\r\n elif (key[0], 'disu', 'connectiondata', 'iac') in self.mfdict:\r\n self.querybinarydata = self._querybinarydata_unstructured(key)\r\n else:\r\n self.querybinarydata = self._querybinarydata(key)\r\n elif key == ('model', 'HDS', 'IamAdummy'):\r\n pass\r\n else:\r\n print('\\nValid Keys Are:\\n')\r\n for valid_key in self.dataDict:\r\n print(valid_key)\r\n raise KeyError('Invalid key {}'.format(key))\r\n\r\n def _querybinarydata(self, key):\r\n # Basic definition to get output from modflow binary files for\r\n # simulations using a structured grid\r\n path = self.dataDict[key]\r\n bintype = key[1]\r\n\r\n bindata = self._get_binary_file_object(path, bintype, key)\r\n\r\n if bintype == 'CBC':\r\n try:\r\n return np.array(bindata.get_data(text=key[-1], full3D=True))\r\n except ValueError:\r\n # imeth == 6\r\n return np.array(bindata.get_data(text=key[-1], full3D=False))\r\n else:\r\n return np.array(bindata.get_alldata())\r\n\r\n def _querybinarydata_vertices(self, mfdict, key):\r\n # Basic definition to get output data from binary output files for\r\n # simulations that define grid by vertices\r\n path = self.dataDict[key]\r\n bintype = key[1]\r\n\r\n bindata = self._get_binary_file_object(path, bintype, key)\r\n\r\n if bintype == 'CBC':\r\n if key[-1] == 'FLOW-JA-FACE':\r\n data = np.array(bindata.get_data(text=key[-1]))\r\n # uncomment line to remove extra dimensions from data\r\n # data data.shape = (len(times), -1)\r\n return data\r\n\r\n else:\r\n try:\r\n data = np.array(bindata.get_data(text=key[-1],\r\n full3D=True))\r\n except ValueError:\r\n # imeth == 6\r\n data = np.array(bindata.get_data(text=key[-1],\r\n full3D=False))\r\n 
else:\r\n data = np.array(bindata.get_alldata())\r\n\r\n # uncomment line to remove extra dimensions from data\r\n # data = _reshape_binary_data(data, 'V')\r\n return data\r\n\r\n def _querybinarydata_unstructured(self, key):\r\n # get unstructured binary data in numpy array format.\r\n path = self.dataDict[key]\r\n bintype = key[1]\r\n\r\n bindata = self._get_binary_file_object(path, bintype, key)\r\n\r\n if bintype == 'CBC':\r\n try:\r\n data = np.array(bindata.get_data(text=key[-1], full3D=True))\r\n except ValueError:\r\n data = np.array(bindata.get_data(text=key[-1], full3D=False))\r\n else:\r\n data = bindata.get_alldata()\r\n\r\n # remove un-needed dimensions\r\n data = _reshape_binary_data(data, 'U')\r\n\r\n if key[-1] == \"FLOW-JA-FACE\":\r\n return data\r\n\r\n else:\r\n return data\r\n\r\n def _get_binary_file_object(self, path, bintype, key):\r\n # simple method that trys to open the binary file object using Flopy\r\n if bintype == 'CBC':\r\n try:\r\n return bf.CellBudgetFile(path, precision='double')\r\n except AssertionError:\r\n raise AssertionError('{} does not '\r\n 'exist'.format(self.dataDict[key]))\r\n\r\n elif bintype == 'HDS':\r\n try:\r\n return bf.HeadFile(path, precision='double')\r\n except AssertionError:\r\n raise AssertionError('{} does not '\r\n 'exist'.format(self.dataDict[key]))\r\n\r\n elif bintype == 'DDN':\r\n try:\r\n return bf.HeadFile(path, text='drawdown', precision='double')\r\n except AssertionError:\r\n raise AssertionError('{} does not '\r\n 'exist'.format(self.dataDict[key]))\r\n\r\n elif bintype == 'UCN':\r\n try:\r\n return bf.UcnFile(path, precision=\"single\")\r\n except AssertionError:\r\n raise AssertionError('{} does not '\r\n 'exist'.format(self.dataDict[key]))\r\n\r\n else:\r\n raise AssertionError()\r\n\r\n @staticmethod\r\n def _get_vertices(mfdict, key):\r\n \"\"\"\r\n Depreciated! Consider removing from code.\r\n\r\n Parameters\r\n ----------\r\n key: binary query dictionary key\r\n\r\n Returns\r\n -------\r\n information defining specified vertices for all model cells to be added\r\n to xarray as coordinates.\r\n cellid: (list) corresponds to the modflow CELL2d cell number\r\n xcyc: (n x 2) dimensional Pandas object of tuples defining the CELL2d\r\n center coordinates\r\n nverts: (list) number of xy vertices corresponding to a cell\r\n xv: (n x nverts) dimensional Pandas object of tuples. Contains x\r\n vertices for a cell\r\n yv: (n x nverts) dimensional Pandas object of tuples. 
Contains y\r\n vertices for a cell\r\n topv: (n x nlayers) dimensional Pandas object of cell top elevations\r\n corresponding to a row column location\r\n botmv: (n x nlayers) dimensional Pandas object of cell bottom\r\n elevations corresponding to a row column location\r\n \"\"\"\r\n\r\n try:\r\n import pandas as pd\r\n except Exception as e:\r\n msg = 'MFOutputRequester._get_vertices(): requires pandas'\r\n raise ImportError(msg)\r\n\r\n mname = key[0]\r\n cellid = mfdict[(mname, 'DISV8', 'CELL2D', 'cell2d_num')]\r\n\r\n cellxc = mfdict[(mname, 'DISV8', 'CELL2D', 'xc')]\r\n cellyc = mfdict[(mname, 'DISV8', 'CELL2D', 'yc')]\r\n xcyc = [(cellxc[i], cellyc[i]) for i in range(len(cellxc))]\r\n xcyc = pd.Series(xcyc, dtype='object')\r\n\r\n nverts = mfdict[(mname, 'DISV8', 'CELL2D', 'nvert')]\r\n vertnums = mfdict[(mname, 'DISV8', 'CELL2D', 'iv')]\r\n vertid = mfdict[(mname, 'DISV8', 'VERTICES', 'vert_num')]\r\n vertx = mfdict[(mname, 'DISV8', 'VERTICES', 'x')]\r\n verty = mfdict[(mname, 'DISV8', 'VERTICES', 'y')]\r\n # get vertices that correspond to CellID list\r\n xv = []\r\n yv = []\r\n for line in vertnums:\r\n tempx = []\r\n tempy = []\r\n for vert in line:\r\n idx = vertid.index(vert)\r\n tempx.append(vertx[idx])\r\n tempy.append(verty[idx])\r\n xv.append(tempx)\r\n yv.append(tempy)\r\n xv = pd.Series(xv, dtype='object')\r\n yv = pd.Series(yv, dtype='object')\r\n\r\n top = np.array(mfdict[(mname, 'DISV8', 'CELLDATA', 'top')])\r\n botm = np.array(mfdict[(mname, 'DISV8', 'CELLDATA', 'botm')])\r\n top = top.tolist()\r\n botm = botm.tolist()\r\n # get cell top and bottom by layer\r\n topv = list(zip(top, *botm[:-1]))\r\n botmv = list(zip(*botm))\r\n topv = pd.Series(topv, dtype='object')\r\n botmv = pd.Series(botmv, dtype='object')\r\n\r\n return cellid, xcyc, nverts, xv, yv, topv, botmv\r\n\r\n def _getbinaryfilepaths(self):\r\n # model paths\r\n self.modelpathdict = {}\r\n for i in self.path.model_relative_path:\r\n self.modelpathdict[i] = self.path.get_model_path(i)\r\n sim_path = self.path.get_sim_path()\r\n self.binarypathdict = {}\r\n # check output control to see if a binary file is supposed to exist.\r\n # Get path to that file\r\n for i in self.modelpathdict:\r\n if (i, 'oc', 'options', 'budget_filerecord') in self.mfdict:\r\n cbc = self.mfdict[(i, 'oc', 'options', 'budget_filerecord')]\r\n if cbc.get_data() is not None:\r\n self.binarypathdict[(i, 'CBC')] = \\\r\n os.path.join(sim_path, cbc.get_data()[0][0])\r\n\r\n if (i, 'oc', 'options', 'head_filerecord') in self.mfdict:\r\n hds = self.mfdict[(i, 'oc', 'options', 'head_filerecord')]\r\n if hds.get_data() is not None:\r\n self.binarypathdict[(i, 'HDS')] = \\\r\n os.path.join(sim_path, hds.get_data()[0][0])\r\n\r\n if (i, 'oc', 'options', 'drawdown_filerecord') in self.mfdict:\r\n ddn = self.mfdict[(i, 'oc', 'options', 'drawdown_filerecord')]\r\n if ddn.get_data() is not None:\r\n self.binarypathdict[(i, 'DDN')] = \\\r\n os.path.join(sim_path, ddn.get_data()[0][0])\r\n\r\n self._setbinarykeys(self.binarypathdict)\r\n\r\n def _setbinarykeys(self, binarypathdict):\r\n # check that if a binary file is supposed to exist, it does, and create\r\n # a dictionary key to access that data\r\n for key in binarypathdict:\r\n path = binarypathdict[key]\r\n if key[1] == 'CBC':\r\n try:\r\n readcbc = bf.CellBudgetFile(path, precision='double')\r\n for record in readcbc.get_unique_record_names():\r\n name = record.decode(\"utf-8\").strip(' ')\r\n # store keys along with model name in ordered dict?\r\n self.dataDict[(key[0], key[1], name)] = 
path\r\n readcbc.close()\r\n\r\n except:\r\n pass\r\n\r\n elif key[1] == 'HDS':\r\n try:\r\n readhead = bf.HeadFile(path, precision='double')\r\n self.dataDict[(key[0], key[1], 'HEAD')] = path\r\n readhead.close()\r\n\r\n except:\r\n pass\r\n\r\n elif key[1] == 'DDN':\r\n try:\r\n readddn = bf.HeadFile(path, text='drawdown',\r\n precision='double')\r\n self.dataDict[(key[0], key[1], 'DRAWDOWN')] = path\r\n readddn.close()\r\n\r\n except:\r\n pass\r\n\r\n elif key[1] == 'UCN':\r\n try:\r\n readucn = bf.UcnFile(path, precision='single')\r\n self.dataDict[(key[0], key[1], 'CONCENTRATION')] = path\r\n readucn.close()\r\n\r\n except:\r\n pass\r\n\r\n else:\r\n pass\r\n\r\n @staticmethod\r\n def getkeys(mfdict, path, print_keys=True):\r\n # use a dummy key to get valid binary output keys\r\n dummy_key = ('model', 'HDS', 'IamAdummy')\r\n x = MFOutputRequester(mfdict, path, dummy_key)\r\n keys = [i for i in x.dataDict]\r\n if print_keys is True:\r\n for key in keys:\r\n print(key)\r\n return x\r\n\r\n\r\ndef _reshape_binary_data(data, dtype=None):\r\n # removes unnecessary dimensions from data returned by\r\n # flopy.utils.binaryfile\r\n time = len(data)\r\n data = np.array(data)\r\n if dtype is None:\r\n return data\r\n elif dtype == 'V':\r\n nodes = len(data[0][0][0])\r\n data.shape = (time, -1, nodes)\r\n elif dtype == 'U':\r\n data.shape = (time, -1)\r\n else:\r\n err = \"Invalid dtype flag supplied, valid are dtype='U', dtype='V'\"\r\n raise Exception(err)\r\n return data\r\n", "\"\"\"\r\nmfswt module. Contains the ModflowSwt class. Note that the user can access\r\nthe ModflowSub class as `flopy.modflow.ModflowSwt`.\r\n\r\nAdditional information for this MODFLOW package can be found at the `Online\r\nMODFLOW Guide\r\n<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/swt.htm>`_.\r\n\r\n\"\"\"\r\nimport sys\r\n\r\nimport numpy as np\r\n\r\nfrom ..pakbase import Package\r\nfrom ..utils import Util2d, Util3d, read1d\r\n\r\n\r\nclass ModflowSwt(Package):\r\n \"\"\"\r\n MODFLOW SUB-WT Package Class.\r\n\r\n Parameters\r\n ----------\r\n model : model object\r\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to which\r\n this package will be added.\r\n ipakcb : int\r\n A flag that is used to determine if cell-by-cell budget data should be\r\n saved. If ipakcb is non-zero cell-by-cell budget data will be saved.\r\n (default is 0).\r\n iswtoc : int\r\n iswtoc is a flag used to control output of information generated by the\r\n SUB Package. (default is 0).\r\n nystm : int\r\n nsystm is the number of systems of interbeds. (default is 1).\r\n ithk : int\r\n ithk is a flag to determine how thicknesses of compressible sediments\r\n vary in response to changes in saturated thickness. If ithk < 1,\r\n thickness of compressible sediments is constant. If ithk > 0, thickness\r\n of compressible sediments varies in response to changes in saturated\r\n thickness. (default is 1).\r\n ivoid : int\r\n ivoid is a flag to determine how void ratios of compressible sediments\r\n vary in response to changes in saturated thickness. If ivoid < 1, void\r\n ratio will be treated as a constant. If ivoid > 0, void ratio will be\r\n treated as a variable. (default is 0).\r\n nn : int\r\n nn is the number of nodes used to discretize the half space to\r\n approximate the head distributions in systems of delay interbeds.\r\n (default is 20).\r\n istpcs : int\r\n istpcs is a flag to determine how initial preconsolidation stress will\r\n be obtained. 
If istpcs does not equal 0, an array of offset values will\r\n be read in for each model layer. The offset values will be added to the\r\n initial effective stress to get initial preconsolidation stress. If\r\n istpcs = 0, an array with initial preconsolidation stress values will\r\n be read. (default is 1).\r\n icrcc : int\r\n icrcc is a flag to determine how recompression and compression indices\r\n will be obtained. If ICRCC is not equal to 0, arrays of elastic\r\n specific storage and inelastic skeletal specific storage will be read\r\n for each system of interbeds; the recompression index and compression\r\n index will not be read. If icrcc = 0, arrays of recompression index\r\n and compression index will be read for each system of interbeds;\r\n elastic skeletal specific storage and inelastic skeletal specific\r\n storage will not be read. (default is 0).\r\n lnwt : int or array of ints (nsystm)\r\n lnwt is a one-dimensional array specifying the model layer assignments\r\n for each system of interbeds. (default is 0).\r\n izcfl : int\r\n izcfl is a flag to specify whether or not initial calculated\r\n values of layer-center elevation will be printed. (default is 0).\r\n izcfm : int\r\n izcfm is is a code for the format in which layer-center elevation will\r\n be printed. (default is 0).\r\n iglfl : int\r\n iglfl is a flag to specify whether or not initial calculated values of\r\n geostatic stress will be printed. (default is 0).\r\n iglfm : int\r\n iglfm is a code for the format in which geostatic stress will be\r\n printed. (default is 0).\r\n iestfl : int\r\n iestfl is a flag to specify whether or not initial calculated values of\r\n effective stress will be printed. (default is 0).\r\n iestfm : int\r\n iestfm is a code for the format in which effective stress will be\r\n printed. (default is 0).\r\n ipcsfl : int\r\n ipcsfl is a flag to specify whether or not initial calculated values of\r\n preconsolidation stress will be printed. (default is 0).\r\n ipcsfm : int\r\n ipcsfm is a code for the format in which preconsolidation stress will\r\n be printed. (default is 0).\r\n istfl : int\r\n istfl is a flag to specify whether or not initial equivalent storage\r\n properties will be printed for each system of interbeds. If icrcc is\r\n not equal to 0, the\r\n equivalent storage properties that can be printed are recompression and\r\n compression indices (cr and cc), which are calculated from elastic and\r\n inelastic skeletal specific storage (sske and sskv). If icrcc = 0,\r\n equivalent storage properties that can be printed are elastic and\r\n inelastic skeletal specific storage, which are calculated from the\r\n recompression and compression indices. (default is 0).\r\n istfm : int\r\n istfm is a code for the format in which equivalent storage properties\r\n will be printed. (default is 0).\r\n gl0 : float or array of floats (nrow, ncol)\r\n gl0 is an array specifying the geostatic stress above model layer 1. If\r\n the top of model layer 1 is the land surface, enter values of zero for\r\n this array. (default is 0.).\r\n sgm : float or array of floats (nrow, ncol)\r\n sgm is an array specifying the specific gravity of moist or unsaturated\r\n sediments. 
(default is 1.7).\r\n sgs : float or array of floats (nrow, ncol)\r\n sgs is an array specifying the specific gravity of saturated sediments.\r\n (default is 2.).\r\n thick : float or array of floats (nsystm, nrow, ncol)\r\n thick is an array specifying the thickness of compressible sediments.\r\n (default is 1.).\r\n sse : float or array of floats (nsystm, nrow, ncol)\r\n sse is an array specifying the initial elastic skeletal specific\r\n storage of compressible beds. sse is not used if icrcc = 0.\r\n (default is 1.).\r\n ssv : float or array of floats (nsystm, nrow, ncol)\r\n ssv is an array specifying the initial inelastic skeletal specific\r\n storage of compressible beds. ssv is not used if icrcc = 0.\r\n (default is 1.).\r\n cr : float or array of floats (nsystm, nrow, ncol)\r\n cr is an array specifying the recompression index of compressible beds.\r\n cr is not used if icrcc is not equal to 0. (default is 0.01).\r\n cc : float or array of floats (nsystm, nrow, ncol)\r\n cc is an array specifying the compression index of compressible beds\r\n cc is not used if icrcc is not equal to 0. (default is 0.25).\r\n void : float or array of floats (nsystm, nrow, ncol)\r\n void is an array specifying the initial void ratio of compressible\r\n beds. (default is 0.82).\r\n sub : float or array of floats (nsystm, nrow, ncol)\r\n sub is an array specifying the initial compaction in each system of\r\n interbeds. Compaction values computed by the package are added to\r\n values in this array so that printed or stored values of compaction and\r\n land subsidence may include previous components. Values in this array\r\n do not affect calculations of storage changes or resulting compaction.\r\n For simulations in which output values will reflect compaction and\r\n subsidence since the start of the simulation, enter zero values for all\r\n elements of this array. (default is 0.).\r\n pcsoff : float or array of floats (nlay, nrow, ncol)\r\n pcsoff is an array specifying the offset from initial effective stress\r\n to initial preconsolidation stress at the bottom of the model layer in\r\n units of height of a column of water. pcsoff is not used if istpcs=0.\r\n (default is 0.).\r\n pcs : float or array of floats (nlay, nrow, ncol)\r\n pcs is an array specifying the initial preconsolidation stress, in\r\n units of height of a column of water, at the bottom of the model layer.\r\n pcs is not used if istpcs is not equal to 0. (default is 0.).\r\n ids16 : list or array of ints (26)\r\n Format codes and unit numbers for swtsidence, compaction by model\r\n layer, compaction by interbed system, vertical displacement,\r\n preconsolidation stress, change in preconsolidation stress, geostatic\r\n stress, change in geostatic stress, effective stress, void ration,\r\n thickness of compressible sediments, and layer-center elevation will be\r\n printed. If ids16 is None and iswtoc>0 then print code 0 will be used\r\n for all data which is output to the binary swtsidence output file\r\n (unit=1054). The 26 entries in ids16 correspond to ifm1, iun1, ifm2,\r\n iun2, ifm3, iun3, ifm4, iun4, ifm5, iun5, ifm6, iun6, ifm7, iun7, ifm8,\r\n iun8, ifm9, iun9, ifm10, iun11, ifm12, iun12, ifm13, and iun13\r\n variables. (default is None).\r\n ids17 : list or array of ints (iswtoc, 30)\r\n Stress period and time step range and print and save flags used to\r\n control printing and saving of information generated by the SUB-WT\r\n Package during program execution. 
Each row of ids17 corresponds to\r\n isp1, isp2, its1, its2, ifl1, ifl2, ifl3, ifl4, ifl5, ifl6, ifl7,\r\n ifl8, ifl9, ifl10, ifl11, ifl12, ifl13, ifl14, ifl15, ifl16, ifl17,\r\n ifl18, ifl9, ifl20, ifl21, ifl22, ifl23, ifl24, ifl25, and ifl26\r\n variables for iswtoc entries. isp1, isp2, its1, and its2 are stress\r\n period and time step ranges. ifl1 and ifl2 control subsidence printing\r\n and saving. ifl3 and ifl4 control compaction by model layer printing\r\n and saving. ifl5 and ifl6 control compaction by interbed system\r\n printing and saving. ifl7 and ifl8 control vertical displacement\r\n printing and saving. ifl9 and ifl10 control preconsolidation stress\r\n printing and saving. ifl11 and ifl12 control change in preconsolidation\r\n stress printing and saving. ifl13 and ifl14 control geostatic stress\r\n printing and saving. ifl15 and ifl16 control change in geostatic stress\r\n printing and saving. ifl17 and ifl18 control effective stress printing\r\n and saving. ifl19 and ifl20 control change in effective stress printing\r\n and saving. ifl21 and ifl22 control void ratio printing and saving.\r\n ifl23 and ifl24 control compressible bed thickness printing and saving.\r\n ifl25 and ifl26 control layer-center elevation printing and saving.\r\n If ids17 is None and iswtoc>0 then all available subsidence output will\r\n be printed and saved to the binary subsidence output file (unit=1054).\r\n (default is None).\r\n unitnumber : int\r\n File unit number (default is None).\r\n filenames : str or list of str\r\n Filenames to use for the package and the output files. If\r\n filenames=None the package name will be created using the model name\r\n and package extension and the cbc output name and other swt output\r\n files will be created using the model name and .cbc and swt output\r\n extensions (for example, modflowtest.cbc), if ipakcbc and other\r\n swt output files (dataset 16) are numbers greater than zero.\r\n If a single string is passed the package name will be set to the\r\n string and other swt output files will be set to the model name with\r\n the appropriate output file extensions. To define the names for all\r\n package files (input and output) the length of the list of strings\r\n should be 15.\r\n Default is None.\r\n\r\n Attributes\r\n ----------\r\n\r\n Methods\r\n -------\r\n\r\n See Also\r\n --------\r\n\r\n Notes\r\n -----\r\n Parameters are supported in Flopy only when reading in existing models.\r\n Parameter values are converted to native values in Flopy and the\r\n connection to \"parameters\" is thus nonexistent. 
Parameters are not\r\n supported in the SUB-WT Package.\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> m = flopy.modflow.Modflow()\r\n >>> swt = flopy.modflow.ModflowSwt(m)\r\n\r\n \"\"\"\r\n\r\n def write_file(self,f=None):\r\n \"\"\"\r\n Write the package file.\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n \"\"\"\r\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\r\n # Open file for writing\r\n if f is None:\r\n f = open(self.fn_path, 'w')\r\n # First line: heading\r\n f.write('{}\\n'.format(self.heading))\r\n # write dataset 1\r\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\r\n self.nsystm, self.ithk,\r\n self.ivoid, self.istpcs,\r\n self.icrcc))\r\n # write dataset 2\r\n t = self.lnwt.array\r\n for tt in t:\r\n f.write('{} '.format(tt + 1))\r\n f.write('\\n')\r\n\r\n # write dataset 3\r\n f.write(\r\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\r\n self.iglfl, self.iglfm,\r\n self.iestfl, self.iestfm,\r\n self.ipcsfl, self.ipcsfm,\r\n self.istfl, self.istfm))\r\n\r\n # write dataset 4\r\n f.write(self.gl0.get_file_entry())\r\n\r\n # write dataset 5\r\n f.write(self.sgm.get_file_entry())\r\n\r\n # write dataset 6\r\n f.write(self.sgs.get_file_entry())\r\n\r\n # write datasets 7 to 13\r\n for k in range(self.nsystm):\r\n f.write(self.thick[k].get_file_entry())\r\n if self.icrcc != 0:\r\n f.write(self.sse[k].get_file_entry())\r\n f.write(self.ssv[k].get_file_entry())\r\n else:\r\n f.write(self.cr[k].get_file_entry())\r\n f.write(self.cc[k].get_file_entry())\r\n f.write(self.void[k].get_file_entry())\r\n f.write(self.sub[k].get_file_entry())\r\n\r\n # write datasets 14 and 15\r\n for k in range(nlay):\r\n if self.istpcs != 0:\r\n f.write(self.pcsoff[k].get_file_entry())\r\n else:\r\n f.write(self.pcs[k].get_file_entry())\r\n\r\n # write dataset 16 and 17\r\n if self.iswtoc > 0:\r\n # dataset 16\r\n for i in self.ids16:\r\n f.write('{} '.format(i))\r\n f.write(' #dataset 16\\n')\r\n\r\n # dataset 17\r\n for k in range(self.iswtoc):\r\n t = self.ids17[k, :].copy()\r\n t[0:4] += 1\r\n for i in t:\r\n f.write('{} '.format(i))\r\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\r\n\r\n # close swt file\r\n f.close()\r\n\r\n def __init__(self, model, ipakcb=None, iswtoc=0, nsystm=1, ithk=0, ivoid=0,\r\n istpcs=1, icrcc=0, lnwt=0, izcfl=0, izcfm=0, iglfl=0, iglfm=0,\r\n iestfl=0, iestfm=0, ipcsfl=0, ipcsfm=0, istfl=0, istfm=0,\r\n gl0=0., sgm=1.7, sgs=2., thick=1., sse=1., ssv=1.,\r\n cr=0.01, cc=0.25, void=0.82, sub=0., pcsoff=0., pcs=0.,\r\n ids16=None, ids17=None,\r\n extension='swt', unitnumber=None, filenames=None):\r\n \"\"\"\r\n Package constructor.\r\n\r\n \"\"\"\r\n # set default unit number of one is not specified\r\n if unitnumber is None:\r\n unitnumber = ModflowSwt.defaultunit()\r\n\r\n # set filenames\r\n if filenames is None:\r\n filenames = [None for x in range(15)]\r\n elif isinstance(filenames, str):\r\n filenames = [filenames] + [None for x in range(14)]\r\n elif isinstance(filenames, list):\r\n if len(filenames) < 15:\r\n n = 15 - len(filenames) + 1\r\n filenames = filenames + [None for x in range(n)]\r\n\r\n # update external file information with cbc output, if necessary\r\n if ipakcb is not None:\r\n fname = filenames[1]\r\n model.add_output_file(ipakcb, fname=fname,\r\n package=ModflowSwt.ftype())\r\n else:\r\n ipakcb = 0\r\n\r\n item16_extensions = [\"swt_subsidence.hds\", \"swt_total_comp.hds\",\r\n \"swt_inter_comp.hds\", \"swt_vert_disp.hds\",\r\n \"swt_precon_stress.hds\",\r\n 
\"swt_precon_stress_delta.hds\",\r\n \"swt_geostatic_stress.hds\",\r\n \"swt_geostatic_stress_delta.hds\",\r\n \"swt_eff_stress.hds\", \"swt_eff_stress_delta.hds\",\r\n \"swt_void_ratio.hds\", \"swt_thick.hds\",\r\n \"swt_lay_center.hds\"]\r\n item16_units = [2052 + i for i in range(len(item16_extensions))]\r\n\r\n if iswtoc > 0:\r\n idx = 0\r\n for k in range(1, 26, 2):\r\n ext = item16_extensions[idx]\r\n if ids16 is None:\r\n iu = item16_units[idx]\r\n else:\r\n iu = ids16[k]\r\n fname = filenames[idx + 2]\r\n model.add_output_file(iu, fname=fname, extension=ext,\r\n package=ModflowSwt.ftype())\r\n idx += 1\r\n\r\n extensions = [extension]\r\n name = [ModflowSwt.ftype()]\r\n units = [unitnumber]\r\n extra = ['']\r\n\r\n # set package name\r\n fname = [filenames[0]]\r\n\r\n # Call ancestor's init to set self.parent, extension, name and\r\n # unit number\r\n Package.__init__(self, model, extension=extensions, name=name,\r\n unit_number=units, extra=extra, filenames=fname)\r\n\r\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\r\n\r\n self.heading = '# {} package for '.format(self.name[0]) + \\\r\n ' {}, '.format(model.version_types[model.version]) + \\\r\n 'generated by Flopy.'\r\n self.url = 'swt.htm'\r\n\r\n self.ipakcb = ipakcb\r\n self.iswtoc = iswtoc\r\n\r\n self.nsystm = nsystm\r\n self.ithk = ithk\r\n self.ivoid = ivoid\r\n self.istpcs = istpcs\r\n self.icrcc = icrcc\r\n\r\n self.lnwt = Util2d(model, (nsystm,), np.int32, lnwt, name='lnwt')\r\n\r\n self.izcfl = izcfl\r\n self.izcfm = izcfm\r\n self.iglfl = iglfl\r\n self.iglfm = iglfm\r\n self.iestfl = iestfl\r\n self.iestfm = iestfm\r\n self.ipcsfl = ipcsfl\r\n self.ipcsfm = ipcsfm\r\n self.istfl = istfl\r\n self.istfm = istfm\r\n\r\n self.gl0 = Util2d(model, (nrow, ncol), np.float32, gl0, name='gl0')\r\n self.sgm = Util2d(model, (nrow, ncol), np.float32, sgm, name='sgm')\r\n self.sgs = Util2d(model, (nrow, ncol), np.float32, sgs, name='sgs')\r\n\r\n # interbed data\r\n names = ['thick system ' for n in range(nsystm)]\r\n self.thick = Util3d(model, (nsystm, nrow, ncol), np.float32, thick,\r\n name=names,\r\n locat=self.unit_number[0])\r\n names = ['void system ' for n in range(nsystm)]\r\n self.void = Util3d(model, (nsystm, nrow, ncol), np.float32, void,\r\n name=names,\r\n locat=self.unit_number[0])\r\n names = ['sub system ' for n in range(nsystm)]\r\n self.sub = Util3d(model, (nsystm, nrow, ncol), np.float32, sub,\r\n name=names,\r\n locat=self.unit_number[0])\r\n if icrcc != 0:\r\n names = ['sse system ' for n in range(nsystm)]\r\n self.sse = Util3d(model, (nsystm, nrow, ncol), np.float32, sse,\r\n name=names,\r\n locat=self.unit_number[0])\r\n names = ['ssc system ' for n in range(nsystm)]\r\n self.ssv = Util3d(model, (nsystm, nrow, ncol), np.float32, ssv,\r\n name=names,\r\n locat=self.unit_number[0])\r\n self.cr = None\r\n self.cc = None\r\n else:\r\n self.sse = None\r\n self.ssv = None\r\n names = ['cr system ' for n in range(nsystm)]\r\n self.cr = Util3d(model, (nsystm, nrow, ncol), np.float32, cr,\r\n name=names,\r\n locat=self.unit_number[0])\r\n names = ['cc system ' for n in range(nsystm)]\r\n self.cc = Util3d(model, (nsystm, nrow, ncol), np.float32, cc,\r\n name=names,\r\n locat=self.unit_number[0])\r\n\r\n # layer data\r\n if istpcs != 0:\r\n self.pcsoff = Util3d(model, (nlay, nrow, ncol), np.float32, pcsoff,\r\n name='pcsoff', locat=self.unit_number[0])\r\n self.pcs = None\r\n else:\r\n self.pcsoff = None\r\n self.pcs = Util3d(model, (nlay, nrow, ncol), np.float32, pcs,\r\n name='pcs', 
locat=self.unit_number[0])\r\n\r\n # output data\r\n if iswtoc > 0:\r\n if ids16 is None:\r\n self.ids16 = np.zeros((26), dtype=np.int32)\r\n ui = 0\r\n for i in range(1, 26, 2):\r\n self.ids16[i] = item16_units[ui]\r\n ui += 1\r\n else:\r\n if isinstance(ids16, list):\r\n ds16 = np.array(ids16)\r\n assert len(ids16) == 26\r\n self.ids16 = ids16\r\n\r\n if ids17 is None:\r\n ids17 = np.ones((30), dtype=np.int32)\r\n ids17[0] = 0\r\n ids17[2] = 0\r\n ids17[1] = 9999\r\n ids17[3] = 9999\r\n self.ids17 = np.atleast_2d(ids17)\r\n else:\r\n if isinstance(ids17, list):\r\n ids17 = np.atleast_2d(np.array(ids17))\r\n assert ids17.shape[1] == 30\r\n self.ids17 = ids17\r\n\r\n # add package to model\r\n self.parent.add_package(self)\r\n\r\n @staticmethod\r\n def load(f, model, ext_unit_dict=None):\r\n \"\"\"\r\n Load an existing package.\r\n\r\n Parameters\r\n ----------\r\n f : filename or file handle\r\n File to load.\r\n model : model object\r\n The model object (of type :class:`flopy.modflow.mf.Modflow`) to\r\n which this package will be added.\r\n ext_unit_dict : dictionary, optional\r\n If the arrays in the file are specified using EXTERNAL,\r\n or older style array control records, then `f` should be a file\r\n handle. In this case ext_unit_dict is required, which can be\r\n constructed using the function\r\n :class:`flopy.utils.mfreadnam.parsenamefile`.\r\n\r\n Returns\r\n -------\r\n swt : ModflowSwt object\r\n\r\n Examples\r\n --------\r\n\r\n >>> import flopy\r\n >>> m = flopy.modflow.Modflow()\r\n >>> swt = flopy.modflow.ModflowSwt.load('test.swt', m)\r\n\r\n \"\"\"\r\n\r\n if model.verbose:\r\n sys.stdout.write('loading swt package file...\\n')\r\n\r\n openfile = not hasattr(f, 'read')\r\n if openfile:\r\n filename = f\r\n f = open(filename, 'r')\r\n\r\n # dataset 0 -- header\r\n while True:\r\n line = f.readline()\r\n if line[0] != '#':\r\n break\r\n # determine problem dimensions\r\n nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()\r\n\r\n # read dataset 1\r\n if model.verbose:\r\n sys.stdout.write(' loading swt dataset 1\\n')\r\n t = line.strip().split()\r\n ipakcb, iswtoc, nsystm, ithk, ivoid, istpcs, icrcc = int(t[0]), \\\r\n int(t[1]), \\\r\n int(t[2]), \\\r\n int(t[3]), \\\r\n int(t[4]), \\\r\n int(t[5]), \\\r\n int(t[6])\r\n\r\n # if ipakcb > 0:\r\n # ipakcb = 53\r\n\r\n # read dataset 2\r\n lnwt = None\r\n if nsystm > 0:\r\n if model.verbose:\r\n sys.stdout.write(' loading swt dataset 2\\n')\r\n lnwt = np.empty((nsystm), dtype=np.int32)\r\n lnwt = read1d(f, lnwt) - 1\r\n\r\n # read dataset 3\r\n if model.verbose:\r\n sys.stdout.write(' loading swt dataset 3\\n')\r\n line = f.readline()\r\n t = line.strip().split()\r\n iizcfl, izcfm, iglfl, iglfm, iestfl, \\\r\n iestfm, ipcsfl, ipcsfm, istfl, istfm = int(t[0]), int(t[1]), \\\r\n int(t[2]), int(t[3]), \\\r\n int(t[4]), int(t[5]), \\\r\n int(t[6]), int(t[7]), \\\r\n int(t[8]), int(t[9])\r\n\r\n # read dataset 4\r\n if model.verbose:\r\n sys.stdout.write(' loading swt dataset 4')\r\n gl0 = Util2d.load(f, model, (nrow, ncol), np.float32, 'gl0',\r\n ext_unit_dict)\r\n\r\n # read dataset 5\r\n if model.verbose:\r\n sys.stdout.write(' loading swt dataset 5')\r\n sgm = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgm',\r\n ext_unit_dict)\r\n\r\n # read dataset 6\r\n if model.verbose:\r\n sys.stdout.write(' loading swt dataset 6')\r\n sgs = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgs',\r\n ext_unit_dict)\r\n\r\n # read datasets 7 to 13\r\n thick = [0] * nsystm\r\n void = [0] * nsystm\r\n sub = [0] * nsystm\r\n if 
icrcc == 0:\r\n sse = None\r\n ssv = None\r\n cr = [0] * nsystm\r\n cc = [0] * nsystm\r\n else:\r\n sse = [0] * nsystm\r\n ssv = [0] * nsystm\r\n cr = None\r\n cc = None\r\n\r\n for k in range(nsystm):\r\n kk = lnwt[k] + 1\r\n # thick\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 7 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'thick layer {}'.format(kk),\r\n ext_unit_dict)\r\n thick[k] = t\r\n if icrcc != 0:\r\n # sse\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 8 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'sse layer {}'.format(kk), ext_unit_dict)\r\n sse[k] = t\r\n # ssv\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 9 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'sse layer {}'.format(kk), ext_unit_dict)\r\n ssv[k] = t\r\n else:\r\n # cr\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 10 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'cr layer {}'.format(kk), ext_unit_dict)\r\n cr[k] = t\r\n # cc\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 11 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'cc layer {}'.format(kk), ext_unit_dict)\r\n cc[k] = t\r\n # void\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 12 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'void layer {}'.format(kk), ext_unit_dict)\r\n void[k] = t\r\n # sub\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 13 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'sub layer {}'.format(kk), ext_unit_dict)\r\n sub[k] = t\r\n\r\n # dataset 14 and 15\r\n if istpcs != 0:\r\n pcsoff = [0] * nlay\r\n pcs = None\r\n else:\r\n pcsoff = None\r\n pcs = [0] * nlay\r\n for k in range(nlay):\r\n if istpcs != 0:\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 14 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'pcsoff layer {}'.format(k + 1), ext_unit_dict)\r\n pcsoff[k] = t\r\n else:\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 15 for layer {}\\n'.format(kk))\r\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\r\n 'pcs layer {}'.format(k + 1), ext_unit_dict)\r\n pcs[k] = t\r\n\r\n ids16 = None\r\n ids17 = None\r\n if iswtoc > 0:\r\n # dataset 16\r\n if model.verbose:\r\n sys.stdout.write(\r\n ' loading swt dataset 15 for layer {}\\n'.format(kk))\r\n ids16 = np.empty(26, dtype=np.int32)\r\n ids16 = read1d(f, ids16)\r\n #for k in range(1, 26, 2):\r\n # model.add_pop_key_list(ids16[k])\r\n # ids16[k] = 2054 # all sub-wt data sent to unit 2054\r\n # dataset 17\r\n ids17 = [0] * iswtoc\r\n for k in range(iswtoc):\r\n if model.verbose:\r\n msg = 2 * ' ' + 'loading swt dataset 17 for ' + \\\r\n 'iswtoc {}\\n'.format(k + 1)\r\n sys.stdout.write(msg)\r\n t = np.empty(30, dtype=np.int32)\r\n t = read1d(f, t)\r\n t[0:4] -= 1\r\n ids17[k] = t\r\n\r\n if openfile:\r\n f.close()\r\n\r\n # determine specified unit number\r\n unitnumber = None\r\n filenames = [None for x in range(15)]\r\n if ext_unit_dict is not None:\r\n unitnumber, filenames[0] = \\\r\n model.get_ext_dict_attr(ext_unit_dict,\r\n filetype=ModflowSwt.ftype())\r\n if ipakcb > 0:\r\n iu, filenames[1] = \\\r\n model.get_ext_dict_attr(ext_unit_dict, 
unit=ipakcb)\r\n\r\n if iswtoc > 0:\r\n ipos = 2\r\n for k in range(1, 26, 2):\r\n unit = ids16[k]\r\n if unit > 0:\r\n iu, filenames[ipos] = \\\r\n model.get_ext_dict_attr(ext_unit_dict,\r\n unit=unit)\r\n model.add_pop_key_list(unit)\r\n ipos += 1\r\n\r\n # create sub-wt instance\r\n swt = ModflowSwt(model, ipakcb=ipakcb, iswtoc=iswtoc, nsystm=nsystm,\r\n ithk=ithk, ivoid=ivoid, istpcs=istpcs,\r\n icrcc=icrcc, lnwt=lnwt, izcfl=iizcfl, izcfm=izcfm,\r\n iglfl=iglfl, iglfm=iglfm, iestfl=iestfl,\r\n iestfm=iestfm, ipcsfl=ipcsfl, ipcsfm=ipcsfm,\r\n istfl=istfl, istfm=istfm, gl0=gl0, sgm=sgm,\r\n sgs=sgs, thick=thick, sse=sse, ssv=ssv, cr=cr, cc=cc,\r\n void=void, sub=sub, pcsoff=pcsoff,\r\n pcs=pcs, ids16=ids16, ids17=ids17,\r\n unitnumber=unitnumber, filenames=filenames)\r\n\r\n # return sut-wt instance\r\n return swt\r\n\r\n @staticmethod\r\n def ftype():\r\n return 'SWT'\r\n\r\n @staticmethod\r\n def defaultunit():\r\n return 35\r\n", "import sys, inspect, copy, os\r\nimport numpy as np\r\nfrom collections import OrderedDict\r\nfrom ..data.mfstructure import DatumType\r\nfrom .mfdatastorage import DataStorage, DataStructureType, DataStorageType\r\nfrom ...utils.datautil import MultiList\r\nfrom ..mfbase import ExtFileAction, MFDataException\r\nfrom ..utils.mfenums import DiscretizationType\r\nfrom ...datbase import DataType\r\nfrom .mffileaccess import MFFileAccessArray\r\nfrom .mfdata import MFMultiDimVar, MFTransient\r\n\r\n\r\nclass MFArray(MFMultiDimVar):\r\n \"\"\"\r\n Provides an interface for the user to access and update MODFLOW array data.\r\n\r\n Parameters\r\n ----------\r\n sim_data : MFSimulationData\r\n data contained in the simulation\r\n structure : MFDataStructure\r\n describes the structure of the data\r\n data : list or ndarray\r\n actual data\r\n enable : bool\r\n enable/disable the array\r\n path : tuple\r\n path in the data dictionary to this MFArray\r\n dimensions : MFDataDimensions\r\n dimension information related to the model, package, and array\r\n\r\n Attributes\r\n ----------\r\n data_type : DataType\r\n type of data stored in the scalar\r\n plotable : bool\r\n if the scalar is plotable\r\n dtype : numpy.dtype\r\n the scalar's numpy data type\r\n data : variable\r\n calls get_data with default parameters\r\n\r\n Methods\r\n -------\r\n new_simulation : (sim_data : MFSimulationData)\r\n initialize MFArray object for a new simulation\r\n supports_layered : bool\r\n Returns whether this MFArray supports layered data\r\n set_layered_data : (layered_data : bool)\r\n Sets whether this MFArray supports layered data\r\n store_as_external_file : (external_file_path : string, layer_num : int,\r\n replace_existing_external : bool)\r\n Stores data from layer \"layer_num\" to an external file at\r\n \"external_file_path\". For unlayered data do not pass in \"layer\".\r\n If layer is not specified all layers will be stored with each layer\r\n as a separate file. If replace_existing_external is set to False,\r\n this method will not do anything if the data is already in an\r\n external file.\r\n store_as_internal_array : (multiplier : float, layer_num : int)\r\n Stores data from layer \"layer_num\" internally within the MODFLOW file\r\n with a multiplier \"multiplier\". For unlayered data do not pass in\r\n \"layer\".\r\n has_data : (layer_num : int) : bool\r\n Returns whether layer \"layer_num\" has any data associated with it.\r\n For unlayered data do not pass in \"layer\".\r\n get_data : (layer_num : int) : ndarray\r\n Returns the data associated with layer \"layer_num\". 
If \"layer_num\" is\r\n None, returns all data.\r\n set_data : (data : ndarray/list, multiplier : float, layer_num : int)\r\n Sets the contents of the data at layer \"layer_num\" to \"data\" with\r\n multiplier \"multiplier\". For unlayered\r\n data do not pass in \"layer_num\". data can have the following formats:\r\n 1) ndarray - numpy ndarray containing all of the data\r\n 2) [data] - python list containing all of the data\r\n 3) val - a single constant value to be used for all of the data\r\n 4) {'filename':filename, 'factor':fct, 'iprn':print, 'data':data} -\r\n dictionary defining external file information\r\n 5) {'data':data, 'factor':fct, 'iprn':print) - dictionary defining\r\n internal information. Data that is layered can also be set by defining\r\n a list with a length equal to the number of layers in the model.\r\n Each layer in the list contains the data as defined in the\r\n formats above:\r\n [layer_1_val, [layer_2_array_vals],\r\n {'filename':file_with_layer_3_data, 'factor':fct, 'iprn':print}]\r\n\r\n load : (first_line : string, file_handle : file descriptor,\r\n block_header : MFBlockHeader, pre_data_comments : MFComment) :\r\n tuple (bool, string)\r\n Loads data from first_line (the first line of data) and open file\r\n file_handle which is pointing to the second line of data. Returns a\r\n tuple with the first item indicating whether all data was read and\r\n the second item being the last line of text read from the file.\r\n get_file_entry : (layer : int) : string\r\n Returns a string containing the data in layer \"layer\". For unlayered\r\n data do not pass in \"layer\".\r\n\r\n See Also\r\n --------\r\n\r\n Notes\r\n -----\r\n\r\n Examples\r\n --------\r\n\r\n\r\n \"\"\"\r\n def __init__(self, sim_data, model_or_sim, structure, data=None,\r\n enable=True, path=None, dimensions=None):\r\n super(MFArray, self).__init__(sim_data, model_or_sim, structure, enable, path,\r\n dimensions)\r\n if self.structure.layered:\r\n try:\r\n self._layer_shape = self.layer_shape()\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'resolving layer dimensions',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n else:\r\n self._layer_shape = (1,)\r\n if self._layer_shape[0] is None:\r\n self._layer_shape = (1,)\r\n self._data_type = structure.data_item_structures[0].type\r\n try:\r\n shp_ml = MultiList(shape=self._layer_shape)\r\n self._data_storage = self._new_storage(shp_ml.get_total_size()\r\n != 1)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(structure.get_model(),\r\n structure.get_package(), path,\r\n 'creating storage', structure.name,\r\n inspect.stack()[0][3],\r\n type_, value_, traceback_, None,\r\n sim_data.debug, ex)\r\n self._last_line_info = []\r\n if self.structure.type == DatumType.integer:\r\n multiplier = [1]\r\n else:\r\n multiplier = [1.0]\r\n if data is not None:\r\n try:\r\n self._get_storage_obj().set_data(data, key=self._current_key,\r\n multiplier=multiplier)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'setting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n\r\n def 
__setattr__(self, name, value):\r\n if name == '__setstate__':\r\n raise AttributeError(name)\r\n elif name == 'fname':\r\n self._get_storage_obj().layer_storage.first_item().fname = value\r\n elif name == 'factor':\r\n self._get_storage_obj().layer_storage.first_item().factor = value\r\n elif name == 'iprn':\r\n self._get_storage_obj().layer_storage.first_item().iprn = value\r\n elif name == 'binary':\r\n self._get_storage_obj().layer_storage.first_item().binary = value\r\n else:\r\n super(MFArray, self).__setattr__(name, value)\r\n\r\n def __getitem__(self, k):\r\n if isinstance(k, int):\r\n k = (k,)\r\n storage = self._get_storage_obj()\r\n if storage.layered and (isinstance(k, tuple) or isinstance(k, list)):\r\n if not storage.layer_storage.in_shape(k):\r\n comment = 'Could not retrieve layer {} of \"{}\". There' \\\r\n 'are only {} layers available' \\\r\n '.'.format(k, self.structure.name,\r\n len(storage.layer_storage))\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, comment,\r\n self._simulation_data.debug)\r\n # for layered data treat k as layer number(s)\r\n return storage.layer_storage[k]\r\n else:\r\n # for non-layered data treat k as an array/list index of the data\r\n if isinstance(k, int):\r\n try:\r\n if len(self._get_data(apply_mult=True).shape) == 1:\r\n return self._get_data(apply_mult=True)[k]\r\n elif self._get_data(apply_mult=True).shape[0] == 1:\r\n return self._get_data(apply_mult=True)[0, k]\r\n elif self._get_data(apply_mult=True).shape[1] == 1:\r\n return self._get_data(apply_mult=True)[k, 0]\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'setting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n\r\n comment = 'Unable to resolve index \"{}\" for ' \\\r\n 'multidimensional data.'.format(k)\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, comment,\r\n self._simulation_data.debug)\r\n else:\r\n try:\r\n if isinstance(k, tuple):\r\n if len(k) == 3:\r\n return self._get_data(apply_mult=True)[k[0], k[1],\r\n k[2]]\r\n elif len(k) == 2:\r\n return self._get_data(apply_mult=True)[k[0], k[1]]\r\n if len(k) == 1:\r\n return self._get_data(apply_mult=True)[k]\r\n else:\r\n return self._get_data(apply_mult=True)[(k,)]\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'setting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n\r\n def __setitem__(self, k, value):\r\n storage = self._get_storage_obj()\r\n self._resync()\r\n if storage.layered:\r\n if isinstance(k, int):\r\n k = (k,)\r\n # for layered data treat k as a layer number\r\n try:\r\n storage.layer_storage[k]._set_data(value)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n 
self.structure.get_package(),\r\n self._path,\r\n 'setting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n\r\n else:\r\n try:\r\n # for non-layered data treat k as an array/list index of the data\r\n a = self._get_data()\r\n a[k] = value\r\n a = a.astype(self._get_data().dtype)\r\n layer_storage = storage.layer_storage.first_item()\r\n self._get_storage_obj()._set_data(a, key=self._current_key,\r\n multiplier=layer_storage.factor)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'setting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n\r\n @property\r\n def data_type(self):\r\n if self.structure.layered:\r\n return DataType.array3d\r\n else:\r\n return DataType.array2d\r\n\r\n @property\r\n def dtype(self):\r\n return self._get_data().dtype.type\r\n\r\n @property\r\n def plotable(self):\r\n if self.model is None:\r\n return False\r\n else:\r\n return True\r\n\r\n def new_simulation(self, sim_data):\r\n super(MFArray, self).new_simulation(sim_data)\r\n self._data_storage = self._new_storage(False)\r\n self._layer_shape = (1,)\r\n\r\n def supports_layered(self):\r\n try:\r\n model_grid = self._data_dimensions.get_model_grid()\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting model grid',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n return self.structure.layered and \\\r\n model_grid.grid_type() != DiscretizationType.DISU\r\n\r\n def set_layered_data(self, layered_data):\r\n if layered_data is True and self.structure.layered is False:\r\n if self._data_dimensions.get_model_grid().grid_type() == \\\r\n DiscretizationType.DISU:\r\n comment = 'Layered option not available for unstructured ' \\\r\n 'grid. {}'.format(self._path)\r\n else:\r\n comment = 'Data \"{}\" does not support layered option. ' \\\r\n '{}'.format(self._data_name, self._path)\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'setting layered data', self.structure.name,\r\n inspect.stack()[0][3], type_, value_,\r\n traceback_, comment,\r\n self._simulation_data.debug)\r\n self._get_storage_obj().layered = layered_data\r\n\r\n def make_layered(self):\r\n if self.supports_layered():\r\n try:\r\n self._get_storage_obj().make_layered()\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'making data layered',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n else:\r\n if self._data_dimensions.get_model_grid().grid_type() == \\\r\n DiscretizationType.DISU:\r\n comment = 'Layered option not available for unstructured ' \\\r\n 'grid. {}'.format(self._path)\r\n else:\r\n comment = 'Data \"{}\" does not support layered option. 
' \\\r\n '{}'.format(self._data_name, self._path)\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'converting data to layered',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_, value_,\r\n traceback_, comment,\r\n self._simulation_data.debug)\r\n\r\n def store_as_external_file(self, external_file_path, layer=None,\r\n binary=False,\r\n replace_existing_external=True):\r\n storage = self._get_storage_obj()\r\n if storage is None:\r\n self._set_storage_obj(self._new_storage(False, True))\r\n storage = self._get_storage_obj()\r\n # build list of layers\r\n if layer is None:\r\n layer_list = []\r\n for index in range(0, storage.layer_storage.get_total_size()):\r\n if replace_existing_external or \\\r\n storage.layer_storage[index].data_storage_type == \\\r\n DataStorageType.internal_array:\r\n layer_list.append(index)\r\n else:\r\n if replace_existing_external or \\\r\n storage.layer_storage[layer].data_storage_type == \\\r\n DataStorageType.internal_array:\r\n layer_list = [layer]\r\n else:\r\n layer_list = []\r\n\r\n # store data from each layer in a separate file\r\n for current_layer in layer_list:\r\n # determine external file name for layer\r\n if len(layer_list) > 0:\r\n fname, ext = os.path.splitext(external_file_path)\r\n if len(layer_list) == 1:\r\n file_path = '{}{}'.format(fname, ext)\r\n else:\r\n file_path = '{}_layer{}{}'.format(fname, current_layer + 1,\r\n ext)\r\n else:\r\n file_path = external_file_path\r\n if isinstance(current_layer, int):\r\n current_layer = (current_layer,)\r\n # get the layer's data\r\n data = self._get_data(current_layer, True)\r\n if data is None:\r\n # do not write empty data to an external file\r\n continue\r\n if isinstance(data, str) and self._tas_info(data)[0] is not \\\r\n None:\r\n # data must not be time array series information\r\n continue\r\n if storage.get_data_dimensions(current_layer)[0] == -9999:\r\n # data must have well defined dimensions to make external\r\n continue\r\n try:\r\n # store layer's data in external file\r\n factor = storage.layer_storage[current_layer].factor\r\n external_data = {'filename': file_path,\r\n 'data': self._get_data(current_layer, True),\r\n 'factor': factor,\r\n 'binary': binary}\r\n self._set_data(external_data, layer=current_layer)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'storing data in external file '\r\n '{}'.format(external_file_path),\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n\r\n def has_data(self, layer=None):\r\n storage = self._get_storage_obj()\r\n if storage is None:\r\n return False\r\n if isinstance(layer, int):\r\n layer = (layer,)\r\n try:\r\n return storage.has_data(layer)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'checking for data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n\r\n @property\r\n def data(self):\r\n return self._get_data()\r\n\r\n def get_data(self, layer=None, apply_mult=False, **kwargs):\r\n return self._get_data(layer, apply_mult, **kwargs)\r\n\r\n def _get_data(self, layer=None, apply_mult=False, 
**kwargs):\r\n if self._get_storage_obj() is None:\r\n self._data_storage = self._new_storage(False)\r\n if isinstance(layer, int):\r\n layer = (layer,)\r\n storage = self._get_storage_obj()\r\n if storage is not None:\r\n try:\r\n data = storage.get_data(layer, apply_mult)\r\n if 'array' in kwargs and kwargs['array'] \\\r\n and isinstance(self, MFTransientArray):\r\n data = np.expand_dims(data, 0)\r\n return data\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n return None\r\n\r\n def set_data(self, data, multiplier=None, layer=None):\r\n self._set_data(data, multiplier, layer)\r\n\r\n def _set_data(self, data, multiplier=None, layer=None):\r\n self._resync()\r\n if self._get_storage_obj() is None:\r\n self._data_storage = self._new_storage(False)\r\n if multiplier is None:\r\n multiplier = [self._get_storage_obj().get_default_mult()]\r\n if isinstance(layer, int):\r\n layer = (layer,)\r\n if isinstance(data, str):\r\n # check to see if this is a time series array\r\n tas_name, tas_label = self._tas_info(data)\r\n if tas_name is not None:\r\n # verify and save as time series array\r\n self._get_storage_obj().set_tas(tas_name, tas_label,\r\n self._current_key)\r\n return\r\n\r\n storage = self._get_storage_obj()\r\n if self.structure.name == 'aux' and layer is None:\r\n if isinstance(data, dict):\r\n aux_data = copy.deepcopy(data['data'])\r\n else:\r\n aux_data = data\r\n # make a list out of a single item\r\n if isinstance(aux_data, int) or \\\r\n isinstance(aux_data, float) or \\\r\n isinstance(aux_data, str):\r\n aux_data = [[aux_data]]\r\n # handle special case of aux variables in an array\r\n self.layered = True\r\n aux_var_names = self._data_dimensions.\\\r\n package_dim.get_aux_variables()\r\n if len(aux_data) == len(aux_var_names[0]) - 1:\r\n for layer, aux_var_data in enumerate(aux_data):\r\n if layer > 0 and \\\r\n layer >= storage.layer_storage.get_total_size():\r\n storage.add_layer()\r\n if isinstance(data, dict):\r\n # put layer data back in dictionary\r\n layer_data = data\r\n layer_data['data'] = aux_var_data\r\n else:\r\n layer_data = aux_var_data\r\n try:\r\n storage.set_data(layer_data, [layer], multiplier,\r\n self._current_key)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'setting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n else:\r\n message = 'Unable to set data for aux variable. 
' \\\r\n 'Expected {} aux variables but got ' \\\r\n '{}.'.format(len(aux_var_names[0]),\r\n len(data))\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(\r\n self._data_dimensions.structure.get_model(),\r\n self._data_dimensions.structure.get_package(),\r\n self._data_dimensions.structure.path,\r\n 'setting aux variables',\r\n self._data_dimensions.structure.name,\r\n inspect.stack()[0][3], type_, value_, traceback_,\r\n message, self._simulation_data.debug)\r\n else:\r\n try:\r\n storage.set_data(data, layer, multiplier,\r\n key=self._current_key)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'setting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n self._layer_shape = storage.layer_storage.list_shape\r\n\r\n def load(self, first_line, file_handle, block_header,\r\n pre_data_comments=None, external_file_info=None):\r\n super(MFArray, self).load(first_line, file_handle, block_header,\r\n pre_data_comments=None,\r\n external_file_info=None)\r\n self._resync()\r\n if self.structure.layered:\r\n try:\r\n model_grid = self._data_dimensions.get_model_grid()\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting model grid',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n if self._layer_shape[-1] != model_grid.num_layers():\r\n if model_grid.grid_type() == DiscretizationType.DISU:\r\n self._layer_shape = (1,)\r\n else:\r\n self._layer_shape = (model_grid.num_layers(),)\r\n if self._layer_shape[-1] is None:\r\n self._layer_shape = (1,)\r\n shape_ml = MultiList(shape=self._layer_shape)\r\n self._set_storage_obj(self._new_storage(\r\n shape_ml.get_total_size() != 1, True))\r\n file_access = MFFileAccessArray(self.structure, self._data_dimensions,\r\n self._simulation_data, self._path,\r\n self._current_key)\r\n storage = self._get_storage_obj()\r\n self._layer_shape, return_val = file_access.load_from_package(\r\n first_line, file_handle, self._layer_shape, storage,\r\n self._keyword, pre_data_comments=None)\r\n if external_file_info is not None:\r\n storage.point_to_existing_external_file(\r\n external_file_info, storage.layer_storage.get_total_size() - 1)\r\n\r\n return return_val\r\n\r\n def _is_layered_aux(self):\r\n # determine if this is the special aux variable case\r\n if self.structure.name.lower() == 'aux' and \\\r\n self._get_storage_obj().layered:\r\n return True\r\n else:\r\n return False\r\n\r\n def get_file_entry(self, layer=None,\r\n ext_file_action=ExtFileAction.copy_relative_paths):\r\n return self._get_file_entry(layer, ext_file_action)\r\n\r\n def _get_file_entry(self, layer=None,\r\n ext_file_action=ExtFileAction.copy_relative_paths):\r\n if isinstance(layer, int):\r\n layer = (layer,)\r\n data_storage = self._get_storage_obj()\r\n if data_storage is None or \\\r\n data_storage.layer_storage.get_total_size() == 0 \\\r\n or not data_storage.has_data():\r\n return ''\r\n\r\n layered_aux = self._is_layered_aux()\r\n\r\n # prepare indent\r\n indent = self._simulation_data.indent_string\r\n shape_ml = MultiList(shape=self._layer_shape)\r\n if shape_ml.get_total_size() == 1:\r\n data_indent = indent\r\n else:\r\n data_indent 
= '{}{}'.format(indent,\r\n self._simulation_data.indent_string)\r\n\r\n file_entry_array = []\r\n if data_storage.data_structure_type == DataStructureType.scalar:\r\n # scalar data, like in the case of a time array series gets written\r\n # on a single line\r\n try:\r\n data = data_storage.get_data()\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n if self.structure.data_item_structures[0].numeric_index or \\\r\n self.structure.data_item_structures[0].is_cellid:\r\n # for cellid and numeric indices convert from 0 base to 1 based\r\n data = abs(data) + 1\r\n file_entry_array.append('{}{}{}{}\\n'.format(indent,\r\n self.structure.name,\r\n indent,\r\n data))\r\n elif data_storage.layered:\r\n if not layered_aux:\r\n if not self.structure.data_item_structures[0].just_data:\r\n name = self.structure.name\r\n file_entry_array.append('{}{}{}{}\\n'.format(indent, name,\r\n indent,\r\n 'LAYERED'))\r\n else:\r\n file_entry_array.append('{}{}\\n'.format(indent, 'LAYERED'))\r\n\r\n if layer is None:\r\n layer_min = shape_ml.first_index()\r\n layer_max = copy.deepcopy(self._layer_shape)\r\n else:\r\n # set layer range\r\n if not shape_ml.in_shape(layer):\r\n comment = 'Layer {} for variable \"{}\" does not exist' \\\r\n '.'.format(layer, self._data_name)\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting file entry',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_, value_,\r\n traceback_, comment,\r\n self._simulation_data.debug)\r\n\r\n layer_min = layer\r\n layer_max = shape_ml.inc_shape_idx(layer)\r\n for layer in shape_ml.indexes(layer_min, layer_max):\r\n file_entry_array.append(\r\n self._get_file_entry_layer(layer, data_indent,\r\n data_storage.layer_storage[\r\n layer].data_storage_type,\r\n ext_file_action,\r\n layered_aux))\r\n else:\r\n # data is not layered\r\n if not self.structure.data_item_structures[0].just_data:\r\n if self._data_name == 'aux':\r\n file_entry_array.append('{}{}\\n'.format(\r\n indent, self._get_aux_var_name([0])))\r\n else:\r\n file_entry_array.append('{}{}\\n'.format(indent,\r\n self.structure.name))\r\n\r\n data_storage_type = data_storage.layer_storage[0].data_storage_type\r\n file_entry_array.append(\r\n self._get_file_entry_layer(None, data_indent,\r\n data_storage_type,\r\n ext_file_action))\r\n\r\n return ''.join(file_entry_array)\r\n\r\n def _new_storage(self, set_layers=True, base_storage=False,\r\n stress_period=0):\r\n if set_layers:\r\n return DataStorage(self._simulation_data, self._model_or_sim,\r\n self._data_dimensions, self._get_file_entry,\r\n DataStorageType.internal_array,\r\n DataStructureType.ndarray, self._layer_shape,\r\n stress_period=stress_period,\r\n data_path=self._path)\r\n else:\r\n return DataStorage(self._simulation_data, self._model_or_sim,\r\n self._data_dimensions, self._get_file_entry,\r\n DataStorageType.internal_array,\r\n DataStructureType.ndarray,\r\n stress_period=stress_period,\r\n data_path=self._path)\r\n\r\n def _get_storage_obj(self):\r\n return self._data_storage\r\n\r\n def _set_storage_obj(self, storage):\r\n self._data_storage = storage\r\n\r\n def _get_file_entry_layer(self, layer, data_indent, storage_type,\r\n 
ext_file_action, layered_aux=False):\r\n if not self.structure.data_item_structures[0].just_data and \\\r\n not layered_aux:\r\n indent_string = '{}{}'.format(self._simulation_data.indent_string,\r\n self._simulation_data.indent_string)\r\n else:\r\n indent_string = self._simulation_data.indent_string\r\n\r\n file_entry = ''\r\n if layered_aux:\r\n try:\r\n # display aux name\r\n file_entry = '{}{}\\n'.format(indent_string,\r\n self._get_aux_var_name(layer))\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting aux variables',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n indent_string = '{}{}'.format(indent_string,\r\n self._simulation_data.indent_string)\r\n\r\n data_storage = self._get_storage_obj()\r\n if storage_type == DataStorageType.internal_array:\r\n # internal data header + data\r\n format_str = self._get_internal_formatting_string(layer).upper()\r\n lay_str = self._get_data_layer_string(layer, data_indent).upper()\r\n file_entry = '{}{}{}\\n{}'.format(file_entry, indent_string,\r\n format_str, lay_str)\r\n elif storage_type == DataStorageType.internal_constant:\r\n # constant data\r\n try:\r\n const_val = data_storage.get_const_val(layer)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting constant value',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, None,\r\n self._simulation_data.debug, ex)\r\n const_str = self._get_constant_formatting_string(\r\n const_val, layer, self._data_type).upper()\r\n file_entry = '{}{}{}'.format(file_entry, indent_string,\r\n const_str)\r\n else:\r\n # external data\r\n ext_str = self._get_external_formatting_string(layer,\r\n ext_file_action)\r\n file_entry = '{}{}{}'.format(file_entry, indent_string,\r\n ext_str)\r\n # add to active list of external files\r\n try:\r\n file_path = data_storage.get_external_file_path(layer)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n comment = 'Could not get external file path for layer ' \\\r\n '\"{}\"'.format(layer),\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting external file path',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, comment,\r\n self._simulation_data.debug, ex)\r\n package_dim = self._data_dimensions.package_dim\r\n model_name = package_dim.model_dim[0].model_name\r\n self._simulation_data.mfpath.add_ext_file(file_path, model_name)\r\n return file_entry\r\n\r\n def _get_data_layer_string(self, layer, data_indent):\r\n # iterate through data layer\r\n try:\r\n data = self._get_storage_obj().get_data(layer, False)\r\n except Exception as ex:\r\n type_, value_, traceback_ = sys.exc_info()\r\n comment = 'Could not get data for layer \"{}\"'.format(layer)\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'getting data',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_,\r\n value_, traceback_, comment,\r\n self._simulation_data.debug, ex)\r\n file_access = MFFileAccessArray(self.structure, self._data_dimensions,\r\n self._simulation_data, self._path,\r\n self._current_key)\r\n return 
file_access.get_data_string(data, self._data_type, data_indent)\r\n\r\n def _resolve_layer_index(self, layer, allow_multiple_layers=False):\r\n # handle layered vs non-layered data\r\n storage = self._get_storage_obj()\r\n if storage.layered:\r\n if layer is None:\r\n if storage.layer_storage.get_total_size() == 1:\r\n layer_index = [0]\r\n elif allow_multiple_layers:\r\n layer_index = storage.get_active_layer_indices()\r\n else:\r\n comment = 'Data \"{}\" is layered but no ' \\\r\n 'layer_num was specified' \\\r\n '.'.format(self._data_name)\r\n type_, value_, traceback_ = sys.exc_info()\r\n raise MFDataException(self.structure.get_model(),\r\n self.structure.get_package(),\r\n self._path,\r\n 'resolving layer index',\r\n self.structure.name,\r\n inspect.stack()[0][3], type_, value_,\r\n traceback_, comment,\r\n self._simulation_data.debug)\r\n\r\n else:\r\n layer_index = [layer]\r\n else:\r\n layer_index = [[0]]\r\n return layer_index\r\n\r\n def _verify_data(self, data_iter, layer_num):\r\n # TODO: Implement\r\n return True\r\n\r\n def plot(self, filename_base=None, file_extension=None, mflay=None,\r\n fignum=None, title=None, **kwargs):\r\n \"\"\"\r\n Plot 3-D model input data\r\n\r\n Parameters\r\n ----------\r\n filename_base : str\r\n Base file name that will be used to automatically generate file\r\n names for output image files. Plots will be exported as image\r\n files if file_name_base is not None. (default is None)\r\n file_extension : str\r\n Valid matplotlib.pyplot file extension for savefig(). Only used\r\n if filename_base is not None. (default is 'png')\r\n mflay : int\r\n MODFLOW zero-based layer number to return. If None, then all\r\n all layers will be included. (default is None)\r\n **kwargs : dict\r\n axes : list of matplotlib.pyplot.axis\r\n List of matplotlib.pyplot.axis that will be used to plot\r\n data for each layer. If axes=None axes will be generated.\r\n (default is None)\r\n pcolor : bool\r\n Boolean used to determine if matplotlib.pyplot.pcolormesh\r\n plot will be plotted. (default is True)\r\n colorbar : bool\r\n Boolean used to determine if a color bar will be added to\r\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\r\n (default is False)\r\n inactive : bool\r\n Boolean used to determine if a black overlay in inactive\r\n cells in a layer will be displayed. (default is True)\r\n contour : bool\r\n Boolean used to determine if matplotlib.pyplot.contour\r\n plot will be plotted. (default is False)\r\n clabel : bool\r\n Boolean used to determine if matplotlib.pyplot.clabel\r\n will be plotted. Only used if contour=True. (default is False)\r\n grid : bool\r\n Boolean used to determine if the model grid will be plotted\r\n on the figure. (default is False)\r\n masked_values : list\r\n List of unique values to be excluded from the plot.\r\n\r\n Returns\r\n ----------\r\n out : list\r\n Empty list is returned if filename_base is not None. 
Otherwise\r\n a list of matplotlib.pyplot.axis is returned.\r\n \"\"\"\r\n from flopy.plot import PlotUtilities\r\n\r\n if not self.plotable:\r\n raise TypeError(\"Simulation level packages are not plotable\")\r\n\r\n if len(self.array.shape) == 2:\r\n axes = PlotUtilities._plot_util2d_helper(self,\r\n title=title,\r\n filename_base=filename_base,\r\n file_extension=file_extension,\r\n fignum=fignum,\r\n **kwargs)\r\n elif len(self.array.shape) == 3:\r\n axes = PlotUtilities._plot_util3d_helper(self,\r\n filename_base=filename_base,\r\n file_extension=file_extension,\r\n mflay=mflay,\r\n fignum=fignum,\r\n **kwargs)\r\n else:\r\n axes = None\r\n\r\n return axes\r\n\r\n\r\nclass MFTransientArray(MFArray, MFTransient):\r\n \"\"\"\r\n Provides an interface for the user to access and update MODFLOW transient\r\n array data.\r\n\r\n Parameters\r\n ----------\r\n sim_data : MFSimulationData\r\n data contained in the simulation\r\n structure : MFDataStructure\r\n describes the structure of the data\r\n data : list or ndarray\r\n actual data\r\n enable : bool\r\n enable/disable the array\r\n path : tuple\r\n path in the data dictionary to this MFArray\r\n dimensions : MFDataDimensions\r\n dimension information related to the model, package, and array\r\n\r\n Methods\r\n -------\r\n add_transient_key : (transient_key : int)\r\n Adds a new transient time allowing data for that time to be stored and\r\n retrieved using the key \"transient_key\"\r\n get_data : (layer_num : int, key : int) : ndarray\r\n Returns the data associated with layer \"layer_num\" during time \"key\".\r\n If \"layer_num\" is None, returns all data for time \"key\".\r\n set_data : (data : ndarray/list, multiplier : float, layer_num : int,\r\n key : int)\r\n Sets the contents of the data at layer \"layer_num\" and time \"key\" to\r\n \"data\" with multiplier \"multiplier\". For unlayered data do not pass\r\n in \"layer_num\".\r\n load : (first_line : string, file_handle : file descriptor,\r\n block_header : MFBlockHeader, pre_data_comments : MFComment) :\r\n tuple (bool, string)\r\n Loads data from first_line (the first line of data) and open file\r\n handle which is pointing to the second line of data. 
Returns a\r\n tuple with the first item indicating whether all data was read\r\n and the second item being the last line of text read from the file.\r\n get_file_entry : (layer : int, key : int) : string\r\n Returns a string containing the data in layer \"layer\" at time \"key\".\r\n For unlayered data do not pass in \"layer\".\r\n\r\n See Also\r\n --------\r\n\r\n Notes\r\n -----\r\n\r\n Examples\r\n --------\r\n\r\n\r\n \"\"\"\r\n def __init__(self, sim_data, model_or_sim, structure, enable=True,\r\n path=None, dimensions=None):\r\n super(MFTransientArray, self).__init__(sim_data=sim_data,\r\n model_or_sim=model_or_sim,\r\n structure=structure,\r\n data=None,\r\n enable=enable,\r\n path=path,\r\n dimensions=dimensions)\r\n self._transient_setup(self._data_storage)\r\n self.repeating = True\r\n\r\n @property\r\n def data_type(self):\r\n return DataType.transient2d\r\n\r\n def remove_transient_key(self, transient_key):\r\n if transient_key in self._data_storage:\r\n del self._data_storage[transient_key]\r\n\r\n def add_transient_key(self, transient_key):\r\n super(MFTransientArray, self).add_transient_key(transient_key)\r\n self._data_storage[transient_key] = \\\r\n super(MFTransientArray, self)._new_storage(stress_period=\r\n transient_key)\r\n\r\n def store_as_external_file(self, external_file_path, layer=None,\r\n binary=False,\r\n replace_existing_external=True):\r\n sim_time = self._data_dimensions.package_dim.model_dim[\r\n 0].simulation_time\r\n num_sp = sim_time.get_num_stress_periods()\r\n # store each stress period in separate file(s)\r\n for sp in range(0, num_sp):\r\n if sp in self._data_storage:\r\n self._current_key = sp\r\n layer_storage = self._get_storage_obj().layer_storage\r\n if layer_storage.get_total_size() > 0 and \\\r\n self._get_storage_obj().layer_storage[0].\\\r\n layer_storage_type != \\\r\n DataStorageType.external_file:\r\n fname, ext = os.path.splitext(external_file_path)\r\n full_name = '{}_{}{}'.format(fname, sp+1, ext)\r\n super(MFTransientArray, self).\\\r\n store_as_external_file(full_name, layer, binary,\r\n replace_existing_external)\r\n\r\n def get_data(self, layer=None, apply_mult=True, **kwargs):\r\n if self._data_storage is not None and len(self._data_storage) > 0:\r\n if layer is None:\r\n output = None\r\n sim_time = self._data_dimensions.package_dim.model_dim[\r\n 0].simulation_time\r\n num_sp = sim_time.get_num_stress_periods()\r\n if 'array' in kwargs:\r\n data = None\r\n for sp in range(0, num_sp):\r\n if sp in self._data_storage:\r\n self.get_data_prep(sp)\r\n data = super(MFTransientArray, self).get_data(\r\n apply_mult=apply_mult, **kwargs)\r\n data = np.expand_dims(data, 0)\r\n else:\r\n if data is None:\r\n # get any data\r\n self.get_data_prep(self._data_storage.key()[0])\r\n data = super(MFTransientArray, self).get_data(\r\n apply_mult=apply_mult, **kwargs)\r\n data = np.expand_dims(data, 0)\r\n if self.structure.type == DatumType.integer:\r\n data = np.full_like(data, 0)\r\n else:\r\n data = np.full_like(data, 0.0)\r\n if output is None:\r\n output = data\r\n else:\r\n output = np.concatenate((output, data))\r\n return output\r\n else:\r\n for sp in range(0, num_sp):\r\n data = None\r\n if sp in self._data_storage:\r\n self.get_data_prep(sp)\r\n data = super(MFTransientArray, self).get_data(\r\n apply_mult=apply_mult, **kwargs)\r\n if output is None:\r\n if 'array' in kwargs:\r\n output = [data]\r\n else:\r\n output = {sp: data}\r\n else:\r\n if 'array' in kwargs:\r\n output.append(data)\r\n else:\r\n output[sp] = data\r\n return 
output\r\n else:\r\n self.get_data_prep(layer)\r\n return super(MFTransientArray, self).get_data(\r\n apply_mult=apply_mult)\r\n else:\r\n return None\r\n\r\n def set_data(self, data, multiplier=None, layer=None, key=None):\r\n if isinstance(data, dict) or isinstance(data, OrderedDict):\r\n # each item in the dictionary is a list for one stress period\r\n # the dictionary key is the stress period the list is for\r\n del_keys = []\r\n for key, list_item in data.items():\r\n if list_item is None:\r\n self.remove_transient_key(key)\r\n del_keys.append(key)\r\n else:\r\n self._set_data_prep(list_item, key)\r\n super(MFTransientArray, self).set_data(list_item,\r\n multiplier, layer)\r\n for key in del_keys:\r\n del data[key]\r\n else:\r\n if key is None:\r\n # search for a key\r\n new_key_index = self.structure.first_non_keyword_index()\r\n if new_key_index is not None and hasattr(data, '__len__') and \\\r\n len(data) > new_key_index:\r\n key = data[new_key_index]\r\n else:\r\n key = 0\r\n if data is None:\r\n self.remove_transient_key(key)\r\n else:\r\n self._set_data_prep(data, key)\r\n super(MFTransientArray, self).set_data(data, multiplier,\r\n layer)\r\n\r\n def get_file_entry(self, key=0,\r\n ext_file_action=ExtFileAction.copy_relative_paths):\r\n self._get_file_entry_prep(key)\r\n return super(MFTransientArray, self).get_file_entry(ext_file_action=\r\n ext_file_action)\r\n\r\n def load(self, first_line, file_handle, block_header,\r\n pre_data_comments=None, external_file_info=None):\r\n self._load_prep(block_header)\r\n return super(MFTransientArray, self).load(first_line, file_handle,\r\n pre_data_comments,\r\n external_file_info)\r\n\r\n def _new_storage(self, set_layers=True, base_storage=False,\r\n stress_period=0):\r\n if base_storage:\r\n if not isinstance(stress_period, int):\r\n stress_period = 1\r\n return super(MFTransientArray, self)._new_storage(set_layers,\r\n base_storage,\r\n stress_period)\r\n else:\r\n return OrderedDict()\r\n\r\n def _set_storage_obj(self, storage):\r\n self._data_storage[self._current_key] = storage\r\n\r\n def _get_storage_obj(self):\r\n if self._current_key is None or \\\r\n self._current_key not in self._data_storage:\r\n return None\r\n return self._data_storage[self._current_key]\r\n\r\n def plot(self, kper=None, filename_base=None, file_extension=None,\r\n mflay=None, fignum=None, **kwargs):\r\n \"\"\"\r\n Plot transient array model input data\r\n\r\n Parameters\r\n ----------\r\n transient2d : flopy.utils.util_array.Transient2D object\r\n filename_base : str\r\n Base file name that will be used to automatically generate file\r\n names for output image files. Plots will be exported as image\r\n files if file_name_base is not None. (default is None)\r\n file_extension : str\r\n Valid matplotlib.pyplot file extension for savefig(). Only used\r\n if filename_base is not None. (default is 'png')\r\n **kwargs : dict\r\n axes : list of matplotlib.pyplot.axis\r\n List of matplotlib.pyplot.axis that will be used to plot\r\n data for each layer. If axes=None axes will be generated.\r\n (default is None)\r\n pcolor : bool\r\n Boolean used to determine if matplotlib.pyplot.pcolormesh\r\n plot will be plotted. (default is True)\r\n colorbar : bool\r\n Boolean used to determine if a color bar will be added to\r\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\r\n (default is False)\r\n inactive : bool\r\n Boolean used to determine if a black overlay in inactive\r\n cells in a layer will be displayed. 
(default is True)\r\n contour : bool\r\n Boolean used to determine if matplotlib.pyplot.contour\r\n plot will be plotted. (default is False)\r\n clabel : bool\r\n Boolean used to determine if matplotlib.pyplot.clabel\r\n will be plotted. Only used if contour=True. (default is False)\r\n grid : bool\r\n Boolean used to determine if the model grid will be plotted\r\n on the figure. (default is False)\r\n masked_values : list\r\n List of unique values to be excluded from the plot.\r\n kper : str\r\n MODFLOW zero-based stress period number to return. If\r\n kper='all' then data for all stress period will be\r\n extracted. (default is zero).\r\n\r\n Returns\r\n ----------\r\n axes : list\r\n Empty list is returned if filename_base is not None. Otherwise\r\n a list of matplotlib.pyplot.axis is returned.\r\n \"\"\"\r\n from flopy.plot.plotutil import PlotUtilities\r\n\r\n if not self.plotable:\r\n raise TypeError(\"Simulation level packages are not plotable\")\r\n\r\n axes = PlotUtilities._plot_transient2d_helper(self,\r\n filename_base=filename_base,\r\n file_extension=file_extension,\r\n kper=kper,\r\n fignum=fignum,\r\n **kwargs)\r\n return axes" ]
[ [ "numpy.dtype" ], [ "numpy.array", "pandas.Series" ], [ "numpy.ones", "numpy.atleast_2d", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.concatenate", "numpy.expand_dims", "numpy.full_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
molkjar/bachelor
[ "a0591691b820c6c8a45d16f8d55f3a7e80ea384b" ]
[ "NYS-covasim/second_wave_scenarios.py" ]
[ "import covasim as cv\nimport covasim.utils as cvu\nimport optuna as op\nimport sciris as sc\nimport pandas as pd\nimport numpy as np\nimport os\nfrom collections import defaultdict\nimport population\n\n## Interesting part starts around line 200\n## First part is setup, optimization workers and alike - Important to run before analysing but not that interesting.\n\n############# Dates #######################\n\nstart_day = '2020-03-01'\nend_day = '2022-03-01'\n\n''' NYSonPause:\n General closure, incorporate school closures (happened few days before)\n Shelter-in-place order etc.\n Everything non-essential closed\n Lifting: Chosen quite arbitrary although somewhat at the right time'''\n\nNYSonPause = '2020-03-22'\nschoolsClosure = '2020-03-16'\n\nlifting = '2020-07-20'\nliftingSW = '2021-08-01'\n\n############# Model Setup #################\n# Population file to load - Generate with 'make_ny_pop.py'\npopfile = 'nyppl.pop'\n\n# Layer specification file used in popgeneration\nlayers = pd.read_csv('layers.csv', index_col='layer')\n\n# Data files to fit with\ncumDeathsfile = 'EpiData/deathsNY20200106.csv' #Deaths 2021-01-06\n\n# Model parameters\npars = sc.objdict(\n pop_size = 200e3,\n pop_scale = 100,\n rescale = True,\n \n pop_infected = 10000, # 0.05% of population infected at start of simulation - Should have a reference, but is stated in a NYT article somewhere.\n \n contacts = layers['contacts'].to_dict(),\n \n beta = 0.07576320418933516,\n beta_layer = layers['beta_layer'].to_dict(),\n \n start_day = start_day,\n end_day = end_day,\n \n rand_seed = 271220,\n \n verbose = .1,\n )\n\n# Intervention level fitted to first wave \nintv = {'H': 1.2765967578928226, \n 'W': 0.07393991037226055,\n 'C': 0.07393991037226055}\n\n############ Interventions ###############\n''' Make interventions, as scaling of beta.\n-- Level specific intervention effects\n-- i.e. 
Households see increase in transmission with school/work closures\n\n** intv = 0 - No transmission\n** intv = 1 - Regular transmission (no intervention)\n** intv > 1 - increase in transmission\n\nAs of now keep schools closed, maybe open them in fall, and close again at thanksgiving/december??\n'''\n \ndef make_ints(lintv, intv=intv):\n \n interventions = [\n # School layer\n cv.change_beta(days = [schoolsClosure, lifting, liftingSW],\n changes = [0, lintv['S'], 1],\n layers = ['S'],\n do_plot = True,\n ),\n \n # Workplace layer\n cv.change_beta(days = [NYSonPause, lifting, liftingSW],\n changes = [intv['W'], lintv['W'], 1],\n layers = ['W'],\n do_plot = False,\n ),\n \n # Householsd layer\n cv.change_beta(days = [NYSonPause, lifting, liftingSW],\n changes = [intv['H'], lintv['H'], 1],\n layers = ['H'],\n do_plot = True,\n ),\n \n # Community layer\n cv.change_beta(days = [NYSonPause, lifting, liftingSW],\n changes = [intv['C'], lintv['C'], 1],\n layers = ['C1'],\n do_plot = False,\n ),\n cv.dynamic_pars(n_imports=dict(days=[0, 141, 142], vals=[0, 10, 0])),\n ]\n\n \n # Regenerate dynamic layers\n interventions.insert(0, population.UpdateNetworks())\n \n return interventions\n\n############## Simulation/calibration setup ############\n## Initialize simulation with intervention\ndef make_sim(pars, lintv={'S':1,'W':1,'H':1,'C':1}, load_pop=True, popfile=popfile, datafile=cumDeathsfile):\n sim = cv.Sim(pars=pars,\n popfile=popfile,\n load_pop=load_pop,\n datafile=datafile)\n \n sim.pars['interventions'] = make_ints(lintv=lintv)\n \n sim.initialize()\n \n return sim\n\n## Running simulation\ndef run_sim(pars, lintv={'S':1,'W':1,'H':1,'C':1}, popfile=popfile, return_stat=False, verbose=0.1):\n sim = make_sim(pars=pars, lintv=lintv, popfile=popfile)\n sim.run(verbose=verbose)\n \n if return_stat:\n stat = sim.results['cum_infections'][-1]\n return stat\n else:\n return sim\n\n\n \n\n############## Calibration settings ###############\nname = 'lintv-SW-herd'\n\nW_low = 0.07 #0\nW_high = 1 #1\n\nn_workers = 2 # Define how many workers to run in parallel\nn_trials = 50 # Define the number of trials, i.e. sim runs, per worker\n\ndb_name = f'{name}.db'\nstorage = f'sqlite:///{db_name}'\n\n\n\n############### Calibration workings ##############\ndef run_trial(trial):\n ''' Define the objective for Optuna ''' \n lintv_W = trial.suggest_uniform('lintv_W', W_low, W_high)\n lintv_H = -0.3*lintv_W+1.3\n lintv = {'S':lintv_W, 'W':lintv_W, 'H':lintv_H, 'C':lintv_W}\n \n cum_d = run_sim(pars, lintv=lintv, return_stat=True, verbose=0)\n return cum_d\n\ndef worker():\n ''' Run a single worker '''\n study = op.load_study(storage=storage, study_name=name)\n output = study.optimize(run_trial, n_trials=n_trials)\n return output\n\ndef run_workers():\n ''' Run multiple workers in parallel '''\n output = sc.parallelize(worker, n_workers)\n return output\n\n\ndef make_study():\n ''' Make a study, deleting one if it already exists '''\n if os.path.exists(db_name):\n os.remove(db_name)\n print(f'Removed existing calibration {db_name}')\n output = op.create_study(storage=storage, study_name=name)\n return output\n\n\n\n\n########### Run the optimization ############\nt0 = sc.tic()\nmake_study()\nrun_workers()\nstudy = op.load_study(storage=storage, study_name=name)\nbest_pars = study.best_params\nT = sc.toc(t0, output=True)\nprint(f'\\n\\nOutput: {best_pars}, time: {T:0.1f} s')\n\n'''\nOptimal intervention level estimate: lintv_W=0.355\n!! 
lintv_H=-0.3*lintv_W+1.3 = 1.195\n'''\n\n\n\n########### Scenarios #############\n## Code which is commented out (single #) are used to run the simulation which is loaded underneath\n\n#basesim = make_sim(pars=pars, lintv={'W':0.355, 'C':0.355, 'S':0.355, 'H':-0.3*0.35+1.3})\n#msim = cv.MultiSim(basesim)\n#msim.run(n_runs=50, n_cpus=10)\n#msim.median(quantiles=[0.025, 0.975])\n#msim.plot()\n#msim.save(\"second_wave_hd50.msim\")\n\nmsim = cv.load(\"alreadyRun/second_wave_hd50.msim\")\n\n## Check that there's still infectious individuals left\nfor sim in msim.sims:\n print(sim.label)\n print(sim.results['new_infectious'][409])\n \n \n## Final size --> Herd immunity threshold over different seeds - Quantiles\nfin_size = [0]*50\nind = 0\nfor sim in msim.sims:\n fin_size[ind] = sim.results['cum_deaths'][-1]\n ind += 1\n\nnp.quantile(fin_size, [0.025, 0.5, 0.975])\nnp.quantile(fin_size, [0.025, 0.5, 0.975])/200e3\n## [74.52449518, 75.2632428 , 75.86432382]\n\n \n \n###### Running without interventions \n#basesimf = make_sim(pars=pars, lintv={'W':1, 'C':1, 'S':1, 'H':1})\n#msimf = cv.MultiSim(basesimf)\n#msimf.run(n_runs=50, n_cpus=10)\n#msimf.median(quantiles=[0.025, 0.975])\n#msimf.save(\"second_wave_free50.msim\")\n\nmismf = cv.load(\"alreadyRun/second_wave_free50.msim\")\n\n\ncum_inf = [0]*50\nind = 0\nfor sim in msimf.sims:\n cum_inf[ind] = sim.results['cum_infectious'][-1]\n ind += 1\n\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])/200e3\n\n\n####### With current estimated interventions\n#I_W = 0.23084114685289414\n#basesimCI = make_sim(pars=pars, lintv={'W':I_W, 'C':I_W, 'S':I_W, 'H':-0.3*I_W+1.3})\n#msimCI = cv.MultiSim(basesimCI)\n#msimCI.run(n_runs=50, n_cpus=10)\n#msimCI.median(quantiles=[0.025, 0.975])\n#msimCI.save(\"second_wave_fit_reopen50.msim\")\n\nmsimCI = cv.load(\"alreadyRun/second_wave_fit_reopen50.msim\")\n\ncum_inf = [0]*50\nind = 0\nfor sim in msimCI.sims:\n cum_inf[ind] = sim.results['cum_deaths'][-1]\n ind += 1\n\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])\nnp.quantile(cum_inf, [0.025, 0.5, 0.975])/200e3\n\n\n\n\n\n\n\n\n\n\n###### Running with closed schools\nbasesimCS = make_sim(pars=pars, lintv={'W':0.44, 'C':0.44, 'S':0, 'H':-0.3*0.44+1.3})\nmsimCS = cv.MultiSim(basesimCS)\nmsimCS.run(n_runs=25, n_cpus=10)\nmsimCS.median(quantiles=[0.025, 0.975])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.quantile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Arielce/dio
[ "eb8035664f605783f86b41d34006aeb9ef861f13" ]
[ "tutorials/bios-boot-tutorial/bios-boot-tutorial.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport numpy\nimport os\nimport random\nimport re\nimport subprocess\nimport sys\nimport time\n\nargs = None\nlogFile = None\n\nunlockTimeout = 999999999\nfastUnstakeSystem = './fast.refund/dccio.system/dccio.system.wasm'\n\nsystemAccounts = [\n 'dccio.bpay',\n 'dccio.msig',\n 'dccio.names',\n 'dccio.ram',\n 'dccio.ramfee',\n 'dccio.saving',\n 'dccio.stake',\n 'dccio.token',\n 'dccio.vpay',\n]\n\ndef jsonArg(a):\n return \" '\" + json.dumps(a) + \"' \"\n\ndef run(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n if subprocess.call(args, shell=True):\n print('bios-boot-tutorial.py: exiting because of error')\n sys.exit(1)\n\ndef retry(args):\n while True:\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n if subprocess.call(args, shell=True):\n print('*** Retry')\n else:\n break\n\ndef background(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n return subprocess.Popen(args, shell=True)\n\ndef getOutput(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)\n return proc.communicate()[0].decode('utf-8')\n\ndef getJsonOutput(args):\n print('bios-boot-tutorial.py:', args)\n logFile.write(args + '\\n')\n proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)\n return json.loads(proc.communicate()[0])\n\ndef sleep(t):\n print('sleep', t, '...')\n time.sleep(t)\n print('resume')\n\ndef startWallet():\n run('rm -rf ' + os.path.abspath(args.wallet_dir))\n run('mkdir -p ' + os.path.abspath(args.wallet_dir))\n background(args.kdccd + ' --unlock-timeout %d --http-server-address 127.0.0.1:6666 --wallet-dir %s' % (unlockTimeout, os.path.abspath(args.wallet_dir)))\n sleep(.4)\n run(args.cldcc + 'wallet create --to-console')\n\ndef importKeys():\n run(args.cldcc + 'wallet import --private-key ' + args.private_key)\n keys = {}\n for a in accounts:\n key = a['pvt']\n if not key in keys:\n if len(keys) >= args.max_user_keys:\n break\n keys[key] = True\n run(args.cldcc + 'wallet import --private-key ' + key)\n for i in range(firstProducer, firstProducer + numProducers):\n a = accounts[i]\n key = a['pvt']\n if not key in keys:\n keys[key] = True\n run(args.cldcc + 'wallet import --private-key ' + key)\n\ndef startNode(nodeIndex, account):\n dir = args.nodes_dir + ('%02d-' % nodeIndex) + account['name'] + '/'\n run('rm -rf ' + dir)\n run('mkdir -p ' + dir)\n otherOpts = ''.join(list(map(lambda i: ' --p2p-peer-address localhost:' + str(9000 + i), range(nodeIndex))))\n if not nodeIndex: otherOpts += (\n ' --plugin dccio::history_plugin'\n ' --plugin dccio::history_api_plugin'\n )\n cmd = (\n args.noddcc +\n ' --max-irreversible-block-age -1'\n ' --contracts-console'\n ' --genesis-json ' + os.path.abspath(args.genesis) +\n ' --blocks-dir ' + os.path.abspath(dir) + '/blocks'\n ' --config-dir ' + os.path.abspath(dir) +\n ' --data-dir ' + os.path.abspath(dir) +\n ' --chain-state-db-size-mb 1024'\n ' --http-server-address 127.0.0.1:' + str(8000 + nodeIndex) +\n ' --p2p-listen-endpoint 127.0.0.1:' + str(9000 + nodeIndex) +\n ' --max-clients ' + str(maxClients) +\n ' --p2p-max-nodes-per-host ' + str(maxClients) +\n ' --enable-stale-production'\n ' --producer-name ' + account['name'] +\n ' --private-key \\'[\"' + account['pub'] + '\",\"' + account['pvt'] + '\"]\\''\n ' --plugin dccio::http_plugin'\n ' --plugin dccio::chain_api_plugin'\n ' --plugin dccio::producer_plugin' +\n 
otherOpts)\n with open(dir + 'stderr', mode='w') as f:\n f.write(cmd + '\\n\\n')\n background(cmd + ' 2>>' + dir + 'stderr')\n\ndef startProducers(b, e):\n for i in range(b, e):\n startNode(i - b + 1, accounts[i])\n\ndef createSystemAccounts():\n for a in systemAccounts:\n run(args.cldcc + 'create account dccio ' + a + ' ' + args.public_key)\n\ndef intToCurrency(i):\n return '%d.%04d %s' % (i // 10000, i % 10000, args.symbol)\n\ndef allocateFunds(b, e):\n dist = numpy.random.pareto(1.161, e - b).tolist() # 1.161 = 80/20 rule\n dist.sort()\n dist.reverse()\n factor = 1_000_000_000 / sum(dist)\n total = 0\n for i in range(b, e):\n funds = round(factor * dist[i - b] * 10000)\n if i >= firstProducer and i < firstProducer + numProducers:\n funds = max(funds, round(args.min_producer_funds * 10000))\n total += funds\n accounts[i]['funds'] = funds\n return total\n\ndef createStakedAccounts(b, e):\n ramFunds = round(args.ram_funds * 10000)\n configuredMinStake = round(args.min_stake * 10000)\n maxUnstaked = round(args.max_unstaked * 10000)\n for i in range(b, e):\n a = accounts[i]\n funds = a['funds']\n print('#' * 80)\n print('# %d/%d %s %s' % (i, e, a['name'], intToCurrency(funds)))\n print('#' * 80)\n if funds < ramFunds:\n print('skipping %s: not enough funds to cover ram' % a['name'])\n continue\n minStake = min(funds - ramFunds, configuredMinStake)\n unstaked = min(funds - ramFunds - minStake, maxUnstaked)\n stake = funds - ramFunds - unstaked\n stakeNet = round(stake / 2)\n stakeCpu = stake - stakeNet\n print('%s: total funds=%s, ram=%s, net=%s, cpu=%s, unstaked=%s' % (a['name'], intToCurrency(a['funds']), intToCurrency(ramFunds), intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(unstaked)))\n assert(funds == ramFunds + stakeNet + stakeCpu + unstaked)\n retry(args.cldcc + 'system newaccount --transfer dccio %s %s --stake-net \"%s\" --stake-cpu \"%s\" --buy-ram \"%s\" ' % \n (a['name'], a['pub'], intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(ramFunds)))\n if unstaked:\n retry(args.cldcc + 'transfer dccio %s \"%s\"' % (a['name'], intToCurrency(unstaked)))\n\ndef regProducers(b, e):\n for i in range(b, e):\n a = accounts[i]\n retry(args.cldcc + 'system regproducer ' + a['name'] + ' ' + a['pub'] + ' https://' + a['name'] + '.com' + '/' + a['pub'])\n\ndef listProducers():\n run(args.cldcc + 'system listproducers')\n\ndef vote(b, e):\n for i in range(b, e):\n voter = accounts[i]['name']\n prods = random.sample(range(firstProducer, firstProducer + numProducers), args.num_producers_vote)\n prods = ' '.join(map(lambda x: accounts[x]['name'], prods))\n retry(args.cldcc + 'system voteproducer prods ' + voter + ' ' + prods)\n\ndef claimRewards():\n table = getJsonOutput(args.cldcc + 'get table dccio dccio producers -l 100')\n times = []\n for row in table['rows']:\n if row['unpaid_blocks'] and not row['last_claim_time']:\n times.append(getJsonOutput(args.cldcc + 'system claimrewards -j ' + row['owner'])['processed']['elapsed'])\n print('Elapsed time for claimrewards:', times)\n\ndef proxyVotes(b, e):\n vote(firstProducer, firstProducer + 1)\n proxy = accounts[firstProducer]['name']\n retry(args.cldcc + 'system regproxy ' + proxy)\n sleep(1.0)\n for i in range(b, e):\n voter = accounts[i]['name']\n retry(args.cldcc + 'system voteproducer proxy ' + voter + ' ' + proxy)\n\ndef updateAuth(account, permission, parent, controller):\n run(args.cldcc + 'push action dccio updateauth' + jsonArg({\n 'account': account,\n 'permission': permission,\n 'parent': parent,\n 'auth': {\n 
'threshold': 1, 'keys': [], 'waits': [],\n 'accounts': [{\n 'weight': 1,\n 'permission': {'actor': controller, 'permission': 'active'}\n }]\n }\n }) + '-p ' + account + '@' + permission)\n\ndef resign(account, controller):\n updateAuth(account, 'owner', '', controller)\n updateAuth(account, 'active', 'owner', controller)\n sleep(1)\n run(args.cldcc + 'get account ' + account)\n\ndef randomTransfer(b, e):\n for j in range(20):\n src = accounts[random.randint(b, e - 1)]['name']\n dest = src\n while dest == src:\n dest = accounts[random.randint(b, e - 1)]['name']\n run(args.cldcc + 'transfer -f ' + src + ' ' + dest + ' \"0.0001 ' + args.symbol + '\"' + ' || true')\n\ndef msigProposeReplaceSystem(proposer, proposalName):\n requestedPermissions = []\n for i in range(firstProducer, firstProducer + numProducers):\n requestedPermissions.append({'actor': accounts[i]['name'], 'permission': 'active'})\n trxPermissions = [{'actor': 'dccio', 'permission': 'active'}]\n with open(fastUnstakeSystem, mode='rb') as f:\n setcode = {'account': 'dccio', 'vmtype': 0, 'vmversion': 0, 'code': f.read().hex()}\n run(args.cldcc + 'multisig propose ' + proposalName + jsonArg(requestedPermissions) + \n jsonArg(trxPermissions) + 'dccio setcode' + jsonArg(setcode) + ' -p ' + proposer)\n\ndef msigApproveReplaceSystem(proposer, proposalName):\n for i in range(firstProducer, firstProducer + numProducers):\n run(args.cldcc + 'multisig approve ' + proposer + ' ' + proposalName +\n jsonArg({'actor': accounts[i]['name'], 'permission': 'active'}) +\n '-p ' + accounts[i]['name'])\n\ndef msigExecReplaceSystem(proposer, proposalName):\n retry(args.cldcc + 'multisig exec ' + proposer + ' ' + proposalName + ' -p ' + proposer)\n\ndef msigReplaceSystem():\n run(args.cldcc + 'push action dccio buyrambytes' + jsonArg(['dccio', accounts[0]['name'], 200000]) + '-p dccio')\n sleep(1)\n msigProposeReplaceSystem(accounts[0]['name'], 'fast.unstake')\n sleep(1)\n msigApproveReplaceSystem(accounts[0]['name'], 'fast.unstake')\n msigExecReplaceSystem(accounts[0]['name'], 'fast.unstake')\n\ndef produceNewAccounts():\n with open('newusers', 'w') as f:\n for i in range(120_000, 200_000):\n x = getOutput(args.cldcc + 'create key --to-console')\n r = re.match('Private key: *([^ \\n]*)\\nPublic key: *([^ \\n]*)', x, re.DOTALL | re.MULTILINE)\n name = 'user'\n for j in range(7, -1, -1):\n name += chr(ord('a') + ((i >> (j * 4)) & 15))\n print(i, name)\n f.write(' {\"name\":\"%s\", \"pvt\":\"%s\", \"pub\":\"%s\"},\\n' % (name, r[1], r[2]))\n\ndef stepKillAll():\n run('killall kdccd noddcc || true')\n sleep(1.5)\ndef stepStartWallet():\n startWallet()\n importKeys()\ndef stepStartBoot():\n startNode(0, {'name': 'dccio', 'pvt': args.private_key, 'pub': args.public_key})\n sleep(1.5)\ndef stepInstallSystemContracts():\n run(args.cldcc + 'set contract dccio.token ' + args.contracts_dir + 'dccio.token/')\n run(args.cldcc + 'set contract dccio.msig ' + args.contracts_dir + 'dccio.msig/')\ndef stepCreateTokens():\n run(args.cldcc + 'push action dccio.token create \\'[\"dccio\", \"10000000000.0000 %s\"]\\' -p dccio.token' % (args.symbol))\n totalAllocation = allocateFunds(0, len(accounts))\n run(args.cldcc + 'push action dccio.token issue \\'[\"dccio\", \"%s\", \"memo\"]\\' -p dccio' % intToCurrency(totalAllocation))\n sleep(1)\ndef stepSetSystemContract():\n retry(args.cldcc + 'set contract dccio ' + args.contracts_dir + 'dccio.system/')\n sleep(1)\n run(args.cldcc + 'push action dccio setpriv' + jsonArg(['dccio.msig', 1]) + '-p dccio@active')\ndef 
stepCreateStakedAccounts():\n createStakedAccounts(0, len(accounts))\ndef stepRegProducers():\n regProducers(firstProducer, firstProducer + numProducers)\n sleep(1)\n listProducers()\ndef stepStartProducers():\n startProducers(firstProducer, firstProducer + numProducers)\n sleep(args.producer_sync_delay)\ndef stepVote():\n vote(0, 0 + args.num_voters)\n sleep(1)\n listProducers()\n sleep(5)\ndef stepProxyVotes():\n proxyVotes(0, 0 + args.num_voters)\ndef stepResign():\n resign('dccio', 'dccio.prods')\n for a in systemAccounts:\n resign(a, 'dccio')\ndef stepTransfer():\n while True:\n randomTransfer(0, args.num_senders)\ndef stepLog():\n run('tail -n 60 ' + args.nodes_dir + '00-dccio/stderr')\n\n# Command Line Arguments\n\nparser = argparse.ArgumentParser()\n\ncommands = [\n ('k', 'kill', stepKillAll, True, \"Kill all noddcc and kdccd processes\"),\n ('w', 'wallet', stepStartWallet, True, \"Start kdccd, create wallet, fill with keys\"),\n ('b', 'boot', stepStartBoot, True, \"Start boot node\"),\n ('s', 'sys', createSystemAccounts, True, \"Create system accounts (dccio.*)\"),\n ('c', 'contracts', stepInstallSystemContracts, True, \"Install system contracts (token, msig)\"),\n ('t', 'tokens', stepCreateTokens, True, \"Create tokens\"),\n ('S', 'sys-contract', stepSetSystemContract, True, \"Set system contract\"),\n ('T', 'stake', stepCreateStakedAccounts, True, \"Create staked accounts\"),\n ('p', 'reg-prod', stepRegProducers, True, \"Register producers\"),\n ('P', 'start-prod', stepStartProducers, True, \"Start producers\"),\n ('v', 'vote', stepVote, True, \"Vote for producers\"),\n ('R', 'claim', claimRewards, True, \"Claim rewards\"),\n ('x', 'proxy', stepProxyVotes, True, \"Proxy votes\"),\n ('q', 'resign', stepResign, True, \"Resign dccio\"),\n ('m', 'msg-replace', msigReplaceSystem, False, \"Replace system contract using msig\"),\n ('X', 'xfer', stepTransfer, False, \"Random transfer tokens (infinite loop)\"),\n ('l', 'log', stepLog, True, \"Show tail of node's log\"),\n]\n\nparser.add_argument('--public-key', metavar='', help=\"dccIO Public Key\", default='dcc8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr', dest=\"public_key\")\nparser.add_argument('--private-Key', metavar='', help=\"dccIO Private Key\", default='5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p', dest=\"private_key\")\nparser.add_argument('--cldcc', metavar='', help=\"Cldcc command\", default='../../build/programs/cldcc/cldcc --wallet-url http://127.0.0.1:6666 ')\nparser.add_argument('--noddcc', metavar='', help=\"Path to noddcc binary\", default='../../build/programs/noddcc/noddcc')\nparser.add_argument('--kdccd', metavar='', help=\"Path to kdccd binary\", default='../../build/programs/kdccd/kdccd')\nparser.add_argument('--contracts-dir', metavar='', help=\"Path to contracts directory\", default='../../build/contracts/')\nparser.add_argument('--nodes-dir', metavar='', help=\"Path to nodes directory\", default='./nodes/')\nparser.add_argument('--genesis', metavar='', help=\"Path to genesis.json\", default=\"./genesis.json\")\nparser.add_argument('--wallet-dir', metavar='', help=\"Path to wallet directory\", default='./wallet/')\nparser.add_argument('--log-path', metavar='', help=\"Path to log file\", default='./output.log')\nparser.add_argument('--symbol', metavar='', help=\"The dccio.system symbol\", default='SYS')\nparser.add_argument('--user-limit', metavar='', help=\"Max number of users. 
(0 = no limit)\", type=int, default=3000)\nparser.add_argument('--max-user-keys', metavar='', help=\"Maximum user keys to import into wallet\", type=int, default=10)\nparser.add_argument('--ram-funds', metavar='', help=\"How much funds for each user to spend on ram\", type=float, default=0.1)\nparser.add_argument('--min-stake', metavar='', help=\"Minimum stake before allocating unstaked funds\", type=float, default=0.9)\nparser.add_argument('--max-unstaked', metavar='', help=\"Maximum unstaked funds\", type=float, default=10)\nparser.add_argument('--producer-limit', metavar='', help=\"Maximum number of producers. (0 = no limit)\", type=int, default=0)\nparser.add_argument('--min-producer-funds', metavar='', help=\"Minimum producer funds\", type=float, default=1000.0000)\nparser.add_argument('--num-producers-vote', metavar='', help=\"Number of producers for which each user votes\", type=int, default=20)\nparser.add_argument('--num-voters', metavar='', help=\"Number of voters\", type=int, default=10)\nparser.add_argument('--num-senders', metavar='', help=\"Number of users to transfer funds randomly\", type=int, default=10)\nparser.add_argument('--producer-sync-delay', metavar='', help=\"Time (s) to sleep to allow producers to sync\", type=int, default=80)\nparser.add_argument('-a', '--all', action='store_true', help=\"Do everything marked with (*)\")\nparser.add_argument('-H', '--http-port', type=int, default=8000, metavar='', help='HTTP port for cldcc')\n\nfor (flag, command, function, inAll, help) in commands:\n prefix = ''\n if inAll: prefix += '*'\n if prefix: help = '(' + prefix + ') ' + help\n if flag:\n parser.add_argument('-' + flag, '--' + command, action='store_true', help=help, dest=command)\n else:\n parser.add_argument('--' + command, action='store_true', help=help, dest=command)\n \nargs = parser.parse_args()\n\nargs.cldcc += '--url http://127.0.0.1:%d ' % args.http_port\n\nlogFile = open(args.log_path, 'a')\n\nlogFile.write('\\n\\n' + '*' * 80 + '\\n\\n\\n')\n\nwith open('accounts.json') as f:\n a = json.load(f)\n if args.user_limit:\n del a['users'][args.user_limit:]\n if args.producer_limit:\n del a['producers'][args.producer_limit:]\n firstProducer = len(a['users'])\n numProducers = len(a['producers'])\n accounts = a['users'] + a['producers']\n\nmaxClients = numProducers + 10\n\nhaveCommand = False\nfor (flag, command, function, inAll, help) in commands:\n if getattr(args, command) or inAll and args.all:\n if function:\n haveCommand = True\n function()\nif not haveCommand:\n print('bios-boot-tutorial.py: Tell me what to do. -a does almost everything. -h shows options.')\n" ]
[ [ "numpy.random.pareto" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
11BP11/inverse_problems_GAN
[ "1d8ece55f7de1610b5481d39945b083a4ed3fcc0" ]
[ "problems/center_inpainting.py" ]
[ "\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom problems.problem import *\r\n\r\nname = \"center inpainting\"\r\n \r\ng_tf_info_placeholder = tf.placeholder(tf.float32, [None], name='g_transform_info')\r\n \r\ndef problem_loss(x_tformed, g_tformed):\r\n return tf.reduce_mean(tf.abs(x_tformed-g_tformed),[1,2,3])\r\n\r\ndef merge(g_output, x_tformed, g_tform_info):\r\n h, w = x_tformed.shape[1:3]\r\n h4, w4 = h//6, w//6\r\n merged = np.copy(x_tformed)\r\n merged[:,h4:h-h4,w4:w-w4,:] = g_output[:,h4:h-h4,w4:w-w4,:]\r\n return merged\r\n\r\ndef transform_tf(x, g_tf_info):\r\n not_x = - tf.ones_like(x, dtype=tf.float32)\r\n mask = np.ones(x.get_shape(), dtype=np.float32)\r\n mask0 = np.zeros(x.get_shape(), dtype=np.float32)\r\n mask = merge(mask0, mask, None)\r\n output = mask * x + (1-mask) * not_x\r\n return output\r\n\r\n \r\ndef transform(x, g_tf_info):\r\n not_x = - np.ones_like(x, dtype=np.float32)\r\n output = merge(not_x, x, None)\r\n return output\r\n \r\ndef create_tform_info(args):\r\n return [0]*args.batch_size\r\n\r\ndef safe_format(tformed):\r\n return np.clip(tformed,0,1)\r\n \r\n " ]
[ [ "numpy.ones_like", "numpy.clip", "tensorflow.ones_like", "tensorflow.placeholder", "numpy.copy", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
nicoguertler/leibnizgym
[ "2c1cb14fbfece09644445d58fe7ac28c41611e5f" ]
[ "leibnizgym/envs/trifinger/sample.py" ]
[ "\"\"\"\n@author Mayank Mittal\n@email [email protected]\n@brief Defines sampling stratergies.\n\n# TODO: These functions are generic. Can put in leibnizgym.utils.torch_utils module.\n\"\"\"\n\n# leibnizgym\nfrom leibnizgym.utils.torch_utils import quaternion_from_euler_xyz\n# python\nfrom typing import Union, List, Tuple\nimport numpy as np\nimport torch\nimport torch.nn.functional\n\n\"\"\"\nSampling of cuboidal object\n\"\"\"\n\n\[email protected]\ndef random_xy(num: int, max_com_distance_to_center: float, device: str) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Returns sampled uniform positions in circle (https://stackoverflow.com/a/50746409)\"\"\"\n # sample radius of circle\n radius = torch.sqrt(torch.rand(num, dtype=torch.float, device=device))\n radius *= max_com_distance_to_center\n # sample theta of point\n theta = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)\n # x,y-position of the cube\n x = radius * torch.cos(theta)\n y = radius * torch.sin(theta)\n\n return x, y\n\n\[email protected]\ndef random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor:\n \"\"\"Returns sampled height of the goal object.\"\"\"\n z = torch.rand(num, dtype=torch.float, device=device)\n z = (max_height - min_height) * z + min_height\n\n return z\n\n\[email protected]\ndef default_orientation(num: int, device: str) -> torch.Tensor:\n \"\"\"Returns identity rotation transform.\"\"\"\n quat = torch.zeros((num, 4,), dtype=torch.float, device=device)\n quat[..., -1] = 1.0\n\n return quat\n\n\[email protected]\ndef random_orientation(num: int, device: str) -> torch.Tensor:\n \"\"\"Returns sampled rotation in 3D as quaternion.\n Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html\n \"\"\"\n # sample random orientation from normal distribution\n quat = torch.randn((num, 4,), dtype=torch.float, device=device)\n # normalize the quaternion\n quat = torch.nn.functional.normalize(quat, p=2., dim=-1, eps=1e-12)\n\n return quat\n\[email protected]\ndef random_angular_vel(num: int, device: str, magnitude_stdev: float) -> torch.Tensor:\n \"\"\"Samples a random angular velocity with standard deviation `magnitude_stdev`\"\"\"\n\n axis = torch.randn((num, 3,), dtype=torch.float, device=device)\n axis /= torch.norm(axis, p=2, dim=-1).view(-1, 1)\n magnitude = torch.randn((num, 1,), dtype=torch.float, device=device)\n magnitude *= magnitude_stdev\n return magnitude * axis\n\[email protected]\ndef random_yaw_orientation(num: int, device: str) -> torch.Tensor:\n \"\"\"Returns sampled rotation around z-axis.\"\"\"\n roll = torch.zeros(num, dtype=torch.float, device=device)\n pitch = torch.zeros(num, dtype=torch.float, device=device)\n yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)\n\n return quaternion_from_euler_xyz(roll, pitch, yaw)\n\n# EOF\n" ]
[ [ "torch.nn.functional.normalize", "torch.norm", "torch.zeros", "torch.sin", "torch.randn", "torch.rand", "torch.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
magne-max/zipline
[ "41172cd3a320806c4116bcfafa6a607fa300acde", "41172cd3a320806c4116bcfafa6a607fa300acde", "41172cd3a320806c4116bcfafa6a607fa300acde", "8beba055aa4211dc2debc5c3083077cbd19d0bbc" ]
[ "zipline/pipeline/loaders/events.py", "zipline/finance/performance/position_tracker.py", "zipline/data/bundles/yahoo.py", "zipline/data/treasuries_can.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom six import viewvalues\nfrom toolz import groupby, merge\n\nfrom .base import PipelineLoader\nfrom .frame import DataFrameLoader\nfrom zipline.pipeline.common import (\n EVENT_DATE_FIELD_NAME,\n SID_FIELD_NAME,\n TS_FIELD_NAME,\n)\nfrom zipline.pipeline.loaders.utils import (\n next_event_indexer,\n previous_event_indexer,\n)\n\n\ndef required_event_fields(next_value_columns, previous_value_columns):\n \"\"\"\n Compute the set of resource columns required to serve\n ``next_value_columns`` and ``previous_value_columns``.\n \"\"\"\n # These metadata columns are used to align event indexers.\n return {\n TS_FIELD_NAME,\n SID_FIELD_NAME,\n EVENT_DATE_FIELD_NAME,\n }.union(\n # We also expect any of the field names that our loadable columns\n # are mapped to.\n viewvalues(next_value_columns),\n viewvalues(previous_value_columns),\n )\n\n\ndef validate_column_specs(events, next_value_columns, previous_value_columns):\n \"\"\"\n Verify that the columns of ``events`` can be used by an EventsLoader to\n serve the BoundColumns described by ``next_value_columns`` and\n ``previous_value_columns``.\n \"\"\"\n required = {\n TS_FIELD_NAME,\n SID_FIELD_NAME,\n EVENT_DATE_FIELD_NAME,\n }.union(\n # We also expect any of the field names that our loadable columns\n # are mapped to.\n viewvalues(next_value_columns),\n viewvalues(previous_value_columns),\n )\n received = set(events.columns)\n missing = required - received\n if missing:\n raise ValueError(\n \"EventsLoader missing required columns {missing}.\\n\"\n \"Got Columns: {received}\\n\"\n \"Expected Columns: {required}\".format(\n missing=sorted(missing),\n received=sorted(received),\n required=sorted(required),\n )\n )\n\n\nclass EventsLoader(PipelineLoader):\n \"\"\"\n Base class for PipelineLoaders that supports loading the next and previous\n value of an event field.\n\n Does not currently support adjustments.\n\n Parameters\n ----------\n events : pd.DataFrame\n A DataFrame representing events (e.g. 
share buybacks or\n earnings announcements) associated with particular companies.\n\n ``events`` must contain at least three columns::\n sid : int64\n The asset id associated with each event.\n\n event_date : datetime64[ns]\n The date on which the event occurred.\n\n timestamp : datetime64[ns]\n The date on which we learned about the event.\n\n next_value_columns : dict[BoundColumn -> str]\n Map from dataset columns to raw field names that should be used when\n searching for a next event value.\n\n previous_value_columns : dict[BoundColumn -> str]\n Map from dataset columns to raw field names that should be used when\n searching for a previous event value.\n \"\"\"\n def __init__(self,\n events,\n next_value_columns,\n previous_value_columns):\n validate_column_specs(\n events,\n next_value_columns,\n previous_value_columns,\n )\n\n events = events[events[EVENT_DATE_FIELD_NAME].notnull()]\n\n # We always work with entries from ``events`` directly as numpy arrays,\n # so we coerce from a frame here.\n self.events = {\n name: np.asarray(series)\n for name, series in events.sort(EVENT_DATE_FIELD_NAME).iteritems()\n }\n\n # Columns to load with self.load_next_events.\n self.next_value_columns = next_value_columns\n\n # Columns to load with self.load_previous_events.\n self.previous_value_columns = previous_value_columns\n\n def split_next_and_previous_event_columns(self, requested_columns):\n \"\"\"\n Split requested columns into columns that should load the next known\n value and columns that should load the previous known value.\n\n Parameters\n ----------\n requested_columns : iterable[BoundColumn]\n\n Returns\n -------\n next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]\n ``requested_columns``, partitioned into sub-sequences based on\n whether the column should produce values from the next event or the\n previous event\n \"\"\"\n def next_or_previous(c):\n if c in self.next_value_columns:\n return 'next'\n elif c in self.previous_value_columns:\n return 'previous'\n\n raise ValueError(\n \"{c} not found in next_value_columns \"\n \"or previous_value_columns\".format(c=c)\n )\n groups = groupby(next_or_previous, requested_columns)\n return groups.get('next', ()), groups.get('previous', ())\n\n def next_event_indexer(self, dates, sids):\n return next_event_indexer(\n dates,\n sids,\n self.events[EVENT_DATE_FIELD_NAME],\n self.events[TS_FIELD_NAME],\n self.events[SID_FIELD_NAME],\n )\n\n def previous_event_indexer(self, dates, sids):\n return previous_event_indexer(\n dates,\n sids,\n self.events[EVENT_DATE_FIELD_NAME],\n self.events[TS_FIELD_NAME],\n self.events[SID_FIELD_NAME],\n )\n\n def load_next_events(self, columns, dates, sids, mask):\n if not columns:\n return {}\n\n return self._load_events(\n name_map=self.next_value_columns,\n indexer=self.next_event_indexer(dates, sids),\n columns=columns,\n dates=dates,\n sids=sids,\n mask=mask,\n )\n\n def load_previous_events(self, columns, dates, sids, mask):\n if not columns:\n return {}\n\n return self._load_events(\n name_map=self.previous_value_columns,\n indexer=self.previous_event_indexer(dates, sids),\n columns=columns,\n dates=dates,\n sids=sids,\n mask=mask,\n )\n\n def _load_events(self, name_map, indexer, columns, dates, sids, mask):\n def to_frame(array):\n return pd.DataFrame(array, index=dates, columns=sids)\n\n out = {}\n for c in columns:\n raw = self.events[name_map[c]][indexer]\n # indexer will be -1 for locations where we don't have a known\n # value.\n raw[indexer < 0] = c.missing_value\n\n # Delegate 
the actual array formatting logic to a DataFrameLoader.\n loader = DataFrameLoader(c, to_frame(raw), adjustments=None)\n out[c] = loader.load_adjusted_array([c], dates, sids, mask)[c]\n return out\n\n def load_adjusted_array(self, columns, dates, sids, mask):\n n, p = self.split_next_and_previous_event_columns(columns)\n return merge(\n self.load_next_events(n, dates, sids, mask),\n self.load_previous_events(p, dates, sids, mask),\n )\n", "#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\n\nimport logbook\nimport numpy as np\nfrom collections import namedtuple\nfrom math import isnan\nfrom zipline.finance.performance.position import Position\nfrom zipline.finance.transaction import Transaction\n\ntry:\n # optional cython based OrderedDict\n from cyordereddict import OrderedDict\nexcept ImportError:\n from collections import OrderedDict\nfrom six import iteritems, itervalues\n\nimport zipline.protocol as zp\nfrom zipline.assets import (\n Equity, Future\n)\nfrom zipline.errors import PositionTrackerMissingAssetFinder\nfrom . position import positiondict\n\nlog = logbook.Logger('Performance')\n\n\nPositionStats = namedtuple('PositionStats',\n ['net_exposure',\n 'gross_value',\n 'gross_exposure',\n 'short_value',\n 'short_exposure',\n 'shorts_count',\n 'long_value',\n 'long_exposure',\n 'longs_count',\n 'net_value'])\n\n\ndef calc_position_values(amounts,\n last_sale_prices,\n value_multipliers):\n iter_amount_price_multiplier = zip(\n amounts,\n last_sale_prices,\n itervalues(value_multipliers),\n )\n return [\n price * amount * multiplier for\n price, amount, multiplier in iter_amount_price_multiplier\n ]\n\n\ndef calc_net(values):\n # Returns 0.0 if there are no values.\n return sum(values, np.float64())\n\n\ndef calc_position_exposures(amounts,\n last_sale_prices,\n exposure_multipliers):\n iter_amount_price_multiplier = zip(\n amounts,\n last_sale_prices,\n itervalues(exposure_multipliers),\n )\n return [\n price * amount * multiplier for\n price, amount, multiplier in iter_amount_price_multiplier\n ]\n\n\ndef calc_long_value(position_values):\n return sum(i for i in position_values if i > 0)\n\n\ndef calc_short_value(position_values):\n return sum(i for i in position_values if i < 0)\n\n\ndef calc_long_exposure(position_exposures):\n return sum(i for i in position_exposures if i > 0)\n\n\ndef calc_short_exposure(position_exposures):\n return sum(i for i in position_exposures if i < 0)\n\n\ndef calc_longs_count(position_exposures):\n return sum(1 for i in position_exposures if i > 0)\n\n\ndef calc_shorts_count(position_exposures):\n return sum(1 for i in position_exposures if i < 0)\n\n\ndef calc_gross_exposure(long_exposure, short_exposure):\n return long_exposure + abs(short_exposure)\n\n\ndef calc_gross_value(long_value, short_value):\n return long_value + abs(short_value)\n\n\nclass PositionTracker(object):\n\n def __init__(self, asset_finder, data_frequency):\n self.asset_finder = asset_finder\n\n # sid => 
position object\n self.positions = positiondict()\n # Arrays for quick calculations of positions value\n self._position_value_multipliers = OrderedDict()\n self._position_exposure_multipliers = OrderedDict()\n self._unpaid_dividends = {}\n self._unpaid_stock_dividends = {}\n self._positions_store = zp.Positions()\n\n self.data_frequency = data_frequency\n\n def _update_asset(self, sid):\n try:\n self._position_value_multipliers[sid]\n self._position_exposure_multipliers[sid]\n except KeyError:\n # Check if there is an AssetFinder\n if self.asset_finder is None:\n raise PositionTrackerMissingAssetFinder()\n\n # Collect the value multipliers from applicable sids\n asset = self.asset_finder.retrieve_asset(sid)\n if isinstance(asset, Equity):\n self._position_value_multipliers[sid] = 1\n self._position_exposure_multipliers[sid] = 1\n if isinstance(asset, Future):\n self._position_value_multipliers[sid] = 0\n self._position_exposure_multipliers[sid] = asset.multiplier\n\n def update_positions(self, positions):\n # update positions in batch\n self.positions.update(positions)\n for sid, pos in iteritems(positions):\n self._update_asset(sid)\n\n def update_position(self, sid, amount=None, last_sale_price=None,\n last_sale_date=None, cost_basis=None):\n if sid not in self.positions:\n position = Position(sid)\n self.positions[sid] = position\n else:\n position = self.positions[sid]\n\n if amount is not None:\n position.amount = amount\n self._update_asset(sid=sid)\n if last_sale_price is not None:\n position.last_sale_price = last_sale_price\n if last_sale_date is not None:\n position.last_sale_date = last_sale_date\n if cost_basis is not None:\n position.cost_basis = cost_basis\n\n def execute_transaction(self, txn):\n # Update Position\n # ----------------\n sid = txn.sid\n\n if sid not in self.positions:\n position = Position(sid)\n self.positions[sid] = position\n else:\n position = self.positions[sid]\n\n position.update(txn)\n\n if position.amount == 0:\n # if this position now has 0 shares, remove it from our internal\n # bookkeeping.\n del self.positions[sid]\n\n try:\n # if this position exists in our user-facing dictionary,\n # remove it as well.\n del self._positions_store[sid]\n except KeyError:\n pass\n\n self._update_asset(sid)\n\n def handle_commission(self, sid, cost):\n # Adjust the cost basis of the stock if we own it\n if sid in self.positions:\n self.positions[sid].adjust_commission_cost_basis(sid, cost)\n\n def handle_splits(self, splits):\n \"\"\"\n Processes a list of splits by modifying any positions as needed.\n\n Parameters\n ----------\n splits: list\n A list of splits. Each split is a tuple of (sid, ratio).\n\n Returns\n -------\n int: The leftover cash from fractional sahres after modifying each\n position.\n \"\"\"\n total_leftover_cash = 0\n\n for split in splits:\n sid = split[0]\n if sid in self.positions:\n # Make the position object handle the split. 
It returns the\n # leftover cash from a fractional share, if there is any.\n position = self.positions[sid]\n leftover_cash = position.handle_split(sid, split[1])\n self._update_asset(split[0])\n total_leftover_cash += leftover_cash\n\n return total_leftover_cash\n\n def earn_dividends(self, dividends, stock_dividends):\n \"\"\"\n Given a list of dividends whose ex_dates are all the next trading day,\n calculate and store the cash and/or stock payments to be paid on each\n dividend's pay date.\n\n Parameters\n ----------\n dividends: iterable of (asset, amount, pay_date) namedtuples\n\n stock_dividends: iterable of (asset, payment_asset, ratio, pay_date)\n namedtuples.\n \"\"\"\n for dividend in dividends:\n # Store the earned dividends so that they can be paid on the\n # dividends' pay_dates.\n div_owed = self.positions[dividend.asset].earn_dividend(dividend)\n try:\n self._unpaid_dividends[dividend.pay_date].append(div_owed)\n except KeyError:\n self._unpaid_dividends[dividend.pay_date] = [div_owed]\n\n for stock_dividend in stock_dividends:\n div_owed = \\\n self.positions[stock_dividend.asset].earn_stock_dividend(\n stock_dividend)\n try:\n self._unpaid_stock_dividends[stock_dividend.pay_date].\\\n append(div_owed)\n except KeyError:\n self._unpaid_stock_dividends[stock_dividend.pay_date] = \\\n [div_owed]\n\n def pay_dividends(self, next_trading_day):\n \"\"\"\n Returns a cash payment based on the dividends that should be paid out\n according to the accumulated bookkeeping of earned, unpaid, and stock\n dividends.\n \"\"\"\n net_cash_payment = 0.0\n\n try:\n payments = self._unpaid_dividends[next_trading_day]\n # Mark these dividends as paid by dropping them from our unpaid\n del self._unpaid_dividends[next_trading_day]\n except KeyError:\n payments = []\n\n # representing the fact that we're required to reimburse the owner of\n # the stock for any dividends paid while borrowing.\n for payment in payments:\n net_cash_payment += payment['amount']\n\n # Add stock for any stock dividends paid. Again, the values here may\n # be negative in the case of short positions.\n try:\n stock_payments = self._unpaid_stock_dividends[next_trading_day]\n except:\n stock_payments = []\n\n for stock_payment in stock_payments:\n payment_asset = stock_payment['payment_asset']\n share_count = stock_payment['share_count']\n # note we create a Position for stock dividend if we don't\n # already own the asset\n if payment_asset in self.positions:\n position = self.positions[payment_asset]\n else:\n position = self.positions[payment_asset] = \\\n Position(payment_asset)\n\n position.amount += share_count\n self._update_asset(payment_asset)\n\n return net_cash_payment\n\n def maybe_create_close_position_transaction(self, asset, dt, data_portal):\n if not self.positions.get(asset):\n return None\n\n amount = self.positions.get(asset).amount\n price = data_portal.get_spot_value(\n asset, 'price', dt, self.data_frequency)\n\n # Get the last traded price if price is no longer available\n if isnan(price):\n price = self.positions.get(asset).last_sale_price\n\n txn = Transaction(\n sid=asset,\n amount=(-1 * amount),\n dt=dt,\n price=price,\n commission=0,\n order_id=None,\n )\n return txn\n\n def get_positions(self):\n\n positions = self._positions_store\n\n for sid, pos in iteritems(self.positions):\n\n if pos.amount == 0:\n # Clear out the position if it has become empty since the last\n # time get_positions was called. 
Catching the KeyError is\n # faster than checking `if sid in positions`, and this can be\n # potentially called in a tight inner loop.\n try:\n del positions[sid]\n except KeyError:\n pass\n continue\n\n position = zp.Position(sid)\n position.amount = pos.amount\n position.cost_basis = pos.cost_basis\n position.last_sale_price = pos.last_sale_price\n position.last_sale_date = pos.last_sale_date\n\n # Adds the new position if we didn't have one before, or overwrite\n # one we have currently\n positions[sid] = position\n\n return positions\n\n def get_positions_list(self):\n positions = []\n for sid, pos in iteritems(self.positions):\n if pos.amount != 0:\n positions.append(pos.to_dict())\n return positions\n\n def sync_last_sale_prices(self, dt, handle_non_market_minutes,\n data_portal):\n if not handle_non_market_minutes:\n for asset, position in iteritems(self.positions):\n last_sale_price = data_portal.get_spot_value(\n asset, 'price', dt, self.data_frequency\n )\n\n if not np.isnan(last_sale_price):\n position.last_sale_price = last_sale_price\n else:\n for asset, position in iteritems(self.positions):\n last_sale_price = data_portal.get_adjusted_value(\n asset,\n 'price',\n data_portal.trading_calendar.previous_minute(dt),\n dt,\n self.data_frequency\n )\n\n if not np.isnan(last_sale_price):\n position.last_sale_price = last_sale_price\n\n def stats(self):\n amounts = []\n last_sale_prices = []\n for pos in itervalues(self.positions):\n amounts.append(pos.amount)\n last_sale_prices.append(pos.last_sale_price)\n\n position_values = calc_position_values(\n amounts,\n last_sale_prices,\n self._position_value_multipliers\n )\n\n position_exposures = calc_position_exposures(\n amounts,\n last_sale_prices,\n self._position_exposure_multipliers\n )\n\n long_value = calc_long_value(position_values)\n short_value = calc_short_value(position_values)\n gross_value = calc_gross_value(long_value, short_value)\n long_exposure = calc_long_exposure(position_exposures)\n short_exposure = calc_short_exposure(position_exposures)\n gross_exposure = calc_gross_exposure(long_exposure, short_exposure)\n net_exposure = calc_net(position_exposures)\n longs_count = calc_longs_count(position_exposures)\n shorts_count = calc_shorts_count(position_exposures)\n net_value = calc_net(position_values)\n\n return PositionStats(\n long_value=long_value,\n gross_value=gross_value,\n short_value=short_value,\n long_exposure=long_exposure,\n short_exposure=short_exposure,\n gross_exposure=gross_exposure,\n net_exposure=net_exposure,\n longs_count=longs_count,\n shorts_count=shorts_count,\n net_value=net_value\n )\n", "import os\n\nimport numpy as np\nimport pandas as pd\nfrom pandas_datareader.data import DataReader\nimport requests\n\nfrom zipline.utils.cli import maybe_show_progress\nfrom .core import register\n\n\ndef _cachpath(symbol, type_):\n return '-'.join((symbol.replace(os.path.sep, '_'), type_))\n\n\ndef yahoo_equities(symbols, start=None, end=None):\n \"\"\"Create a data bundle ingest function from a set of symbols loaded from\n yahoo.\n\n Parameters\n ----------\n symbols : iterable[str]\n The ticker symbols to load data for.\n start : datetime, optional\n The start date to query for. By default this pulls the full history\n for the calendar.\n end : datetime, optional\n The end date to query for. 
By default this pulls the full history\n for the calendar.\n\n Returns\n -------\n ingest : callable\n The bundle ingest function for the given set of symbols.\n\n Examples\n --------\n This code should be added to ~/.zipline/extension.py\n\n .. code-block:: python\n\n from zipline.data.bundles import yahoo_equities, register\n\n symbols = (\n 'AAPL',\n 'IBM',\n 'MSFT',\n )\n register('my_bundle', yahoo_equities(symbols))\n\n Notes\n -----\n The sids for each symbol will be the index into the symbols sequence.\n \"\"\"\n # strict this in memory so that we can reiterate over it\n symbols = tuple(symbols)\n\n def ingest(environ,\n asset_db_writer,\n minute_bar_writer, # unused\n daily_bar_writer,\n adjustment_writer,\n calendar,\n start_session,\n end_session,\n cache,\n show_progress,\n output_dir,\n # pass these as defaults to make them 'nonlocal' in py2\n start=start,\n end=end):\n if start is None:\n start = start_session\n if end is None:\n end = None\n\n metadata = pd.DataFrame(np.empty(len(symbols), dtype=[\n ('start_date', 'datetime64[ns]'),\n ('end_date', 'datetime64[ns]'),\n ('auto_close_date', 'datetime64[ns]'),\n ('symbol', 'object'),\n ]))\n\n def _pricing_iter():\n sid = 0\n with maybe_show_progress(\n symbols,\n show_progress,\n label='Downloading Yahoo pricing data: ') as it, \\\n requests.Session() as session:\n for symbol in it:\n path = _cachpath(symbol, 'ohlcv')\n try:\n df = cache[path]\n except KeyError:\n df = cache[path] = DataReader(\n symbol,\n 'yahoo',\n start,\n end,\n session=session,\n ).sort_index()\n\n # the start date is the date of the first trade and\n # the end date is the date of the last trade\n start_date = df.index[0]\n end_date = df.index[-1]\n # The auto_close date is the day after the last trade.\n ac_date = end_date + pd.Timedelta(days=1)\n metadata.iloc[sid] = start_date, end_date, ac_date, symbol\n\n df.rename(\n columns={\n 'Open': 'open',\n 'High': 'high',\n 'Low': 'low',\n 'Close': 'close',\n 'Volume': 'volume',\n },\n inplace=True,\n )\n yield sid, df\n sid += 1\n\n daily_bar_writer.write(_pricing_iter(), show_progress=show_progress)\n\n symbol_map = pd.Series(metadata.symbol.index, metadata.symbol)\n\n # Hardcode the exchange to \"YAHOO\" for all assets and (elsewhere)\n # register \"YAHOO\" to resolve to the NYSE calendar, because these are\n # all equities and thus can use the NYSE calendar.\n metadata['exchange'] = \"YAHOO\"\n asset_db_writer.write(equities=metadata)\n\n adjustments = []\n with maybe_show_progress(\n symbols,\n show_progress,\n label='Downloading Yahoo adjustment data: ') as it, \\\n requests.Session() as session:\n for symbol in it:\n path = _cachpath(symbol, 'adjustment')\n try:\n df = cache[path]\n except KeyError:\n df = cache[path] = DataReader(\n symbol,\n 'yahoo-actions',\n start,\n end,\n session=session,\n ).sort_index()\n\n df['sid'] = symbol_map[symbol]\n adjustments.append(df)\n\n adj_df = pd.concat(adjustments)\n adj_df.index.name = 'date'\n adj_df.reset_index(inplace=True)\n\n splits = adj_df[adj_df.action == 'SPLIT']\n splits = splits.rename(\n columns={'value': 'ratio', 'date': 'effective_date'},\n )\n splits.drop('action', axis=1, inplace=True)\n\n dividends = adj_df[adj_df.action == 'DIVIDEND']\n dividends = dividends.rename(\n columns={'value': 'amount', 'date': 'ex_date'},\n )\n dividends.drop('action', axis=1, inplace=True)\n # we do not have this data in the yahoo dataset\n dividends['record_date'] = pd.NaT\n dividends['declared_date'] = pd.NaT\n dividends['pay_date'] = pd.NaT\n\n 
adjustment_writer.write(splits=splits, dividends=dividends)\n\n return ingest\n\n\n# bundle used when creating test data\nregister(\n '.test',\n yahoo_equities(\n (\n 'AMD',\n 'CERN',\n 'COST',\n 'DELL',\n 'GPS',\n 'INTC',\n 'MMM',\n 'AAPL',\n 'MSFT',\n ),\n pd.Timestamp('2004-01-02', tz='utc'),\n pd.Timestamp('2015-01-01', tz='utc'),\n ),\n)\n", "#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport six\nfrom toolz import curry\nfrom toolz.curried.operator import add as prepend\n\nCOLUMN_NAMES = {\n \"V39063\": '1month',\n \"V39065\": '3month',\n \"V39066\": '6month',\n \"V39067\": '1year',\n \"V39051\": '2year',\n \"V39052\": '3year',\n \"V39053\": '5year',\n \"V39054\": '7year',\n \"V39055\": '10year',\n # Bank of Canada refers to this as 'Long' Rate, approximately 30 years.\n \"V39056\": '30year',\n}\nBILL_IDS = ['V39063', 'V39065', 'V39066', 'V39067']\nBOND_IDS = ['V39051', 'V39052', 'V39053', 'V39054', 'V39055', 'V39056']\n\n\n@curry\ndef _format_url(instrument_type,\n instrument_ids,\n start_date,\n end_date,\n earliest_allowed_date):\n \"\"\"\n Format a URL for loading data from Bank of Canada.\n \"\"\"\n return (\n \"http://www.bankofcanada.ca/stats/results/csv\"\n \"?lP=lookup_{instrument_type}_yields.php\"\n \"&sR={restrict}\"\n \"&se={instrument_ids}\"\n \"&dF={start}\"\n \"&dT={end}\".format(\n instrument_type=instrument_type,\n instrument_ids='-'.join(map(prepend(\"L_\"), instrument_ids)),\n restrict=earliest_allowed_date.strftime(\"%Y-%m-%d\"),\n start=start_date.strftime(\"%Y-%m-%d\"),\n end=end_date.strftime(\"%Y-%m-%d\"),\n )\n )\n\n\nformat_bill_url = _format_url('tbill', BILL_IDS)\nformat_bond_url = _format_url('bond', BOND_IDS)\n\n\ndef load_frame(url, skiprows):\n \"\"\"\n Load a DataFrame of data from a Bank of Canada site.\n \"\"\"\n return pd.read_csv(\n url,\n skiprows=skiprows,\n skipinitialspace=True,\n na_values=[\"Bank holiday\", \"Not available\"],\n parse_dates=[\"Date\"],\n index_col=\"Date\",\n ).dropna(how='all') \\\n .tz_localize('UTC') \\\n .rename(columns=COLUMN_NAMES)\n\n\ndef check_known_inconsistencies(bill_data, bond_data):\n \"\"\"\n There are a couple quirks in the data provided by Bank of Canada.\n Check that no new quirks have been introduced in the latest download.\n \"\"\"\n inconsistent_dates = bill_data.index.sym_diff(bond_data.index)\n known_inconsistencies = [\n # bill_data has an entry for 2010-02-15, which bond_data doesn't.\n # bond_data has an entry for 2006-09-04, which bill_data doesn't.\n # Both of these dates are bank holidays (Flag Day and Labor Day,\n # respectively).\n pd.Timestamp('2006-09-04', tz='UTC'),\n pd.Timestamp('2010-02-15', tz='UTC'),\n # 2013-07-25 comes back as \"Not available\" from the bills endpoint.\n # This date doesn't seem to be a bank holiday, but the previous\n # calendar implementation dropped this entry, so we drop it as well.\n # If someone cares deeply about the integrity of the Canadian trading\n # calendar, they may want to consider 
forward-filling here rather than\n # dropping the row.\n pd.Timestamp('2013-07-25', tz='UTC'),\n ]\n unexpected_inconsistences = inconsistent_dates.drop(known_inconsistencies)\n if len(unexpected_inconsistences):\n in_bills = bill_data.index.difference(bond_data.index).difference(\n known_inconsistencies\n )\n in_bonds = bond_data.index.difference(bill_data.index).difference(\n known_inconsistencies\n )\n raise ValueError(\n \"Inconsistent dates for Canadian treasury bills vs bonds. \\n\"\n \"Dates with bills but not bonds: {in_bills}.\\n\"\n \"Dates with bonds but not bills: {in_bonds}.\".format(\n in_bills=in_bills,\n in_bonds=in_bonds,\n )\n )\n\n\ndef earliest_possible_date():\n \"\"\"\n The earliest date for which we can load data from this module.\n \"\"\"\n today = pd.Timestamp('now', tz='UTC').normalize()\n # Bank of Canada only has the last 10 years of data at any given time.\n return today.replace(year=today.year - 10)\n\n\ndef get_treasury_data(start_date, end_date):\n bill_data = load_frame(\n format_bill_url(start_date, end_date, start_date),\n # We skip fewer rows here because we query for fewer bill fields,\n # which makes the header smaller.\n skiprows=18,\n )\n bond_data = load_frame(\n format_bond_url(start_date, end_date, start_date),\n skiprows=22,\n )\n check_known_inconsistencies(bill_data, bond_data)\n\n # dropna('any') removes the rows for which we only had data for one of\n # bills/bonds.\n out = pd.concat([bond_data, bill_data], axis=1).dropna(how='any')\n assert set(out.columns) == set(six.itervalues(COLUMN_NAMES))\n\n # Multiply by 0.01 to convert from percentages to expected output format.\n return out * 0.01\n" ]
[ [ "numpy.asarray", "pandas.DataFrame" ], [ "numpy.isnan", "numpy.float64" ], [ "pandas.concat", "pandas.Timestamp", "pandas.Series", "pandas.Timedelta" ], [ "pandas.concat", "pandas.Timestamp", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
gmtpritam/stolgo
[ "8ced9b4c3ea2b0a89c929c2d2765ebc8593d00b2" ]
[ "lib/stolgo/nasdaq.py" ]
[ "import requests\nimport io\n\nfrom datetime import timedelta\nimport pandas as pd\n\nfrom stolgo.helper import get_date_range,get_formated_dateframe\nfrom stolgo.request import RequestUrl,Curl\n\n#default params for url connection\nDEFAULT_TIMEOUT = 5 # seconds\nMAX_RETRIES = 2\n#default periods\nDEFAULT_DAYS = 250\n\nclass NasdaqUrls:\n def __init__(self):\n self.STK_DATA_PRE_URL = r\"https://www.nasdaq.com/api/v1/historical/\"\n self.date_formats = {\"stock_data\":\"%Y-%m-%d\"}\n\n #historical data header\n self.header = {\n \"authority\":\"www.nasdaq.com\",\n \"method\":\"GET\",\n \"path\":\"/market-activity/stocks/aapl/historical\",\n \"scheme\":\"https\",\n \"accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"accept-encoding\":\"gzip, deflate, br\",\n \"accept-language\":\"en-GB,en-US;q=0.9,en;q=0.8\",\n \"cache-control\":\"max-age=0\",\n \"referer\":\"https://www.nasdaq.com/market-activity/quotes/historical\",\n \"sec-fetch-dest\":\"document\",\n \"sec-fetch-mode\":\"navigate\",\n \"sec-fetch-site\":\"same-origin\",\n \"sec-fetch-user\":\"?1\",\n \"upgrade-insecure-requests\":\"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36\"\n }\n\n def get_data_url(self,symbol,start,end):\n try:\n start = start.strftime(self.date_formats[\"stock_data\"])\n end = end.strftime(self.date_formats[\"stock_data\"])\n url = self.STK_DATA_PRE_URL + symbol + r\"/stocks/\" + start + r\"/\" + end\n return url\n except Exception as err:\n raise Exception(\"Error occurred in URL constructions \", str(err))\n\nclass Nasdaq:\n \"\"\"Nasdaq class to get data from nasdaq\n \"\"\"\n def __init__(self,timeout=DEFAULT_TIMEOUT,max_retries=MAX_RETRIES,cloud_mode=False):\n if cloud_mode:\n self.requests = Curl(timeout,max_retries)\n else:\n self.requests = RequestUrl(timeout,max_retries)\n self.nasdaq_url = NasdaqUrls()\n\n def __get_data_adjusted(self,dfs,symbol,start=None,end=None,periods=None):\n if periods and (dfs.shape[0] < periods):\n new_periods = periods - dfs.shape[0]\n try:\n s_from = e_till = None\n #if only start, find till today\n if start and (not end):\n s_from = dfs.index[0] + timedelta(1)\n e_till = None\n #if not start, can go to past\n elif((end and (not start)) or periods):\n s_from = None\n e_till = dfs.index[-1] - timedelta(1)\n except IndexError as err:\n raise Exception(\"Nasdaq Access error.\")\n except Exception as exc:\n raise Exception(\"Nasdaq data error: \",str(exc))\n try:\n dfs_new = self.get_data(symbol,start = s_from,end = e_till,periods = new_periods)\n dfs = self.__join_dfs(dfs,dfs_new).sort_index(ascending=False)\n except Exception as exc:\n #Small part of data may not be available\n pass\n return dfs\n\n def __join_dfs(self,join,joiner):\n \"\"\"will append joiner to join for oi_dfs\n\n :param join: df which will be appended\n :type join: pandas.DataFrame\n :param joiner: df which we want to append\n :type joiner: pandas.DataFrame\n :return: merged data frame\n :rtype: pandas.DataFrame\n \"\"\"\n return join.append(joiner)\n\n def get_data(self,symbol,start=None,end=None,periods=None,dayfirst=False):\n \"\"\"get_data API to fetch data from nasdaq\n\n :param symbol: stock symbol\n :type symbol: string\n :param start: start date, defaults to None\n :type start: string, optional\n :param end: end date, defaults to None\n :type end: string, optional\n :param periods: number of days, defaults to None\n 
:type periods: integer, optional\n :param dayfirst: True if date format is european style DD/MM/YYYY, defaults to False\n :type dayfirst: bool, optional\n :raises ValueError: for invalid inputs\n :raises Exception: incase if no data found\n :return: stock data\n :rtype: pandas.DataFrame\n \"\"\"\n try:\n #Step1: get the date range\n s_from,e_till = get_date_range(start=start,end=end,periods=periods,dayfirst=dayfirst)\n\n if s_from > e_till:\n raise ValueError(\"End should grater than start.\")\n\n url = self.nasdaq_url.get_data_url(symbol=symbol,start=s_from,end=e_till)\n res = self.requests.get(url,headers=self.nasdaq_url.header)\n\n try:\n dfs = pd.read_csv(io.StringIO(res.content.decode('utf-8')))\n except Exception as err:\n #increase data range, nasdaq not returning for small set\n if e_till == get_formated_dateframe():\n raise Exception(\"Nasdaq not retruning data for this date range.\\\n Please, retry with other date ranges\")\n e_till = get_formated_dateframe()\n if (e_till - s_from).days < DEFAULT_DAYS:\n s_from = e_till - DEFAULT_DAYS\n dfs = self.get_data(symbol,start=s_from,end=e_till)\n\n dfs.set_index(\"Date\",inplace=True)\n #convert to datetime\n dfs.index = pd.to_datetime(dfs.index)\n dfs = self.__get_data_adjusted(dfs,symbol,start=start,end=end,periods=periods)\n return dfs\n except Exception as err:\n raise Exception(\"Error occurred while getting data :\", str(err))" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
guoyang328/pytorch-dann
[ "1971cf1a7b9ecadc17932a8ecb3f0c34609751a3", "1971cf1a7b9ecadc17932a8ecb3f0c34609751a3", "21ad0622ce948002fdc90d5e8a2b498c94429218" ]
[ "datasets/mnist.py", "datasets/usps_weight.py", "core/test_weight.py" ]
[ "\"\"\"Dataset setting and data loader for MNIST.\"\"\"\n\n\nimport torch\nfrom torchvision import datasets, transforms\nimport os\n\ndef get_mnist(dataset_root, batch_size, train):\n \"\"\"Get MNIST datasets loader.\"\"\"\n # image pre-processing\n pre_process = transforms.Compose([transforms.Resize(28), # different img size settings for mnist(28), usps(16) and svhn(32).\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.1307], # Mean of MNIST train data\n std=[0.3015] # std of MNIST train data\n )])\n\n # datasets and data loader\n mnist_dataset = datasets.MNIST(root=os.path.join(dataset_root),\n train=train,\n transform=pre_process,\n download=True)\n\n\n mnist_data_loader = torch.utils.data.DataLoader(\n dataset=mnist_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=8)\n\n return mnist_data_loader", "\"\"\"Dataset setting and data loader for USPS.\"\"\"\n\n\nimport torch\nfrom torchvision import datasets, transforms\nimport os\n\nimport torch\nfrom torchvision import datasets, transforms\nimport os\n\nclass USPS(datasets.USPS):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __getitem__(self, index):\n data, target = super().__getitem__(index)\n return data, target, index\n \ndef get_usps_weight(dataset_root, batch_size, train, subsample_size, weights):\n \"\"\"Get USPS datasets loader.\"\"\"\n # image pre-processing\n pre_process = transforms.Compose([transforms.Resize(28),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.2473], # Mean for USPS train data\n std=[0.2665] # std for USPS train data\n )])\n\n # datasets and data loader\n usps_dataset = USPS(root=os.path.join(dataset_root),\n train=train,\n transform=pre_process,\n download=True)\n num_sample = len(usps_dataset)\n \n if len(weights) == 10: \n sample_weight = torch.tensor([weights[label] for label in usps_dataset.targets])\n subsize = len(sample_weight) \n if subsample_size != 0: \n subsize = subsample_size\n print('usps')\n print(\"subsample size:{}\".format(subsample_size))\n print(\"subsize {}\".format(subsize))\n usps_data_loader = torch.utils.data.DataLoader(\n dataset=usps_dataset,\n batch_size=batch_size,\n sampler=torch.utils.data.sampler.WeightedRandomSampler(\n sample_weight, subsize),\n drop_last=True,\n num_workers=8)\n else: \n usps_data_loader = torch.utils.data.DataLoader(\n dataset=usps_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=8)\n return usps_data_loader, num_sample\n", "import torch.utils.data\nimport torch.nn as nn\n\ndef test_weight(model, data_loader, device, flag):\n \"\"\"Evaluate model for dataset.\"\"\"\n # set eval state for Dropout and BN layers\n model.eval()\n\n # init loss and accuracy\n loss_ = 0.0\n acc_ = 0.0\n acc_domain_ = 0.0\n n_total = 0\n\n # set loss function\n criterion = nn.CrossEntropyLoss()\n # evaluate network\n for (images, labels, _) in data_loader:\n images = images.to(device)\n labels = labels.to(device) #labels = labels.squeeze(1)\n size = len(labels)\n if flag == 'target':\n labels_domain = torch.ones(size).long().to(device)\n else:\n labels_domain = torch.zeros(size).long().to(device)\n\n preds, domain = model(images, alpha=0)\n\n loss_ += criterion(preds, labels).item()\n\n pred_cls = preds.data.max(1)[1]\n pred_domain = domain.data.max(1)[1]\n acc_ += pred_cls.eq(labels.data).sum().item()\n acc_domain_ += pred_domain.eq(labels_domain.data).sum().item()\n n_total += size\n\n loss = loss_ / n_total\n acc = acc_ / n_total\n acc_domain = 
acc_domain_ / n_total\n\n print(\"Avg Loss = {:.6f}, Avg Accuracy = {:.2%}, {}/{}, Avg Domain Accuracy = {:2%}\".format(loss, acc, acc_, n_total, acc_domain))\n\n return loss, acc, acc_domain\n" ]
[ [ "torch.utils.data.DataLoader" ], [ "torch.utils.data.sampler.WeightedRandomSampler", "torch.utils.data.DataLoader", "torch.tensor" ], [ "torch.nn.CrossEntropyLoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
heiseApple/learn2learn
[ "df3c3291b4681440a80a69a7815090a4bd3cd661" ]
[ "examples/text/news_topic_classification.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport random\n\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\n\nimport learn2learn as l2l\n\n\nclass Net(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, num_classes, input_dim=768, inner_dim=200, pooler_dropout=0.3):\n super().__init__()\n self.dense = nn.Linear(input_dim, inner_dim)\n self.activation_fn = nn.ReLU()\n self.dropout = nn.Dropout(p=pooler_dropout)\n self.out_proj = nn.Linear(inner_dim, num_classes)\n\n def forward(self, x, **kwargs):\n x = self.dropout(x)\n x = self.dense(x)\n x = self.activation_fn(x)\n x = self.dropout(x)\n x = F.log_softmax(self.out_proj(x), dim=1)\n return x\n\n\ndef accuracy(predictions, targets):\n predictions = predictions.argmax(dim=1)\n acc = (predictions == targets).sum().float()\n acc /= len(targets)\n return acc.item()\n\n\ndef collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):\n \"\"\"Convert a list of 1d tensors into a padded 2d tensor.\"\"\"\n size = max(v.size(0) for v in values)\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res\n\nclass _BatchedDataset(torch.utils.data.Dataset):\n def __init__(self, batched):\n self.sents = [s for s in batched[0]]\n self.ys = [y for y in batched[1]]\n \n def __len__(self):\n return len(self.ys)\n \n def __getitem__(self, idx):\n return (self.sents[idx], self.ys[idx])\n\n\ndef compute_loss(task, roberta, device, learner, loss_func, batch=15):\n loss = 0.0\n acc = 0.0\n for i, (x, y) in enumerate(torch.utils.data.DataLoader(\n _BatchedDataset(task), batch_size=batch, shuffle=True, num_workers=0)):\n # RoBERTa ENCODING\n x = collate_tokens([roberta.encode(sent) for sent in x], pad_idx=1)\n with torch.no_grad():\n x = roberta.extract_features(x)\n x = x[:, 0, :]\n\n # Moving to device\n x, y = x.to(device), y.view(-1).to(device)\n\n output = learner(x)\n curr_loss = loss_func(output, y)\n acc += accuracy(output, y)\n loss += curr_loss / len(task)\n loss /= len(task)\n return loss, acc\n\n\ndef main(lr=0.005, maml_lr=0.01, iterations=1000, ways=5, shots=1, tps=32, fas=5, device=torch.device(\"cpu\"),\n download_location=\"/tmp/text\"):\n dataset = l2l.text.datasets.NewsClassification(root=download_location, download=True)\n dataset = l2l.data.MetaDataset(dataset)\n\n classes = list(range(len(dataset.labels))) # 41 classes\n random.shuffle(classes)\n\n train_dataset, validation_dataset, test_dataset = dataset, dataset, dataset\n\n train_gen = l2l.data.TaskDataset(\n train_dataset, num_tasks=20000, \n task_transforms=[\n l2l.data.transforms.FusedNWaysKShots(\n train_dataset, n=ways, k=shots, filter_labels=classes[:20]),\n l2l.data.transforms.LoadData(train_dataset),\n l2l.data.transforms.RemapLabels(train_dataset)],)\n\n validation_gen = l2l.data.TaskDataset(\n validation_dataset, num_tasks=20000, \n task_transforms=[\n l2l.data.transforms.FusedNWaysKShots(\n validation_dataset, n=ways, k=shots, filter_labels=classes[20:30]),\n l2l.data.transforms.LoadData(validation_dataset),\n l2l.data.transforms.RemapLabels(validation_dataset)],)\n\n test_gen = l2l.data.TaskDataset(\n test_dataset, num_tasks=20000, \n 
task_transforms=[\n l2l.data.transforms.FusedNWaysKShots(\n test_dataset, n=ways, k=shots, filter_labels=classes[30:]),\n l2l.data.transforms.LoadData(test_dataset),\n l2l.data.transforms.RemapLabels(test_dataset)],)\n\n torch.hub.set_dir(download_location)\n roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')\n roberta.eval()\n roberta.to(device)\n model = Net(num_classes=ways)\n model.to(device)\n meta_model = l2l.algorithms.MAML(model, lr=maml_lr)\n opt = optim.Adam(meta_model.parameters(), lr=lr)\n loss_func = nn.NLLLoss(reduction=\"sum\")\n\n tqdm_bar = tqdm(range(iterations))\n\n accs = []\n for _ in tqdm_bar:\n iteration_error = 0.0\n iteration_acc = 0.0\n for _ in range(tps):\n learner = meta_model.clone()\n train_task, valid_task = train_gen.sample(), validation_gen.sample()\n\n # Fast Adaptation\n for _ in range(fas):\n train_error, _ = compute_loss(train_task, roberta, device, learner, loss_func, batch=shots * ways)\n learner.adapt(train_error)\n\n # Compute validation loss\n valid_error, valid_acc = compute_loss(valid_task, roberta, device, learner, loss_func,\n batch=shots * ways)\n iteration_error += valid_error\n iteration_acc += valid_acc\n\n iteration_error /= tps\n iteration_acc /= tps\n tqdm_bar.set_description(\"Loss : {:.3f} Acc : {:.3f}\".format(iteration_error.item(), iteration_acc))\n accs.append(iteration_acc)\n # Take the meta-learning step\n opt.zero_grad()\n iteration_error.backward()\n opt.step()\n print (f'first and best validation accuracy: {accs[0]:.4f}, {max(accs):.4f}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn2Learn Text Classification Example')\n\n parser.add_argument('--ways', type=int, default=5, metavar='N',\n help='number of ways (default: 5)')\n parser.add_argument('--shots', type=int, default=1, metavar='N',\n help='number of shots (default: 1)')\n parser.add_argument('-tps', '--tasks-per-step', type=int, default=32, metavar='N',\n help='tasks per step (default: 32)')\n parser.add_argument('-fas', '--fast-adaption-steps', type=int, default=5, metavar='N',\n help='steps per fast adaption (default: 5)')\n\n parser.add_argument('--iterations', type=int, default=1000, metavar='N',\n help='number of iterations (default: 1000)')\n\n parser.add_argument('--lr', type=float, default=0.005, metavar='LR',\n help='learning rate (default: 0.005)')\n parser.add_argument('--maml-lr', type=float, default=0.01, metavar='LR',\n help='learning rate for MAML (default: 0.01)')\n\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n parser.add_argument('--download-location', type=str, default=\"/tmp/text\", metavar='S',\n help='download location for train data and roberta(default : /tmp/text')\n\n args = parser.parse_args()\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n main(lr=args.lr, maml_lr=args.maml_lr, iterations=args.iterations, ways=args.ways, shots=args.shots,\n tps=args.tasks_per_step, fas=args.fast_adaption_steps, device=device,\n download_location=args.download_location)\n" ]
[ [ "torch.nn.NLLLoss", "torch.nn.Dropout", "torch.manual_seed", "torch.nn.ReLU", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.hub.set_dir", "torch.hub.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
taqtiqa-mark/pymarket
[ "2f8db92010d5f9407a72941788500351e92cbe81" ]
[ "pymarket/bids/demand_curves.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom typing import Tuple, Union\nfrom pymarket.bids import BidManager\n\n\ndef demand_curve_from_bids(\n bids: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Creates a demand curve from a set of buying bids.\n It is the inverse cumulative distribution of quantity\n as a function of price.\n\n Parameters\n ----------\n bids\n Collection of all the bids in the market. The algorithm\n filters only the buying bids.\n\n Returns\n ---------\n demand_curve: np.ndarray\n Stepwise constant demand curve represented as a collection\n of the N rightmost points of each interval (N-1 bids). It is stored\n as a (N, 2) matrix where the first column is the x-coordinate\n and the second column is the y-coordinate.\n An extra point is a))dded with x coordinate at infinity and\n price at 0 to represent the end of the curve.\n\n index : np.ndarray\n The order of the identifier of each bid in the demand\n curve.\n\n Examples\n ---------\n\n A minimal example, selling bid is ignored:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 1, 0, buying=True)\n 0\n >>> bm.add_bid(1, 1, 1, buying=False)\n 1\n >>> dc, index = pm.demand_curve_from_bids(bm.get_df())\n >>> dc\n array([[ 1., 1.],\n [inf, 0.]])\n >>> index\n array([0])\n\n A larger example with reordering of bids:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 1, 0, buying=True)\n 0\n >>> bm.add_bid(1, 1, 1, buying=False)\n 1\n >>> bm.add_bid(3, 0.5, 2, buying=True)\n 2\n >>> bm.add_bid(2.3, 0.1, 3, buying=True)\n 3\n >>> dc, index = pm.demand_curve_from_bids(bm.get_df())\n >>> dc\n array([[1. , 1. ],\n [4. , 0.5],\n [6.3, 0.1],\n [inf, 0. ]])\n >>> index\n array([0, 2, 3])\n\n \"\"\"\n buying = bids[bids.buying]\n buying = buying.sort_values('price', ascending=False)\n buying['acum'] = buying.quantity.cumsum()\n demand_curve = buying[['acum', 'price']].values\n demand_curve = np.vstack([demand_curve, [np.inf, 0]])\n index = buying.index.values\n return demand_curve, index\n\n\ndef supply_curve_from_bids(\n bids: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Creates a supply curve from a set of selling bids.\n It is the cumulative distribution of quantity\n as a function of price.\n\n Parameters\n ----------\n bids\n Collection of all the bids in the market. The algorithm\n filters only the selling bids.\n\n Returns\n ---------\n supply_curve: np.ndarray\n Stepwise constant demand curve represented as a collection\n of the N rightmost points of each interval (N-1 bids). It is stored\n as a (N, 2) matrix where the first column is the x-coordinate\n and the second column is the y-coordinate.\n An extra point is added with x coordinate at infinity and\n price at infinity to represent the end of the curve.\n\n index : np.ndarray\n The order of the identifier of each bid in the supply\n curve.\n\n Examples\n ---------\n\n A minimal example, selling bid is ignored:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 3, 0, False)\n 0\n >>> bm.add_bid(2.1, 3, 3, True)\n 1\n >>> sc, index = pm.supply_curve_from_bids(bm.get_df())\n >>> sc\n array([[ 1., 3.],\n [inf, inf]])\n >>> index\n array([0])\n\n A larger example with reordering:\n\n >>> bm = pm.BidManager()\n >>> bm.add_bid(1, 3, 0, False)\n 0\n >>> bm.add_bid(2.1, 3, 3, True)\n 1\n >>> bm.add_bid(0.2, 1, 3, False)\n 2\n >>> bm.add_bid(1.7, 6, 4, False)\n 3\n >>> sc, index = pm.supply_curve_from_bids(bm.get_df())\n >>> sc\n array([[0.2, 1. ],\n [1.2, 3. ],\n [2.9, 6. 
],\n [inf, inf]])\n >>> index\n array([2, 0, 3])\n\n\n \"\"\"\n selling = bids[bids.buying == False]\n selling = selling.sort_values('price')\n selling['acum'] = selling.quantity.cumsum()\n supply_curve = selling[['acum', 'price']].values\n supply_curve = np.vstack([supply_curve, [np.inf, np.inf]])\n index = selling.index.values\n return supply_curve, index\n\n\ndef get_value_stepwise(x: float, f: np.ndarray) -> Union[float, None]:\n \"\"\"\n Returns the value of a stepwise constant\n function defined by the right extrems\n of its interval\n Functions are assumed to be defined\n in (0, inf).\n\n Parameters\n ----------\n x\n Value in which the function is to be\n evaluated\n f\n Stepwise function represented as a 2 column\n matrix. Each row is the rightmost extreme\n point of each constant interval. The first column\n contains the x coordinate and is sorted increasingly.\n f is assumed to be defined only in the interval\n :math: (0, \\infty)\n Returns\n --------\n float or None\n The image of x under f: `f(x)`. If `x` is negative,\n then None is returned instead. If x is outside\n the range of the function (greater than `f[-1, 0]`),\n then the method returns None.\n\n Examples\n ---------\n >>> f = np.array([\n ... [1, 1],\n ... [3, 4]])\n >>> [pm.get_value_stepwise(x, f)\n ... for x in [-1, 0, 0.5, 1, 2, 3, 4]]\n [None, 1, 1, 1, 4, 4, None]\n\n \"\"\"\n if x < 0:\n return None\n\n for step in f:\n if x <= step[0]:\n return step[1]\n\n\ndef intersect_stepwise(\n f: np.ndarray,\n g: np.ndarray,\n k: float=0.5\n ) -> Tuple[\n Union[float, None],\n Union[int, None],\n Union[int, None],\n float]:\n \"\"\"\n Finds the intersection of\n two stepwise constants functions\n where f is assumed to be bigger at 0\n than g.\n If no intersection is found, None is returned.\n\n Parameters\n ----------\n f\n Stepwise constant function represented as\n a 2 column matrix where each row is the rightmost\n point of the constat interval. The first column\n is sorted increasingly.\n Preconditions: f is non-increasing.\n\n g\n Stepwise constant function represented as\n a 2 column matrix where each row is the rightmost\n point of the constat interval. The first column\n is sorted increasingly.\n Preconditions: g is non-decreasing and\n `f[0, 0] > g[0, 0]`\n k\n If the intersection is empty or an interval,\n a convex combination of the y-values of f and g\n will be returned and k will be used to determine\n hte final value. `k=1` will be the value of g\n while `k=0` will be the value of f.\n\n Returns\n --------\n x_ast : float or None\n Axis coordinate of the intersection of both\n functions. If the intersection is empty,\n then it returns None.\n f_ast : int or None\n Index of the rightmost extreme\n of the interval of `f` involved in the\n intersection. If the intersection is\n empty, returns None\n g_ast : int or None\n Index of the rightmost extreme\n of the interval of `g` involved in the\n intersection. 
If the intersection is\n empty, returns None.\n v : float or None\n Ordinate of the intersection if it\n is uniquely identified, otherwise\n the k-convex combination of the\n y values of `f` and `g` in the last\n point when they were both defined.\n\n Examples\n ---------\n Simple intersection with diferent domains\n\n >>> f = np.array([[1, 3], [3, 1]])\n >>> g = np.array([[2,2]])\n >>> pm.intersect_stepwise(f, g)\n (1, 0, 0, 2)\n\n Empty intersection, returning the middle value\n\n >>> f = np.array([[1,3], [2, 2.5]])\n >>> g = np.array([[1,1], [2, 2]])\n >>> pm.intersect_stepwise(f, g)\n (None, None, None, 2.25)\n \"\"\"\n x_max = np.min([f.max(axis=0)[0], g.max(axis=0)[0]])\n xs = sorted([x for x in set(g[:, 0]).union(set(f[:, 0])) if x <= x_max])\n fext = [get_value_stepwise(x, f) for x in xs]\n gext = [get_value_stepwise(x, g) for x in xs]\n x_ast = None\n for i in range(len(xs) - 1):\n if (fext[i] > gext[i]) and (fext[i + 1] < gext[i + 1]):\n x_ast = xs[i]\n\n f_ast = np.argmax(f[:, 0] >= x_ast) if x_ast is not None else None\n g_ast = np.argmax(g[:, 0] >= x_ast) if x_ast is not None else None\n\n g_val = g[g_ast, 1] if g_ast is not None else get_value_stepwise(xs[-1], g)\n f_val = f[f_ast, 1] if f_ast is not None else get_value_stepwise(xs[-1], f)\n\n intersect_domain_both = x_ast in f[:, 0] and x_ast in g[:, 0]\n if not (intersect_domain_both) and (x_ast is not None):\n v = g_val if x_ast in f[:, 0] else f_val\n else:\n v = g_val * k + (1 - k) * f_val\n\n return x_ast, f_ast, g_ast, v\n" ]
[ [ "numpy.argmax", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ethanjli/liquid-handling-robotics
[ "999ab03c225b4c5382ab9fcac6a4988d0c232c67" ]
[ "lhrhost/robot/axes.py" ]
[ "\"\"\"Abstractions for the axes of a liquid-handling robot.\"\"\"\n\n# Standard imports\nimport logging\nfrom abc import abstractmethod\n\n# Local package imiports\nfrom lhrhost.protocol.linear_actuator import Receiver as LinearActuatorReceiver\nfrom lhrhost.util.containers import add_to_tree, get_from_tree\nfrom lhrhost.util.files import load_from_json, save_to_json\nfrom lhrhost.util.interfaces import InterfaceClass\n\n# External imports\nimport scipy.stats as stats\n\n# Logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\nclass RobotAxis(LinearActuatorReceiver, metaclass=InterfaceClass):\n \"\"\"High-level controller mixin interface for axes with physical position units.\"\"\"\n\n @property\n @abstractmethod\n def protocol(self):\n \"\"\"Return the associated linear actuator protocol.\"\"\"\n return None\n\n @abstractmethod\n def physical_to_sensor(self, physical_position):\n \"\"\"Convert a position in physical units to a unitless sensor position.\"\"\"\n pass\n\n @abstractmethod\n def sensor_to_physical(self, sensor_position):\n \"\"\"Convert a unitless sensor position to a position in physical units.\"\"\"\n pass\n\n @property\n @abstractmethod\n def physical_unit(self):\n \"\"\"Return a string representation of the physical units.\"\"\"\n pass\n\n def load_tunings_json(self, json_path=None):\n \"\"\"Load localized controller tunings from the provided JSON file path.\n\n Default path: 'calibrations/{}_tunings.json' where {} is replaced with\n the axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_tunings.json'.format(self.name)\n trees = load_from_json(json_path)\n self.default_tuning = trees['default']\n self.target_position_tunings = trees['target positions']\n return trees\n\n def save_tunings_json(self, json_path=None):\n \"\"\"Save a localized controller tunings tree to the provided JSON file path.\"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_tunings.json'.format(self.name)\n save_to_json({\n 'default': self.default_tuning,\n 'target positions': self.target_position_tunings\n }, json_path)\n\n async def go_to_sensor_position(\n self, sensor_position, apply_tunings=True, restore_tunings=True\n ):\n \"\"\"Go to the specified sensor position.\n\n Returns the final sensor position.\n \"\"\"\n if apply_tunings:\n current_tuning = self.default_tuning\n for tuning in self.target_position_tunings:\n if sensor_position >= tuning['min'] and sensor_position < tuning['max']:\n current_tuning = tuning\n else:\n logger.debug(\n 'PID tunings for sensor position {} unspecified, using defaults.'\n .format(int(sensor_position))\n )\n kp = current_tuning['pid']['kp']\n kd = current_tuning['pid']['kd']\n motor_limits = current_tuning['limits']['motor']\n duty_forwards_max = motor_limits['forwards']['max']\n duty_forwards_min = motor_limits['forwards']['min']\n duty_backwards_max = motor_limits['backwards']['max']\n duty_backwards_min = motor_limits['backwards']['min']\n (prev_kp, prev_kd, prev_ki) = await self.set_pid_gains(kp=kp, kd=kd)\n (\n prev_duty_forwards_max, prev_duty_forwards_min,\n prev_duty_backwards_max, prev_duty_backwards_min\n ) = await self.set_motor_limits(\n forwards_max=duty_forwards_max, forwards_min=duty_forwards_min,\n backwards_max=duty_backwards_max, backwards_min=duty_backwards_min\n )\n await self.protocol.feedback_controller.request_complete(\n int(sensor_position)\n )\n if apply_tunings and restore_tunings:\n await self.set_pid_gains(kp=prev_kp, kd=prev_kd, ki=prev_ki)\n await 
self.set_motor_limits(\n forwards_max=duty_forwards_max, forwards_min=duty_forwards_min,\n backwards_max=duty_backwards_max, backwards_min=duty_backwards_min\n )\n return self.protocol.position.last_response_payload\n\n async def go_to_low_end_position(self, speed=None):\n \"\"\"Go to the lowest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n if speed is None:\n speed = (\n self.protocol.feedback_controller.limits.motor\n .backwards.high.last_response_payload\n )\n await self.protocol.motor.request_complete(speed)\n await self.protocol.position.request()\n return self.protocol.position.last_response_payload\n\n async def go_to_high_end_position(self, speed=None):\n \"\"\"Go to the highest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n if speed is None:\n speed = (\n self.protocol.feedback_controller.limits.motor\n .forwards.high.last_response_payload\n )\n await self.protocol.motor.request_complete(speed)\n await self.protocol.position.request()\n return self.protocol.position.last_response_payload\n\n async def go_to_physical_position(self, physical_position):\n \"\"\"Go to the specified physical position.\n\n Returns the final physical position.\n \"\"\"\n sensor_position = self.physical_to_sensor(physical_position)\n sensor_position = await self.go_to_sensor_position(sensor_position)\n return self.sensor_to_physical(sensor_position)\n\n async def move_by_sensor_delta(self, sensor_delta):\n \"\"\"Go forwards/backwards by the specified sensor displacement.\n\n Returns the final physical displacement.\n \"\"\"\n position = await self.sensor_position\n target_position = position + sensor_delta\n final_position = await self.go_to_sensor_position(target_position)\n return final_position - position\n\n async def move_by_physical_delta(self, physical_delta):\n \"\"\"Go forwards/backwards by the specified physical displacement.\n\n Returns the final physical displacement.\n \"\"\"\n position = await self.physical_position\n target_position = position + physical_delta\n final_position = await self.go_to_physical_position(target_position)\n return final_position - position\n\n async def wait_until_initialized(self):\n \"\"\"Wait until the axis is ready to control.\"\"\"\n await self.protocol.initialized.wait()\n await self.protocol.position.initialized.wait()\n await self.protocol.motor.initialized.wait()\n\n async def synchronize_values(self):\n \"\"\"Request the values of all channels.\"\"\"\n await self.protocol.request_all()\n\n @property\n def name(self):\n \"\"\"Return the name of the axis.\"\"\"\n return self.protocol.node_name\n\n @property\n def last_position_limits(self):\n \"\"\"Get the last received position limits of the axis.\"\"\"\n return (\n self.protocol.feedback_controller.limits.position.low.last_response_payload,\n self.protocol.feedback_controller.limits.position.high.last_response_payload\n )\n\n @property\n async def sensor_position(self):\n \"\"\"Get the current sensor position of the axis.\"\"\"\n await self.protocol.position.request()\n return self.last_sensor_position\n\n @property\n def last_sensor_position(self):\n \"\"\"Get the last received sensor position of the axis.\"\"\"\n return self.protocol.position.last_response_payload\n\n @property\n async def physical_position(self):\n \"\"\"Get the current physical position of the axis.\"\"\"\n await self.protocol.position.request()\n return self.last_physical_position\n\n 
@property\n def last_physical_position(self):\n \"\"\"Get the last received physical position of the axis.\"\"\"\n return self.sensor_to_physical(self.last_sensor_position)\n\n async def set_pid_gains(self, kp=None, kd=None, ki=None, floating_point=True):\n \"\"\"Set values for the PID gains whose values are specified.\n\n Returns the previous values of the gains.\n \"\"\"\n pid_protocol = self.protocol.feedback_controller.pid\n prev_kp = pid_protocol.kp.last_response_payload\n prev_kd = pid_protocol.kd.last_response_payload\n prev_ki = pid_protocol.ki.last_response_payload\n if kp is not None and prev_kp != int(kp * 100 if floating_point else kp):\n await pid_protocol.kp.request(int(kp * 100 if floating_point else kp))\n if kd is not None and prev_kd != int(kd * 100 if floating_point else kp):\n await pid_protocol.kd.request(int(kd * 100 if floating_point else kp))\n if ki is not None and prev_ki != int(ki * 100 if floating_point else kp):\n await pid_protocol.ki.request(int(ki * 100 if floating_point else kp))\n return (\n prev_kp / 100 if floating_point else prev_kp,\n prev_kd / 100 if floating_point else prev_kd,\n prev_ki / 100 if floating_point else prev_ki\n )\n\n async def set_motor_limits(\n self, forwards_max=None, forwards_min=None, backwards_max=None, backwards_min=None\n ):\n \"\"\"Set values for the motor duty cycle limits where specified.\n\n Returns the previous values of the limits.\n \"\"\"\n limits_protocol = self.protocol.feedback_controller.limits.motor\n prev_forwards_max = limits_protocol.forwards.high.last_response_payload\n prev_forwards_min = limits_protocol.forwards.low.last_response_payload\n prev_backwards_max = -limits_protocol.backwards.high.last_response_payload\n prev_backwards_min = -limits_protocol.backwards.low.last_response_payload\n if forwards_max is not None and prev_forwards_max != int(forwards_max):\n await limits_protocol.forwards.high.request(int(forwards_max))\n if forwards_min is not None and prev_forwards_min != int(forwards_min):\n await limits_protocol.forwards.high.request(int(forwards_min))\n if backwards_max is not None and prev_backwards_max != int(backwards_max):\n await limits_protocol.backwards.high.request(int(-backwards_max))\n if backwards_min is not None and prev_backwards_min != int(backwards_min):\n await limits_protocol.backwards.high.request(int(-backwards_min))\n return (\n prev_forwards_max, prev_forwards_min,\n prev_backwards_max, prev_backwards_min\n )\n\n\nclass ContinuousRobotAxis(RobotAxis):\n \"\"\"High-level controller mixin interface for axes with continuous positions.\n\n Assumes a linear transformation exists between sensor and physical positions.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize member variables.\"\"\"\n super().__init__()\n self._calibration_samples = []\n self.linear_regression = None\n\n def clear_calibration_samples(self):\n \"\"\"Discard the stored calibration data.\"\"\"\n self._calibration_samples = []\n self.linear_regression = None\n\n def add_calibration_sample(self, sensor_position, physical_position):\n \"\"\"Add a (sensor, physical) position pair for calibration.\"\"\"\n self.linear_regression = None\n self._calibration_samples.append((sensor_position, physical_position))\n\n def fit_calibration_linear(self):\n \"\"\"Perform a linear regression on the calibration data and store results.\n\n Returns the regression slope, intercept, R-value, and standard error.\n The regression is for physical_position = slope * sensor_position + intercept.\n \"\"\"\n linear_regression = 
stats.linregress(self._calibration_samples)\n self.linear_regression = [\n linear_regression[0], linear_regression[1],\n linear_regression[2], linear_regression[4]\n ]\n return self.linear_regression\n\n @property\n def calibration_data(self):\n \"\"\"Return a JSON-exportable structure of calibration data.\"\"\"\n calibration_data = {\n 'parameters': {\n 'slope': self.linear_regression[0],\n 'intercept': self.linear_regression[1],\n 'rsquared': self.linear_regression[2],\n 'stderr': self.linear_regression[3]\n },\n 'physical unit': self.physical_unit,\n 'samples': [\n {\n 'sensor': calibration_sample[0],\n 'physical': calibration_sample[1]\n }\n for calibration_sample in self._calibration_samples\n ]\n }\n return calibration_data\n\n def load_calibration(self, calibration_data):\n \"\"\"Load a calibration from the provided calibration data structure.\"\"\"\n self._calibration_samples = [\n (calibration_sample['sensor'], calibration_sample['physical'])\n for calibration_sample in calibration_data['samples']\n ]\n self.fit_calibration_linear()\n\n def load_calibration_json(self, json_path=None):\n \"\"\"Load a calibration from a provided JSON file path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_physical.json'.format(self.name)\n self.load_calibration(load_from_json(json_path))\n\n def save_calibration_json(self, json_path=None):\n \"\"\"Save the calibration to the provided JSON file path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_physical.json'.format(self.name)\n save_to_json(self.calibration_data, json_path)\n\n @property\n def sensor_to_physical_scaling(self):\n \"\"\"Return the scaling factor from sensor to physical positions.\"\"\"\n if self.linear_regression is None:\n self._fit_calibration_linear()\n return self.linear_regression[0]\n\n @property\n def sensor_to_physical_offset(self):\n \"\"\"Return the post-scaling offset from sensor to physical positions.\"\"\"\n if self.linear_regression is None:\n self._fit_calibration_linear()\n return self.linear_regression[1]\n\n # Implement RobotAxis\n\n def physical_to_sensor(self, physical_position):\n \"\"\"Convert a position in physical units to a unitless integer sensor position.\"\"\"\n return (\n (physical_position - self.sensor_to_physical_offset) /\n self.sensor_to_physical_scaling\n )\n\n def sensor_to_physical(self, sensor_position):\n \"\"\"Convert a unitless sensor position to a position in physical units.\"\"\"\n return (\n self.sensor_to_physical_scaling * sensor_position +\n self.sensor_to_physical_offset\n )\n\n\nclass PresetRobotAxis(RobotAxis):\n \"\"\"High-level controller mixin for axes with preset positions.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize member variables.\"\"\"\n super().__init__()\n self.preset_sensor_position_tree = {}\n self.preset_physical_position_tree = {}\n self.current_preset_position = None\n\n def set_preset_sensor_position(self, preset_position, sensor_position):\n \"\"\"Associate a preset position with a sensor position.\"\"\"\n try:\n physical_position = self.preset_to_physical(\n preset_position, use_sensor_if_needed=False\n )\n except (AttributeError, KeyError):\n physical_position = None\n if physical_position is not None:\n raise KeyError(\n 'Preset position {} is already set to physical position {} {}!'\n .format(preset_position, physical_position, 
self.physical_units)\n )\n add_to_tree(\n self.preset_sensor_position_tree, preset_position,\n sensor_position\n )\n\n def set_preset_physical_position(self, preset_position, physical_position):\n \"\"\"Associate a preset position with a physical position.\"\"\"\n try:\n sensor_position = self.preset_to_sensor(\n preset_position, use_physical_if_needed=False\n )\n except KeyError:\n sensor_position = None\n if sensor_position is not None:\n raise KeyError(\n 'Preset position {} is already set to sensor position {}!'\n .format(preset_position, sensor_position)\n )\n add_to_tree(\n self.preset_physical_position_tree, preset_position,\n physical_position\n )\n\n def get_preset_position(self, presets_tree, preset_position):\n \"\"\"Get an actual position from a preset position tree node.\"\"\"\n position_node = get_from_tree(presets_tree, preset_position)\n if isinstance(position_node, dict):\n try:\n type = position_node['type']\n except KeyError:\n raise TypeError(\n 'Type-less preset position {}!'.format(preset_position)\n )\n if type == 'implicit':\n raise TypeError(\n 'Cannot use implicit preset position {}!'.format(preset_position)\n )\n if type == 'constants':\n raise TypeError(\n 'Cannot use partially-specified preset position {}!'\n .format(preset_position)\n )\n if type == 'constant':\n return position_node['value']\n raise NotImplementedError(\n 'Unknown type {} for preset position {}!'\n .format(type, preset_position)\n )\n return position_node\n\n def preset_to_sensor(self, preset_position, use_physical_if_needed=True):\n \"\"\"Convert a preset position to a sensor position.\"\"\"\n try:\n return self.get_preset_position(\n self.preset_sensor_position_tree, preset_position\n )\n except KeyError:\n if use_physical_if_needed:\n physical_position = self.preset_to_physical(preset_position, False)\n return self.physical_to_sensor(physical_position)\n else:\n raise\n\n def preset_to_physical(self, preset_position, use_sensor_if_needed=True):\n \"\"\"Convert a preset position to a physical position.\"\"\"\n try:\n return self.get_preset_position(\n self.preset_physical_position_tree, preset_position\n )\n except KeyError:\n if use_sensor_if_needed:\n sensor_position = self.preset_to_sensor(preset_position, False)\n return self.sensor_to_physical(sensor_position)\n else:\n raise\n\n async def go_to_preset_position(self, preset_position, force_go=False):\n \"\"\"Go to the specified preset position.\n\n Returns the physical position error between the desired physical position\n and the final physical position.\n \"\"\"\n if self.current_preset_position == preset_position and not force_go:\n return\n physical_position = self.preset_to_physical(preset_position)\n final_physical_position = await self.go_to_physical_position(physical_position)\n if isinstance(preset_position, str):\n preset_position = (preset_position,)\n self.current_preset_position = preset_position\n return physical_position - final_physical_position\n\n def load_preset_json(self, json_path=None):\n \"\"\"Load a preset positions tree from the provided JSON file path.\n\n Default path: 'calibrations/{}_preset.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_preset.json'.format(self.name)\n trees = load_from_json(json_path)\n self.preset_physical_position_tree = trees['physical']\n self.preset_sensor_position_tree = trees['sensor']\n return trees\n\n def save_preset_json(self, json_path=None):\n \"\"\"Save a preset positions tree to the provided JSON file 
path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_preset.json'.format(self.name)\n save_to_json({\n 'physical': self.preset_physical_position_tree,\n 'sensor': self.preset_sensor_position_tree\n }, json_path)\n\n # Implement RobotAxis\n\n async def go_to_sensor_position(self, sensor_position):\n \"\"\"Go to the specified sensor position.\n\n Returns the final sensor position.\n \"\"\"\n self.current_preset_position = None\n return await super().go_to_sensor_position(sensor_position)\n\n async def go_to_low_end_position(self, speed=None):\n \"\"\"Go to the lowest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n try:\n return await self.go_to_preset_position('low end')\n except (KeyError, TypeError):\n if self.current_preset_position == ('low end',):\n return\n self.current_preset_position = ('low end',)\n return await super().go_to_low_end_position(speed)\n\n async def go_to_high_end_position(self, speed=None):\n \"\"\"Go to the highest possible sensor position at the maximum allowed speed.\n\n Speed must be given as a signed motor duty cycle.\n \"\"\"\n try:\n return await self.go_to_preset_position('high end')\n except (KeyError, TypeError):\n if self.current_preset_position == ('high end',):\n return\n self.current_preset_position = ('high end',)\n return await super().go_to_high_end_position(speed)\n\n\nclass AlignedRobotAxis(PresetRobotAxis):\n \"\"\"High-level controller mixin for axes with alignment.\"\"\"\n\n def at_alignment_hole(self):\n \"\"\"Return whether the axis is already at the alignment hole.\"\"\"\n return self.current_preset_position == ('alignment hole',)\n\n async def go_to_alignment_hole(self):\n \"\"\"Move to the alignment hole.\"\"\"\n if self.at_alignment_hole():\n return\n try:\n await self.go_to_preset_position('alignment hole')\n except TypeError:\n await self.go_to_physical_position(0)\n self.current_preset_position = ('alignment hole',)\n\n\nclass ManuallyAlignedRobotAxis(AlignedRobotAxis, ContinuousRobotAxis):\n \"\"\"High-level controller mixin for axes with manual alignment.\"\"\"\n\n async def set_alignment(self):\n \"\"\"Update the physical calibration to align against the current position.\"\"\"\n position = await self.sensor_position\n self.linear_regression[1] = -self.linear_regression[0] * position\n\n\nclass ModularRobotAxis(PresetRobotAxis):\n \"\"\"High-level controller mixin for axes with modular sets of positions.\"\"\"\n\n def at_module(self, module):\n \"\"\"Return whether the axis is already at the module.\n\n Module may be the module's name or type, depending on the axis.\n \"\"\"\n return self.current_preset_position[0] == module\n\n def at_module_position(self, module, position):\n \"\"\"Return whether the axis is already at the position for the module.\n\n Module may be the module's name or type, depending on the axis.\n \"\"\"\n return self.current_preset_position == (module, position)\n\n def get_indexed_offset(self, module_params, index, origin_index_key='origin index'):\n \"\"\"Return the physical offset for the provided module indexed preset position.\"\"\"\n def to_num(index):\n if isinstance(index, str) and len(index) == 1:\n return ord(index)\n return index\n index = to_num(index)\n min_index = to_num(module_params['min index'])\n max_index = to_num(module_params['max index'])\n origin_index = to_num(module_params[origin_index_key])\n if (index < 
min_index) or (max_index is not None and index > max_index):\n raise IndexError(\n 'Index {} is out of the range ({}, {})!'\n .format(index, min_index, max_index)\n )\n return (index - origin_index) * module_params['increment']\n\n def get_continuous_offset(self, module_params, offset):\n \"\"\"Return the physical offset for the provided module continuous preset position.\"\"\"\n min = module_params['min']\n max = module_params['max']\n if (offset < min) or (max is not None and offset > max):\n raise ValueError(\n 'Offset {} is out of the range ({}, {})!'\n .format(offset, min, max)\n )\n return offset\n\n def get_module_mount_position(self, presets_tree, module_type):\n \"\"\"Get the position of the module's mount.\"\"\"\n return self.get_preset_position(presets_tree, 'mount')\n\n def get_module_offset_position(self, module_params, offset):\n \"\"\"Get the position on the module relative to the module's origin.\"\"\"\n if module_params['type'] == 'indexed':\n return self.get_indexed_offset(module_params, offset)\n elif module_params['type'] == 'continuous':\n return self.get_continuous_offset(module_params, offset)\n else:\n raise NotImplementedError(\n 'Unknown module type {}!'.format(module_params['type'])\n )\n\n def get_module_position(self, presets_tree, module_params, preset_position):\n \"\"\"Get the actual position from a preset module position tree node.\"\"\"\n (module, offset) = preset_position\n return (\n self.get_module_mount_position(presets_tree, module) +\n module_params['origin'] +\n self.get_module_offset_position(module_params, offset)\n )\n\n async def go_to_module_position(self, module, position):\n \"\"\"Move to the position for the specified module.\"\"\"\n await self.go_to_preset_position((module, position))\n\n # Implement PresetRobotAxis\n\n def get_preset_position(self, presets_tree, preset_position):\n \"\"\"Get an actual position from a preset position tree node.\"\"\"\n try:\n return super().get_preset_position(presets_tree, preset_position)\n except KeyError:\n module_params = get_from_tree(presets_tree, preset_position[0])\n return self.get_module_position(presets_tree, module_params, preset_position)\n\n\nclass ConfigurableRobotAxis(ModularRobotAxis):\n \"\"\"High-level controller mixin for axes with reconfigurable sets of modules.\"\"\"\n\n def get_module_type(self, module_name):\n \"\"\"Return the module type of the named module.\"\"\"\n return self.configuration_tree[module_name]['type']\n\n def get_module_mount(self, module_name):\n \"\"\"Return the module type of the named module.\"\"\"\n return self.configuration_tree[module_name]['mount']\n\n # Implement PresetRobotAxis\n\n def load_preset_json(self, json_path=None):\n \"\"\"Load a preset positions tree from the provided JSON file path.\n\n Default path: 'calibrations/{}_preset.json' where {} is replaced with the\n axis name.\n \"\"\"\n trees = super().load_preset_json(json_path)\n if self.configuration is None:\n self.configuration = trees['default configuration']\n self.configurations = trees['configurations']\n self.configuration_tree = trees['configurations'][self.configuration]\n\n def save_preset_json(self, json_path=None):\n \"\"\"Save a preset positions tree to the provided JSON file path.\n\n Default path: 'calibrations/{}_physical.json' where {} is replaced with the\n axis name.\n \"\"\"\n if json_path is None:\n json_path = 'calibrations/{}_preset.json'.format(self.name)\n save_to_json({\n 'physical': self.preset_physical_position_tree,\n 'sensor': self.preset_sensor_position_tree,\n 
'default configuration': self.configuration,\n 'configurations': self.configurations\n }, json_path)\n\n def get_preset_position(self, presets_tree, preset_position):\n \"\"\"Get an actual position from a preset position tree node.\"\"\"\n try:\n return super().get_preset_position(presets_tree, preset_position)\n except KeyError:\n module_name = preset_position[0]\n module_type = self.get_module_type(module_name)\n module_params = get_from_tree(presets_tree, module_type)\n return self.get_module_position(presets_tree, module_params, preset_position)\n\n" ]
[ [ "scipy.stats.linregress" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
vvmar/machine-learning-engineering-for-production-public
[ "0350e2e79eebc1dc2edb9e7b5e6f582b40fa74be" ]
[ "course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py" ]
[ "import pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n# rev 1\napp = FastAPI(title=\"Predicting Wine Class with batching\")\n\n# Open classifier in global scope\nwith open(\"models/wine-95-fixed.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\[email protected](\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jiansowa/Paddle
[ "0ecf441af14d554c85f69a206e3e3a9bdd86fb13", "0ecf441af14d554c85f69a206e3e3a9bdd86fb13", "0ecf441af14d554c85f69a206e3e3a9bdd86fb13", "0ecf441af14d554c85f69a206e3e3a9bdd86fb13", "0ecf441af14d554c85f69a206e3e3a9bdd86fb13", "0ecf441af14d554c85f69a206e3e3a9bdd86fb13" ]
[ "python/paddle/fluid/trainer_factory.py", "python/paddle/dataset/uci_housing.py", "python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py", "python/paddle/fluid/tests/unittests/mkldnn/test_fusion_gru_int8_mkldnn_op.py", "python/paddle/fluid/dataloader/batch_sampler.py", "python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Defination of TrainerFactory.\"\"\"\n\nimport threading\nimport time\nimport logging\nimport numpy as np\nfrom paddle.fluid.log_helper import get_logger\n\nlocal_logger = get_logger(\n __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')\n\nfrom .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer, HeterXpuTrainer\nfrom .device_worker import Hogwild, DownpourSGD, Section, DownpourSGDOPT\nfrom .framework import Variable\nfrom multiprocessing import Process, Manager\n\n__all__ = [\"TrainerFactory\", \"FetchHandler\", \"FetchHandlerMonitor\"]\n\n\nclass TrainerFactory(object):\n \"\"\"\n Create trainer and device worker.\n If opt_info is not None, it will get configs from opt_info,\n otherwise create MultiTrainer and Hogwild.\n \"\"\"\n\n def __init__(self):\n pass\n\n def _create_trainer(self, opt_info=None):\n trainer = None\n device_worker = None\n if not opt_info:\n # default is MultiTrainer + Hogwild\n trainer = MultiTrainer()\n device_worker = Hogwild()\n trainer._set_device_worker(device_worker)\n else:\n trainer_class = opt_info[\"trainer\"]\n device_worker_class = opt_info[\"device_worker\"]\n trainer = globals()[trainer_class]()\n device_worker = globals()[device_worker_class]()\n\n # for debug tools\n if opt_info is not None:\n if opt_info.get(\"dump_slot\") is not None:\n trainer._set_dump_slot(opt_info[\"dump_slot\"])\n if opt_info.get(\"mpi_rank\") is not None:\n trainer._set_mpi_rank(opt_info[\"mpi_rank\"])\n if opt_info.get(\"mpi_size\") is not None:\n trainer._set_mpi_size(opt_info[\"mpi_size\"])\n if opt_info.get(\"dump_fields\") is not None and len(\n opt_info.get(\"dump_fields\")) != 0:\n trainer._set_dump_fields(opt_info[\"dump_fields\"])\n if opt_info.get(\"dump_fields_path\") is not None and len(\n opt_info.get(\"dump_fields_path\")) != 0:\n trainer._set_dump_fields_path(opt_info[\"dump_fields_path\"])\n if opt_info.get(\"dump_file_num\") is not None:\n trainer._set_dump_file_num(opt_info[\"dump_file_num\"])\n if opt_info.get(\"dump_converter\") is not None:\n trainer._set_dump_converter(opt_info[\"dump_converter\"])\n if opt_info.get(\"dump_param\") is not None and len(\n opt_info.get(\"dump_param\")) != 0:\n trainer._set_dump_param(opt_info[\"dump_param\"])\n if opt_info.get(\"worker_places\") is not None:\n trainer._set_worker_places(opt_info[\"worker_places\"])\n if opt_info.get(\"enable_random_dump\") is not None:\n trainer._set_enable_random_dump(opt_info[\n \"enable_random_dump\"])\n if opt_info.get(\"dump_interval\") is not None:\n trainer._set_dump_interval(opt_info[\"dump_interval\"])\n if opt_info.get(\"random_with_lineid\") is not None:\n trainer._set_random_with_lineid(opt_info[\n \"random_with_lineid\"])\n\n if \"fleet_desc\" in opt_info:\n device_worker._set_fleet_desc(opt_info[\"fleet_desc\"])\n trainer._set_fleet_desc(opt_info[\"fleet_desc\"])\n if opt_info.get(\"use_cvm\") is not None:\n 
trainer._set_use_cvm(opt_info[\"use_cvm\"])\n if opt_info.get(\"no_cvm\") is not None:\n trainer._set_no_cvm(opt_info[\"no_cvm\"])\n if opt_info.get(\"scale_datanorm\") is not None:\n trainer._set_scale_datanorm(opt_info[\"scale_datanorm\"])\n if opt_info.get(\"adjust_ins_weight\") is not None:\n trainer._set_adjust_ins_weight(opt_info[\n \"adjust_ins_weight\"])\n if opt_info.get(\"copy_table\") is not None:\n trainer._set_copy_table_config(opt_info[\"copy_table\"])\n if opt_info.get(\"check_nan_var_names\") is not None:\n trainer._set_check_nan_var_names(opt_info[\n \"check_nan_var_names\"])\n if opt_info.get(\"loss_names\") is not None:\n trainer._set_loss_names(opt_info[\"loss_names\"])\n trainer._set_device_worker(device_worker)\n return trainer\n\n\nclass FetchHandlerMonitor(object):\n \"\"\"\n Defination of FetchHandlerMonitor class,\n it's for fetch handler.\n \"\"\"\n\n def __init__(self, scope, handler):\n self.fetch_instance = handler\n self.fetch_thread = threading.Thread(\n target=self.handler_launch_func, args=(scope, self.fetch_instance))\n self.running_lock = threading.Lock()\n self.running = False\n\n def handler_launch_func(self, scope, handler):\n fetch_instance = handler\n period_secs = fetch_instance.period_secs\n var_name_to_key = {}\n for key in fetch_instance.var_dict:\n if isinstance(fetch_instance.var_dict[key], Variable):\n var_name_to_key[fetch_instance.var_dict[key].name] = key\n else:\n local_logger.warning(\"the value of {} is not a Variable\".format(\n key))\n var_name_to_key[\"None.var\"] = key\n elapsed_secs = 0\n while True:\n self.running_lock.acquire()\n if self.running == False:\n break\n if elapsed_secs < period_secs:\n # TODO(guru4elephant): needs customized condition\n time.sleep(1)\n elapsed_secs += 1\n else:\n elapsed_secs = 0\n fetch_dict = {}\n for key in var_name_to_key:\n var = scope.find_var(key)\n fetch_dict[key] = var\n if var == None:\n local_logger.warning(\"{} value currently not available\".\n format(var_name_to_key[key]))\n res_dict = {}\n for key in fetch_dict:\n user_name = var_name_to_key[key]\n if fetch_dict[key] == None:\n res_dict[user_name] = None\n continue\n else:\n res_dict[user_name] = fetch_dict[key].get_tensor()\n\n lod = res_dict[user_name].lod()\n if len(lod) > 0:\n raise RuntimeError(\"Some of your fetched tensors \\\n hold LoD information. \\\n They can not be completely cast \\\n to Python ndarray. We can \\\n not return LoDTensor itself directly, \\\n please choose another targets\")\n if res_dict[user_name]._is_initialized():\n res_dict[user_name] = np.array(res_dict[user_name])\n else:\n res_dict[user_name] = None\n fetch_instance.handler(res_dict)\n self.running_lock.release()\n\n def start(self):\n \"\"\"\n start monitor,\n it will start a monitor thread.\n \"\"\"\n self.running_lock.acquire()\n self.running = True\n self.running_lock.release()\n self.fetch_thread.setDaemon(True)\n self.fetch_thread.start()\n\n def stop(self):\n self.running_lock.acquire()\n self.running = False\n self.running_lock.release()\n", "# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUCI Housing dataset.\n\nThis module will download dataset from\nhttps://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and\nparse training set and test set into paddle reader creators.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\nimport tempfile\nimport tarfile\nimport os\nimport paddle.dataset.common\n\n__all__ = ['train', 'test']\n\nURL = 'http://paddlemodels.bj.bcebos.com/uci_housing/housing.data'\nMD5 = 'd4accdce7a25600298819f8e28e8d593'\nfeature_names = [\n 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',\n 'PTRATIO', 'B', 'LSTAT'\n]\n\nUCI_TRAIN_DATA = None\nUCI_TEST_DATA = None\n\nFLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fluid/fit_a_line.fluid.tar'\nFLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b'\n\n\ndef feature_range(maximums, minimums):\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n feature_num = len(maximums)\n ax.bar(list(range(feature_num)),\n maximums - minimums,\n color='r',\n align='center')\n ax.set_title('feature scale')\n plt.xticks(list(range(feature_num)), feature_names)\n plt.xlim([-1, feature_num])\n fig.set_figheight(6)\n fig.set_figwidth(10)\n if not os.path.exists('./image'):\n os.makedirs('./image')\n fig.savefig('image/ranges.png', dpi=48)\n plt.close(fig)\n\n\ndef load_data(filename, feature_num=14, ratio=0.8):\n global UCI_TRAIN_DATA, UCI_TEST_DATA\n if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:\n return\n\n data = np.fromfile(filename, sep=' ')\n data = data.reshape(data.shape[0] // feature_num, feature_num)\n maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(\n axis=0) / data.shape[0]\n # if you want to print the distribution of input data, you could use function of feature_range\n #feature_range(maximums[:-1], minimums[:-1])\n for i in six.moves.range(feature_num - 1):\n data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])\n offset = int(data.shape[0] * ratio)\n UCI_TRAIN_DATA = data[:offset]\n UCI_TEST_DATA = data[offset:]\n\n\ndef train():\n \"\"\"\n UCI_HOUSING training set creator.\n\n It returns a reader creator, each sample in the reader is features after\n normalization and price number.\n\n :return: Training reader creator\n :rtype: callable\n \"\"\"\n global UCI_TRAIN_DATA\n load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))\n\n def reader():\n for d in UCI_TRAIN_DATA:\n yield d[:-1], d[-1:]\n\n return reader\n\n\ndef test():\n \"\"\"\n UCI_HOUSING test set creator.\n\n It returns a reader creator, each sample in the reader is features after\n normalization and price number.\n\n :return: Test reader creator\n :rtype: callable\n \"\"\"\n global UCI_TEST_DATA\n load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))\n\n def reader():\n for d in UCI_TEST_DATA:\n yield d[:-1], d[-1:]\n\n return reader\n\n\ndef 
fluid_model():\n parameter_tar = paddle.dataset.common.download(\n FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar')\n\n tar = tarfile.TarFile(parameter_tar, mode='r')\n dirpath = tempfile.mkdtemp()\n tar.extractall(path=dirpath)\n\n return dirpath\n\n\ndef predict_reader():\n \"\"\"\n It returns just one tuple data to do inference.\n\n :return: one tuple data\n :rtype: tuple\n \"\"\"\n global UCI_TEST_DATA\n load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))\n return (UCI_TEST_DATA[0][:-1], )\n\n\ndef fetch():\n paddle.dataset.common.download(URL, 'uci_housing', MD5)\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.dygraph.nn import Embedding\nimport paddle.fluid.framework as framework\nfrom paddle.fluid.optimizer import SGDOptimizer\nfrom paddle.fluid.dygraph.base import to_variable\nfrom test_imperative_base import new_program_scope\nimport numpy as np\nimport six\n\n\nclass SimpleNet(fluid.Layer):\n def __init__(self,\n hidden_size,\n vocab_size,\n num_steps=20,\n init_scale=0.1,\n is_sparse=False,\n dtype='float32'):\n super(SimpleNet, self).__init__()\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.init_scale = init_scale\n self.num_steps = num_steps\n self.embedding = Embedding(\n size=[vocab_size, hidden_size],\n dtype=dtype,\n is_sparse=is_sparse,\n param_attr=fluid.ParamAttr(\n name='embedding_para',\n initializer=fluid.initializer.UniformInitializer(\n low=-init_scale, high=init_scale)))\n self.softmax_weight = self.create_parameter(\n attr=fluid.ParamAttr(),\n shape=[self.hidden_size, self.hidden_size],\n dtype=dtype,\n default_initializer=fluid.initializer.UniformInitializer(\n low=-self.init_scale, high=self.init_scale))\n self.softmax_bias = self.create_parameter(\n attr=fluid.ParamAttr(),\n shape=[self.hidden_size],\n dtype=dtype,\n default_initializer=fluid.initializer.UniformInitializer(\n low=-self.init_scale, high=self.init_scale))\n\n def forward(self, input, label):\n x_emb = self.embedding(input)\n fc = fluid.layers.matmul(x_emb, self.softmax_weight)\n fc = fluid.layers.elementwise_add(fc, self.softmax_bias)\n projection = fluid.layers.matmul(\n fc, fluid.layers.transpose(\n self.embedding.weight, perm=[1, 0]))\n projection = fluid.layers.reshape(\n projection, shape=[-1, self.vocab_size])\n loss = fluid.layers.softmax_with_cross_entropy(\n logits=projection, label=label, soft_label=False)\n loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])\n loss = fluid.layers.reduce_mean(loss, dim=[0])\n loss = fluid.layers.reduce_sum(loss)\n\n return loss\n\n\nclass TestDygraphSimpleNet(unittest.TestCase):\n def test_simple_net(self):\n for is_sparse in [True, False]:\n for dtype in [\"float32\", \"float64\"]:\n self.simple_net_float(is_sparse, dtype)\n\n def simple_net_float(self, 
is_sparse, dtype):\n places = [fluid.CPUPlace()]\n if core.is_compiled_with_cuda():\n places.append(fluid.CUDAPlace(0))\n\n for place in places:\n seed = 90\n hidden_size = 10\n vocab_size = 1000\n num_steps = 3\n init_scale = 0.1\n batch_size = 4\n batch_num = 200\n\n for is_sort_sum_gradient in [True, False]:\n traced_layer = None\n with fluid.dygraph.guard(place):\n paddle.manual_seed(seed)\n paddle.framework.random._manual_program_seed(seed)\n\n simple_net = SimpleNet(\n hidden_size=hidden_size,\n vocab_size=vocab_size,\n num_steps=num_steps,\n init_scale=init_scale,\n is_sparse=is_sparse,\n dtype=dtype)\n\n sgd = SGDOptimizer(\n learning_rate=1e-3,\n parameter_list=simple_net.parameters())\n dy_param_updated = dict()\n dy_param_init = dict()\n dy_loss = None\n\n fluid.set_flags({\n 'FLAGS_sort_sum_gradient': is_sort_sum_gradient\n })\n\n for i in range(batch_num):\n x_data = np.arange(12).reshape(4, 3).astype('int64')\n y_data = np.arange(1, 13).reshape(4, 3).astype('int64')\n x_data = x_data.reshape((-1, num_steps))\n y_data = y_data.reshape((-1, 1))\n\n x = to_variable(x_data)\n y = to_variable(y_data)\n outs = simple_net(x, y)\n dy_loss = outs\n if i == 0:\n for param in simple_net.parameters():\n dy_param_init[param.name] = param.numpy()\n dy_loss.backward()\n sgd.minimize(dy_loss)\n sgd.clear_gradients()\n if i == batch_num - 1:\n for param in simple_net.parameters():\n dy_param_updated[param.name] = param.numpy()\n dy_loss_value = dy_loss.numpy()\n\n with new_program_scope():\n paddle.manual_seed(seed)\n paddle.framework.random._manual_program_seed(seed)\n\n simple_net = SimpleNet(\n hidden_size=hidden_size,\n vocab_size=vocab_size,\n num_steps=num_steps,\n is_sparse=is_sparse,\n dtype=dtype)\n\n exe = fluid.Executor(place)\n sgd = SGDOptimizer(learning_rate=1e-3)\n x = fluid.layers.data(\n name=\"x\", shape=[-1, num_steps], dtype='int64')\n y = fluid.layers.data(name=\"y\", shape=[-1, 1], dtype=dtype)\n\n static_loss = simple_net(x, y)\n sgd.minimize(static_loss)\n static_param_updated = dict()\n static_param_init = dict()\n static_param_name_list = list()\n for param in simple_net.parameters():\n static_param_name_list.append(param.name)\n\n out = exe.run(framework.default_startup_program(),\n fetch_list=static_param_name_list)\n for i in range(len(static_param_name_list)):\n static_param_init[static_param_name_list[i]] = out[i]\n static_loss_value = None\n for i in range(batch_num):\n x_data = np.arange(12).reshape(4, 3).astype('int64')\n y_data = np.arange(1, 13).reshape(4, 3).astype('int64')\n x_data = x_data.reshape((-1, num_steps))\n y_data = y_data.reshape((-1, 1))\n fetch_list = [static_loss]\n fetch_list.extend(static_param_name_list)\n out = exe.run(fluid.default_main_program(),\n feed={\"x\": x_data,\n \"y\": y_data},\n fetch_list=fetch_list)\n static_loss_value = out[0]\n\n if i == batch_num - 1:\n for k in range(3, len(out)):\n static_param_updated[static_param_name_list[\n k - 1]] = out[k]\n\n self.assertTrue(\n np.array_equal(static_loss_value, dy_loss_value))\n for key, value in six.iteritems(static_param_init):\n self.assertTrue(np.array_equal(value, dy_param_init[key]))\n for key, value in six.iteritems(static_param_updated):\n self.assertTrue(\n np.array_equal(value, dy_param_updated[key]))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom paddle.fluid.tests.unittests.op_test import OpTest\nfrom paddle.fluid.tests.unittests.test_fusion_gru_op import fusion_gru\nfrom paddle.fluid.tests.unittests.test_fusion_lstm_op import fc, ACTIVATION\n\n\nclass TestFusionGRUINT8MKLDNNOp(OpTest):\n def set_confs(self):\n pass\n\n def setUp(self):\n self.op_type = \"fusion_gru\"\n self.lod = [[2, 4, 3]]\n self.IC = 3\n self.OC = 5\n self.is_reverse = False\n self.with_h0 = False\n self.with_bias = True\n self.act_state = 'tanh'\n self.act_gate = 'sigmoid'\n self.origin_mode = True\n self.use_mkldnn = True\n self.force_fp32_output = True\n self.error_margin = 1e-5\n self.set_confs()\n\n # RNN dimensions\n T = sum(self.lod[0])\n N = len(self.lod[0])\n\n # Input data\n x_f32 = np.random.rand(T, self.IC).astype('float32') * 2 - 1\n scale_data = 63\n shift_data = 64\n x_u8 = (x_f32 * scale_data + shift_data).astype(np.uint8)\n\n # WeightX/WeightH data\n wx = np.random.rand(self.IC, 3 * self.OC).astype('float32') * 2 - 1\n wh = np.random.rand(self.OC, 3 * self.OC).astype('float32') * 2 - 1\n\n # Calculating weight scales\n # scales = 63 / max(abs(channel_wise(weightsX + weightsH)))\n # WeightX data shape in PP: [IC, 3 * OC]\n # WeightH data shape in PP: [OC, 2 * OC] + [OC, OC]\n # Scales shape in oneDNN: [3, OC]\n scale_ur = 63 / np.max(np.abs(\n np.concatenate(\n [\n wx[:, :2 * self.OC], wh.flatten()[:2 * self.OC * self.OC]\n .reshape(self.OC, 2 * self.OC)\n ],\n axis=0)),\n axis=0)\n scale_o = 63 / np.max(np.abs(\n np.concatenate(\n [\n wx[:, 2 * self.OC:], wh.flatten()[2 * self.OC * self.OC:]\n .reshape(self.OC, self.OC)\n ],\n axis=0)),\n axis=0)\n\n scale_weights = np.concatenate([scale_ur, scale_o]).astype('float')\n\n bias = np.random.rand(\n 1, 3 * self.OC).astype('float32') if self.with_bias else np.zeros(\n (1, 3 * self.OC), dtype='float32')\n h0 = np.random.rand(\n N, self.OC).astype('float32') if self.with_h0 else np.zeros(\n (N, self.OC), dtype='float32')\n\n _, _, _, hidden_f32 = fusion_gru(x_f32, self.lod, h0, wx, wh, bias,\n self.is_reverse, self.origin_mode,\n ACTIVATION[self.act_state],\n ACTIVATION[self.act_gate])\n\n self.inputs = {'X': (x_u8, self.lod), 'WeightX': wx, 'WeightH': wh}\n\n if self.with_bias:\n self.inputs['Bias'] = bias\n\n if self.with_h0:\n self.inputs['H0'] = h0\n\n if self.force_fp32_output:\n self.error_margin = 1e-1\n self.outputs = {'Hidden': (hidden_f32, self.lod)}\n else:\n self.error_margin = 1\n hidden_u8 = (hidden_f32 * scale_data + shift_data).astype(np.uint8)\n self.outputs = {'Hidden': (hidden_u8, self.lod)}\n\n self.attrs = {\n 'activation': self.act_state,\n 'gate_activation': self.act_gate,\n 'is_reverse': self.is_reverse,\n 'origin_mode': self.origin_mode,\n 'use_mkldnn': self.use_mkldnn,\n 'force_fp32_output': self.force_fp32_output,\n 'Scale_data': scale_data,\n 'Shift_data': shift_data,\n 'Scale_weights': scale_weights\n }\n\n def test_check_output(self):\n 
self.check_output(check_dygraph=False, atol=self.error_margin)\n\n\nclass TestFusionGRUINT8MKLDNNOp2(TestFusionGRUINT8MKLDNNOp):\n def set_confs(self):\n self.force_fp32_output = False\n\n\nclass TestFusionGRUINT8MKLDNNOp3(TestFusionGRUINT8MKLDNNOp):\n def set_confs(self):\n self.origin_mode = False\n\n\nclass TestFusionGRUINT8MKLDNNOp4(TestFusionGRUINT8MKLDNNOp):\n def set_confs(self):\n self.with_bias = False\n\n\nclass TestFusionGRUINT8MKLDNNOp5(TestFusionGRUINT8MKLDNNOp):\n def set_confs(self):\n self.with_h0 = False\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport math\n\nfrom .sampler import Sampler, SequenceSampler, RandomSampler\nfrom .dataset import Dataset, IterableDataset\n\n__all__ = [\"BatchSampler\", \"DistributedBatchSampler\"]\n\n\nclass BatchSampler(Sampler):\n \"\"\"\n A base implement of batch sampler used by `paddle.io.DataLoader`\n which yield mini-batch indices(a list/tuple with length as\n mini-batch size and holds sample indices) iterably.\n\n Batch sampler used by :code:`paddle.io.DataLoader` should be a subclass\n of :code:`paddle.io.BatchSampler`, BatchSampler subclasses should\n implement following methods:\n\n :code:`__iter__`: return mini-batch indices iterably.\n\n :code:`__len__`: get mini-batch number in an epoch.\n\n\n Args:\n dataset(Dataset): this could be a :code:`paddle.io.Dataset` \n implement or other python object which implemented\n :code:`__len__` for BatchSampler to get indices as the\n range of :attr:`dataset` length. Default None.\n sampler (Sampler): this could be a :code:`paddle.io.Dataset`\n instance which implemented :code:`__iter__` to yield\n sample indices. :attr:`sampler` and :attr:`dataset`\n can not be set in the same time. If :attr:`sampler`\n is set, :attr:`shuffle` should not be set. Default None.\n shuffle(bool): whether to shuffle indices order before genrating\n batch indices. Default False.\n batch_size(int): sample indice number in a mini-batch indices.\n drop_last(bool): whether drop the last incomplete batch dataset size\n is not divisible by the batch size. Default False\n\n Returns:\n BatchSampler: an iterable object for indices iterating\n\n Examples:\n \n .. 
code-block:: python\n \n from paddle.io import RandomSampler, BatchSampler, Dataset\n\n # init with dataset\n class RandomDataset(Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n \n def __getitem__(self, idx):\n image = np.random.random([784]).astype('float32')\n label = np.random.randint(0, 9, (1, )).astype('int64')\n return image, label\n \n def __len__(self):\n return self.num_samples\n \n bs = BatchSampler(dataset=RandomDataset(100),\n shuffle=False,\n batch_size=16,\n drop_last=False)\n\n for batch_indices in bs:\n print(batch_indices)\n\n # init with sampler\n sampler = RandomSampler(RandomDataset(100))\n bs = BatchSampler(sampler=sampler,\n batch_size=8,\n drop_last=True)\n\n for batch_indices in bs:\n print(batch_indices)\n\n\n see `paddle.io.DataLoader`\n\n \"\"\"\n\n def __init__(self,\n dataset=None,\n sampler=None,\n shuffle=False,\n batch_size=1,\n drop_last=False):\n if dataset is None:\n assert sampler is not None, \\\n \"either dataset or sampler should be set\"\n assert isinstance(sampler, Sampler), \\\n \"sampler should be a paddle.io.Sampler, but got {}\".format(type(sampler))\n assert not shuffle, \"shuffle should be False when sampler is set\"\n self.sampler = sampler\n else:\n assert isinstance(dataset, Dataset), \\\n \"dataset should be a paddle.io.Dataset\"\n assert not isinstance(dataset, IterableDataset), \\\n \"dataset should not be a paddle.io.IterableDataset\"\n assert sampler is None, \\\n \"should not set both dataset and sampler\"\n assert isinstance(shuffle, bool), \\\n \"shuffle should be a boolean value, but got {}\".format(type(shuffle))\n if shuffle:\n self.sampler = RandomSampler(dataset)\n else:\n self.sampler = SequenceSampler(dataset)\n\n assert isinstance(batch_size, int) and batch_size > 0, \\\n \"batch_size should be a positive integer, but got {}\".format(batch_size)\n self.batch_size = batch_size\n assert isinstance(drop_last, bool), \\\n \"drop_last should be a boolean value, but got {}\".format(type(drop_last))\n self.drop_last = drop_last\n\n def __iter__(self):\n batch_indices = []\n for idx in self.sampler:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n def __len__(self):\n num_samples = len(self.sampler)\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n\n\nclass _InfiniteIterableSampler(object):\n def __init__(self, dataset, batch_size=1):\n assert isinstance(\n dataset, IterableDataset\n ), \"dataset should be an instance of paddle.io.IterableDataset\"\n self.dataset = dataset\n self.batch_size = batch_size\n\n def __iter__(self):\n while True:\n yield [None] * self.batch_size\n\n\nclass DistributedBatchSampler(BatchSampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset.\n\n In such case, each process can pass a DistributedBatchSampler instance \n as a DataLoader sampler, and load a subset of the original dataset that \n is exclusive to it.\n\n .. 
note::\n Dataset is assumed to be of constant size.\n \n Args:\n dataset(paddle.io.Dataset): this could be a `paddle.io.Dataset` implement\n or other python object which implemented\n `__len__` for BatchSampler to get sample\n number of data source.\n batch_size(int): sample indice number in a mini-batch indices.\n num_replicas(int, optional): porcess number in distributed training.\n If :attr:`num_replicas` is None, :attr:`num_replicas` will be\n retrieved from :code:`paddle.fluid.dygraph.parallel.ParallenEnv`.\n Default None.\n rank(int, optional): the rank of the current process among :attr:`num_replicas`\n processes. If :attr:`rank` is None, :attr:`rank` is retrieved from\n :code:`paddle.fluid.dygraph.parallel.ParallenEnv`. Default None.\n shuffle(bool): whther to shuffle indices order before genrating\n batch indices. Default False.\n drop_last(bool): whether drop the last incomplete batch dataset size\n is not divisible by the batch size. Default False\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n\n from paddle.io import Dataset, DistributedBatchSampler\n\n # init with dataset\n class RandomDataset(Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n \n def __getitem__(self, idx):\n image = np.random.random([784]).astype('float32')\n label = np.random.randint(0, 9, (1, )).astype('int64')\n return image, label\n \n def __len__(self):\n return self.num_samples\n \n dataset = RandomDataset(100)\n sampler = DistributedBatchSampler(dataset, batch_size=64)\n\n for data in sampler:\n # do something\n break\n \"\"\"\n\n def __init__(self,\n dataset,\n batch_size,\n num_replicas=None,\n rank=None,\n shuffle=False,\n drop_last=False):\n self.dataset = dataset\n\n assert isinstance(batch_size, int) and batch_size > 0, \\\n \"batch_size should be a positive integer\"\n self.batch_size = batch_size\n assert isinstance(shuffle, bool), \\\n \"shuffle should be a boolean value\"\n self.shuffle = shuffle\n assert isinstance(drop_last, bool), \\\n \"drop_last should be a boolean number\"\n\n from paddle.fluid.dygraph.parallel import ParallelEnv\n\n if num_replicas is not None:\n assert isinstance(num_replicas, int) and num_replicas > 0, \\\n \"num_replicas should be a positive integer\"\n self.nranks = num_replicas\n else:\n self.nranks = ParallelEnv().nranks\n\n if rank is not None:\n assert isinstance(rank, int) and rank >= 0, \\\n \"rank should be a non-negative integer\"\n self.local_rank = rank\n else:\n self.local_rank = ParallelEnv().local_rank\n\n self.drop_last = drop_last\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.nranks))\n self.total_size = self.num_samples * self.nranks\n\n def __iter__(self):\n num_samples = len(self.dataset)\n indices = np.arange(num_samples).tolist()\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n if self.shuffle:\n np.random.RandomState(self.epoch).shuffle(indices)\n self.epoch += 1\n\n # subsample\n def _get_indices_by_batch_size(indices):\n subsampled_indices = []\n last_batch_size = self.total_size % (self.batch_size * self.nranks)\n assert last_batch_size % self.nranks == 0\n last_local_batch_size = last_batch_size // self.nranks\n\n for i in range(self.local_rank * self.batch_size,\n len(indices) - last_batch_size,\n self.batch_size * self.nranks):\n subsampled_indices.extend(indices[i:i + self.batch_size])\n\n indices = indices[len(indices) - last_batch_size:]\n subsampled_indices.extend(indices[\n self.local_rank * 
last_local_batch_size:(\n self.local_rank + 1) * last_local_batch_size])\n return subsampled_indices\n\n if self.nranks > 1:\n indices = _get_indices_by_batch_size(indices)\n\n assert len(indices) == self.num_samples\n _sample_iter = iter(indices)\n\n batch_indices = []\n for idx in _sample_iter:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n def __len__(self):\n num_samples = self.num_samples\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n\n def set_epoch(self, epoch):\n \"\"\"\n Sets the epoch number. When :attr:`shuffle=True`, this number is used\n as seeds of random numbers. By default, users may not set this, all\n replicas (workers) use a different random ordering for each epoch.\n If set same number at each epoch, this sampler will yield the same\n ordering at all epoches.\n\n Arguments:\n epoch (int): Epoch number.\n\n Examples:\n .. code-block:: python\n \n import numpy as np\n \n from paddle.io import Dataset, DistributedBatchSampler\n \n # init with dataset\n class RandomDataset(Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n \n def __getitem__(self, idx):\n image = np.random.random([784]).astype('float32')\n label = np.random.randint(0, 9, (1, )).astype('int64')\n return image, label\n \n def __len__(self):\n return self.num_samples\n \n dataset = RandomDataset(100)\n sampler = DistributedBatchSampler(dataset, batch_size=64)\n \n for epoch in range(10):\n sampler.set_epoch(epoch)\n \"\"\"\n self.epoch = epoch\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\ntest for sync bachnorm op.\nfor both FP64 and FP16 input.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport os\nimport six\nimport paddle\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\nfrom paddle.fluid import compiler\nfrom paddle.fluid import Program, program_guard\n\nfrom op_test import OpTest, _set_use_system_allocator\n\n_set_use_system_allocator(True)\n\n\ndef create_or_get_tensor(scope, var_name, var, place):\n \"\"\"Get tensor, if not found, create a new one.\"\"\"\n tensor = scope.var(var_name).get_tensor()\n if var is not None:\n assert isinstance(var, np.ndarray)\n tensor.set_recursive_sequence_lengths([])\n tensor.set(var, place)\n return tensor\n\n\nclass TestSyncBatchNormOpTraining(unittest.TestCase):\n \"\"\"sync_batch_norm op test.\"\"\"\n\n def setUp(self):\n \"\"\"Setup.\"\"\"\n #self.dtype = np.float32\n self.dtype = np.float64\n self.N = 32\n self.C = 16\n self.H = 64\n self.W = 32\n self.dshape = [self.N, self.C, self.H, self.W]\n self.atol = 1e-3\n\n def _build_program(self,\n place,\n layout,\n seed,\n sync_bn=False,\n only_forward=False):\n \"\"\"Build program.\"\"\"\n main = fluid.Program()\n startup = fluid.Program()\n 
main.random_seed = seed\n startup.random_seed = seed\n use_cudnn = self.dtype == np.float16\n with fluid.unique_name.guard():\n with fluid.program_guard(main, startup):\n data = fluid.layers.data(\n name='input',\n shape=self.dshape,\n dtype=self.dtype,\n append_batch_size=False)\n conv = fluid.layers.conv2d(\n input=data,\n num_filters=32,\n filter_size=1,\n param_attr=fluid.ParamAttr(name='conv2d_weight'),\n bias_attr=False,\n use_cudnn=use_cudnn)\n bn = fluid.layers.batch_norm(\n conv,\n param_attr=fluid.ParamAttr(name='bn_scale'),\n bias_attr=fluid.ParamAttr(name='bn_bias'),\n moving_mean_name='bn_moving_mean',\n moving_variance_name='bn_moving_variance',\n data_layout=layout,\n is_test=only_forward)\n bn = fluid.layers.cast(bn, 'float64')\n sigmoid = fluid.layers.sigmoid(bn)\n out = fluid.layers.reduce_sum(sigmoid)\n if not sync_bn:\n out = out / core.get_cuda_device_count()\n if not only_forward:\n sgd_opt = fluid.optimizer.SGD(learning_rate=0.0)\n sgd_opt.backward(out)\n return main, startup, [out, conv, bn]\n\n def _compare(self, place, layout, only_forward):\n \"\"\"Compare results.\"\"\"\n seed = 10\n os.environ['FLAGS_cudnn_deterministic'] = \"1\"\n scope = core.Scope()\n data = np.random.random(size=self.dshape).astype(self.dtype) * 4. - 2\n data = create_or_get_tensor(scope, \"input\",\n OpTest.np_dtype_to_fluid_dtype(data), place)\n\n # Single-GPU, N = 32 per GPU\n main, startup, outs = self._build_program(place, layout, seed, False,\n only_forward)\n exe = fluid.Executor(place)\n exe.run(startup)\n fetch_names = [v.name for v in outs] + [\n 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias'\n ]\n if not only_forward:\n others = [\n 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD',\n 'bn_bias@GRAD', 'batch_norm_0.tmp_2@GRAD', 'conv2d_0.tmp_0@GRAD'\n ]\n fetch_names += others\n bn_fetches = exe.run(program=main,\n feed={'input': data},\n fetch_list=fetch_names)\n\n #####################################################################\n # Multi-GPUs, self.N / core.get_cuda_device_count() per GPU\n assert core.get_cuda_device_count() > 1\n main, startup, outs = self._build_program(place, layout, seed, True,\n only_forward)\n exe = fluid.Executor(place)\n exe.run(startup)\n fetch_names = [v.name for v in outs] + [\n 'bn_moving_mean', 'bn_moving_variance', 'bn_scale', 'bn_bias'\n ]\n if not only_forward:\n others = [\n 'batch_norm_0.tmp_0', 'batch_norm_0.tmp_1', 'bn_scale@GRAD',\n 'bn_bias@GRAD', 'batch_norm_0.tmp_2@GRAD', 'conv2d_0.tmp_0@GRAD'\n ]\n fetch_names += others\n for nm in fetch_names:\n fv = fluid.framework._get_var(str(nm), program=main)\n fv.persistable = True\n build_strategy = fluid.BuildStrategy()\n build_strategy.sync_batch_norm = True\n build_strategy.enable_inplace = False\n build_strategy.memory_optimize = False\n comp_prog = compiler.CompiledProgram(main).with_data_parallel(\n outs[0].name if not only_forward else None,\n build_strategy=build_strategy)\n sync_bn_fetches = exe.run(program=comp_prog,\n feed={'input': data},\n fetch_list=fetch_names)\n\n for i in six.moves.xrange(1, len(sync_bn_fetches)):\n bn_val = bn_fetches[i]\n sync_bn_val = sync_bn_fetches[i]\n if sync_bn_val.shape != bn_val.shape:\n sync_bn_val = sync_bn_val[:bn_val.shape[0]]\n self.assertTrue(\n np.allclose(\n bn_val, sync_bn_val, atol=self.atol),\n \"Output (\" + fetch_names[i] + \") has diff. 
\\n\" + \"\\nBN \" +\n str(bn_val) + \"\\n\" + \"Sync BN \" + str(sync_bn_val))\n\n def test_train(self):\n \"\"\"Test training.\"\"\"\n if not core.is_compiled_with_cuda():\n return\n\n places = [core.CUDAPlace(0)]\n for place in places:\n for layout in [\"NCHW\", \"NHWC\"]:\n self._compare(place, layout, False)\n\n def test_infer(self):\n \"\"\"Test inference.\"\"\"\n if not core.is_compiled_with_cuda():\n return\n\n places = [core.CUDAPlace(0)]\n for place in places:\n for layout in [\"NCHW\", \"NHWC\"]:\n self._compare(place, layout, True)\n\n\nclass TestFP16SyncBatchNormOpTraining(TestSyncBatchNormOpTraining):\n \"\"\"sync_batch_norm op test for FP16 input.\"\"\"\n\n def setUp(self):\n \"\"\"Setup.\"\"\"\n self.dtype = np.float16\n self.N = 32\n self.C = 16\n self.H = 64\n self.W = 32\n self.dshape = [self.N, self.C, self.H, self.W]\n self.atol = 1e-2\n\n\nclass TestDygraphSyncBatchNormAPIError(unittest.TestCase):\n def test_errors(self):\n if not core.is_compiled_with_cuda():\n return\n\n with program_guard(Program(), Program()):\n my_sync_batch_norm = paddle.nn.SyncBatchNorm(10)\n x1 = fluid.create_lod_tensor(\n np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CUDAPlace(0))\n self.assertRaises(TypeError, my_sync_batch_norm, x1)\n\n # the input dtype of SyncBatchNorm must be float16 or float32 or float64\n # float16 only can be set on GPU place\n x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype=\"int32\")\n self.assertRaises(TypeError, my_sync_batch_norm, x2)\n\n\nclass TestConvertSyncBatchNorm(unittest.TestCase):\n def test_convert(self):\n if not core.is_compiled_with_cuda():\n return\n\n with program_guard(Program(), Program()):\n compare_model = paddle.nn.Sequential(\n paddle.nn.Conv2d(3, 5, 3), paddle.nn.BatchNorm2d(5))\n model = paddle.nn.Sequential(\n paddle.nn.Conv2d(3, 5, 3), paddle.nn.BatchNorm2d(5))\n model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n for idx, sublayer in enumerate(compare_model.sublayers()):\n if isinstance(sublayer, paddle.nn.BatchNorm2d):\n self.assertEqual(\n isinstance(model[idx], paddle.nn.SyncBatchNorm), True)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array" ], [ "numpy.fromfile", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.close" ], [ "numpy.arange", "numpy.array_equal" ], [ "numpy.concatenate", "numpy.zeros", "numpy.random.rand" ], [ "numpy.arange", "numpy.random.RandomState" ], [ "numpy.array", "numpy.random.random", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rishikksh20/TalkNet2-pytorch
[ "baa6bf90c054634185932ed4b17a6ce8866feaba" ]
[ "model.py" ]
[ "\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom utils import get_mask_from_lengths\nfrom embedding import GaussianEmbedding\nfrom quartznet import QuartzNet5x5, QuartzNet9x5\nfrom module import MaskedInstanceNorm1d, StyleResidual, Postnet\n\n\n\nclass GraphemeDuration(nn.Module):\n\n def __init__(self, idim, embed_dim=64, padding_idx=0):\n super(GraphemeDuration, self).__init__()\n self.embed = nn.Embedding(idim, embedding_dim=embed_dim, padding_idx=padding_idx)\n self.predictor = QuartzNet5x5(embed_dim, 32)\n self.projection = nn.Conv1d(32, 1, kernel_size=1)\n\n def forward(self, text, text_len, is_mask=True):\n x, x_len = self.embed(text).transpose(1, 2), text_len\n if is_mask:\n mask = get_mask_from_lengths(x_len)\n else:\n mask = None\n out = self.predictor(x, mask)\n out = self.projection(out).squeeze(1)\n\n return out\n\n @staticmethod\n def _metrics(true_durs, true_text_len, pred_durs):\n loss = F.mse_loss(pred_durs, (true_durs + 1).float().log(), reduction='none')\n mask = get_mask_from_lengths(true_text_len)\n loss *= mask.float()\n loss = loss.sum() / mask.sum()\n\n durs_pred = pred_durs.exp() - 1\n durs_pred[durs_pred < 0.0] = 0.0\n durs_pred = durs_pred.round().long()\n\n acc = ((true_durs == durs_pred) * mask).sum().float() / mask.sum() * 100\n acc_dist_1 = (((true_durs - durs_pred).abs() <= 1) * mask).sum().float() / mask.sum() * 100\n acc_dist_3 = (((true_durs - durs_pred).abs() <= 3) * mask).sum().float() / mask.sum() * 100\n\n return loss, acc, acc_dist_1, acc_dist_3\n\n\nclass PitchPredictor(nn.Module):\n\n def __init__(self, idim, embed_dim=64):\n super(PitchPredictor, self).__init__()\n self.embed = GaussianEmbedding(idim, embed_dim)\n self.predictor = QuartzNet5x5(embed_dim, 32)\n self.sil_proj = nn.Conv1d(32, 1, kernel_size=1)\n self.body_proj = nn.Conv1d(32, 1, kernel_size=1)\n\n def forward(self, text, durs, is_mask=True):\n x, x_len = self.embed(text, durs).transpose(1, 2), durs.sum(-1)\n if is_mask:\n mask = get_mask_from_lengths(x_len)\n else:\n mask = None\n out = self.predictor(x, mask)\n uv = self.sil_proj(out).squeeze(1)\n value = self.body_proj(out).squeeze(1)\n\n return uv, value\n\n def _metrics(self, true_f0, true_f0_mask, pred_f0_sil, pred_f0_body):\n sil_mask = true_f0 < 1e-5\n sil_gt = sil_mask.long()\n sil_loss = F.binary_cross_entropy_with_logits(input=pred_f0_sil, target=sil_gt.float(), reduction='none', )\n sil_loss *= true_f0_mask.type_as(sil_loss)\n sil_loss = sil_loss.sum() / true_f0_mask.sum()\n sil_acc = ((torch.sigmoid(pred_f0_sil) > 0.5).long() == sil_gt).float() # noqa\n sil_acc *= true_f0_mask.type_as(sil_acc)\n sil_acc = sil_acc.sum() / true_f0_mask.sum()\n\n body_mse = F.mse_loss(pred_f0_body, (true_f0 - self.f0_mean) / self.f0_std, reduction='none')\n body_mask = ~sil_mask\n body_mse *= body_mask.type_as(body_mse) # noqa\n body_mse = body_mse.sum() / body_mask.sum() # noqa\n body_mae = ((pred_f0_body * self.f0_std + self.f0_mean) - true_f0).abs()\n body_mae *= body_mask.type_as(body_mae) # noqa\n body_mae = body_mae.sum() / body_mask.sum() # noqa\n\n loss = sil_loss + body_mse\n\n return loss, sil_acc, body_mae\n\n\nclass TalkNet2(nn.Module):\n\n def __init__(self, idim, odim=80, embed_dim=256, postnet_layers = 0):\n super(TalkNet2, self).__init__()\n self.embed = GaussianEmbedding(idim, embed_dim)\n self.norm_f0 = MaskedInstanceNorm1d(1)\n self.res_f0 = StyleResidual(embed_dim, 1, kernel_size=3)\n\n self.generator = QuartzNet9x5(embed_dim, odim)\n\n # define postnet\n self.postnet = (\n None\n if postnet_layers 
== 0\n else Postnet(\n odim=odim,\n n_layers=postnet_layers,\n n_chans=256,\n n_filts=5,\n use_batch_norm=True,\n dropout_rate=0.5,\n )\n )\n\n\n def forward(self, text, durs, f0, is_mask=True):\n x, x_len = self.embed(text, durs).transpose(1, 2), durs.sum(-1)\n f0, f0_mask = f0.clone(), f0 > 0.0\n f0 = self.norm_f0(f0.unsqueeze(1), f0_mask)\n f0[~f0_mask.unsqueeze(1)] = 0.0\n x = self.res_f0(x, f0)\n if is_mask:\n mask = get_mask_from_lengths(x_len)\n else:\n mask = None\n\n before_outs = self.generator(x, mask)\n if self.postnet is None:\n return before_outs, None\n else:\n after_outs = before_outs + self.postnet(\n before_outs\n )\n return before_outs, after_outs\n\n\n\n @staticmethod\n def _metrics(true_mel, true_mel_len, pred_mel):\n loss = F.mse_loss(pred_mel, true_mel, reduction='none').mean(dim=-2)\n mask = get_mask_from_lengths(true_mel_len)\n loss *= mask.float()\n loss = loss.sum() / mask.sum()\n return loss\n" ]
[ [ "torch.nn.functional.mse_loss", "torch.nn.Embedding", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kmaasrud/vmc-fys4411
[ "e96e2f6b1403118ee48ad5b5ff38582310ba4d2a" ]
[ "vmc/python/SGD_alphas.py" ]
[ "import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\n\nimport numpy as np\n\n\n#location for files and plots\n\nPLOT_DIR = \"../plots/\"\nDATA_DIR = \"../data\"\nFILENAME_PLOT = 'SGD_alphas'\nPLOT_DIR = \"./\"\n\n\n#figure size and resolution\nfig = plt.figure()\nplt.style.use(\"seaborn\")\n#colour, linewith, linestyle\n#boundaries\n#plt.xlim(min(x)*1.1, max(x)*1.1)\nplt.ylim(0.15, 0.95)\n#legend\nplt.legend(loc = 'best', prop = {'size':14}, frameon = False)\nplt.rc('font', size=10)\nplt.rc('axes', titlesize=12)\nplt.xlabel(\"Iterations\")\nplt.ylabel(r\"$\\alpha$\")\nplt.title(r\"SGD: Start $\\alpha$\")\n\n\n\n\nstart_alphas = [\"0.2\",\"0.3\", \"0.4\", \"0.5\", \"0.6\", \"0.7\", \"0.8\", \"0.9\"]\n\nfor start_alpha in start_alphas:\n dim = 3\n\n DATA_DIR = f\"../data/sgd_noninteracting/start-alpha/start-alpha_{start_alpha}.csv\"\n\n df = pd.read_csv(DATA_DIR)\n\n energy = df[\"Energy\"]\n \n alpha = df[\"Alpha\"]\n x = np.linspace(0, len(alpha), len(alpha))\n plt.plot(x, alpha, label = start_alpha, linewidth = 2)\n\nplt.legend()\nplt.draw()\nplt.show()\n#plt.save_fig(PLOT_DIR + \"SGD_start-alpha.png\")\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.rc", "matplotlib.pyplot.draw", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
idies/RMHDConverter
[ "24ff6e5ae2767b3aac4d24e9d6f1a116ef002eba" ]
[ "py/plot_slices.py" ]
[ "########################################################################\n#\n# Copyright 2015 Johns Hopkins University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Contact: [email protected]\n# Website: http://turbulence.pha.jhu.edu/\n#\n########################################################################\n\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport subprocess\n\ndef slice_plotter(n0 = 4, n1 = 8):\n ax = plt.figure(figsize = (8, 8)).add_axes([.0, .0, 1., 1.])\n ax.set_axis_off()\n for i in range(n0, n1, 4):\n print('plotting frame ', i)\n data = np.load(\n '/export/scratch0/clalescu/RMHD/2D_slices/data_rs2_u_t{0:0>4x}.npy'.format(i))\n for j in range(3):\n for k in range(2):\n ax.cla()\n ax.set_axis_off()\n ax.imshow(data[j, :, :, k])\n plt.gcf().savefig(\n 'figs/u{0}_{1}_t{2:0>4}.png'.format(j, k, i),\n dpi = max(data[j, :, :, k].shape)//8,\n format = 'png')\n return None\n\n\ngenerate_png = True\ngenerate_gif = False\n\nif generate_png:\n slice_plotter(n0 = 128, n1 = 0x280)\n\nif generate_gif:\n for j in range(3):\n for k in range(2):\n subprocess.call(['convert',\n 'figs/u{0}_{1}_t*.png'.format(j, k),\n 'figs/u{0}_{1}.gif'.format(j, k)])\n\n\n\n" ]
[ [ "matplotlib.pyplot.gcf", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jessica-dl/2XB3-ML-Training
[ "aa82d64c7b8b35eb79060a7bd7d22d09323b1c06" ]
[ "trainer/dataset.py" ]
[ "from tensorflow.python.lib.io import file_io\nimport h5py\nimport numpy as np\n\n\nclass Dataset:\n\n def __init__(self, path, local):\n \"\"\"\n Initialize the dataset\n :param path: Path to the hdf5 dataset file\n :param local: True if the path is to a local file, False otherwise\n \"\"\"\n self.path = path\n self.local = local\n\n if not local:\n with file_io.FileIO(path, mode='rb') as dataset_f:\n with open('dataset.h5', 'wb') as local_dataset:\n local_dataset.write(dataset_f.read())\n path = 'dataset.h5'\n\n hf = h5py.File(path, 'r')\n self.x = hf.get('x')[:]\n self.y = hf.get('y')[:]\n hf.close()\n\n self.x = (self.x.astype(np.float32) - 127.5) / 127.5\n\n # self.__make_overfit()\n\n print('Loaded dataset')\n print('X:', self.x.shape)\n print('Y:', self.y.shape)\n\n def __make_overfit(self):\n \"\"\"\n Modify dataset for overfitting by only including 3 samples from each class\n :return:\n \"\"\"\n minimal_x = self.x[:1]\n minimal_y = self.y[:1]\n\n per_class = 3\n\n i = 1\n found = np.array([0 for _ in range(self.y.shape[-1])])\n found[np.argmax(minimal_y[0])] += 1\n\n while sum(found) < self.y.shape[-1] * per_class:\n for c in range(self.y.shape[-1]):\n if found[np.argmax(self.y[i])] < per_class:\n minimal_x = np.concatenate([minimal_x, self.x[i:i+1]])\n minimal_y = np.concatenate([minimal_y, self.y[i:i+1]])\n found[np.argmax(self.y[i])] += 1\n i += 1\n\n self.x = minimal_x\n self.y = minimal_y\n\n\n\n\n\n" ]
[ [ "numpy.concatenate", "tensorflow.python.lib.io.file_io.FileIO", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
SigfriedHache/euler-project
[ "7c38deee65a793a441830a6d0916da61e86b8cf7" ]
[ "Solutions/Q0005_Smallest_Evenly_Divisible_Number.py" ]
[ "\"\"\"\nhttps://projecteuler.net/problem=5\n2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\"\"\"\nfrom numpy import prod\n\nfrom Common.Logger import get_logger, init_logger\nfrom Common.Numbers import prime_factorization\nfrom Common.Utilities import performance_run\n\nPERFORMANCE_RUNS = 100_000\nUPPER_BOUND = 20\n\n\ndef fastest(ceiling: int = UPPER_BOUND) -> int:\n \"\"\"\n This algorithm finds the prime factorization of every number between 1 and ceiling, maintains the greatest counts of\n each prime factor found, and then multiplies them out at the end\n :param ceiling: The upper bound (inclusive) of the factor for which to find the evenly-divided quotient\n :return: The quotient evenly-divisible by every number from 1 to ceiling\n \"\"\"\n return prime_tally(ceiling)\n\n\ndef prime_tally(ceiling: int = UPPER_BOUND) -> int:\n \"\"\"\n This algorithm finds the prime factorization of every number between 1 and ceiling, maintains the greatest counts of\n each prime factor found, and then multiplies them out at the end\n --> benchmark: 104 ms/run\n :param ceiling: The upper bound (inclusive) of the factor for which to find the evenly-divided quotient\n :return: The quotient evenly-divisible by every number from 1 to ceiling\n \"\"\"\n factorization_merge = []\n\n for number in range(1, ceiling+1):\n number_factorization = prime_factorization(number)\n for factor in set(number_factorization):\n factor_count_difference = number_factorization.count(factor) - factorization_merge.count(factor)\n if factor_count_difference > 0:\n factorization_merge += [factor] * factor_count_difference\n\n return prod(factorization_merge)\n\n\nif __name__ == \"__main__\":\n # Log stuff\n init_logger()\n logger = get_logger()\n\n # Performance run\n # performance_run(prime_tally, iterations=PERFORMANCE_RUNS)()\n print(fastest(UPPER_BOUND))\n" ]
[ [ "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
garrettmflynn/sortingview
[ "0bb3df40d5d031ec651c4821f928787bbee71fbb" ]
[ "sortingview/helpers/prepare_snippets_h5_old.py" ]
[ "from typing import Dict, Union\n\nimport os\nimport hither2 as hi\nimport kachery_client as kc\nimport numpy as np\nimport spikeextractors as se\nfrom sortingview.extractors import LabboxEphysSortingExtractor, LabboxEphysRecordingExtractor\nfrom .SubsampledSortingExtractor import SubsampledSortingExtractor\nfrom .find_unit_peak_channels import find_unit_peak_channels\nfrom .find_unit_neighborhoods import find_unit_neighborhoods\nfrom .get_unit_waveforms import get_unit_waveforms\n\[email protected](\n 'prepare_snippets_h5', '0.2.7',\n image=hi.RemoteDockerImage('docker://magland/labbox-ephys-processing:0.3.19'),\n modules=['sortingview']\n)\ndef prepare_snippets_h5(\n recording_object,\n sorting_object,\n start_frame=None,\n end_frame=None,\n max_events_per_unit=None,\n max_neighborhood_size=15,\n snippet_len=(50, 80)\n):\n if recording_object['recording_format'] == 'snippets1':\n return recording_object['data']['snippets_h5_uri']\n\n recording = LabboxEphysRecordingExtractor(recording_object)\n sorting = LabboxEphysSortingExtractor(sorting_object)\n\n with kc.TemporaryDirectory() as tmpdir:\n save_path = tmpdir + '/snippets.h5'\n prepare_snippets_h5_from_extractors(\n recording=recording,\n sorting=sorting,\n output_h5_path=save_path,\n start_frame=start_frame,\n end_frame=end_frame,\n max_events_per_unit=max_events_per_unit,\n max_neighborhood_size=max_neighborhood_size,\n snippet_len=snippet_len\n )\n return kc.store_file(save_path)\n\ndef prepare_snippets_h5_from_extractors(\n recording: se.RecordingExtractor,\n sorting: se.SortingExtractor,\n output_h5_path: str,\n start_frame,\n end_frame,\n max_neighborhood_size: int,\n max_events_per_unit: Union[None, int]=None,\n snippet_len=(50, 80)\n):\n import h5py\n if start_frame is not None:\n recording = se.SubRecordingExtractor(parent_recording=recording, start_frame=start_frame, end_frame=end_frame)\n sorting = se.SubSortingExtractor(parent_sorting=sorting, start_frame=start_frame, end_frame=end_frame)\n\n unit_ids = sorting.get_unit_ids()\n samplerate = recording.get_sampling_frequency()\n \n # Use this optimized function rather than spiketoolkit's version\n # for efficiency with long recordings and/or many channels, units or spikes\n # we should submit this to the spiketoolkit project as a PR\n print('Subsampling sorting')\n if max_events_per_unit is not None:\n sorting_subsampled = SubsampledSortingExtractor(parent_sorting=sorting, max_events_per_unit=max_events_per_unit, method='random')\n else:\n sorting_subsampled = sorting\n print('Finding unit peak channels')\n peak_channels_by_unit = find_unit_peak_channels(recording=recording, sorting=sorting, unit_ids=unit_ids)\n print('Finding unit neighborhoods')\n channel_ids_by_unit = find_unit_neighborhoods(recording=recording, peak_channels_by_unit=peak_channels_by_unit, max_neighborhood_size=max_neighborhood_size)\n print(f'Getting unit waveforms for {len(unit_ids)} units')\n unit_waveforms = get_unit_waveforms(\n recording=recording,\n sorting=sorting_subsampled,\n unit_ids=unit_ids,\n channel_ids_by_unit=channel_ids_by_unit,\n snippet_len=snippet_len\n )\n # unit_waveforms = st.postprocessing.get_unit_waveforms(\n # recording=recording,\n # sorting=sorting,\n # unit_ids=unit_ids,\n # ms_before=1,\n # ms_after=1.5,\n # max_spikes_per_unit=500\n # )\n\n save_path = output_h5_path\n with h5py.File(save_path, 'w') as f:\n f.create_dataset('unit_ids', data=np.array(unit_ids).astype(np.int32))\n f.create_dataset('sampling_frequency', data=np.array([samplerate]).astype(np.float64))\n 
f.create_dataset('channel_ids', data=np.array(recording.get_channel_ids()))\n f.create_dataset('num_frames', data=np.array([recording.get_num_frames()]).astype(np.int32))\n channel_locations = recording.get_channel_locations()\n f.create_dataset(f'channel_locations', data=np.array(channel_locations))\n for ii, unit_id in enumerate(unit_ids):\n x = sorting.get_unit_spike_train(unit_id=unit_id)\n f.create_dataset(f'unit_spike_trains/{unit_id}', data=np.array(x).astype(np.float64))\n f.create_dataset(f'unit_waveforms/{unit_id}/waveforms', data=unit_waveforms[ii].astype(np.float32))\n f.create_dataset(f'unit_waveforms/{unit_id}/channel_ids', data=np.array(channel_ids_by_unit[int(unit_id)]).astype(int))\n f.create_dataset(f'unit_waveforms/{unit_id}/spike_train', data=np.array(sorting_subsampled.get_unit_spike_train(unit_id=unit_id)).astype(np.float64))\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YimiaoSun/network-slimming
[ "8ab3e6932fc3febd893faf83e23bee1aeb28be13" ]
[ "models/preresnet.py" ]
[ "# 本resnet结构选自:https://arxiv.org/pdf/1603.05027.pdf\n# 是原作者在resnet上的更新版本(实际用的没有原始版本广,认可度有质疑)\n\nfrom __future__ import absolute_import\nimport math\nimport torch.nn as nn\nfrom .channel_selection import channel_selection\n\n\n__all__ = ['resnet']\n\n\"\"\"\npreactivation resnet with bottleneck design.\n\"\"\"\n\nclass Bottleneck(nn.Module):\n expansion = 4\n # self.inplanes = 16, 64, 128 (planes * 4)\n # planes = 16, 32, 64\n # stride=1, 2, 2\n def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.select = channel_selection(inplanes)\n self.conv1 = nn.Conv2d(cfg[0], cfg[1], kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(cfg[1])\n self.conv2 = nn.Conv2d(cfg[1], cfg[2], kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(cfg[2])\n # 从conv3看出,planes*4应该=inplances\n self.conv3 = nn.Conv2d(cfg[2], planes * 4, kernel_size=1, bias=False)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.bn1(x)\n out = self.select(out)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out = self.bn3(out)\n out = self.relu(out)\n out = self.conv3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n return out\n\nclass resnet(nn.Module):\n def __init__(self, depth=164, dataset='cifar10', cfg=None):\n # depth=164 is too big to me. Let me change it to 20.\n super(resnet, self).__init__()\n assert (depth - 2) % 9 == 0, 'depth should be 9n+2'\n\n n = (depth - 2) // 9 # n = blocks = 2\n block = Bottleneck\n\n # cfg目的是针对prune后生成相应channel的新的network(cfg中的个数来源于bn.weight.data.gt(thre),定义于resprune.py)\n if cfg is None:\n # Construct config variable.\n cfg = [[16, 16, 16], [64, 16, 16]*(n-1), [64, 32, 32], [128, 32, 32]*(n-1), [128, 64, 64], [256, 64, 64]*(n-1), [256]]\n # 此行:拆掉最内层维度,使cfg变成一维list\n cfg = [item for sub_list in cfg for item in sub_list]\n\n self.inplanes = 16\n\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,\n bias=False)\n self.layer1 = self._make_layer(block, 16, n, cfg=cfg[0:3*n])\n self.layer2 = self._make_layer(block, 32, n, cfg=cfg[3*n:6*n], stride=2)\n self.layer3 = self._make_layer(block, 64, n, cfg=cfg[6*n:9*n], stride=2)\n self.bn = nn.BatchNorm2d(64 * block.expansion)\n self.select = channel_selection(64 * block.expansion) # select必须接在batchnorm后面\n self.relu = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(8)\n\n if dataset == 'cifar10':\n self.fc = nn.Linear(cfg[-1], 10)\n elif dataset == 'cifar100':\n self.fc = nn.Linear(cfg[-1], 100)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(0.5)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, cfg, stride=1):\n \"\"\"\n 做resnet的基本组件(层,layer)\n :param block: 原始resnet有两种。Basic和Bottleneck。区别在于Basic适用于短的resnet(34层以下);bottleneck是50层以上的。\n Basic是2个3x3 conv,Bottleneck是1x1+3x3+1x1。不过它们彼此channel是兼容的,所以其实可以混用。\n 这里为了简化,全部采用Bottleneck。\n :param planes: 根据3层(layer),具体分别为: 16, 32, 64.\n :param blocks: 每层中多少个block,我用的是depth 20,所以blocks个数是2((depth - 2) // 9 = 2)\n :param cfg: 每次batchnorm需要保留的channel个数\n :param stride: layer1=1,layer2/3=2\n :return:\n \"\"\"\n downsample = None\n # self.inplanes = 16, 64, 128 (planes * 4)\n # planes = 16, 32, 64\n # block.expansion = 4\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, cfg[0:3], stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, cfg[3*i: 3*(i+1)]))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n\n x = self.layer1(x) # 32x32\n x = self.layer2(x) # 16x16\n x = self.layer3(x) # 8x8\n x = self.bn(x)\n x = self.select(x)\n x = self.relu(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amroid/ibis
[ "9df27958f305a901728b540200bd8fa2820d4625" ]
[ "ibis/expr/tests/test_value_exprs.py" ]
[ "import functools\nimport operator\nimport os\nfrom collections import OrderedDict\nfrom datetime import date, datetime, time\nfrom operator import methodcaller\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport toolz\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.analysis as L\nimport ibis.expr.api as api\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.rules as rlz\nimport ibis.expr.types as ir\nfrom ibis import literal\nfrom ibis.common.exceptions import IbisTypeError\nfrom ibis.expr.signature import Argument as Arg\nfrom ibis.tests.util import assert_equal\n\n\ndef test_null():\n expr = ibis.literal(None)\n assert isinstance(expr, ir.NullScalar)\n assert isinstance(expr.op(), ops.NullLiteral)\n assert expr._arg.value is None\n\n expr2 = ibis.null()\n assert_equal(expr, expr2)\n\n assert expr is expr2\n assert expr.type() is dt.null\n assert expr2.type() is dt.null\n\n\[email protected](\n raises=AssertionError,\n reason='UTF-8 support in Impala non-existent at the moment?',\n)\ndef test_unicode():\n assert False\n\n\[email protected](\n ['value', 'expected_type'],\n [\n (5, 'int8'),\n (127, 'int8'),\n (128, 'int16'),\n (32767, 'int16'),\n (32768, 'int32'),\n (2147483647, 'int32'),\n (2147483648, 'int64'),\n (-5, 'int8'),\n (-128, 'int8'),\n (-129, 'int16'),\n (-32769, 'int32'),\n (-2147483649, 'int64'),\n (1.5, 'double'),\n ('foo', 'string'),\n ([1, 2, 3], 'array<int8>'),\n ],\n)\ndef test_literal_with_implicit_type(value, expected_type):\n expr = ibis.literal(value)\n\n assert isinstance(expr, ir.ScalarExpr)\n assert expr.type() == dt.dtype(expected_type)\n\n assert isinstance(expr.op(), ops.Literal)\n assert expr.op().value is value\n\n\npointA = (1, 2)\npointB = (-3, 4)\npointC = (5, 19)\nlineAB = [pointA, pointB]\nlineBC = [pointB, pointC]\nlineCA = [pointC, pointA]\npolygon1 = [lineAB, lineBC, lineCA]\npolygon2 = [lineAB, lineBC, lineCA]\nmultipolygon1 = [polygon1, polygon2]\n\n\[email protected](\n ['value', 'expected_type'],\n [\n (5, 'int16'),\n (127, 'double'),\n (128, 'int64'),\n (32767, 'double'),\n (32768, 'float'),\n (2147483647, 'int64'),\n (-5, 'int16'),\n (-128, 'int32'),\n (-129, 'int64'),\n (-32769, 'float'),\n (-2147483649, 'double'),\n (1.5, 'double'),\n ('foo', 'string'),\n (list(pointA), 'point'),\n (tuple(pointA), 'point'),\n (list(lineAB), 'linestring'),\n (tuple(lineAB), 'linestring'),\n (list(polygon1), 'polygon'),\n (tuple(polygon1), 'polygon'),\n (list(multipolygon1), 'multipolygon'),\n (tuple(multipolygon1), 'multipolygon'),\n ],\n)\ndef test_literal_with_explicit_type(value, expected_type):\n expr = ibis.literal(value, type=expected_type)\n assert expr.type().equals(dt.validate_type(expected_type))\n\n\[email protected](\n ['value', 'expected_type', 'expected_class'],\n [\n (list('abc'), 'array<string>', ir.ArrayScalar),\n ([1, 2, 3], 'array<int8>', ir.ArrayScalar),\n ({'a': 1, 'b': 2, 'c': 3}, 'map<string, int8>', ir.MapScalar),\n ({1: 2, 3: 4, 5: 6}, 'map<int8, int8>', ir.MapScalar),\n (\n {'a': [1.0, 2.0], 'b': [], 'c': [3.0]},\n 'map<string, array<double>>',\n ir.MapScalar,\n ),\n (\n OrderedDict(\n [\n ('a', 1),\n ('b', list('abc')),\n ('c', OrderedDict([('foo', [1.0, 2.0])])),\n ]\n ),\n 'struct<a: int8, b: array<string>, c: struct<foo: array<double>>>',\n ir.StructScalar,\n ),\n ],\n)\ndef test_literal_complex_types(value, expected_type, expected_class):\n expr = ibis.literal(value)\n expr_type = expr.type()\n assert expr_type.equals(dt.validate_type(expected_type))\n 
assert isinstance(expr, expected_class)\n assert isinstance(expr.op(), ops.Literal)\n assert expr.op().value is value\n\n\ndef test_struct_operations():\n value = OrderedDict(\n [\n ('a', 1),\n ('b', list('abc')),\n ('c', OrderedDict([('foo', [1.0, 2.0])])),\n ]\n )\n expr = ibis.literal(value)\n assert isinstance(expr, ir.StructValue)\n assert isinstance(expr.b, ir.ArrayValue)\n assert isinstance(expr.a.op(), ops.StructField)\n\n\ndef test_simple_map_operations():\n value = {'a': [1.0, 2.0], 'b': [], 'c': [3.0]}\n value2 = {'a': [1.0, 2.0], 'c': [3.0], 'd': [4.0, 5.0]}\n expr = ibis.literal(value)\n expr2 = ibis.literal(value2)\n assert isinstance(expr, ir.MapValue)\n assert isinstance(expr.length().op(), ops.MapLength)\n assert isinstance((expr + expr2).op(), ops.MapConcat)\n assert isinstance((expr2 + expr).op(), ops.MapConcat)\n\n default = ibis.literal([0.0])\n assert isinstance(expr.get('d', default).op(), ops.MapValueOrDefaultForKey)\n\n # test for an invalid default type, nulls are ok\n with pytest.raises(IbisTypeError):\n expr.get('d', ibis.literal('foo'))\n\n assert isinstance(\n expr.get('d', ibis.literal(None)).op(), ops.MapValueOrDefaultForKey\n )\n\n assert isinstance(expr['b'].op(), ops.MapValueForKey)\n assert isinstance(expr.keys().op(), ops.MapKeys)\n assert isinstance(expr.values().op(), ops.MapValues)\n\n\[email protected](\n ['value', 'expected_type'],\n [\n (32767, 'int8'),\n (32768, 'int16'),\n (2147483647, 'int16'),\n (2147483648, 'int32'),\n ('foo', 'double'),\n ],\n)\ndef test_literal_with_non_coercible_type(value, expected_type):\n expected_msg = 'Value .* cannot be safely coerced to .*'\n with pytest.raises(TypeError, match=expected_msg):\n ibis.literal(value, type=expected_type)\n\n\ndef test_non_inferrable_literal():\n expected_msg = (\n 'The datatype of value .* cannot be inferred, try '\n 'passing it explicitly with the `type` keyword.'\n )\n\n value = tuple(pointA)\n\n with pytest.raises(TypeError, match=expected_msg):\n ibis.literal(value)\n\n point = ibis.literal(value, type='point')\n assert point.type() == dt.point\n\n\ndef test_literal_list():\n what = [1, 2, 1000]\n expr = api.literal(what)\n\n assert isinstance(expr, ir.ArrayScalar)\n\n # it works!\n repr(expr)\n\n\ndef test_literal_array():\n what = []\n expr = api.literal(what)\n assert isinstance(expr, ir.ArrayValue)\n assert expr.type().equals(dt.Array(dt.null))\n\n\ndef test_mixed_arity(table):\n what = [\"bar\", table.g, \"foo\"]\n expr = api.as_value_expr(what)\n\n values = expr.op().values\n assert isinstance(values[1], ir.StringColumn)\n\n # it works!\n repr(expr)\n\n\[email protected]('container', [list, tuple, set, frozenset])\ndef test_isin_notin_list(table, container):\n values = container([1, 2, 3, 4])\n\n expr = table.a.isin(values)\n not_expr = table.a.notin(values)\n\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.Contains)\n\n assert isinstance(not_expr, ir.BooleanColumn)\n assert isinstance(not_expr.op(), ops.NotContains)\n\n\ndef test_value_counts(table, string_col):\n bool_clause = table[string_col].notin(['1', '4', '7'])\n expr = table[bool_clause][string_col].value_counts()\n assert isinstance(expr, ir.TableExpr)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_not_comparable():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_array_expr():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_invalid_cases():\n # For example, array expression in a 
list of values, where the inner\n # array values originate from some other table\n assert False\n\n\ndef test_isin_notin_scalars():\n a, b, c = [ibis.literal(x) for x in [1, 1, 2]]\n\n result = a.isin([1, 2])\n assert isinstance(result, ir.BooleanScalar)\n\n result = a.notin([b, c, 3])\n assert isinstance(result, ir.BooleanScalar)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isin_null():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_negate_isin():\n # Should yield a NotContains\n assert False\n\n\ndef test_scalar_isin_list_with_array(table):\n val = ibis.literal(2)\n\n options = [table.a, table.b, table.c]\n\n expr = val.isin(options)\n assert isinstance(expr, ir.BooleanColumn)\n\n not_expr = val.notin(options)\n assert isinstance(not_expr, ir.BooleanColumn)\n\n\ndef test_distinct_basic(functional_alltypes):\n expr = functional_alltypes.distinct()\n assert isinstance(expr.op(), ops.Distinct)\n assert isinstance(expr, ir.TableExpr)\n assert expr.op().table is functional_alltypes\n\n expr = functional_alltypes.string_col.distinct()\n assert isinstance(expr.op(), ops.DistinctColumn)\n\n assert isinstance(expr, ir.StringColumn)\n\n\[email protected](reason='NYT')\ndef test_distinct_array_interactions(functional_alltypes):\n # array cardinalities / shapes are likely to be different.\n a = functional_alltypes.int_col.distinct()\n b = functional_alltypes.bigint_col\n\n with pytest.raises(ir.RelationError):\n a + b\n\n\[email protected]('where', [lambda t: None, lambda t: t.int_col != 0])\ndef test_distinct_count(functional_alltypes, where):\n result = functional_alltypes.string_col.distinct().count(\n where=where(functional_alltypes)\n )\n assert isinstance(result.op(), ops.CountDistinct)\n\n expected = functional_alltypes.string_col.nunique(\n where=where(functional_alltypes)\n ).name('count')\n assert result.equals(expected)\n\n\ndef test_distinct_unnamed_array_expr():\n table = ibis.table(\n [('year', 'int32'), ('month', 'int32'), ('day', 'int32')], 'foo'\n )\n\n # it works!\n expr = (\n ibis.literal('-')\n .join(\n [\n table.year.cast('string'),\n table.month.cast('string'),\n table.day.cast('string'),\n ]\n )\n .distinct()\n )\n repr(expr)\n\n\ndef test_distinct_count_numeric_types(functional_alltypes):\n metric = (\n functional_alltypes.bigint_col.distinct()\n .count()\n .name('unique_bigints')\n )\n functional_alltypes.group_by('string_col').aggregate(metric)\n\n\ndef test_nunique(functional_alltypes):\n expr = functional_alltypes.string_col.nunique()\n assert isinstance(expr.op(), ops.CountDistinct)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_project_with_distinct():\n assert False\n\n\ndef test_isnull(table):\n expr = table['g'].isnull()\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.IsNull)\n\n expr = ibis.literal('foo').isnull()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.IsNull)\n\n\ndef test_notnull(table):\n expr = table['g'].notnull()\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.NotNull)\n\n expr = ibis.literal('foo').notnull()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.NotNull)\n\n\[email protected]('column', ['e', 'f'], ids=['float', 'double'])\ndef test_isnan_isinf_column(table, column):\n expr = table[column].isnan()\n assert isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.IsNan)\n\n expr = table[column].isinf()\n assert 
isinstance(expr, ir.BooleanColumn)\n assert isinstance(expr.op(), ops.IsInf)\n\n\[email protected]('value', [1.3, np.nan, np.inf, -np.inf])\ndef test_isnan_isinf_scalar(value):\n expr = ibis.literal(value).isnan()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.IsNan)\n\n expr = ibis.literal(value).isinf()\n assert isinstance(expr, ir.BooleanScalar)\n assert isinstance(expr.op(), ops.IsInf)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_null_literal():\n assert False\n\n\[email protected](\n ['column', 'operation'],\n [\n ('d', 'cumsum'),\n ('d', 'cummean'),\n ('d', 'cummin'),\n ('d', 'cummax'),\n ('h', 'cumany'),\n ('h', 'cumall'),\n ],\n)\ndef test_cumulative_yield_array_types(table, column, operation):\n expr = getattr(getattr(table, column), operation)()\n assert isinstance(expr, ir.ColumnExpr)\n\n\[email protected](params=['ln', 'log', 'log2', 'log10'])\ndef log(request):\n return operator.methodcaller(request.param)\n\n\[email protected]('column', list('abcdef'))\ndef test_log(table, log, column):\n result = log(table[column])\n assert isinstance(result, ir.FloatingColumn)\n\n # is this what we want?\n # assert result.get_name() == c\n\n\ndef test_log_string(table):\n g = table.g\n\n with pytest.raises(IbisTypeError):\n ops.Log(g, None).to_expr()\n\n\[email protected]('klass', [ops.Ln, ops.Log2, ops.Log10])\ndef test_log_variants_string(table, klass):\n g = table.g\n\n with pytest.raises(IbisTypeError):\n klass(g).to_expr()\n\n\ndef test_log_boolean(table, log):\n # boolean not implemented for these\n h = table['h']\n with pytest.raises(IbisTypeError):\n log(h)\n\n\ndef test_log_literal(log):\n assert isinstance(log(ibis.literal(5)), ir.FloatingScalar)\n assert isinstance(log(ibis.literal(5.5)), ir.FloatingScalar)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_exp():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_sqrt():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_trig_functions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_round():\n assert False\n\n\ndef test_cast_same_type_noop(table):\n c = table.g\n assert c.cast('string') is c\n\n i = ibis.literal(5)\n assert i.cast('int8') is i\n\n\[email protected]('type', ['int8', 'int32', 'double', 'float'])\ndef test_string_to_number(table, type):\n casted = table.g.cast(type)\n casted_literal = ibis.literal('5').cast(type).name('bar')\n\n assert isinstance(casted, ir.ColumnExpr)\n assert casted.type() == dt.dtype(type)\n\n assert isinstance(casted_literal, ir.ScalarExpr)\n assert casted_literal.type() == dt.dtype(type)\n assert casted_literal.get_name() == 'bar'\n\n\[email protected]('col', list('abcdefh'))\ndef test_number_to_string_column(table, col):\n casted = table[col].cast('string')\n assert isinstance(casted, ir.StringColumn)\n\n\ndef test_number_to_string_scalar():\n casted_literal = ibis.literal(5).cast('string').name('bar')\n assert isinstance(casted_literal, ir.StringScalar)\n assert casted_literal.get_name() == 'bar'\n\n\ndef test_casted_exprs_are_named(table):\n expr = table.f.cast('string')\n assert expr.get_name() == 'cast(f, string)'\n\n # it works! 
per GH #396\n expr.value_counts()\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_nonzero():\n assert False\n\n\[email protected]('col', list('abcdefh'))\ndef test_negate(table, col):\n c = table[col]\n result = -c\n assert isinstance(result, type(c))\n assert isinstance(result.op(), ops.Negate)\n\n\ndef test_negate_boolean_scalar():\n result = -ibis.literal(False)\n assert isinstance(result, ir.BooleanScalar)\n assert isinstance(result.op(), ops.Negate)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_isnull_notnull():\n assert False\n\n\[email protected]('column', ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])\[email protected]('how', [None, 'first', 'last', 'heavy'])\[email protected]('condition_fn', [lambda t: None, lambda t: t.a > 8])\ndef test_arbitrary(table, column, how, condition_fn):\n col = table[column]\n where = condition_fn(table)\n expr = col.arbitrary(how=how, where=where)\n assert expr.type() == col.type()\n assert isinstance(expr, ir.ScalarExpr)\n assert L.is_reduction(expr)\n\n\[email protected](\n ['column', 'operation'],\n [\n ('h', lambda column: column.any()),\n ('h', lambda column: column.notany()),\n ('h', lambda column: column.all()),\n ('c', lambda column: (column == 0).any()),\n ('c', lambda column: (column == 0).all()),\n ],\n)\ndef test_any_all_notany(table, column, operation):\n expr = operation(table[column])\n assert isinstance(expr, ir.BooleanScalar)\n assert L.is_reduction(expr)\n\n\[email protected](\n 'operation',\n [\n operator.lt,\n operator.gt,\n operator.ge,\n operator.le,\n operator.eq,\n operator.ne,\n ],\n)\[email protected]('column', list('abcdef'))\[email protected]('case', [2, 2 ** 9, 2 ** 17, 2 ** 33, 1.5])\ndef test_numbers_compare_numeric_literal(table, operation, column, case):\n ex_op_class = {\n operator.eq: ops.Equals,\n operator.ne: ops.NotEquals,\n operator.le: ops.LessEqual,\n operator.lt: ops.Less,\n operator.ge: ops.GreaterEqual,\n operator.gt: ops.Greater,\n }\n\n col = table[column]\n\n result = operation(col, case)\n assert isinstance(result, ir.BooleanColumn)\n assert isinstance(result.op(), ex_op_class[operation])\n\n\ndef test_boolean_comparisons(table):\n bool_col = table.h\n\n result = bool_col == True # noqa\n assert isinstance(result, ir.BooleanColumn)\n\n result = bool_col == False # noqa\n assert isinstance(result, ir.BooleanColumn)\n\n\[email protected](\n 'operation',\n [\n operator.lt,\n operator.gt,\n operator.ge,\n operator.le,\n operator.eq,\n operator.ne,\n ],\n)\ndef test_string_comparisons(table, operation):\n string_col = table.g\n result = operation(string_col, 'foo')\n assert isinstance(result, ir.BooleanColumn)\n\n\[email protected](\n 'operation', [operator.xor, operator.or_, operator.and_]\n)\ndef test_boolean_logical_ops(table, operation):\n expr = table.a > 0\n\n result = operation(expr, table.h)\n assert isinstance(result, ir.BooleanColumn)\n\n result = operation(expr, True)\n refl_result = operation(True, expr)\n assert isinstance(result, ir.BooleanColumn)\n assert isinstance(refl_result, ir.BooleanColumn)\n\n true = ibis.literal(True)\n false = ibis.literal(False)\n\n result = operation(true, false)\n assert isinstance(result, ir.BooleanScalar)\n\n\ndef test_null_column():\n t = ibis.table([('a', 'string')], name='t')\n s = t.mutate(b=ibis.NA)\n assert s.b.type() == dt.null\n assert isinstance(s.b, ir.NullColumn)\n\n\ndef test_null_column_union():\n s = ibis.table([('a', 'string'), ('b', 'double')])\n t = ibis.table([('a', 'string')])\n with 
pytest.raises(ibis.common.exceptions.RelationError):\n s.union(t.mutate(b=ibis.NA)) # needs a type\n assert s.union(t.mutate(b=ibis.NA.cast('double'))).schema() == s.schema()\n\n\ndef test_string_compare_numeric_array(table):\n with pytest.raises(TypeError):\n table.g == table.f\n\n with pytest.raises(TypeError):\n table.g == table.c\n\n\ndef test_string_compare_numeric_literal(table):\n with pytest.raises(TypeError):\n table.g == ibis.literal(1.5)\n\n with pytest.raises(TypeError):\n table.g == ibis.literal(5)\n\n\ndef test_between(table):\n result = table.f.between(0, 1)\n\n assert isinstance(result, ir.BooleanColumn)\n assert isinstance(result.op(), ops.Between)\n\n # it works!\n result = table.g.between('a', 'f')\n assert isinstance(result, ir.BooleanColumn)\n\n result = ibis.literal(1).between(table.a, table.c)\n assert isinstance(result, ir.BooleanColumn)\n\n result = ibis.literal(7).between(5, 10)\n assert isinstance(result, ir.BooleanScalar)\n\n # Cases where between should immediately fail, e.g. incomparables\n with pytest.raises(TypeError):\n table.f.between('0', '1')\n\n with pytest.raises(TypeError):\n table.f.between(0, '1')\n\n with pytest.raises(TypeError):\n table.f.between('0', 1)\n\n\ndef test_chained_comparisons_not_allowed(table):\n with pytest.raises(ValueError):\n 0 < table.f < 1\n\n\[email protected](\n 'operation', [operator.add, operator.mul, operator.truediv, operator.sub]\n)\ndef test_binop_string_type_error(table, operation):\n # Strings are not valid for any numeric arithmetic\n ints = table.d\n strs = table.g\n\n with pytest.raises(TypeError):\n operation(ints, strs)\n\n with pytest.raises(TypeError):\n operation(strs, ints)\n\n\[email protected](\n ['op', 'name', 'case', 'ex_type'],\n [\n (operator.add, 'a', 0, 'int8'),\n (operator.add, 'a', 5, 'int16'),\n (operator.add, 'a', 100000, 'int32'),\n (operator.add, 'a', -100000, 'int32'),\n (operator.add, 'a', 1.5, 'double'),\n (operator.add, 'b', 0, 'int16'),\n (operator.add, 'b', 5, 'int32'),\n (operator.add, 'b', -5, 'int32'),\n (operator.add, 'c', 0, 'int32'),\n (operator.add, 'c', 5, 'int64'),\n (operator.add, 'c', -5, 'int64'),\n # technically this can overflow, but we allow it\n (operator.add, 'd', 5, 'int64'),\n (operator.mul, 'a', 0, 'int8'),\n (operator.mul, 'a', 5, 'int16'),\n (operator.mul, 'a', 2 ** 24, 'int32'),\n (operator.mul, 'a', -2 ** 24 + 1, 'int32'),\n (operator.mul, 'a', 1.5, 'double'),\n (operator.mul, 'b', 0, 'int16'),\n (operator.mul, 'b', 5, 'int32'),\n (operator.mul, 'b', -5, 'int32'),\n (operator.mul, 'c', 0, 'int32'),\n (operator.mul, 'c', 5, 'int64'),\n (operator.mul, 'c', -5, 'int64'),\n # technically this can overflow, but we allow it\n (operator.mul, 'd', 5, 'int64'),\n (operator.sub, 'a', 5, 'int16'),\n (operator.sub, 'a', 100000, 'int32'),\n (operator.sub, 'a', -100000, 'int32'),\n (operator.sub, 'a', 1.5, 'double'),\n (operator.sub, 'b', 5, 'int32'),\n (operator.sub, 'b', -5, 'int32'),\n (operator.sub, 'c', 5, 'int64'),\n (operator.sub, 'c', -5, 'int64'),\n # technically this can overflow, but we allow it\n (operator.sub, 'd', 5, 'int64'),\n (operator.truediv, 'a', 5, 'double'),\n (operator.truediv, 'a', 1.5, 'double'),\n (operator.truediv, 'b', 5, 'double'),\n (operator.truediv, 'b', -5, 'double'),\n (operator.truediv, 'c', 5, 'double'),\n (operator.pow, 'a', 0, 'double'),\n (operator.pow, 'b', 0, 'double'),\n (operator.pow, 'c', 0, 'double'),\n (operator.pow, 'd', 0, 'double'),\n (operator.pow, 'e', 0, 'float'),\n (operator.pow, 'f', 0, 'double'),\n (operator.pow, 'a', 2, 
'double'),\n (operator.pow, 'b', 2, 'double'),\n (operator.pow, 'c', 2, 'double'),\n (operator.pow, 'd', 2, 'double'),\n (operator.pow, 'a', 1.5, 'double'),\n (operator.pow, 'b', 1.5, 'double'),\n (operator.pow, 'c', 1.5, 'double'),\n (operator.pow, 'd', 1.5, 'double'),\n (operator.pow, 'e', 2, 'float'),\n (operator.pow, 'f', 2, 'double'),\n (operator.pow, 'a', -2, 'double'),\n (operator.pow, 'b', -2, 'double'),\n (operator.pow, 'c', -2, 'double'),\n (operator.pow, 'd', -2, 'double'),\n ],\n ids=lambda arg: str(getattr(arg, '__name__', arg)),\n)\ndef test_literal_promotions(table, op, name, case, ex_type):\n col = table[name]\n\n result = op(col, case)\n assert result.type() == dt.dtype(ex_type)\n\n result = op(case, col)\n assert result.type() == dt.dtype(ex_type)\n\n\[email protected](\n ('op', 'left_fn', 'right_fn', 'ex_type'),\n [\n (operator.sub, lambda t: t['a'], lambda t: 0, 'int8'),\n (operator.sub, lambda t: 0, lambda t: t['a'], 'int16'),\n (operator.sub, lambda t: t['b'], lambda t: 0, 'int16'),\n (operator.sub, lambda t: 0, lambda t: t['b'], 'int32'),\n (operator.sub, lambda t: t['c'], lambda t: 0, 'int32'),\n (operator.sub, lambda t: 0, lambda t: t['c'], 'int64'),\n ],\n ids=lambda arg: str(getattr(arg, '__name__', arg)),\n)\ndef test_zero_subtract_literal_promotions(\n table, op, left_fn, right_fn, ex_type\n):\n # in case of zero subtract the order of operands matters\n left, right = left_fn(table), right_fn(table)\n result = op(left, right)\n\n assert result.type() == dt.dtype(ex_type)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_add_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_subtract_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_multiply_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_divide_array_promotions():\n assert False\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_string_add_concat():\n assert False\n\n\[email protected]\ndef expr():\n exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]\n\n return ibis.expr_list(exprs)\n\n\ndef test_names(expr):\n assert expr.names() == ['a', 'b']\n\n\ndef test_prefix(expr):\n prefixed = expr.prefix('foo_')\n result = prefixed.names()\n assert result == ['foo_a', 'foo_b']\n\n\ndef test_rename(expr):\n renamed = expr.rename(lambda x: 'foo({0})'.format(x))\n result = renamed.names()\n assert result == ['foo(a)', 'foo(b)']\n\n\ndef test_suffix(expr):\n suffixed = expr.suffix('.x')\n result = suffixed.names()\n assert result == ['a.x', 'b.x']\n\n\ndef test_concat():\n exprs = [ibis.literal(1).name('a'), ibis.literal(2).name('b')]\n exprs2 = [ibis.literal(3).name('c'), ibis.literal(4).name('d')]\n\n list1 = ibis.expr_list(exprs)\n list2 = ibis.expr_list(exprs2)\n\n result = list1.concat(list2)\n expected = ibis.expr_list(exprs + exprs2)\n assert_equal(result, expected)\n\n\ndef test_substitute_dict():\n table = ibis.table([('foo', 'string'), ('bar', 'string')], 't1')\n subs = {'a': 'one', 'b': table.bar}\n\n result = table.foo.substitute(subs)\n expected = (\n table.foo.case()\n .when('a', 'one')\n .when('b', table.bar)\n .else_(table.foo)\n .end()\n )\n assert_equal(result, expected)\n\n result = table.foo.substitute(subs, else_=ibis.NA)\n expected = (\n table.foo.case()\n .when('a', 'one')\n .when('b', table.bar)\n .else_(ibis.NA)\n .end()\n )\n assert_equal(result, expected)\n\n\[email protected](\n 'typ',\n 
[\n 'array<map<string, array<array<double>>>>',\n 'string',\n 'double',\n 'float',\n 'int64',\n ],\n)\ndef test_not_without_boolean(typ):\n t = ibis.table([('a', typ)], name='t')\n c = t.a\n with pytest.raises(TypeError):\n ~c\n\n\[email protected](\n ('position', 'names'),\n [\n (0, 'foo'),\n (1, 'bar'),\n ([0], ['foo']),\n ([1], ['bar']),\n ([0, 1], ['foo', 'bar']),\n ([1, 0], ['bar', 'foo']),\n ],\n)\[email protected](\n 'expr_func',\n [\n lambda t, args: t[args],\n lambda t, args: t.sort_by(args),\n lambda t, args: t.group_by(args).aggregate(bar_avg=t.bar.mean()),\n ],\n)\ndef test_table_operations_with_integer_column(position, names, expr_func):\n t = ibis.table([('foo', 'string'), ('bar', 'double')])\n result = expr_func(t, position)\n expected = expr_func(t, names)\n assert result.equals(expected)\n\n\[email protected]('value', ['abcdefg', ['a', 'b', 'c'], [1, 2, 3]])\[email protected](\n 'operation', ['pow', 'sub', 'truediv', 'floordiv', 'mod']\n)\ndef test_generic_value_api_no_arithmetic(value, operation):\n func = getattr(operator, operation)\n expr = ibis.literal(value)\n with pytest.raises(TypeError):\n func(expr, expr)\n\n\[email protected](\n ('value', 'expected'), [(5, dt.int8), (5.4, dt.double), ('abc', dt.string)]\n)\ndef test_fillna_null(value, expected):\n assert ibis.NA.fillna(value).type().equals(expected)\n\n\[email protected](\n ('left', 'right'),\n [\n (literal('2017-04-01'), date(2017, 4, 2)),\n (date(2017, 4, 2), literal('2017-04-01')),\n (literal('2017-04-01 01:02:33'), datetime(2017, 4, 1, 1, 3, 34)),\n (datetime(2017, 4, 1, 1, 3, 34), literal('2017-04-01 01:02:33')),\n ],\n)\[email protected](\n 'op',\n [\n operator.eq,\n operator.ne,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n lambda left, right: ibis.timestamp('2017-04-01 00:02:34').between(\n left, right\n ),\n lambda left, right: ibis.timestamp('2017-04-01')\n .cast(dt.date)\n .between(left, right),\n ],\n)\ndef test_string_temporal_compare(op, left, right):\n result = op(left, right)\n assert result.type().equals(dt.boolean)\n\n\[email protected](\n ('value', 'type', 'expected_type_class'),\n [\n (2.21, 'decimal', dt.Decimal),\n (3.14, 'double', dt.Double),\n (4.2, 'int64', dt.Double),\n (4, 'int64', dt.Int64),\n ],\n)\ndef test_decimal_modulo_output_type(value, type, expected_type_class):\n t = ibis.table([('a', type)])\n expr = t.a % value\n assert isinstance(expr.type(), expected_type_class)\n\n\[email protected](\n ('left', 'right'),\n [(literal('10:00'), time(10, 0)), (time(10, 0), literal('10:00'))],\n)\[email protected](\n 'op',\n [\n operator.eq,\n operator.ne,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n ],\n)\ndef test_time_compare(op, left, right):\n result = op(left, right)\n assert result.type().equals(dt.boolean)\n\n\[email protected](\n ('left', 'right'),\n [\n (literal('10:00'), date(2017, 4, 2)),\n (literal('10:00'), datetime(2017, 4, 2, 1, 1)),\n (literal('10:00'), literal('2017-04-01')),\n ],\n)\[email protected](\n 'op', [operator.eq, operator.lt, operator.le, operator.gt, operator.ge]\n)\ndef test_time_timestamp_invalid_compare(op, left, right):\n result = op(left, right)\n assert result.type().equals(dt.boolean)\n\n\ndef test_scalar_parameter_set():\n value = ibis.param({dt.int64})\n\n assert isinstance(value.op(), ops.ScalarParameter)\n assert value.type().equals(dt.Set(dt.int64))\n\n\ndef test_scalar_parameter_repr():\n value = ibis.param(dt.timestamp).name('value')\n assert repr(value) == 'value = ScalarParameter[timestamp]'\n\n value_op = 
value.op()\n assert repr(value_op) == \"ScalarParameter(type=timestamp)\"\n\n\[email protected](\n ('left', 'right', 'expected'),\n [\n (\n # same value type, same name\n ibis.param(dt.timestamp),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # different value type, same name\n ibis.param(dt.date),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # same value type, different name\n ibis.param(dt.timestamp),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # different value type, different name\n ibis.param(dt.date),\n ibis.param(dt.timestamp),\n False,\n ),\n (\n # different Python class, left side is param\n ibis.param(dt.timestamp),\n dt.date,\n False,\n ),\n (\n # different Python class, right side is param\n dt.date,\n ibis.param(dt.timestamp),\n False,\n ),\n ],\n)\ndef test_scalar_parameter_compare(left, right, expected):\n assert left.equals(right) == expected\n\n\[email protected](\n ('case', 'creator'),\n [\n (datetime.now(), toolz.compose(methodcaller('time'), ibis.timestamp)),\n ('now', toolz.compose(methodcaller('time'), ibis.timestamp)),\n (datetime.now().time(), ibis.time),\n ('10:37', ibis.time),\n ],\n)\[email protected](\n ('left', 'right'), [(1, 'a'), ('a', 1), (1.0, 2.0), (['a'], [1])]\n)\ndef test_between_time_failure_time(case, creator, left, right):\n value = creator(case)\n with pytest.raises(TypeError):\n value.between(left, right)\n\n\ndef test_custom_type_binary_operations():\n class Foo(ir.ValueExpr):\n def __add__(self, other):\n op = self.op()\n return type(op)(op.value + other).to_expr()\n\n __radd__ = __add__\n\n class FooNode(ops.ValueOp):\n value = Arg(rlz.integer)\n\n def output_type(self):\n return functools.partial(Foo, dtype=dt.int64)\n\n left = ibis.literal(2)\n right = FooNode(3).to_expr()\n result = left + right\n assert isinstance(result, Foo)\n assert isinstance(result.op(), FooNode)\n\n left = FooNode(3).to_expr()\n right = ibis.literal(2)\n result = left + right\n assert isinstance(result, Foo)\n assert isinstance(result.op(), FooNode)\n\n\ndef test_empty_array_as_argument():\n class Foo(ir.Expr):\n pass\n\n class FooNode(ops.ValueOp):\n value = Arg(rlz.value(dt.Array(dt.int64)))\n\n def output_type(self):\n return Foo\n\n node = FooNode([])\n value = node.value\n expected = literal([]).cast(dt.Array(dt.int64))\n\n assert value.type().equals(dt.Array(dt.null))\n assert value.cast(dt.Array(dt.int64)).equals(expected)\n\n\ndef test_struct_field_dir():\n t = ibis.table([('struct_col', 'struct<my_field: string>')])\n assert 'struct_col' in dir(t)\n assert 'my_field' in dir(t.struct_col)\n\n\ndef test_nullable_column_propagated():\n t = ibis.table(\n [\n ('a', dt.Int32(nullable=True)),\n ('b', dt.Int32(nullable=False)),\n ('c', dt.String(nullable=False)),\n ('d', dt.double), # nullable by default\n ('f', dt.Double(nullable=False)),\n ]\n )\n\n assert t.a.type().nullable is True\n assert t.b.type().nullable is False\n assert t.c.type().nullable is False\n assert t.d.type().nullable is True\n assert t.f.type().nullable is False\n\n s = t.a + t.d\n assert s.type().nullable is True\n\n s = t.b + t.d\n assert s.type().nullable is True\n\n s = t.b + t.f\n assert s.type().nullable is False\n\n\[email protected](\n 'base_expr',\n [\n ibis.table([('interval_col', dt.Interval(unit='D'))]).interval_col,\n ibis.interval(seconds=42),\n ],\n)\ndef test_interval_negate(base_expr):\n expr = -base_expr\n expr2 = base_expr.negate()\n expr3 = ibis.negate(base_expr)\n assert isinstance(expr.op(), ops.Negate)\n assert expr.equals(expr2)\n assert 
expr.equals(expr3)\n\n\ndef test_large_timestamp():\n expr = ibis.timestamp('4567-02-03')\n expected = datetime(year=4567, month=2, day=3)\n result = expr.op().value\n assert result == expected\n\n\[email protected]('tz', [None, 'UTC'])\ndef test_timestamp_with_timezone(tz):\n expr = ibis.timestamp('2017-01-01', timezone=tz)\n expected = pd.Timestamp('2017-01-01', tz=tz)\n result = expr.op().value\n assert expected == result\n\n\[email protected]('tz', [None, 'UTC'])\ndef test_timestamp_timezone_type(tz):\n expr = ibis.timestamp('2017-01-01', timezone=tz)\n expected = dt.Timestamp(timezone=tz)\n assert expected == expr.op().dtype\n\n\ndef test_map_get_broadcast():\n t = ibis.table([('a', 'string')], name='t')\n lookup_table = ibis.literal({'a': 1, 'b': 2})\n expr = lookup_table.get(t.a)\n assert isinstance(expr, ir.IntegerColumn)\n\n\ndef test_map_getitem_broadcast():\n t = ibis.table([('a', 'string')], name='t')\n lookup_table = ibis.literal({'a': 1, 'b': 2})\n expr = lookup_table[t.a]\n assert isinstance(expr, ir.IntegerColumn)\n\n\ndef test_map_keys_output_type():\n mapping = ibis.literal({'a': 1, 'b': 2})\n assert mapping.keys().type() == dt.Array(dt.string)\n\n\ndef test_map_values_output_type():\n mapping = ibis.literal({'a': 1, 'b': 2})\n assert mapping.values().type() == dt.Array(dt.int8)\n\n\ndef test_scalar_isin_map_keys():\n mapping = ibis.literal({'a': 1, 'b': 2})\n key = ibis.literal('a')\n expr = key.isin(mapping.keys())\n assert isinstance(expr, ir.BooleanScalar)\n\n\ndef test_column_isin_map_keys():\n t = ibis.table([('a', 'string')], name='t')\n mapping = ibis.literal({'a': 1, 'b': 2})\n expr = t.a.isin(mapping.keys())\n assert isinstance(expr, ir.BooleanColumn)\n\n\ndef test_map_get_with_compatible_value_smaller():\n value = ibis.literal({'A': 1000, 'B': 2000})\n expr = value.get('C', 3)\n assert value.type() == dt.Map(dt.string, dt.int16)\n assert expr.type() == dt.int16\n\n\ndef test_map_get_with_compatible_value_bigger():\n value = ibis.literal({'A': 1, 'B': 2})\n expr = value.get('C', 3000)\n assert value.type() == dt.Map(dt.string, dt.int8)\n assert expr.type() == dt.int16\n\n\ndef test_map_get_with_incompatible_value_different_kind():\n value = ibis.literal({'A': 1000, 'B': 2000})\n with pytest.raises(IbisTypeError):\n value.get('C', 3.0)\n\n\[email protected]('null_value', [None, ibis.NA])\ndef test_map_get_with_null_on_not_nullable(null_value):\n map_type = dt.Map(dt.string, dt.Int16(nullable=False))\n value = ibis.literal({'A': 1000, 'B': 2000}).cast(map_type)\n assert value.type() == map_type\n with pytest.raises(IbisTypeError):\n assert value.get('C', null_value)\n\n\[email protected]('null_value', [None, ibis.NA])\ndef test_map_get_with_null_on_nullable(null_value):\n value = ibis.literal({'A': 1000, 'B': None})\n result = value.get('C', null_value)\n assert result.type().nullable\n\n\[email protected]('null_value', [None, ibis.NA])\ndef test_map_get_with_null_on_null_type_with_null(null_value):\n value = ibis.literal({'A': None, 'B': None})\n result = value.get('C', null_value)\n assert result.type().nullable\n\n\ndef test_map_get_with_null_on_null_type_with_non_null():\n value = ibis.literal({'A': None, 'B': None})\n assert value.get('C', 1).type() == dt.int8\n\n\ndef test_map_get_with_incompatible_value():\n value = ibis.literal({'A': 1000, 'B': 2000})\n with pytest.raises(IbisTypeError):\n value.get('C', ['A'])\n\n\[email protected](\n ('value', 'expected_type'),\n [\n (datetime.now(), dt.timestamp),\n (datetime.now().date(), dt.date),\n 
(datetime.now().time(), dt.time),\n ],\n)\ndef test_invalid_negate(value, expected_type):\n expr = ibis.literal(value)\n assert expr.type() == expected_type\n with pytest.raises(TypeError):\n -expr\n\n\[email protected](\n 'type',\n [\n np.float16,\n np.float32,\n np.float64,\n np.int16,\n np.int32,\n np.int64,\n np.int64,\n np.int8,\n np.timedelta64,\n np.uint16,\n np.uint32,\n np.uint64,\n np.uint64,\n np.uint8,\n float,\n int,\n ],\n)\ndef test_valid_negate(type):\n value = type(1)\n expr = ibis.literal(value)\n assert -expr is not None\n\n\[email protected](\n reason='Type not supported in most backends', raises=TypeError\n)\[email protected](\n os.name == 'nt', reason='np.float128 not appear to exist on windows'\n)\ndef test_valid_negate_float128():\n value = np.float128(1)\n expr = ibis.literal(value)\n assert -expr is not None\n\n\[email protected](\n ('kind', 'begin', 'end'),\n [\n ('preceding', None, None),\n ('preceding', 1, None),\n ('preceding', -1, 1),\n ('preceding', 1, -1),\n ('preceding', -1, -1),\n ('following', None, None),\n ('following', None, 1),\n ('following', -1, 1),\n ('following', 1, -1),\n ('following', -1, -1),\n ],\n)\ndef test_window_unbounded_invalid(kind, begin, end):\n kwargs = {kind: (begin, end)}\n with pytest.raises(com.IbisInputError):\n ibis.window(**kwargs)\n\n\[email protected](\n ('left', 'right', 'expected'),\n [\n (ibis.literal(1), ibis.literal(1.0), dt.float64),\n (ibis.literal('a'), ibis.literal('b'), dt.string),\n (ibis.literal(1.0), ibis.literal(1), dt.float64),\n (ibis.literal(1), ibis.literal(1), dt.int8),\n (ibis.literal(1), ibis.literal(1000), dt.int16),\n (ibis.literal(2 ** 16), ibis.literal(2 ** 17), dt.int32),\n (ibis.literal(2 ** 50), ibis.literal(1000), dt.int64),\n (ibis.literal([1, 2]), ibis.literal([1, 2]), dt.Array(dt.int8)),\n (ibis.literal(['a']), ibis.literal([]), dt.Array(dt.string)),\n (ibis.literal([]), ibis.literal(['a']), dt.Array(dt.string)),\n (ibis.literal([]), ibis.literal([]), dt.Array(dt.null)),\n ],\n)\ndef test_nullif_type(left, right, expected):\n assert left.nullif(right).type() == expected\n\n\[email protected](\n ('left', 'right'), [(ibis.literal(1), ibis.literal('a'))]\n)\ndef test_nullif_fail(left, right):\n with pytest.raises(com.IbisTypeError):\n left.nullif(right)\n with pytest.raises(com.IbisTypeError):\n right.nullif(left)\n\n\[email protected](\n \"join_method\",\n [\n \"left_join\",\n pytest.param(\n \"right_join\",\n marks=pytest.mark.xfail(\n raises=AttributeError, reason=\"right_join is not an ibis API\"\n ),\n ),\n \"inner_join\",\n \"outer_join\",\n \"asof_join\",\n pytest.param(\n \"semi_join\",\n marks=pytest.mark.xfail(\n raises=com.IbisTypeError,\n reason=(\n \"semi_join only gives access to the left table's \"\n \"columns\"\n ),\n ),\n ),\n ],\n)\[email protected](\n raises=(com.IbisError, AttributeError),\n reason=\"Select from unambiguous joins not implemented\",\n)\ndef test_select_on_unambiguous_join(join_method):\n t = ibis.table([(\"a0\", dt.int64), (\"b1\", dt.string)], name=\"t\")\n s = ibis.table([(\"a1\", dt.int64), (\"b2\", dt.string)], name=\"s\")\n method = getattr(t, join_method)\n join = method(s, t.b1 == s.b2)\n expr1 = join[\"a0\", \"a1\"]\n expr2 = join[[\"a0\", \"a1\"]]\n expr3 = join.select([\"a0\", \"a1\"])\n assert expr1.equals(expr2)\n assert expr1.equals(expr3)\n\n\ndef test_chained_select_on_join():\n t = ibis.table([(\"a\", dt.int64)], name=\"t\")\n s = ibis.table([(\"a\", dt.int64), (\"b\", dt.string)], name=\"s\")\n join = t.join(s)[t.a, s.b]\n expr1 = join[\"a\", 
\"b\"]\n expr2 = join.select([\"a\", \"b\"])\n assert expr1.equals(expr2)\n\n\ndef test_repr_list_of_lists():\n lit = ibis.literal([[1]])\n result = repr(lit)\n expected = \"\"\"\\\nLiteral[array<array<int8>>]\n [[1]]\"\"\"\n assert result == expected\n\n\ndef test_repr_list_of_lists_in_table():\n t = ibis.table([('a', 'int64')], name='t')\n lit = ibis.literal([[1]])\n expr = t[t, lit.name('array_of_array')]\n result = repr(expr)\n expected = \"\"\"\\\nref_0\nUnboundTable[table]\n name: t\n schema:\n a : int64\n\nSelection[table]\n table:\n Table: ref_0\n selections:\n Table: ref_0\n array_of_array = Literal[array<array<int8>>]\n [[1]]\"\"\"\n assert result == expected\n" ]
[ [ "pandas.Timestamp", "numpy.float128" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NamiKaze7/FinQA
[ "cf61ae2611ae205b62574e2b4264cb0318dd7202" ]
[ "code/generator/Model_new.py" ]
[ "import torch\nfrom torch import nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport math\nimport numpy as np\nfrom config import parameters as conf\n\nif conf.pretrained_model == \"bert\":\n from transformers import BertModel\nelif conf.pretrained_model == \"roberta\":\n from transformers import RobertaModel\nelif conf.pretrained_model == \"finbert\":\n from transformers import BertModel\nelif conf.pretrained_model == \"longformer\":\n from transformers import LongformerModel\n\n\nclass Bert_model(nn.Module):\n\n def __init__(self, num_decoder_layers, hidden_size, dropout_rate, input_length,\n program_length, op_list, const_list):\n\n super(Bert_model, self).__init__()\n\n self.op_list_size = len(op_list)\n self.const_list_size = len(const_list)\n self.reserved_token_size = self.op_list_size + self.const_list_size\n self.program_length = program_length\n self.hidden_size = hidden_size\n self.const_list = const_list\n self.op_list = op_list\n self.input_length = input_length\n\n self.reserved_ind = nn.Parameter(torch.arange(\n 0, self.reserved_token_size), requires_grad=False)\n self.reserved_go = nn.Parameter(torch.arange(op_list.index(\n 'GO'), op_list.index('GO') + 1), requires_grad=False)\n\n self.reserved_para = nn.Parameter(torch.arange(op_list.index(\n ')'), op_list.index(')') + 1), requires_grad=False)\n\n # masking for decoidng for test time\n op_ones = nn.Parameter(torch.ones(\n self.op_list_size), requires_grad=False)\n op_zeros = nn.Parameter(torch.zeros(\n self.op_list_size), requires_grad=False)\n other_ones = nn.Parameter(torch.ones(\n input_length + self.const_list_size), requires_grad=False)\n other_zeros = nn.Parameter(torch.zeros(\n input_length + self.const_list_size), requires_grad=False)\n self.op_only_mask = nn.Parameter(\n torch.cat((op_ones, other_zeros), 0), requires_grad=False)\n self.seq_only_mask = nn.Parameter(\n torch.cat((op_zeros, other_ones), 0), requires_grad=False)\n\n # for \")\"\n para_before_ones = nn.Parameter(torch.ones(\n op_list.index(')')), requires_grad=False)\n para_after_ones = nn.Parameter(torch.ones(\n input_length + self.reserved_token_size - op_list.index(')') - 1), requires_grad=False)\n para_zero = nn.Parameter(torch.zeros(1), requires_grad=False)\n self.para_mask = nn.Parameter(torch.cat(\n (para_before_ones, para_zero, para_after_ones), 0), requires_grad=False)\n\n # for step embedding\n # self.step_masks = []\n all_tmp_list = self.op_list + self.const_list\n self.step_masks = nn.Parameter(torch.zeros(\n conf.max_step_ind, input_length + self.reserved_token_size), requires_grad=False)\n for i in range(conf.max_step_ind):\n this_step_mask_ind = all_tmp_list.index(\"#\" + str(i))\n self.step_masks[i, this_step_mask_ind] = 1.0\n\n # self.step_mask_eye = torch.eye(conf.max_step_ind)\n\n if conf.pretrained_model == \"bert\":\n self.bert = BertModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n elif conf.pretrained_model == \"roberta\":\n self.bert = RobertaModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n elif conf.pretrained_model == \"finbert\":\n self.bert = BertModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n elif conf.pretrained_model == \"longformer\":\n self.bert = LongformerModel.from_pretrained(\n conf.model_size, cache_dir=conf.cache_dir)\n\n self.cls_prj = nn.Linear(hidden_size, hidden_size, bias=True)\n self.cls_dropout = nn.Dropout(dropout_rate)\n\n self.seq_prj = nn.Linear(hidden_size, hidden_size, bias=True)\n self.seq_dropout = 
nn.Dropout(dropout_rate)\n\n self.reserved_token_embedding = nn.Embedding(\n self.reserved_token_size, hidden_size)\n\n # attentions\n self.decoder_history_attn_prj = nn.Linear(\n hidden_size, hidden_size, bias=True)\n self.decoder_history_attn_dropout = nn.Dropout(dropout_rate)\n\n self.question_attn_prj = nn.Linear(hidden_size, hidden_size, bias=True)\n self.question_attn_dropout = nn.Dropout(dropout_rate)\n\n self.question_summary_attn_prj = nn.Linear(\n hidden_size, hidden_size, bias=True)\n self.question_summary_attn_dropout = nn.Dropout(dropout_rate)\n\n if conf.sep_attention:\n self.input_embeddings_prj = nn.Linear(\n hidden_size*3, hidden_size, bias=True)\n else:\n self.input_embeddings_prj = nn.Linear(\n hidden_size*2, hidden_size, bias=True)\n self.input_embeddings_layernorm = nn.LayerNorm([1, hidden_size])\n\n self.option_embeddings_prj = nn.Linear(\n hidden_size*2, hidden_size, bias=True)\n\n # decoder lstm\n self.rnn = torch.nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,\n num_layers=conf.num_decoder_layers, batch_first=True)\n\n # step vector\n self.decoder_step_proj = nn.Linear(\n 3*hidden_size, hidden_size, bias=True)\n self.decoder_step_proj_dropout = nn.Dropout(dropout_rate)\n\n self.step_mix_proj = nn.Linear(\n hidden_size*2, hidden_size, bias=True)\n\n def forward(self, is_training, input_ids, input_mask, segment_ids, option_mask, program_ids, program_mask, device):\n\n bert_outputs = self.bert(\n input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)\n\n bert_sequence_output = bert_outputs.last_hidden_state\n bert_pooled_output = bert_sequence_output[:, 0, :]\n batch_size, seq_length, bert_dim = list(bert_sequence_output.size())\n\n split_program_ids = torch.split(program_ids, 1, dim=1)\n # print(self.program_length)\n # print(program_ids.size())\n # print(split_program_ids[0].size())\n\n pooled_output = self.cls_prj(bert_pooled_output)\n pooled_output = self.cls_dropout(pooled_output)\n\n option_size = self.reserved_token_size + seq_length\n\n sequence_output = self.seq_prj(bert_sequence_output)\n sequence_output = self.seq_dropout(sequence_output)\n\n op_embeddings = self.reserved_token_embedding(self.reserved_ind)\n op_embeddings = op_embeddings.repeat(batch_size, 1, 1)\n\n logits = []\n\n init_decoder_output = self.reserved_token_embedding(self.reserved_go)\n decoder_output = init_decoder_output.repeat(batch_size, 1, 1)\n\n # [batch, op + seq len, hidden]\n initial_option_embeddings = torch.cat(\n [op_embeddings, sequence_output], dim=1)\n\n if conf.sep_attention:\n decoder_history = decoder_output\n else:\n decoder_history = torch.unsqueeze(pooled_output, dim=-1)\n\n decoder_state_h = torch.zeros(\n 1, batch_size, self.hidden_size, device=device)\n decoder_state_c = torch.zeros(\n 1, batch_size, self.hidden_size, device=device)\n\n float_input_mask = input_mask.float()\n float_input_mask = torch.unsqueeze(float_input_mask, dim=-1)\n\n this_step_new_op_emb = initial_option_embeddings\n\n for cur_step in range(self.program_length):\n\n # decoder history att\n decoder_history_attn_vec = self.decoder_history_attn_prj(\n decoder_output)\n decoder_history_attn_vec = self.decoder_history_attn_dropout(\n decoder_history_attn_vec)\n\n decoder_history_attn_w = torch.matmul(\n decoder_history, torch.transpose(decoder_history_attn_vec, 1, 2))\n decoder_history_attn_w = F.softmax(decoder_history_attn_w, dim=1)\n\n decoder_history_ctx_embeddings = torch.matmul(\n torch.transpose(decoder_history_attn_w, 1, 2), decoder_history)\n\n if 
conf.sep_attention:\n # input seq att\n question_attn_vec = self.question_attn_prj(decoder_output)\n question_attn_vec = self.question_attn_dropout(\n question_attn_vec)\n\n question_attn_w = torch.matmul(\n sequence_output, torch.transpose(question_attn_vec, 1, 2))\n question_attn_w -= 1e6 * (1 - float_input_mask)\n question_attn_w = F.softmax(question_attn_w, dim=1)\n\n question_ctx_embeddings = torch.matmul(\n torch.transpose(question_attn_w, 1, 2), sequence_output)\n\n # another input seq att\n question_summary_vec = self.question_summary_attn_prj(\n decoder_output)\n question_summary_vec = self.question_summary_attn_dropout(\n question_summary_vec)\n\n question_summary_w = torch.matmul(\n sequence_output, torch.transpose(question_summary_vec, 1, 2))\n question_summary_w -= 1e6 * (1 - float_input_mask)\n question_summary_w = F.softmax(question_summary_w, dim=1)\n\n question_summary_embeddings = torch.matmul(\n torch.transpose(question_summary_w, 1, 2), sequence_output)\n\n if conf.sep_attention:\n concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,\n question_ctx_embeddings,\n decoder_output], dim=-1)\n else:\n concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,\n decoder_output], dim=-1)\n\n input_embeddings = self.input_embeddings_prj(\n concat_input_embeddings)\n\n if conf.layer_norm:\n input_embeddings = self.input_embeddings_layernorm(\n input_embeddings)\n\n question_option_vec = this_step_new_op_emb * question_summary_embeddings\n option_embeddings = torch.cat(\n [this_step_new_op_emb, question_option_vec], dim=-1)\n\n option_embeddings = self.option_embeddings_prj(option_embeddings)\n option_logits = torch.matmul(\n option_embeddings, torch.transpose(input_embeddings, 1, 2))\n option_logits = torch.squeeze(\n option_logits, dim=2) # [batch, op + seq_len]\n option_logits -= 1e6 * (1 - option_mask)\n logits.append(option_logits)\n\n if is_training:\n program_index = torch.unsqueeze(\n split_program_ids[cur_step], dim=1)\n else:\n # constrain decoding\n if cur_step % 4 == 0 or (cur_step + 1) % 4 == 0:\n # op round\n option_logits -= 1e6 * self.seq_only_mask\n else:\n # number round\n option_logits -= 1e6 * self.op_only_mask\n\n if (cur_step + 1) % 4 == 0:\n # \")\" round\n option_logits -= 1e6 * self.para_mask\n # print(program_index)\n\n program_index = torch.argmax(\n option_logits, axis=-1, keepdim=True)\n\n program_index = torch.unsqueeze(\n program_index, dim=1\n )\n\n if (cur_step + 1) % 4 == 0:\n\n # update op embeddings\n this_step_index = cur_step // 4\n this_step_list_index = (\n self.op_list + self.const_list).index(\"#\" + str(this_step_index))\n this_step_mask = self.step_masks[this_step_index, :]\n\n decoder_step_vec = self.decoder_step_proj(\n concat_input_embeddings)\n decoder_step_vec = self.decoder_step_proj_dropout(\n decoder_step_vec)\n decoder_step_vec = torch.squeeze(decoder_step_vec)\n\n this_step_new_emb = decoder_step_vec # [batch, hidden]\n\n this_step_new_emb = torch.unsqueeze(this_step_new_emb, 1)\n this_step_new_emb = this_step_new_emb.repeat(\n 1, self.reserved_token_size+self.input_length, 1) # [batch, op seq, hidden]\n\n this_step_mask = torch.unsqueeze(\n this_step_mask, 0) # [1, op seq]\n # print(this_step_mask)\n\n this_step_mask = torch.unsqueeze(\n this_step_mask, 2) # [1, op seq, 1]\n this_step_mask = this_step_mask.repeat(\n batch_size, 1, self.hidden_size) # [batch, op seq, hidden]\n\n this_step_new_op_emb = torch.where(\n this_step_mask > 0, this_step_new_emb, initial_option_embeddings)\n\n # 
print(program_index.size())\n program_index = torch.repeat_interleave(\n program_index, self.hidden_size, dim=2) # [batch, 1, hidden]\n\n input_program_embeddings = torch.gather(\n option_embeddings, dim=1, index=program_index)\n\n decoder_output, (decoder_state_h, decoder_state_c) = self.rnn(\n input_program_embeddings, (decoder_state_h, decoder_state_c))\n decoder_history = torch.cat(\n [decoder_history, input_program_embeddings], dim=1)\n\n logits = torch.stack(logits, dim=1)\n return logits\n" ]
[ [ "torch.nn.functional.softmax", "torch.transpose", "torch.cat", "torch.zeros", "torch.nn.Embedding", "torch.repeat_interleave", "torch.where", "torch.split", "torch.nn.Dropout", "torch.ones", "torch.arange", "torch.squeeze", "torch.unsqueeze", "torch.nn.Linear", "torch.stack", "torch.nn.LSTM", "torch.nn.LayerNorm", "torch.gather", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anishvaidya/MIT-Indoor-Scene-Recognition
[ "04b2c35b25996d420c6fe90c480b86635f3baffd" ]
[ "model_inception_v2_67class.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport imageio\nimport skimage\nimport skimage.io\nimport skimage.transform\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.optimizers import Adam,RMSprop,SGD\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_resnet_v2 import preprocess_input, decode_predictions\nimport matplotlib.pylab as plt\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\n# Using pre-trained model\nconv_base = InceptionResNetV2(include_top = False, weights = '/home/vanish/prgs/MLandDL/MITTest/Models/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5', input_shape = (200,200,3)) #150,150\nconv_base.summary()\n\n# build on top of imported model\nmodel = Sequential()\nmodel.add(conv_base)\nmodel.add(Flatten())\n#model.add(Dense(512,activation='relu'))\n#model.add(Dropout(0.5))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dense(67, activation='softmax'))\n\nmodel.compile(Adam(lr=0.0001),loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n#model.compile(SGD(lr=0.0001),loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\nmodel.summary()\n\n\ntrain_data_dir = 'Dataset/trainingset/'\nimg_width = 200\nimg_height = 200\nbatch_size = 8\nnb_epochs = 10\ntrain_datagen = ImageDataGenerator(rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n validation_split=0.1) # set validation split\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical',\n subset='training') # set as training data\n\nvalidation_generator = train_datagen.flow_from_directory(\n train_data_dir, # same directory as training data\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical',\n subset='validation') # set as validation data\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch = train_generator.samples // batch_size,\n validation_data = validation_generator, \n validation_steps = validation_generator.samples // batch_size,\n epochs = nb_epochs)\n\nmodel.save('inception_v2_200px.h5') \nmodel.save_weights('Weightsinception_v2_200px.h5')\n\nfrom keras.models import load_model\nmodel = load_model('inception_v2_200px.h5')\n\n# check classification mapping\ndict = train_generator.class_indices\n\n# Graphs\nprint(history.history.keys())\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(acc) + 1)\n\nplt.title('Training and validation accuracy')\nplt.plot(epochs, acc, 'red', label='Training acc')\nplt.plot(epochs, val_acc, 'blue', label='Validation acc')\nplt.legend()\n\nplt.figure()\nplt.title('Training and validation loss')\nplt.plot(epochs, loss, 'red', label='Training loss')\nplt.plot(epochs, val_loss, 'blue', label='Validation loss')\n\nplt.legend()\nplt.show()\n\nimport time\nimport numpy as np\nfrom keras.preprocessing import image\ntest_image = image.load_img('/home/vanish/prgs/MLandDL/MITIndoor/Dataset/trainingset/bathroom/b1.jpg', target_size = (200, 200))\ntest_image = image.img_to_array(test_image)\ntest_image = 
np.expand_dims(test_image, axis = 0)\ntest_image = preprocess_input(test_image) # added to check same preds issue\nstart_time = time.time()\nresult = model.predict(test_image)\n#decode_predictions(result)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nfor i in range (0,dict.__len__()):\n if result[0][i] >= 0.05:\n listOfKeys = [key for (key, value) in dict.items() if value == i]\n for key in listOfKeys:\n print(key) \n break\n" ]
[ [ "matplotlib.pylab.show", "numpy.expand_dims", "matplotlib.pylab.title", "matplotlib.pylab.figure", "matplotlib.pylab.plot", "matplotlib.pylab.legend" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
veritaass/pig_mmdet
[ "6bb348a002695e83b2f16b84173ce0aebbb20e60" ]
[ "topdown_coco_tiny_dataset.py" ]
[ "import json\nimport os\nimport os.path as osp\n#from collections import OrderedDict\nimport tempfile\n\nimport numpy as np\n\nfrom mmpose.core.evaluation.top_down_eval import (keypoint_nme,\n keypoint_pck_accuracy)\nfrom mmpose.datasets.builder import DATASETS\nfrom mmpose.datasets.datasets.base import Kpt2dSviewRgbImgTopDownDataset\n\n######\n\nfrom collections import OrderedDict, defaultdict\n\nimport json_tricks as json\nimport numpy as np\nfrom mmcv import Config, deprecated_api_warning\nfrom xtcocotools.cocoeval import COCOeval\n\nfrom ....core.post_processing import oks_nms, soft_oks_nms\nfrom ...builder import DATASETS\nfrom ..base import Kpt2dSviewRgbImgTopDownDataset\n\n#####\n\[email protected]_module()\nclass TopDownCOCOTinyDataset(Kpt2dSviewRgbImgTopDownDataset):\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n dataset_info=None,\n test_mode=False):\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, dataset_info, coco_style=False, test_mode=test_mode)\n\n # flip_pairs, upper_body_ids and lower_body_ids will be used\n # in some data augmentations like random flip\n self.ann_info['flip_pairs'] = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],\n [11, 12], [13, 14], [15, 16]]\n self.ann_info['upper_body_ids'] = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n self.ann_info['lower_body_ids'] = (11, 12, 13, 14, 15, 16)\n\n self.ann_info['joint_weights'] = None\n self.ann_info['use_different_joint_weights'] = False\n\n self.dataset_name = 'coco_tiny'\n self.db = self._get_db()\n\n def _get_db(self):\n with open(self.ann_file) as f:\n anns = json.load(f)\n\n db = []\n for idx, ann in enumerate(anns):\n # get image path\n image_file = osp.join(self.img_prefix, ann['image_file'])\n # get bbox\n bbox = ann['bbox']\n center, scale = self._xywh2cs(*bbox)\n # get keypoints\n keypoints = np.array(\n ann['keypoints'], dtype=np.float32).reshape(-1, 3)\n num_joints = keypoints.shape[0]\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d[:, :2] = keypoints[:, :2]\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])\n\n sample = {\n 'image_file': image_file,\n 'center': center,\n 'scale': scale,\n 'bbox': bbox,\n 'rotation': 0,\n 'joints_3d': joints_3d,\n 'joints_3d_visible': joints_3d_visible,\n 'bbox_score': 1,\n 'bbox_id': idx,\n }\n db.append(sample)\n\n return db\n\n def _xywh2cs(self, x, y, w, h):\n \"\"\"This encodes bbox(x, y, w, h) into (center, scale)\n Args:\n x, y, w, h\n Returns:\n tuple: A tuple containing center and scale.\n - center (np.ndarray[float32](2,)): center of the bbox (x, y).\n - scale (np.ndarray[float32](2,)): scale of the bbox w & h.\n \"\"\"\n aspect_ratio = self.ann_info['image_size'][0] / self.ann_info[\n 'image_size'][1]\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\n if w > aspect_ratio * h:\n h = w * 1.0 / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n\n # pixel std is 200.0\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\n # padding to include proper amount of context\n scale = scale * 1.25\n return center, scale\n\n @deprecated_api_warning(name_dict=dict(outputs='results'))\n def evaluate(self, results, res_folder=None, metric='mAP', **kwargs):\n \"\"\"Evaluate coco keypoint results. 
The pose prediction results will be\n saved in ``${res_folder}/result_keypoints.json``.\n\n Note:\n - batch_size: N\n - num_keypoints: K\n - heatmap height: H\n - heatmap width: W\n\n Args:\n results (list[dict]): Testing results containing the following\n items:\n\n - preds (np.ndarray[N,K,3]): The first two dimensions are \\\n coordinates, score is the third dimension of the array.\n - boxes (np.ndarray[N,6]): [center[0], center[1], scale[0], \\\n scale[1],area, score]\n - image_paths (list[str]): For example, ['data/coco/val2017\\\n /000000393226.jpg']\n - heatmap (np.ndarray[N, K, H, W]): model output heatmap\n - bbox_id (list(int)).\n res_folder (str, optional): The folder to save the testing\n results. If not specified, a temp folder will be created.\n Default: None.\n metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['mAP']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n if res_folder is not None:\n tmp_folder = None\n res_file = osp.join(res_folder, 'result_keypoints.json')\n else:\n tmp_folder = tempfile.TemporaryDirectory()\n res_file = osp.join(tmp_folder.name, 'result_keypoints.json')\n\n kpts = defaultdict(list)\n\n for result in results:\n preds = result['preds']\n boxes = result['boxes']\n image_paths = result['image_paths']\n bbox_ids = result['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n kpts[image_id].append({\n 'keypoints': preds[i],\n 'center': boxes[i][0:2],\n 'scale': boxes[i][2:4],\n 'area': boxes[i][4],\n 'score': boxes[i][5],\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n # rescoring and oks nms\n num_joints = self.ann_info['num_joints']\n vis_thr = self.vis_thr\n oks_thr = self.oks_thr\n valid_kpts = []\n for image_id in kpts.keys():\n img_kpts = kpts[image_id]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > vis_thr:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n\n if self.use_nms:\n nms = soft_oks_nms if self.soft_nms else oks_nms\n keep = nms(img_kpts, oks_thr, sigmas=self.sigmas)\n valid_kpts.append([img_kpts[_keep] for _keep in keep])\n else:\n valid_kpts.append(img_kpts)\n\n self._write_coco_keypoint_results(valid_kpts, res_file)\n\n info_str = self._do_python_keypoint_eval(res_file)\n name_value = OrderedDict(info_str)\n\n if tmp_folder is not None:\n tmp_folder.cleanup()\n\n return name_value\n\n def _write_coco_keypoint_results(self, keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n data_pack = [{\n 'cat_id': self._class_to_coco_ind[cls],\n 'cls_ind': cls_ind,\n 'cls': cls,\n 'ann_type': 'keypoints',\n 'keypoints': keypoints\n } for cls_ind, cls in enumerate(self.classes)\n if not cls == '__background__']\n\n results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n\n def _coco_keypoint_results_one_category_kernel(self, data_pack):\n \"\"\"Get coco keypoint results.\"\"\"\n cat_id = data_pack['cat_id']\n keypoints = 
data_pack['keypoints']\n cat_results = []\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n _key_points = np.array(\n [img_kpt['keypoints'] for img_kpt in img_kpts])\n key_points = _key_points.reshape(-1,\n self.ann_info['num_joints'] * 3)\n\n result = [{\n 'image_id': img_kpt['image_id'],\n 'category_id': cat_id,\n 'keypoints': key_point.tolist(),\n 'score': float(img_kpt['score']),\n 'center': img_kpt['center'].tolist(),\n 'scale': img_kpt['scale'].tolist()\n } for img_kpt, key_point in zip(img_kpts, key_points)]\n\n cat_results.extend(result)\n\n return cat_results\n\n\n def _do_python_keypoint_eval(self, res_file):\n \"\"\"Keypoint evaluation using COCOAPI.\"\"\"\n coco_det = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_det, 'keypoints', self.sigmas)\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n stats_names = [\n 'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',\n 'AR .75', 'AR (M)', 'AR (L)'\n ]\n\n info_str = list(zip(stats_names, coco_eval.stats))\n\n return info_str\n\n def _sort_and_unique_bboxes(self, kpts, key='bbox_id'):\n \"\"\"sort kpts and remove the repeated ones.\"\"\"\n for img_id, persons in kpts.items():\n num = len(persons)\n kpts[img_id] = sorted(kpts[img_id], key=lambda x: x[key])\n for i in range(num - 1, 0, -1):\n if kpts[img_id][i][key] == kpts[img_id][i - 1][key]:\n del kpts[img_id][i]\n\n return kpts\n\n\n def evaluate1111(self, results, res_folder=None, metric='PCK', **kwargs):\n# def evaluate(self, results, res_folder=None, metric='PCK', **kwargs):\n \"\"\"Evaluate keypoint detection results. The pose prediction results will\n be saved in `${res_folder}/result_keypoints.json`.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n results (list(preds, boxes, image_path, output_heatmap))\n :preds (np.ndarray[N,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n :boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n :image_paths (list[str]): For example, ['Test/source/0.jpg']\n :output_heatmap (np.ndarray[N, K, H, W]): model outputs.\n\n res_folder (str, optional): The folder to save the testing\n results. 
If not specified, a temp folder will be created.\n Default: None.\n metric (str | list[str]): Metric to be performed.\n Options: 'PCK', 'NME'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCK', 'NME']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n if res_folder is not None:\n tmp_folder = None\n res_file = osp.join(res_folder, 'result_keypoints.json')\n else:\n tmp_folder = tempfile.TemporaryDirectory()\n res_file = osp.join(tmp_folder.name, 'result_keypoints.json')\n\n kpts = []\n for result in results:\n preds = result['preds']\n boxes = result['boxes']\n image_paths = result['image_paths']\n bbox_ids = result['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n kpts.append({\n 'keypoints': preds[i].tolist(),\n 'center': boxes[i][0:2].tolist(),\n 'scale': boxes[i][2:4].tolist(),\n 'area': float(boxes[i][4]),\n 'score': float(boxes[i][5]),\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file, metrics)\n name_value = OrderedDict(info_str)\n\n if tmp_folder is not None:\n tmp_folder.cleanup()\n\n return name_value\n\n def _report_metric(self, res_file, metrics, pck_thr=0.3):\n \"\"\"Keypoint evaluation.\n\n Args:\n res_file (str): Json file stored prediction results.\n metrics (str | list[str]): Metric to be performed.\n Options: 'PCK', 'NME'.\n pck_thr (float): PCK threshold, default: 0.3.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n info_str = []\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n assert len(preds) == len(self.db)\n\n outputs = []\n gts = []\n masks = []\n\n for pred, item in zip(preds, self.db):\n outputs.append(np.array(pred['keypoints'])[:, :-1])\n gts.append(np.array(item['joints_3d'])[:, :-1])\n masks.append((np.array(item['joints_3d_visible'])[:, 0]) > 0)\n\n outputs = np.array(outputs)\n gts = np.array(gts)\n masks = np.array(masks)\n\n normalize_factor = self._get_normalize_factor(gts)\n\n if 'PCK' in metrics:\n _, pck, _ = keypoint_pck_accuracy(outputs, gts, masks, pck_thr,\n normalize_factor)\n info_str.append(('PCK', pck))\n\n if 'NME' in metrics:\n info_str.append(\n ('NME', keypoint_nme(outputs, gts, masks, normalize_factor)))\n\n return info_str\n\n @staticmethod\n def _write_keypoint_results(keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n @staticmethod\n def _sort_and_unique_bboxes(kpts, key='bbox_id'):\n \"\"\"sort kpts and remove the repeated ones.\"\"\"\n kpts = sorted(kpts, key=lambda x: x[key])\n num = len(kpts)\n for i in range(num - 1, 0, -1):\n if kpts[i][key] == kpts[i - 1][key]:\n del kpts[i]\n\n return kpts\n\n @staticmethod\n def _get_normalize_factor(gts):\n \"\"\"Get inter-ocular distance as the normalize factor, measured as the\n Euclidean distance between the outer corners of the eyes.\n\n Args:\n gts (np.ndarray[N, K, 2]): Groundtruth keypoint location.\n\n Return:\n np.ndarray[N, 2]: normalized factor\n \"\"\"\n\n interocular = np.linalg.norm(\n gts[:, 0, :] - gts[:, 1, :], axis=1, keepdims=True)\n return np.tile(interocular, [1, 2])\n" ]
[ [ "numpy.minimum", "numpy.tile", "numpy.linalg.norm", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guilhermealvess/emotions
[ "a65df8f44a9ff4c25421e4b5bf8dc5d918dbd38c", "a65df8f44a9ff4c25421e4b5bf8dc5d918dbd38c" ]
[ "core/gabor.py", "core/emotions.py" ]
[ "\n\nimport numpy as np\nfrom skimage.filters import gabor_kernel\nimport cv2\n\n\nclass KernelParams:\n def __init__(self, wavelength, orientation):\n self.wavelength = wavelength\n self.orientation = orientation\n\n def __hash__(self):\n return hash((self.wavelength, self.orientation))\n\n def __eq__(self, other):\n return (self.wavelength, self.orientation) == \\\n (other.wavelength, other.orientation)\n\n def __ne__(self, other):\n return not(self == other)\n\n\nclass GaborBank:\n def __init__(self, w = [4, 7, 10, 13],\n o = [i for i in np.arange(0, np.pi, np.pi / 8)]):\n self._wavelengths = w\n self._orientations = o\n self._kernels = {}\n for wavelength in self._wavelengths:\n for orientation in self._orientations:\n frequency = 1 / wavelength\n kernel = gabor_kernel(frequency, orientation)\n par = KernelParams(wavelength, orientation)\n self._kernels[par] = kernel\n\n def filter(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n responses = []\n for wavelength in self._wavelengths:\n for orientation in self._orientations:\n frequency = 1 / wavelength\n par = KernelParams(wavelength, orientation)\n kernel = self._kernels[par]\n real = cv2.filter2D(image, cv2.CV_32F, kernel.real)\n imag = cv2.filter2D(image, cv2.CV_32F, kernel.imag)\n mag = cv2.magnitude(real, imag)\n cv2.normalize(mag, mag, -1, 1, cv2.NORM_MINMAX)\n responses.append(mag)\n return np.array(responses)", "\n\nimport os\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom core.gabor import GaborBank\nfrom core.data import FaceData\nfrom core.faces import FaceDetector\n\nfrom sklearn import svm\nfrom sklearn.externals import joblib\n\n\nclass InvalidModelException(Exception):\n pass\n\nclass EmotionsDetector:\n\n def __init__(self):\n self._clf = svm.SVC(kernel='rbf', gamma=0.001, C=10,\n decision_function_shape='ovr',\n probability=True, class_weight='balanced')\n self._emotions = OrderedDict([\n (0, 'neutral'), (1, 'happiness'), (2, 'sadness'),\n (3, 'anger'), (4, 'fear'), (5, 'surprise'),\n (6, 'disgust')\n ])\n modulePath = os.path.dirname(__file__)\n self._modelFile = os.path.abspath('{}/models/emotions_model.dat' \\\n .format(modulePath))\n if not os.path.isfile(self._modelFile):\n raise InvalidModelException('Could not find model file: {}' \\\n .format(self._modelFile))\n if not self.load():\n raise InvalidModelException('Could not load model from file: {}' \\\n .format(self._modelFile))\n\n def load(self):\n try:\n clf = joblib.load(self._modelFile)\n except:\n return False\n self._clf = clf\n return True\n\n def _relevantFeatures(self, gaborResponses, facialLandmarks):\n points = np.array(facialLandmarks)\n try:\n responses = gaborResponses[:, points[:, 1], points[:, 0]]\n except:\n w = gaborResponses.shape[2]\n h = gaborResponses.shape[1]\n responses = np.zeros((32, 68), dtype=float)\n for i in range(len(points)):\n x = points[i][0]\n y = points[i][1]\n if x < w and y < h:\n responses[:, i] = gaborResponses[:, y, x]\n else:\n responses[:, i] = 0.0\n\n featureVector = responses.reshape(-1).tolist()\n return featureVector\n\n \n def detect(self, face, gaborResponses):\n features = self._relevantFeatures(gaborResponses, face.landmarks)\n return self.predict(features)\n\n def predict(self, features):\n probas = self._clf.predict_proba([features])[0]\n ret = OrderedDict()\n for i in range(len(self._emotions)):\n label = self._emotions[i]\n ret[label] = probas[i]\n return ret" ]
[ [ "numpy.arange", "numpy.array" ], [ "numpy.array", "sklearn.externals.joblib.load", "numpy.zeros", "sklearn.svm.SVC" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mihir135/deep_learning_nanodegree
[ "018bf9228d72a8c0580eb82070223cf5225ffd4a" ]
[ "3. Generate TV scripts/helper.py" ]
[ "import os\nimport pickle\nimport torch\n\n\nSPECIAL_WORDS = {'PADDING': '<PAD>'}\n\n\ndef load_data(path):\n \"\"\"\n Load Dataset from File\n \"\"\"\n input_file = os.path.join(path)\n with open(input_file, \"r\") as f:\n data = f.read()\n\n return data\n\n\ndef preprocess_and_save_data(dataset_path, token_lookup, create_lookup_tables):\n \"\"\"\n Preprocess Text Data\n \"\"\"\n text = load_data(dataset_path)\n \n # Ignore notice, since we don't use it for analysing the data\n text = text[81:]\n\n token_dict = token_lookup()\n for key, token in token_dict.items():\n text = text.replace(key, ' {} '.format(token))\n\n text = text.lower()\n text = text.split()\n\n vocab_to_int, int_to_vocab = create_lookup_tables(text + list(SPECIAL_WORDS.values()))\n int_text = [vocab_to_int[word] for word in text]\n pickle.dump((int_text, vocab_to_int, int_to_vocab, token_dict), open('preprocess.p', 'wb'))\n\n\ndef load_preprocess():\n \"\"\"\n Load the Preprocessed Training data and return them in batches of <batch_size> or less\n \"\"\"\n return pickle.load(open('preprocess.p', mode='rb'))\n\n\ndef save_model(filename, decoder):\n save_filename = os.path.splitext(os.path.basename(filename))[0] + '.pt'\n torch.save(decoder, save_filename)\n\n\ndef load_model(filename):\n save_filename = os.path.splitext(os.path.basename(filename))[0] + '.pt'\n print(save_filename)\n return torch.load(save_filename)\n" ]
[ [ "torch.load", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HumanCompatibleAI/malmo
[ "147dad058c00574e57205833159decc91c8adfd1" ]
[ "MalmoEnv/malmoenv/core.py" ]
[ "# ------------------------------------------------------------------------------------------------\n# Copyright (c) 2018 Microsoft Corporation\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute,\n# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all copies or\n# substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT\n# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# ------------------------------------------------------------------------------------------------\n\nfrom lxml import etree\nimport struct\nimport socket\nimport time\nimport random\nimport numpy as np\nfrom malmoenv import comms\nfrom malmoenv.commands import CommandParser\nimport uuid\nimport gym.spaces\nfrom malmoenv.comms import retry\nfrom malmoenv.version import malmo_version\n\n\nclass StringActionSpace(gym.spaces.Discrete):\n \"\"\"Malmo actions as their strings.\"\"\"\n def __init__(self):\n gym.spaces.Discrete.__init__(self, 1)\n\n def __getitem__(self, action):\n return action\n\n\nclass ActionSpace(gym.spaces.Discrete):\n \"\"\"Malmo actions as gym action space\"\"\"\n def __init__(self, actions):\n self.actions = actions\n gym.spaces.Discrete.__init__(self, len(self.actions))\n\n def sample(self):\n return random.randint(1, len(self.actions)) - 1\n\n def __getitem__(self, action):\n return self.actions[action]\n\n def __len__(self):\n return len(self.actions)\n\n\nclass VisualObservationSpace(gym.spaces.Box):\n \"\"\"Space for visual observations: width x height x depth as a flat array.\n Where depth is 3 or 4 if encoding scene depth.\n \"\"\"\n def __init__(self, width, height, depth):\n gym.spaces.Box.__init__(self,\n low=np.iinfo(np.int8).min, high=np.iinfo(np.int8).max,\n shape=(height, width, depth), dtype=np.int8)\n\n\nclass EnvException(Exception):\n def __init__(self, message):\n super(EnvException, self).__init__(message)\n\n\nclass MissionInitException(Exception):\n def __init__(self, message):\n super(MissionInitException, self).__init__(message)\n\n\nMAX_WAIT = 60 * 3\n\n\nclass Env:\n \"\"\"Malmo \"Env\" open ai gym compatible environment API\"\"\"\n def __init__(self):\n self.action_space = None\n self.observation_space = None\n self.xml = None\n self.integratedServerPort = 0\n self.role = 0\n self.agent_count = 0\n self.resets = 0\n self.ns = '{http://ProjectMalmo.microsoft.com}'\n self.client_socket = None\n self.server = 'localhost' # The mission server\n self.port = 9000 # The mission server port\n self.server2 = self.server # optional server for agent (role <> 0)\n self.port2 = self.port + self.role # optional server port for agent\n self.resync_period = 0\n self.turn_key = \"\"\n self.exp_uid = \"\"\n self.done = True\n self.synchronous = False\n 
self.step_options = None\n self.width = 0\n\n \n self.height = 0\n self.depth = 0\n\n def init(self, xml, port, server=None,\n server2=None, port2=None,\n role=0, exp_uid=None, episode=0,\n action_filter=None, resync=0, step_options=0, action_space=None, synchronous=False):\n \"\"\"\"Initialize a Malmo environment.\n xml - the mission xml.\n port - the MalmoEnv service's port.\n server - the MalmoEnv service address. Default is localhost.\n server2 - the MalmoEnv service address for given role if not 0.\n port2 - the MalmoEnv service port for given role if not 0.\n role - the agent role (0..N-1) for missions with N agents. Defaults to 0.\n exp_uid - the experiment's unique identifier. Generated if not given.\n episode - the \"reset\" start count for experiment re-starts. Defaults to 0.\n action_filter - an optional list of valid actions to filter by. Defaults to simple commands.\n step_options - encodes withTurnKey and withInfo in step messages. Defaults to info included,\n turn if required.\n \"\"\"\n self.synchronous = synchronous\n\n if action_filter is None:\n action_filter = {\"move\", \"turn\", \"use\", \"attack\"}\n\n if not xml.startswith('<Mission'):\n i = xml.index(\"<Mission\")\n if i == -1:\n raise EnvException(\"Mission xml must contain <Mission> tag.\")\n xml = xml[i:]\n\n self.xml = etree.fromstring(xml)\n self.role = role\n if exp_uid is None:\n self.exp_uid = str(uuid.uuid4())\n else:\n self.exp_uid = exp_uid\n\n command_parser = CommandParser(action_filter)\n commands = command_parser.get_commands_from_xml(self.xml, self.role)\n actions = command_parser.get_actions(commands)\n # print(\"role \" + str(self.role) + \" actions \" + str(actions)\n\n if action_space:\n self.action_space = action_space\n else:\n self.action_space = ActionSpace(actions)\n\n self.port = port\n if server is not None:\n self.server = server\n if server2 is not None:\n self.server2 = server2\n else:\n self.server2 = self.server\n if port2 is not None:\n self.port2 = port2\n else:\n self.port2 = self.port + self.role\n\n self.agent_count = len(self.xml.findall(self.ns + 'AgentSection'))\n turn_based = self.xml.find('.//' + self.ns + 'TurnBasedCommands') is not None\n if turn_based:\n self.turn_key = 'AKWozEre'\n else:\n self.turn_key = \"\"\n if step_options is None:\n self.step_options = 0 if not turn_based else 2\n else:\n self.step_options = step_options\n self.done = True\n # print(\"agent count \" + str(self.agent_count) + \" turn based \" + turn_based)\n self.resync_period = resync\n self.resets = episode\n\n e = etree.fromstring(\"\"\"<MissionInit xmlns=\"http://ProjectMalmo.microsoft.com\" \n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" \n SchemaVersion=\"\" PlatformVersion=\"\"\" + '\\\"' + malmo_version + '\\\"' +\n \"\"\">\n <ExperimentUID></ExperimentUID>\n <ClientRole>0</ClientRole>\n <ClientAgentConnection>\n <ClientIPAddress>127.0.0.1</ClientIPAddress>\n <ClientMissionControlPort>0</ClientMissionControlPort>\n <ClientCommandsPort>0</ClientCommandsPort>\n <AgentIPAddress>127.0.0.1</AgentIPAddress>\n <AgentMissionControlPort>0</AgentMissionControlPort>\n <AgentVideoPort>0</AgentVideoPort>\n <AgentDepthPort>0</AgentDepthPort>\n <AgentLuminancePort>0</AgentLuminancePort>\n <AgentObservationsPort>0</AgentObservationsPort>\n <AgentRewardsPort>0</AgentRewardsPort>\n <AgentColourMapPort>0</AgentColourMapPort>\n </ClientAgentConnection>\n </MissionInit>\"\"\")\n e.insert(0, self.xml)\n self.xml = e\n self.xml.find(self.ns + 'ClientRole').text = str(self.role)\n self.xml.find(self.ns 
+ 'ExperimentUID').text = self.exp_uid\n if self.role != 0 and self.agent_count > 1:\n e = etree.Element(self.ns + 'MinecraftServerConnection',\n attrib={'address': self.server,\n 'port': str(0)\n })\n self.xml.insert(2, e)\n\n video_producers = self.xml.findall('.//' + self.ns + 'VideoProducer')\n assert len(video_producers) == self.agent_count\n video_producer = video_producers[self.role]\n self.width = int(video_producer.find(self.ns + 'Width').text)\n self.height = int(video_producer.find(self.ns + 'Height').text)\n want_depth = video_producer.attrib[\"want_depth\"]\n self.depth = 4 if want_depth is not None and (want_depth == \"true\" or want_depth == \"1\") else 3\n # print(str(self.width) + \"x\" + str(self.height) + \"x\" + str(self.depth))\n self.observation_space = VisualObservationSpace(self.width, self.height, self.depth)\n # print(etree.tostring(self.xml))\n\n @staticmethod\n def _hello(sock):\n comms.send_message(sock, (\"<MalmoEnv\" + malmo_version + \"/>\").encode())\n\n def reset(self):\n \"\"\"gym api reset\"\"\"\n\n if self.resync_period > 0 and (self.resets + 1) % self.resync_period == 0:\n self.exit_resync()\n\n while not self.done:\n self.done = self._quit_episode()\n if not self.done:\n time.sleep(0.1)\n\n return self._start_up()\n\n @retry\n def _start_up(self):\n self.resets += 1\n if self.role != 0:\n self._find_server()\n if not self.client_socket:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n # print(\"connect \" + self.server2 + \":\" + str(self.port2))\n sock.connect((self.server2, self.port2))\n self._hello(sock)\n self.client_socket = sock # Now retries will use connected socket.\n self._init_mission()\n self.done = False\n return self._peek_obs()\n\n def _peek_obs(self):\n obs = None\n start_time = time.time()\n while not self.done and (obs is None or len(obs) == 0):\n peek_message = \"<Peek/>\"\n comms.send_message(self.client_socket, peek_message.encode())\n obs = comms.recv_message(self.client_socket)\n info = comms.recv_message(self.client_socket).decode('utf-8')\n reply = comms.recv_message(self.client_socket)\n done, = struct.unpack('!b', reply)\n self.done = done == 1\n if obs is None or len(obs) == 0:\n if time.time() - start_time > MAX_WAIT:\n self.client_socket.close()\n self.client_socket = None\n raise MissionInitException('too long waiting for first observation')\n time.sleep(0.1)\n\n obs = np.frombuffer(obs, dtype=np.uint8)\n\n if obs is None or len(obs) == 0:\n obs = np.zeros((self.height, self.width, self.depth), dtype=np.int8)\n return obs, info\n\n def _quit_episode(self):\n comms.send_message(self.client_socket, \"<Quit/>\".encode())\n reply = comms.recv_message(self.client_socket)\n ok, = struct.unpack('!I', reply)\n return ok != 0\n\n def render(self):\n \"\"\"gym api render\"\"\"\n pass\n\n def seed(self):\n pass\n\n def step(self, action):\n \"\"\"gym api step\"\"\"\n obs = None\n reward = None\n info = None\n turn = True\n withturnkey = self.step_options < 2\n print(withturnkey)\n withinfo = self.step_options == 0 or self.step_options == 2\n\n while not self.done and \\\n ((obs is None or len(obs) == 0) or\n (withinfo and info is None) or turn):\n step_message = \"<Step\" + str(self.step_options) + \">\" + \\\n self.action_space[action] + \\\n \"</Step\" + str(self.step_options) + \" >\"\n t0 = time.time()\n comms.send_message(self.client_socket, step_message.encode())\n print(\"send action {}\".format(time.time() - t0)); t0 = time.time()\n if withturnkey:\n 
comms.send_message(self.client_socket, self.turn_key.encode())\n obs = comms.recv_message(self.client_socket)\n\n reply = comms.recv_message(self.client_socket)\n reward, done, sent = struct.unpack('!dbb', reply)\n print(\"recieve reward {}\".format(time.time() - t0)); t0 = time.time()\n self.done = done == 1\n if withinfo:\n info = comms.recv_message(self.client_socket).decode('utf-8')\n\n turn_key = comms.recv_message(self.client_socket).decode('utf-8') if withturnkey else \"\"\n # print(\"[\" + str(self.role) + \"] TK \" + turn_key + \" self.TK \" + str(self.turn_key))\n if turn_key != \"\":\n if sent != 0:\n turn = False\n # Done turns if: turn = self.turn_key == turn_key\n self.turn_key = turn_key\n else:\n turn = sent == 0\n\n # if (obs is None or len(obs) == 0) or turn:\n # time.sleep(0.1)\n print(\"turnkeyprocessor {}\".format(time.time() - t0)); t0 = time.time()\n obs = np.frombuffer(obs, dtype=np.uint8)\n print(\"creating obs from buffer {}\".format(time.time() - t0)); t0 = time.time()\n return obs, reward, self.done, info\n\n def close(self):\n \"\"\"gym api close\"\"\"\n try:\n # Purge last token from head node with <Close> message.\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server, self.port))\n self._hello(sock)\n\n comms.send_message(sock, (\"<Close>\" + self._get_token() + \"</Close>\").encode())\n reply = comms.recv_message(sock)\n ok, = struct.unpack('!I', reply)\n assert ok\n sock.close()\n except Exception as e:\n self._log_error(e)\n if self.client_socket:\n self.client_socket.close()\n self.client_socket = None\n\n def reinit(self):\n \"\"\"Use carefully to reset the episode count to 0.\"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server, self.port))\n self._hello(sock)\n\n comms.send_message(sock, (\"<Init>\" + self._get_token() + \"</Init>\").encode())\n reply = comms.recv_message(sock)\n sock.close()\n ok, = struct.unpack('!I', reply)\n return ok != 0\n\n def status(self, head):\n \"\"\"Get status from server.\n head - Ping the the head node if True.\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if head:\n sock.connect((self.server, self.port))\n else:\n sock.connect((self.server2, self.port2))\n self._hello(sock)\n\n comms.send_message(sock, \"<Status/>\".encode())\n status = comms.recv_message(sock).decode('utf-8')\n sock.close()\n return status\n\n def exit(self):\n \"\"\"Use carefully to cause the Minecraft service to exit (and hopefully restart).\n Likely to throw communication errors so wrap in exception handler.\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server2, self.port2))\n self._hello(sock)\n\n comms.send_message(sock, (\"<Exit>\" + self._get_token() + \"</Exit>\").encode())\n reply = comms.recv_message(sock)\n sock.close()\n ok, = struct.unpack('!I', reply)\n return ok != 0\n\n def resync(self):\n \"\"\"make sure we can ping the head and assigned node.\n Possibly after an env.exit()\"\"\"\n success = 0\n for head in [True, False]:\n for _ in range(30):\n try:\n self.status(head)\n success += 1\n break\n except Exception as e:\n self._log_error(e)\n time.sleep(10)\n\n if success != 2:\n raise EnvException(\"Failed to contact service\" + (\" head\" if success == 0 else \"\"))\n\n def exit_resync(self):\n \"\"\"Exit the current Minecraft and wait for new one to replace it.\"\"\"\n print(\"********** exit & resync **********\")\n try:\n if self.client_socket:\n self.client_socket.close()\n self.client_socket = 
None\n try:\n self.exit()\n except Exception as e:\n self._log_error(e)\n print(\"Pause for exit(s) ...\")\n time.sleep(60)\n except (socket.error, ConnectionError):\n pass\n self.resync()\n\n def _log_error(self, exn):\n pass # Keeping pylint happy\n\n def _find_server(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.server, self.port))\n self._hello(sock)\n\n start_time = time.time()\n port = 0\n while port == 0:\n comms.send_message(sock, (\"<Find>\" + self._get_token() + \"</Find>\").encode())\n reply = comms.recv_message(sock)\n port, = struct.unpack('!I', reply)\n if port == 0:\n if time.time() - start_time > MAX_WAIT:\n if self.client_socket:\n self.client_socket.close()\n self.client_socket = None\n raise MissionInitException('too long finding mission to join')\n time.sleep(1)\n sock.close()\n # print(\"Found mission integrated server port \" + str(port))\n self.integratedServerPort = port\n e = self.xml.find(self.ns + 'MinecraftServerConnection')\n if e is not None:\n e.attrib['port'] = str(self.integratedServerPort)\n\n def _init_mission(self):\n ok = 0\n while ok != 1:\n xml = etree.tostring(self.xml)\n # syncticking always ;))))))))))))))))))))))))))))))))))))))))))))))))))))\n token = (self._get_token() + \":\" + str(self.agent_count) + \":\" + str(self.synchronous).lower()).encode()\n # print(xml.decode())\n comms.send_message(self.client_socket, xml)\n comms.send_message(self.client_socket, token)\n\n reply = comms.recv_message(self.client_socket)\n ok, = struct.unpack('!I', reply)\n self.turn_key = comms.recv_message(self.client_socket).decode('utf-8')\n if ok != 1:\n time.sleep(1)\n\n def _get_token(self):\n return self.exp_uid + \":\" + str(self.role) + \":\" + str(self.resets)\n\n\ndef make():\n return Env()\n" ]
[ [ "numpy.frombuffer", "numpy.zeros", "numpy.iinfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ThompsonJ314/openmc
[ "173c85c2cbb1784e49edf51d2d379a0f981de4e3" ]
[ "openmc/filter.py" ]
[ "from abc import ABCMeta\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\nimport hashlib\nfrom itertools import product\nfrom numbers import Real, Integral\nfrom xml.etree import ElementTree as ET\n\nimport numpy as np\nimport pandas as pd\n\nimport openmc\nimport openmc.checkvalue as cv\nfrom .cell import Cell\nfrom .material import Material\nfrom .mixin import IDManagerMixin\nfrom .surface import Surface\nfrom .universe import Universe\n\n\n_FILTER_TYPES = (\n 'universe', 'material', 'cell', 'cellborn', 'surface', 'mesh', 'energy',\n 'energyout', 'mu', 'polar', 'azimuthal', 'distribcell', 'delayedgroup',\n 'energyfunction', 'cellfrom', 'legendre', 'spatiallegendre',\n 'sphericalharmonics', 'zernike', 'zernikeradial', 'particle', 'cellinstance'\n)\n\n_CURRENT_NAMES = (\n 'x-min out', 'x-min in', 'x-max out', 'x-max in',\n 'y-min out', 'y-min in', 'y-max out', 'y-max in',\n 'z-min out', 'z-min in', 'z-max out', 'z-max in'\n)\n\n_PARTICLES = {'neutron', 'photon', 'electron', 'positron'}\n\n\nclass FilterMeta(ABCMeta):\n \"\"\"Metaclass for filters that ensures class names are appropriate.\"\"\"\n\n def __new__(cls, name, bases, namespace, **kwargs):\n # Check the class name.\n required_suffix = 'Filter'\n if not name.endswith(required_suffix):\n raise ValueError(\"All filter class names must end with 'Filter'\")\n\n # Create a 'short_name' attribute that removes the 'Filter' suffix.\n namespace['short_name'] = name[:-len(required_suffix)]\n\n # Subclass methods can sort of inherit the docstring of parent class\n # methods. If a function is defined without a docstring, most (all?)\n # Python interpreters will search through the parent classes to see if\n # there is a docstring for a function with the same name, and they will\n # use that docstring. However, Sphinx does not have that functionality.\n # This chunk of code handles this docstring inheritance manually so that\n # the autodocumentation will pick it up.\n if name != required_suffix:\n # Look for newly-defined functions that were also in Filter.\n for func_name in namespace:\n if func_name in Filter.__dict__:\n # Inherit the docstring from Filter if not defined.\n if isinstance(namespace[func_name],\n (classmethod, staticmethod)):\n new_doc = namespace[func_name].__func__.__doc__\n old_doc = Filter.__dict__[func_name].__func__.__doc__\n if new_doc is None and old_doc is not None:\n namespace[func_name].__func__.__doc__ = old_doc\n else:\n new_doc = namespace[func_name].__doc__\n old_doc = Filter.__dict__[func_name].__doc__\n if new_doc is None and old_doc is not None:\n namespace[func_name].__doc__ = old_doc\n\n # Make the class.\n return super().__new__(cls, name, bases, namespace, **kwargs)\n\n\ndef _repeat_and_tile(bins, repeat_factor, data_size):\n filter_bins = np.repeat(bins, repeat_factor)\n tile_factor = data_size // len(filter_bins)\n return np.tile(filter_bins, tile_factor)\n\n\nclass Filter(IDManagerMixin, metaclass=FilterMeta):\n \"\"\"Tally modifier that describes phase-space and other characteristics.\n\n Parameters\n ----------\n bins : Integral or Iterable of Integral or Iterable of Real\n The bins for the filter. This takes on different meaning for different\n filters. 
See the docstrings for sublcasses of this filter or the online\n documentation for more details.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Integral or Iterable of Integral or Iterable of Real\n The bins for the filter\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n\n next_id = 1\n used_ids = set()\n\n def __init__(self, bins, filter_id=None):\n self.bins = bins\n self.id = filter_id\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n elif len(self.bins) != len(other.bins):\n return False\n else:\n return np.allclose(self.bins, other.bins)\n\n def __gt__(self, other):\n if type(self) is not type(other):\n if self.short_name in _FILTER_TYPES and \\\n other.short_name in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.short_name) - \\\n _FILTER_TYPES.index(other.short_name)\n return delta > 0\n else:\n return False\n else:\n return max(self.bins) > max(other.bins)\n\n def __lt__(self, other):\n return not self > other\n\n def __hash__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tBins', self.bins)\n return hash(string)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tBins', self.bins)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @classmethod\n def _recursive_subclasses(cls):\n \"\"\"Return all subclasses and their subclasses, etc.\"\"\"\n all_subclasses = []\n\n for subclass in cls.__subclasses__():\n all_subclasses.append(subclass)\n all_subclasses.extend(subclass._recursive_subclasses())\n\n return all_subclasses\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n \"\"\"Construct a new Filter instance from HDF5 data.\n\n Parameters\n ----------\n group : h5py.Group\n HDF5 group to read from\n\n Keyword arguments\n -----------------\n meshes : dict\n Dictionary mapping integer IDs to openmc.MeshBase objects. Only\n used for openmc.MeshFilter objects.\n\n \"\"\"\n\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n # If the HDF5 'type' variable matches this class's short_name, then\n # there is no overriden from_hdf5 method. Pass the bins to __init__.\n if group['type'][()].decode() == cls.short_name.lower():\n out = cls(group['bins'][()], filter_id=filter_id)\n out._num_bins = group['n_bins'][()]\n return out\n\n # Search through all subclasses and find the one matching the HDF5\n # 'type'. 
Call that class's from_hdf5 method.\n for subclass in cls._recursive_subclasses():\n if group['type'][()].decode() == subclass.short_name.lower():\n return subclass.from_hdf5(group, **kwargs)\n\n raise ValueError(\"Unrecognized Filter class: '\"\n + group['type'][()].decode() + \"'\")\n\n @property\n def bins(self):\n return self._bins\n\n @bins.setter\n def bins(self, bins):\n self.check_bins(bins)\n self._bins = bins\n\n @property\n def num_bins(self):\n return len(self.bins)\n\n def check_bins(self, bins):\n \"\"\"Make sure given bins are valid for this filter.\n\n Raises\n ------\n TypeError\n ValueError\n\n \"\"\"\n\n pass\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = ET.Element('filter')\n element.set('id', str(self.id))\n element.set('type', self.short_name.lower())\n\n subelement = ET.SubElement(element, 'bins')\n subelement.text = ' '.join(str(b) for b in self.bins)\n\n return element\n\n def can_merge(self, other):\n \"\"\"Determine if filter can be merged with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to compare with\n\n Returns\n -------\n bool\n Whether the filter can be merged\n\n \"\"\"\n return type(self) is type(other)\n\n def merge(self, other):\n \"\"\"Merge this filter with another.\n\n Parameters\n ----------\n other : openmc.Filter\n Filter to merge with\n\n Returns\n -------\n merged_filter : openmc.Filter\n Filter resulting from the merge\n\n \"\"\"\n\n if not self.can_merge(other):\n msg = 'Unable to merge \"{0}\" with \"{1}\" '.format(\n type(self), type(other))\n raise ValueError(msg)\n\n # Merge unique filter bins\n merged_bins = np.concatenate((self.bins, other.bins))\n merged_bins = np.unique(merged_bins, axis=0)\n\n # Create a new filter with these bins and a new auto-generated ID\n return type(self)(merged_bins)\n\n def is_subset(self, other):\n \"\"\"Determine if another filter is a subset of this filter.\n\n If all of the bins in the other filter are included as bins in this\n filter, then it is a subset of this filter.\n\n Parameters\n ----------\n other : openmc.Filter\n The filter to query as a subset of this filter\n\n Returns\n -------\n bool\n Whether or not the other filter is a subset of this filter\n\n \"\"\"\n\n if type(self) is not type(other):\n return False\n\n for b in other.bins:\n if b not in self.bins:\n return False\n\n return True\n\n def get_bin_index(self, filter_bin):\n \"\"\"Returns the index in the Filter for some bin.\n\n Parameters\n ----------\n filter_bin : int or tuple\n The bin is the integer ID for 'material', 'surface', 'cell',\n 'cellborn', and 'universe' Filters. The bin is an integer for the\n cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of\n floats for 'energy' and 'energyout' filters corresponding to the\n energy boundaries of the bin of interest. 
The bin is an (x,y,z)\n 3-tuple for 'mesh' filters corresponding to the mesh cell of\n interest.\n\n Returns\n -------\n filter_index : int\n The index in the Tally data array for this filter bin.\n\n \"\"\"\n\n if filter_bin not in self.bins:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n\n if isinstance(self.bins, np.ndarray):\n return np.where(self.bins == filter_bin)[0][0]\n else:\n return self.bins.index(filter_bin)\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Keyword arguments\n -----------------\n paths : bool\n Only used for DistribcellFilter. If True (default), expand\n distribcell indices into multi-index columns describing the path\n to that distribcell through the CSG tree. NOTE: This option assumes\n that all distribcell paths are of the same length and do not have\n the same universes and cells but different lattice cell indices.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with columns of strings that characterize the\n filter's bins. The number of rows in the DataFrame is the same as\n the total number of bins in the corresponding tally, with the filter\n bin appropriately tiled to map to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n filter_bins = np.repeat(self.bins, stride)\n tile_factor = data_size // len(filter_bins)\n filter_bins = np.tile(filter_bins, tile_factor)\n df = pd.concat([df, pd.DataFrame(\n {self.short_name.lower(): filter_bins})])\n\n return df\n\n\nclass WithIDFilter(Filter):\n \"\"\"Abstract parent for filters of types with IDs (Cell, Material, etc.).\"\"\"\n def __init__(self, bins, filter_id=None):\n bins = np.atleast_1d(bins)\n\n # Make sure bins are either integers or appropriate objects\n cv.check_iterable_type('filter bins', bins,\n (Integral, self.expected_type))\n\n # Extract ID values\n bins = np.array([b if isinstance(b, Integral) else b.id\n for b in bins])\n super().__init__(bins, filter_id)\n\n def check_bins(self, bins):\n # Check the bin values.\n for edge in bins:\n cv.check_greater_than('filter bin', edge, 0, equality=True)\n\n\nclass UniverseFilter(WithIDFilter):\n \"\"\"Bins tally event locations based on the Universe they occured in.\n\n Parameters\n ----------\n bins : openmc.Universe, int, or iterable thereof\n The Universes to tally. Either openmc.Universe objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Universe IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Universe\n\n\nclass MaterialFilter(WithIDFilter):\n \"\"\"Bins tally event locations based on the Material they occured in.\n\n Parameters\n ----------\n bins : openmc.Material, Integral, or iterable thereof\n The Materials to tally. 
Either openmc.Material objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Material IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Material\n\n\nclass CellFilter(WithIDFilter):\n \"\"\"Bins tally event locations based on the Cell they occured in.\n\n Parameters\n ----------\n bins : openmc.Cell, int, or iterable thereof\n The cells to tally. Either openmc.Cell objects or their ID numbers can\n be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Cell IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Cell\n\n\nclass CellFromFilter(WithIDFilter):\n \"\"\"Bins tally on which Cell the neutron came from.\n\n Parameters\n ----------\n bins : openmc.Cell, Integral, or iterable thereof\n The Cell(s) to tally. Either openmc.Cell objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Integral or Iterable of Integral\n openmc.Cell IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Cell\n\n\nclass CellbornFilter(WithIDFilter):\n \"\"\"Bins tally events based on which Cell the neutron was born in.\n\n Parameters\n ----------\n bins : openmc.Cell, Integral, or iterable thereof\n The birth Cells to tally. Either openmc.Cell objects or their\n Integral ID numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n openmc.Cell IDs.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Cell\n\n\nclass CellInstanceFilter(Filter):\n \"\"\"Bins tally events based on which cell instance a particle is in.\n\n This filter is similar to :class:`DistribcellFilter` but allows one to\n select particular instances to be tallied (instead of obtaining *all*\n instances by default) and allows instances from different cells to be\n specified in a single filter.\n\n .. versionadded:: 0.12\n\n Parameters\n ----------\n bins : iterable of 2-tuples or numpy.ndarray\n The cell instances to tally, given as 2-tuples. 
For the first value in\n the tuple, either openmc.Cell objects or their integral ID numbers can\n be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : numpy.ndarray\n 2D numpy array of cell IDs and instances\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n See Also\n --------\n DistribcellFilter\n\n \"\"\"\n def __init__(self, bins, filter_id=None):\n self.bins = bins\n self.id = filter_id\n\n @Filter.bins.setter\n def bins(self, bins):\n pairs = np.empty((len(bins), 2), dtype=int)\n for i, (cell, instance) in enumerate(bins):\n cv.check_type('cell', cell, (openmc.Cell, Integral))\n cv.check_type('instance', instance, Integral)\n pairs[i, 0] = cell if isinstance(cell, Integral) else cell.id\n pairs[i, 1] = instance\n self._bins = pairs\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with a multi-index column for the cell instance.\n The number of rows in the DataFrame is the same as the total number\n of bins in the corresponding tally, with the filter bin appropriately\n tiled to map to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Repeat and tile bins as necessary to account for other filters.\n bins = np.repeat(self.bins, stride, axis=0)\n tile_factor = data_size // len(bins)\n bins = np.tile(bins, (tile_factor, 1))\n\n columns = pd.MultiIndex.from_product([[self.short_name.lower()],\n ['cell', 'instance']])\n return pd.DataFrame(bins, columns=columns)\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = ET.Element('filter')\n element.set('id', str(self.id))\n element.set('type', self.short_name.lower())\n\n subelement = ET.SubElement(element, 'bins')\n subelement.text = ' '.join(str(i) for i in self.bins.ravel())\n return element\n\n\nclass SurfaceFilter(WithIDFilter):\n \"\"\"Filters particles by surface crossing\n\n Parameters\n ----------\n bins : openmc.Surface, int, or iterable of Integral\n The surfaces to tally over. Either openmc.Surface objects or their ID\n numbers can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n The surfaces to tally over. 
Either openmc.Surface objects or their ID\n numbers can be used.\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n expected_type = Surface\n\n\nclass ParticleFilter(Filter):\n \"\"\"Bins tally events based on the Particle type.\n\n Parameters\n ----------\n bins : str, or iterable of str\n The particles to tally represented as strings ('neutron', 'photon',\n 'electron', 'positron').\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n The Particles to tally\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n elif len(self.bins) != len(other.bins):\n return False\n else:\n return np.all(self.bins == other.bins)\n\n __hash__ = Filter.__hash__\n\n @Filter.bins.setter\n def bins(self, bins):\n bins = np.atleast_1d(bins)\n cv.check_iterable_type('filter bins', bins, str)\n for edge in bins:\n cv.check_value('filter bin', edge, _PARTICLES)\n self._bins = bins\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n particles = [b.decode() for b in group['bins'][()]]\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n return cls(particles, filter_id=filter_id)\n\n\nclass MeshFilter(Filter):\n \"\"\"Bins tally event locations onto a regular, rectangular mesh.\n\n Parameters\n ----------\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n id : int\n Unique identifier for the filter\n bins : list of tuple\n A list of mesh indices for each filter bin, e.g. 
[(1, 1, 1), (2, 1, 1),\n ...]\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n\n def __init__(self, mesh, filter_id=None):\n self.mesh = mesh\n self.id = filter_id\n\n def __hash__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tMesh ID', self.mesh.id)\n return hash(string)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tMesh ID', self.mesh.id)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n if 'meshes' not in kwargs:\n raise ValueError(cls.__name__ + \" requires a 'meshes' keyword \"\n \"argument.\")\n\n mesh_id = group['bins'][()]\n mesh_obj = kwargs['meshes'][mesh_id]\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n out = cls(mesh_obj, filter_id=filter_id)\n\n return out\n\n @property\n def mesh(self):\n return self._mesh\n\n @mesh.setter\n def mesh(self, mesh):\n cv.check_type('filter mesh', mesh, openmc.MeshBase)\n self._mesh = mesh\n if isinstance(mesh, openmc.UnstructuredMesh):\n self.bins = list(range(len(mesh.volumes)))\n else:\n self.bins = list(mesh.indices)\n\n def can_merge(self, other):\n # Mesh filters cannot have more than one bin\n return False\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with three columns describing the x,y,z mesh\n cell indices corresponding to each filter bin. 
The number of rows\n in the DataFrame is the same as the total number of bins in the\n corresponding tally, with the filter bin appropriately tiled to map\n to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n # Initialize dictionary to build Pandas Multi-index column\n filter_dict = {}\n\n # Append mesh ID as outermost index of multi-index\n mesh_key = 'mesh {}'.format(self.mesh.id)\n\n # Find mesh dimensions - use 3D indices for simplicity\n n_dim = len(self.mesh.dimension)\n if n_dim == 3:\n nx, ny, nz = self.mesh.dimension\n elif n_dim == 2:\n nx, ny = self.mesh.dimension\n nz = 1\n else:\n nx = self.mesh.dimension\n ny = nz = 1\n\n # Generate multi-index sub-column for x-axis\n filter_dict[mesh_key, 'x'] = _repeat_and_tile(\n np.arange(1, nx + 1), stride, data_size)\n\n # Generate multi-index sub-column for y-axis\n filter_dict[mesh_key, 'y'] = _repeat_and_tile(\n np.arange(1, ny + 1), nx * stride, data_size)\n\n # Generate multi-index sub-column for z-axis\n filter_dict[mesh_key, 'z'] = _repeat_and_tile(\n np.arange(1, nz + 1), nx * ny * stride, data_size)\n\n # Initialize a Pandas DataFrame from the mesh dictionary\n df = pd.concat([df, pd.DataFrame(filter_dict)])\n\n return df\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = super().to_xml_element()\n element[0].text = str(self.mesh.id)\n return element\n\n\nclass MeshSurfaceFilter(MeshFilter):\n \"\"\"Filter events by surface crossings on a regular, rectangular mesh.\n\n Parameters\n ----------\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Integral\n The mesh ID\n mesh : openmc.MeshBase\n The mesh object that events will be tallied onto\n id : int\n Unique identifier for the filter\n bins : list of tuple\n\n A list of mesh indices / surfaces for each filter bin, e.g. [(1, 1,\n 'x-min out'), (1, 1, 'x-min in'), ...]\n\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n\n @MeshFilter.mesh.setter\n def mesh(self, mesh):\n cv.check_type('filter mesh', mesh, openmc.MeshBase)\n self._mesh = mesh\n\n # Take the product of mesh indices and current names\n n_dim = mesh.n_dimension\n self.bins = [mesh_tuple + (surf,) for mesh_tuple, surf in\n product(mesh.indices, _CURRENT_NAMES[:4*n_dim])]\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with three columns describing the x,y,z mesh\n cell indices corresponding to each filter bin. 
The number of rows\n in the DataFrame is the same as the total number of bins in the\n corresponding tally, with the filter bin appropriately tiled to map\n to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n # Initialize dictionary to build Pandas Multi-index column\n filter_dict = {}\n\n # Append mesh ID as outermost index of multi-index\n mesh_key = 'mesh {}'.format(self.mesh.id)\n\n # Find mesh dimensions - use 3D indices for simplicity\n n_surfs = 4 * len(self.mesh.dimension)\n if len(self.mesh.dimension) == 3:\n nx, ny, nz = self.mesh.dimension\n elif len(self.mesh.dimension) == 2:\n nx, ny = self.mesh.dimension\n nz = 1\n else:\n nx = self.mesh.dimension\n ny = nz = 1\n\n # Generate multi-index sub-column for x-axis\n filter_dict[mesh_key, 'x'] = _repeat_and_tile(\n np.arange(1, nx + 1), n_surfs * stride, data_size)\n\n # Generate multi-index sub-column for y-axis\n if len(self.mesh.dimension) > 1:\n filter_dict[mesh_key, 'y'] = _repeat_and_tile(\n np.arange(1, ny + 1), n_surfs * nx * stride, data_size)\n\n # Generate multi-index sub-column for z-axis\n if len(self.mesh.dimension) > 2:\n filter_dict[mesh_key, 'z'] = _repeat_and_tile(\n np.arange(1, nz + 1), n_surfs * nx * ny * stride, data_size)\n\n # Generate multi-index sub-column for surface\n filter_dict[mesh_key, 'surf'] = _repeat_and_tile(\n _CURRENT_NAMES[:n_surfs], stride, data_size)\n\n # Initialize a Pandas DataFrame from the mesh dictionary\n return pd.concat([df, pd.DataFrame(filter_dict)])\n\n\nclass RealFilter(Filter):\n \"\"\"Tally modifier that describes phase-space and other characteristics\n\n Parameters\n ----------\n values : iterable of float\n A list of values for which each successive pair constitutes a range of\n values for a single bin\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n values for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of values indicating a\n filter bin range\n num_bins : int\n The number of filter bins\n\n \"\"\"\n def __init__(self, values, filter_id=None):\n self.values = np.asarray(values)\n self.bins = np.vstack((self.values[:-1], self.values[1:])).T\n self.id = filter_id\n\n def __gt__(self, other):\n if type(self) is type(other):\n # Compare largest/smallest bin edges in filters\n # This logic is used when merging tallies with real filters\n return self.values[0] >= other.values[-1]\n else:\n return super().__gt__(other)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tValues', self.values)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @Filter.bins.setter\n def bins(self, bins):\n Filter.bins.__set__(self, np.asarray(bins))\n\n def check_bins(self, bins):\n for v0, v1 in bins:\n # Values should be real\n cv.check_type('filter value', v0, Real)\n cv.check_type('filter value', v1, Real)\n\n # Make sure that each tuple has values that are increasing\n if v1 < v0:\n raise ValueError('Values {} and {} appear to be out of order'\n .format(v0, v1))\n\n for pair0, pair1 in zip(bins[:-1], bins[1:]):\n # Successive pairs should be ordered\n if pair1[1] < pair0[1]:\n raise ValueError('Values {} and {} appear to be out of order'\n .format(pair1[1], 
pair0[1]))\n\n def can_merge(self, other):\n if type(self) is not type(other):\n return False\n\n if self.bins[0, 0] == other.bins[-1][1]:\n # This low edge coincides with other's high edge\n return True\n elif self.bins[-1][1] == other.bins[0, 0]:\n # This high edge coincides with other's low edge\n return True\n else:\n return False\n\n def merge(self, other):\n if not self.can_merge(other):\n msg = 'Unable to merge \"{0}\" with \"{1}\" ' \\\n 'filters'.format(type(self), type(other))\n raise ValueError(msg)\n\n # Merge unique filter bins\n merged_values = np.concatenate((self.values, other.values))\n merged_values = np.unique(merged_values)\n\n # Create a new filter with these bins and a new auto-generated ID\n return type(self)(sorted(merged_values))\n\n def is_subset(self, other):\n \"\"\"Determine if another filter is a subset of this filter.\n\n If all of the bins in the other filter are included as bins in this\n filter, then it is a subset of this filter.\n\n Parameters\n ----------\n other : openmc.Filter\n The filter to query as a subset of this filter\n\n Returns\n -------\n bool\n Whether or not the other filter is a subset of this filter\n\n \"\"\"\n\n if type(self) is not type(other):\n return False\n elif self.num_bins != other.num_bins:\n return False\n else:\n return np.allclose(self.values, other.values)\n\n def get_bin_index(self, filter_bin):\n i = np.where(self.bins[:, 1] == filter_bin[1])[0]\n if len(i) == 0:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n else:\n return i[0]\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with one column of the lower energy bound and one\n column of upper energy bound for each filter bin. 
The number of\n rows in the DataFrame is the same as the total number of bins in the\n corresponding tally, with the filter bin appropriately tiled to map\n to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n # Extract the lower and upper energy bounds, then repeat and tile\n # them as necessary to account for other filters.\n lo_bins = np.repeat(self.bins[:, 0], stride)\n hi_bins = np.repeat(self.bins[:, 1], stride)\n tile_factor = data_size // len(lo_bins)\n lo_bins = np.tile(lo_bins, tile_factor)\n hi_bins = np.tile(hi_bins, tile_factor)\n\n # Add the new energy columns to the DataFrame.\n if hasattr(self, 'units'):\n units = ' [{}]'.format(self.units)\n else:\n units = ''\n\n df.loc[:, self.short_name.lower() + ' low' + units] = lo_bins\n df.loc[:, self.short_name.lower() + ' high' + units] = hi_bins\n\n return df\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = super().to_xml_element()\n element[0].text = ' '.join(str(x) for x in self.values)\n return element\n\n\nclass EnergyFilter(RealFilter):\n \"\"\"Bins tally events based on incident particle energy.\n\n Parameters\n ----------\n values : Iterable of Real\n A list of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of energies in [eV]\n for a single filter bin\n num_bins : int\n The number of filter bins\n\n \"\"\"\n units = 'eV'\n\n def get_bin_index(self, filter_bin):\n # Use lower energy bound to find index for RealFilters\n deltas = np.abs(self.bins[:, 1] - filter_bin[1]) / filter_bin[1]\n min_delta = np.min(deltas)\n if min_delta < 1E-3:\n return deltas.argmin()\n else:\n msg = 'Unable to get the bin index for Filter since \"{0}\" ' \\\n 'is not one of the bins'.format(filter_bin)\n raise ValueError(msg)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for v0, v1 in bins:\n cv.check_greater_than('filter value', v0, 0., equality=True)\n cv.check_greater_than('filter value', v1, 0., equality=True)\n\n\nclass EnergyoutFilter(EnergyFilter):\n \"\"\"Bins tally events based on outgoing particle energy.\n\n Parameters\n ----------\n values : Iterable of Real\n A list of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n energies in [eV] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of energies in [eV]\n for a single filter bin\n num_bins : int\n The number of filter bins\n\n \"\"\"\n\n\ndef _path_to_levels(path):\n \"\"\"Convert distribcell path to list of levels\n\n Parameters\n ----------\n path : str\n Distribcell path\n\n Returns\n -------\n list\n List of levels in path\n\n \"\"\"\n # Split path into universes/cells/lattices\n 
path_items = path.split('->')\n\n # Pair together universe and cell information from the same level\n idx = [i for i, item in enumerate(path_items) if item.startswith('u')]\n for i in reversed(idx):\n univ_id = int(path_items.pop(i)[1:])\n cell_id = int(path_items.pop(i)[1:])\n path_items.insert(i, ('universe', univ_id, cell_id))\n\n # Reformat lattice into tuple\n idx = [i for i, item in enumerate(path_items) if isinstance(item, str)]\n for i in idx:\n item = path_items.pop(i)[1:-1]\n lat_id, lat_xyz = item.split('(')\n lat_id = int(lat_id)\n lat_xyz = tuple(int(x) for x in lat_xyz.split(','))\n path_items.insert(i, ('lattice', lat_id, lat_xyz))\n\n return path_items\n\n\nclass DistribcellFilter(Filter):\n \"\"\"Bins tally event locations on instances of repeated cells.\n\n This filter provides a separate score for each unique instance of a repeated\n cell in a geometry. Note that only one cell can be specified in this filter.\n The related :class:`CellInstanceFilter` allows one to obtain scores for\n particular cell instances as well as instances from different cells.\n\n Parameters\n ----------\n cell : openmc.Cell or Integral\n The distributed cell to tally. Either an openmc.Cell or an Integral\n cell ID number can be used.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : Iterable of Integral\n An iterable with one element---the ID of the distributed Cell.\n id : int\n Unique identifier for the filter\n num_bins : int\n The number of filter bins\n paths : list of str\n The paths traversed through the CSG tree to reach each distribcell\n instance (for 'distribcell' filters only)\n\n See Also\n --------\n CellInstanceFilter\n\n \"\"\"\n\n def __init__(self, cell, filter_id=None):\n self._paths = None\n super().__init__(cell, filter_id)\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n out = cls(group['bins'][()], filter_id=filter_id)\n out._num_bins = group['n_bins'][()]\n\n return out\n\n @property\n def num_bins(self):\n # Need to handle number of bins carefully -- for distribcell tallies, we\n # need to know how many instances of the cell there are\n return self._num_bins\n\n @property\n def paths(self):\n return self._paths\n\n @Filter.bins.setter\n def bins(self, bins):\n # Format the bins as a 1D numpy array.\n bins = np.atleast_1d(bins)\n\n # Make sure there is only 1 bin.\n if not len(bins) == 1:\n msg = 'Unable to add bins \"{0}\" to a DistribcellFilter since ' \\\n 'only a single distribcell can be used per tally'.format(bins)\n raise ValueError(msg)\n\n # Check the type and extract the id, if necessary.\n cv.check_type('distribcell bin', bins[0], (Integral, openmc.Cell))\n if isinstance(bins[0], openmc.Cell):\n bins = np.atleast_1d(bins[0].id)\n\n self._bins = bins\n\n @paths.setter\n def paths(self, paths):\n cv.check_iterable_type('paths', paths, str)\n self._paths = paths\n\n def can_merge(self, other):\n # Distribcell filters cannot have more than one bin\n return False\n\n def get_bin_index(self, filter_bin):\n # Filter bins for distribcells are indices of each unique placement of\n # the Cell in the Geometry (consecutive integers starting at 0).\n return filter_bin\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a 
Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Keyword arguments\n -----------------\n paths : bool\n If True (default), expand distribcell indices into multi-index\n columns describing the path to that distribcell through the CSG\n tree. NOTE: This option assumes that all distribcell paths are of\n the same length and do not have the same universes and cells but\n different lattice cell indices.\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with columns describing distributed cells. The\n dataframe will have either:\n\n 1. a single column with the cell instance IDs (without summary info)\n 2. separate columns for the cell IDs, universe IDs, and lattice IDs\n and x,y,z cell indices corresponding to each (distribcell paths).\n\n The number of rows in the DataFrame is the same as the total number\n of bins in the corresponding tally, with the filter bin\n appropriately tiled to map to the corresponding tally bins.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n # Initialize Pandas DataFrame\n df = pd.DataFrame()\n\n level_df = None\n\n paths = kwargs.setdefault('paths', True)\n\n # Create Pandas Multi-index columns for each level in CSG tree\n if paths:\n\n # Distribcell paths require linked metadata from the Summary\n if self.paths is None:\n msg = 'Unable to construct distribcell paths since ' \\\n 'the Summary is not linked to the StatePoint'\n raise ValueError(msg)\n\n # Make copy of array of distribcell paths to use in\n # Pandas Multi-index column construction\n num_offsets = len(self.paths)\n paths = [_path_to_levels(p) for p in self.paths]\n\n # Loop over CSG levels in the distribcell paths\n num_levels = len(paths[0])\n for i_level in range(num_levels):\n # Use level key as first index in Pandas Multi-index column\n level_key = 'level {}'.format(i_level + 1)\n\n # Create a dictionary for this level for Pandas Multi-index\n level_dict = OrderedDict()\n\n # Use the first distribcell path to determine if level\n # is a universe/cell or lattice level\n path = paths[0]\n if path[i_level][0] == 'lattice':\n # Initialize prefix Multi-index keys\n lat_id_key = (level_key, 'lat', 'id')\n lat_x_key = (level_key, 'lat', 'x')\n lat_y_key = (level_key, 'lat', 'y')\n lat_z_key = (level_key, 'lat', 'z')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[lat_id_key] = np.empty(num_offsets)\n level_dict[lat_x_key] = np.empty(num_offsets)\n level_dict[lat_y_key] = np.empty(num_offsets)\n if len(path[i_level][2]) == 3:\n level_dict[lat_z_key] = np.empty(num_offsets)\n\n else:\n # Initialize prefix Multi-index keys\n univ_key = (level_key, 'univ', 'id')\n cell_key = (level_key, 'cell', 'id')\n\n # Allocate NumPy arrays for each CSG level and\n # each Multi-index column in the DataFrame\n level_dict[univ_key] = np.empty(num_offsets)\n level_dict[cell_key] = np.empty(num_offsets)\n\n # Populate Multi-index arrays with all distribcell paths\n for i, path in enumerate(paths):\n\n level = path[i_level]\n if level[0] == 'lattice':\n # Assign entry to Lattice Multi-index column\n level_dict[lat_id_key][i] = level[1]\n 
level_dict[lat_x_key][i] = level[2][0]\n level_dict[lat_y_key][i] = level[2][1]\n if len(level[2]) == 3:\n level_dict[lat_z_key][i] = level[2][2]\n\n else:\n # Assign entry to Universe, Cell Multi-index columns\n level_dict[univ_key][i] = level[1]\n level_dict[cell_key][i] = level[2]\n\n # Tile the Multi-index columns\n for level_key, level_bins in level_dict.items():\n level_dict[level_key] = _repeat_and_tile(\n level_bins, stride, data_size)\n\n # Initialize a Pandas DataFrame from the level dictionary\n if level_df is None:\n level_df = pd.DataFrame(level_dict)\n else:\n level_df = pd.concat([level_df, pd.DataFrame(level_dict)],\n axis=1)\n\n # Create DataFrame column for distribcell instance IDs\n # NOTE: This is performed regardless of whether the user\n # requests Summary geometric information\n filter_bins = _repeat_and_tile(\n np.arange(self.num_bins), stride, data_size)\n df = pd.DataFrame({self.short_name.lower() : filter_bins})\n\n # Concatenate with DataFrame of distribcell instance IDs\n if level_df is not None:\n level_df = level_df.dropna(axis=1, how='all')\n level_df = level_df.astype(np.int)\n df = pd.concat([level_df, df], axis=1)\n\n return df\n\n\nclass MuFilter(RealFilter):\n \"\"\"Bins tally events based on particle scattering angle.\n\n Parameters\n ----------\n values : int or Iterable of Real\n A grid of scattering angles which events will binned into. Values\n represent the cosine of the scattering angle. If an iterable is given,\n the values will be used explicitly as grid points. If a single int is\n given, the range [-1, 1] will be divided up equally into that number of\n bins.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n scattering angle cosines for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of scattering angle\n cosines for a single filter bin\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n def __init__(self, values, filter_id=None):\n if isinstance(values, Integral):\n values = np.linspace(-1., 1., values + 1)\n super().__init__(values, filter_id)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for x in np.ravel(bins):\n if not np.isclose(x, -1.):\n cv.check_greater_than('filter value', x, -1., equality=True)\n if not np.isclose(x, 1.):\n cv.check_less_than('filter value', x, 1., equality=True)\n\n\nclass PolarFilter(RealFilter):\n \"\"\"Bins tally events based on the incident particle's direction.\n\n Parameters\n ----------\n values : int or Iterable of Real\n A grid of polar angles which events will binned into. Values represent\n an angle in radians relative to the z-axis. If an iterable is given, the\n values will be used explicitly as grid points. 
If a single int is given,\n the range [0, pi] will be divided up equally into that number of bins.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n polar angles in [rad] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of polar angles for a\n single filter bin\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n units = 'rad'\n\n def __init__(self, values, filter_id=None):\n if isinstance(values, Integral):\n values = np.linspace(0., np.pi, values + 1)\n super().__init__(values, filter_id)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for x in np.ravel(bins):\n if not np.isclose(x, 0.):\n cv.check_greater_than('filter value', x, 0., equality=True)\n if not np.isclose(x, np.pi):\n cv.check_less_than('filter value', x, np.pi, equality=True)\n\n\nclass AzimuthalFilter(RealFilter):\n \"\"\"Bins tally events based on the incident particle's direction.\n\n Parameters\n ----------\n values : int or Iterable of Real\n A grid of azimuthal angles which events will binned into. Values\n represent an angle in radians relative to the x-axis and perpendicular\n to the z-axis. If an iterable is given, the values will be used\n explicitly as grid points. If a single int is given, the range\n [-pi, pi) will be divided up equally into that number of bins.\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n values : numpy.ndarray\n An array of values for which each successive pair constitutes a range of\n azimuthal angles in [rad] for a single bin\n id : int\n Unique identifier for the filter\n bins : numpy.ndarray\n An array of shape (N, 2) where each row is a pair of azimuthal angles\n for a single filter bin\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n units = 'rad'\n\n def __init__(self, values, filter_id=None):\n if isinstance(values, Integral):\n values = np.linspace(-np.pi, np.pi, values + 1)\n super().__init__(values, filter_id)\n\n def check_bins(self, bins):\n super().check_bins(bins)\n for x in np.ravel(bins):\n if not np.isclose(x, -np.pi):\n cv.check_greater_than('filter value', x, -np.pi, equality=True)\n if not np.isclose(x, np.pi):\n cv.check_less_than('filter value', x, np.pi, equality=True)\n\n\nclass DelayedGroupFilter(Filter):\n \"\"\"Bins fission events based on the produced neutron precursor groups.\n\n Parameters\n ----------\n bins : iterable of int\n The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses\n 6 precursor groups so a tally with all groups will have bins =\n [1, 2, 3, 4, 5, 6].\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n bins : iterable of int\n The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses\n 6 precursor groups so a tally with all groups will have bins =\n [1, 2, 3, 4, 5, 6].\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins\n\n \"\"\"\n def check_bins(self, bins):\n # Check the bin values.\n for g in bins:\n cv.check_greater_than('delayed group', g, 0)\n\n\nclass EnergyFunctionFilter(Filter):\n \"\"\"Multiplies tally scores by an arbitrary function of incident energy.\n\n The arbitrary function is described by a piecewise linear-linear\n interpolation of energy and y values. 
Values outside of the given energy\n range will be evaluated as zero.\n\n Parameters\n ----------\n energy : Iterable of Real\n A grid of energy values in [eV]\n y : iterable of Real\n A grid of interpolant values in [eV]\n filter_id : int\n Unique identifier for the filter\n\n Attributes\n ----------\n energy : Iterable of Real\n A grid of energy values in [eV]\n y : iterable of Real\n A grid of interpolant values in [eV]\n id : int\n Unique identifier for the filter\n num_bins : Integral\n The number of filter bins (always 1 for this filter)\n\n \"\"\"\n\n def __init__(self, energy, y, filter_id=None):\n self.energy = energy\n self.y = y\n self.id = filter_id\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n elif not all(self.energy == other.energy):\n return False\n else:\n return all(self.y == other.y)\n\n def __gt__(self, other):\n if type(self) is not type(other):\n if self.short_name in _FILTER_TYPES and \\\n other.short_name in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.short_name) - \\\n _FILTER_TYPES.index(other.short_name)\n return delta > 0\n else:\n return False\n else:\n return False\n\n def __lt__(self, other):\n if type(self) is not type(other):\n if self.short_name in _FILTER_TYPES and \\\n other.short_name in _FILTER_TYPES:\n delta = _FILTER_TYPES.index(self.short_name) - \\\n _FILTER_TYPES.index(other.short_name)\n return delta < 0\n else:\n return False\n else:\n return False\n\n def __hash__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tEnergy', self.energy)\n string += '{: <16}=\\t{}\\n'.format('\\tInterpolant', self.y)\n return hash(string)\n\n def __repr__(self):\n string = type(self).__name__ + '\\n'\n string += '{: <16}=\\t{}\\n'.format('\\tEnergy', self.energy)\n string += '{: <16}=\\t{}\\n'.format('\\tInterpolant', self.y)\n string += '{: <16}=\\t{}\\n'.format('\\tID', self.id)\n return string\n\n @classmethod\n def from_hdf5(cls, group, **kwargs):\n if group['type'][()].decode() != cls.short_name.lower():\n raise ValueError(\"Expected HDF5 data for filter type '\"\n + cls.short_name.lower() + \"' but got '\"\n + group['type'][()].decode() + \" instead\")\n\n energy = group['energy'][()]\n y = group['y'][()]\n filter_id = int(group.name.split('/')[-1].lstrip('filter '))\n\n return cls(energy, y, filter_id=filter_id)\n\n @classmethod\n def from_tabulated1d(cls, tab1d):\n \"\"\"Construct a filter from a Tabulated1D object.\n\n Parameters\n ----------\n tab1d : openmc.data.Tabulated1D\n A linear-linear Tabulated1D object with only a single interpolation\n region.\n\n Returns\n -------\n EnergyFunctionFilter\n\n \"\"\"\n cv.check_type('EnergyFunctionFilter tab1d', tab1d,\n openmc.data.Tabulated1D)\n if tab1d.n_regions > 1:\n raise ValueError('Only Tabulated1Ds with a single interpolation '\n 'region are supported')\n if tab1d.interpolation[0] != 2:\n raise ValueError('Only linear-linar Tabulated1Ds are supported')\n\n return cls(tab1d.x, tab1d.y)\n\n @property\n def energy(self):\n return self._energy\n\n @property\n def y(self):\n return self._y\n\n @property\n def bins(self):\n raise AttributeError('EnergyFunctionFilters have no bins.')\n\n @property\n def num_bins(self):\n return 1\n\n @energy.setter\n def energy(self, energy):\n # Format the bins as a 1D numpy array.\n energy = np.atleast_1d(energy)\n\n # Make sure the values are Real and positive.\n cv.check_type('filter energy grid', energy, Iterable, Real)\n for E in energy:\n cv.check_greater_than('filter energy grid', E, 0, 
equality=True)\n\n self._energy = energy\n\n @y.setter\n def y(self, y):\n # Format the bins as a 1D numpy array.\n y = np.atleast_1d(y)\n\n # Make sure the values are Real.\n cv.check_type('filter interpolant values', y, Iterable, Real)\n\n self._y = y\n\n @bins.setter\n def bins(self, bins):\n raise RuntimeError('EnergyFunctionFilters have no bins.')\n\n def to_xml_element(self):\n \"\"\"Return XML Element representing the Filter.\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing filter data\n\n \"\"\"\n element = ET.Element('filter')\n element.set('id', str(self.id))\n element.set('type', self.short_name.lower())\n\n subelement = ET.SubElement(element, 'energy')\n subelement.text = ' '.join(str(e) for e in self.energy)\n\n subelement = ET.SubElement(element, 'y')\n subelement.text = ' '.join(str(y) for y in self.y)\n\n return element\n\n def can_merge(self, other):\n return False\n\n def is_subset(self, other):\n return self == other\n\n def get_bin_index(self, filter_bin):\n # This filter only has one bin. Always return 0.\n return 0\n\n def get_pandas_dataframe(self, data_size, stride, **kwargs):\n \"\"\"Builds a Pandas DataFrame for the Filter's bins.\n\n This method constructs a Pandas DataFrame object for the filter with\n columns annotated by filter bin information. This is a helper method for\n :meth:`Tally.get_pandas_dataframe`.\n\n Parameters\n ----------\n data_size : int\n The total number of bins in the tally corresponding to this filter\n stride : int\n Stride in memory for the filter\n\n Returns\n -------\n pandas.DataFrame\n A Pandas DataFrame with a column that is filled with a hash of this\n filter. EnergyFunctionFilters have only 1 bin so the purpose of this\n DataFrame column is to differentiate the filter from other\n EnergyFunctionFilters. The number of rows in the DataFrame is the\n same as the total number of bins in the corresponding tally.\n\n See also\n --------\n Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()\n\n \"\"\"\n df = pd.DataFrame()\n\n # There is no clean way of sticking all the energy, y data into a\n # DataFrame so instead we'll just make a column with the filter name\n # and fill it with a hash of the __repr__. We want a hash that is\n # reproducible after restarting the interpreter so we'll use hashlib.md5\n # rather than the intrinsic hash().\n hash_fun = hashlib.md5()\n hash_fun.update(repr(self).encode('utf-8'))\n out = hash_fun.hexdigest()\n\n # The full 16 bytes make for a really wide column. Just 7 bytes (14\n # hex characters) of the digest are probably sufficient.\n out = out[:14]\n\n filter_bins = _repeat_and_tile(out, stride, data_size)\n df = pd.concat([df, pd.DataFrame(\n {self.short_name.lower(): filter_bins})])\n\n return df\n" ]
[ [ "numpy.linspace", "numpy.asarray", "numpy.vstack", "pandas.DataFrame", "numpy.concatenate", "numpy.all", "numpy.where", "numpy.allclose", "numpy.unique", "numpy.arange", "numpy.atleast_1d", "numpy.ravel", "numpy.repeat", "numpy.isclose", "pandas.concat", "numpy.min", "numpy.abs", "numpy.tile", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
xiaxin2000/OpenCDA-Documents
[ "1ad4b368d4287dae8b282bac1665816a496d57c6" ]
[ "opencda/core/plan/spline.py" ]
[ "\"\"\"\nCubic spline planner\n\nAuthor: Atsushi Sakai(@Atsushi_twi)\n\n\"\"\"\nimport math\nimport numpy as np\nimport bisect\n\n\nclass Spline:\n \"\"\"\n Cubic Spline class for calculte curvature (Author: Atsushi Sakai(@Atsushi_twi)).\n\n Parameters\n -x : float\n The x coordinate.\n -y : float\n The y coordinate.\n \n Attributes\n -b : float\n The spline coefficient b.\n -c : float\n The spline coefficient c.\n -d : float\n The spline coefficient d.\n -w : float\n The spline coefficient w.\n -nx : float\n The dimension of x.\n -h : float \n The n-th discrete difference along the x-axis.\n \"\"\"\n\n def __init__(self, x, y):\n self.b, self.c, self.d, self.w = [], [], [], []\n\n self.x = x\n self.y = y\n\n self.nx = len(x) # dimension of x\n h = np.diff(x)\n\n # calc coefficient c\n self.a = [iy for iy in y]\n\n # calc coefficient c\n A = self.__calc_A(h)\n B = self.__calc_B(h)\n self.c = np.linalg.solve(A, B)\n # print(self.c1)\n\n # calc spline coefficient b and d\n for i in range(self.nx - 1):\n self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))\n tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \\\n (self.c[i + 1] + 2.0 * self.c[i]) / 3.0\n self.b.append(tb)\n\n def calc(self, t):\n \"\"\"\n Calc position\n\n Args:\n - t (float): if t is outside of the input x, return None\n Returns:\n - result (float): The calcualtion result of position. If t is outside the range of x, return None.\n\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n return result\n\n def calcd(self, t):\n \"\"\"\n Calc first derivative. If t is outside of the input x, return None.\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0\n return result\n\n def calcdd(self, t):\n \"\"\"\n Calc second derivative, If t is outside of the input x, return None.\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx\n return result\n\n def __search_index(self, x):\n \"\"\"\n Search data segment index.\n \"\"\"\n return bisect.bisect(self.x, x) - 1\n\n def __calc_A(self, h):\n \"\"\"\n Calculate matrix A for spline coefficient a.\n \"\"\"\n A = np.zeros((self.nx, self.nx))\n A[0, 0] = 1.0\n for i in range(self.nx - 1):\n if i != (self.nx - 2):\n A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])\n A[i + 1, i] = h[i]\n A[i, i + 1] = h[i]\n\n A[0, 1] = 0.0\n A[self.nx - 1, self.nx - 2] = 0.0\n A[self.nx - 1, self.nx - 1] = 1.0\n # print(A)\n return A\n\n def __calc_B(self, h):\n \"\"\"\n Calculate matrix B for spline coefficient b.\n \"\"\"\n B = np.zeros(self.nx)\n for i in range(self.nx - 2):\n B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \\\n h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]\n return B\n\n\nclass Spline2D:\n \"\"\"\n 2D Cubic Spline class for calculte curvature (Author: Atsushi Sakai(@Atsushi_twi)).\n\n Parameters\n -x : float\n The x coordinate.\n -y : float\n The y coordinate.\n \n Attributes\n -b : float\n The spline coefficient b.\n -c : float\n The spline coefficient c.\n -d : float\n The spline coefficient d.\n -w : float\n The spline coefficient w.\n -nx : float\n The dimension of x.\n -h : float \n The n-th discrete difference along the 
x-axis.\n\n \"\"\"\n\n def __init__(self, x, y):\n self.s = self.__calc_s(x, y)\n self.sx = Spline(self.s, x)\n self.sy = Spline(self.s, y)\n\n def __calc_s(self, x, y):\n dx = np.diff(x)\n dy = np.diff(y)\n self.ds = np.hypot(dx, dy)\n s = [0]\n s.extend(np.cumsum(self.ds))\n return s\n\n def calc_position(self, s):\n \"\"\"\n Calculate position.\n \"\"\"\n x = self.sx.calc(s)\n y = self.sy.calc(s)\n\n return x, y\n\n def calc_curvature(self, s):\n \"\"\"\n Calculate curvature.\n \"\"\"\n dx = self.sx.calcd(s)\n ddx = self.sx.calcdd(s)\n dy = self.sy.calcd(s)\n ddy = self.sy.calcdd(s)\n k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))\n return k\n\n def calc_yaw(self, s):\n \"\"\"\n Calculate yaw.\n \"\"\"\n dx = self.sx.calcd(s)\n dy = self.sy.calcd(s)\n yaw = math.atan2(dy, dx)\n return yaw\n\n\ndef calc_spline_course(x, y, ds=0.1):\n \"\"\"\n Caculate 2D splice course.\n\n Args: \n -x (float): The x coordinate of the input point. \n -y (float): The y coordinate of the input point.\n -ds (flost): The s step value. Default value equals to 0.1.\n\n Returns:\n -rx (list): List of spline course points' x coordinates.\n -ry (list): List of spline course points' y coordinates.\n -ryaw (list): List of spline course points' yaw angles.\n -rk (list): List of spline course points' curvatures.\n -s (list): List of spline course points' s values.\n \"\"\"\n sp = Spline2D(x, y)\n s = list(np.arange(0, sp.s[-1], ds))\n\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = sp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(sp.calc_yaw(i_s))\n rk.append(sp.calc_curvature(i_s))\n\n return rx, ry, ryaw, rk, s\n\n\ndef main(): \n \"\"\"\n Main function to calculate spline and visulize the results.\n \"\"\"\n print(\"Spline 2D test\")\n import matplotlib.pyplot as plt\n x = [-135, -131, -131, -131]\n y = [6.43, 10.83, 100.38, 131]\n ds = 0.1 # [m] distance of each intepolated points\n\n sp = Spline2D(x, y)\n s = np.arange(0, sp.s[-1], ds)\n\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = sp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(sp.calc_yaw(i_s))\n rk.append(sp.calc_curvature(i_s))\n\n plt.subplots(1)\n plt.plot(x, y, \"xb\", label=\"input\")\n plt.plot(rx, ry, \"-r\", label=\"spline\")\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel(\"x[m]\")\n plt.ylabel(\"y[m]\")\n plt.legend()\n\n plt.subplots(1)\n plt.plot(s, [np.rad2deg(iyaw) for iyaw in ryaw], \"-r\", label=\"yaw\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"yaw angle[deg]\")\n\n plt.subplots(1)\n plt.plot(s, rk, \"-r\", label=\"curvature\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"curvature [1/m]\")\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linalg.solve", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.cumsum", "numpy.rad2deg", "matplotlib.pyplot.plot", "numpy.diff", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "numpy.hypot", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Taylor-Liu/rrt-algorithms
[ "54be136b71d63f8e3ff37afadf267da49080100b" ]
[ "examples/rrt_star/rrt_star_3d.py" ]
[ "# This file is subject to the terms and conditions defined in\n# file 'LICENSE', which is part of this source code package.\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) +\n \"/../../\")\n\nimport numpy as np\n\nfrom src.rrt.rrt_star import RRTStar\nfrom src.search_space.search_space import SearchSpace\nfrom src.utilities.plotting import Plot\n\nX_dimensions = np.array([(0, 100), (0, 100), (0, 100)]) # dimensions of Search Space\n# obstacles\nObstacles = np.array(\n [(20, 20, 20, 40, 40, 40), (20, 20, 60, 40, 40, 80), (20, 60, 20, 40, 80, 40), (60, 60, 20, 80, 80, 40),\n (60, 20, 20, 80, 40, 40), (60, 20, 60, 80, 40, 80), (20, 60, 60, 40, 80, 80), (60, 60, 60, 80, 80, 80)])\nx_init = (0, 0, 0) # starting location\nx_goal = (100, 100, 100) # goal location\n\nQ = np.array([(8, 4)]) # length of tree edges\nr = 1 # length of smallest edge to check for intersection with obstacles\nmax_samples = 1024 # max number of samples to take before timing out\nrewire_count = 32 # optional, number of nearby branches to rewire\nprc = 0.1 # probability of checking for a connection to goal\n\n# create Search Space\nX = SearchSpace(X_dimensions, Obstacles)\n\n# create rrt_search\nrrt = RRTStar(X, Q, x_init, x_goal, max_samples, r, prc, rewire_count)\npath = rrt.rrt_star()\n\n# plot\nplot = Plot(\"rrt_star_3d\")\nplot.plot_tree(X, rrt.trees)\nif path is not None:\n plot.plot_path(X, path)\nplot.plot_obstacles(X, Obstacles)\nplot.plot_start(X, x_init)\nplot.plot_goal(X, x_goal)\nplot.draw(auto_open=True)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vorticityxyz/Gaia-api
[ "04e2a9ee2448830df72156aecf432eda0c6eb504", "04e2a9ee2448830df72156aecf432eda0c6eb504" ]
[ "gaia.py", "mdemo.py" ]
[ "# Description:\n#\n# WARNING!!! This file is a critical component of Vorticity Gaia API for seismic imaging\n# PLEASE DO NOT MODIFY\n#\n# (C) Vorticity Inc. Mountain View, CA 2021\n# Licence: MIT\n\nimport numpy as np\nimport grpc\nimport time\nimport os\nimport sys\nimport gaia_pb2\nimport gaia_pb2_grpc\nimport dispatch_pb2\nimport dispatch_pb2_grpc\nimport validate\n\nimport tokens\nimport codes\n\nCHUNK_SIZE = 1024 * 1024 # 1MB\n\nIN_FILE = \"_gaia_input.npz\"\nEL_IN_FILE = \"_egaia_input.npz\"\nRTM_FILE = \"_gaia_rtm.npz\"\nEL_RTM_FILE = \"_gaia_ertm.npz\"\nBF_FILE = \"_gaia_block.npz\"\nOUT_FILE = \"_shot_record.npy\"\nEL_OUT_FILE = \"_eshot_record.npz\"\nUPDATE_FILE = \"_rtm_update.npy\"\nEL_UPDATE_FILE = \"_ertm_update.npz\"\nSANITY_FILE = \"_parameters.npy\"\nEL_SANITY_FILE = \"_eparameters.npy\"\nBF_SANILTY_FILE = \"_bfparameters.npy\"\nRBF_SETUP_FILE = '_rbf_setup.npz'\n\nDISPATCH_SERVER = 'vorticity.cloud:443'\n\ndef get_file_chunks(filename):\n with open(filename, 'rb') as f:\n size = 0\n while True:\n piece = f.read(CHUNK_SIZE);\n size += sys.getsizeof(piece)\n if len(piece) == 0:\n print()\n return\n yield gaia_pb2.Chunk(buffer=piece)\n sys.stdout.write('\\r')\n sys.stdout.write('Uploading %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n\ndef get_file_chunks_nv(filename):\n with open(filename, 'rb') as f:\n size = 0\n while True:\n piece = f.read(CHUNK_SIZE);\n size += sys.getsizeof(piece)\n if len(piece) == 0:\n return\n yield gaia_pb2.Chunk(buffer=piece)\n\ndef save_chunks_to_file(chunks, filename):\n size = 0\n with open(filename, 'wb') as f:\n for chunk in chunks:\n f.write(chunk.buffer)\n size += sys.getsizeof(chunk.buffer)\n sys.stdout.write('\\r')\n sys.stdout.write('Downloading %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n \n print()\n\ndef show_progress(responses):\n final_progress_value = 0.0\n for response in responses:\n sys.stdout.write('\\r')\n sys.stdout.write('%.2f%% complete' % (response.progress * 100,))\n sys.stdout.flush()\n final_progress_value = response.progress\n print()\n return final_progress_value\n\n\nclass DispatchClient:\n def __init__(self, address):\n with open('server.crt', 'rb') as f:\n creds = grpc.ssl_channel_credentials(f.read())\n channel = grpc.secure_channel(address, creds, \n options = (('grpc.ssl_target_name_override', 'localhost'), \n ('grpc.default_authority', 'localhost')),\n compression=grpc.Compression.Gzip)\n self.stub = dispatch_pb2_grpc.DispatchServerStub(channel)\n\n def DispatchServerAddressRequest(self, token):\n request = dispatch_pb2.AddressRequest()\n request.token = token\n response = self.stub.DispatchServerAddressRequest(request)\n\n return response\n\nclass GaiaClient:\n def __init__(self, token):\n dispatch_client = DispatchClient(DISPATCH_SERVER)\n response = dispatch_client.DispatchServerAddressRequest(token)\n if (response.status == codes.SUCCESS):\n address = response.address\n with open('server.crt', 'rb') as f:\n creds = grpc.ssl_channel_credentials(f.read())\n channel = grpc.secure_channel(address, creds,\n options = (('grpc.ssl_target_name_override', 'localhost'), \n ('grpc.default_authority', 'localhost')),\n compression=grpc.Compression.Gzip)\n self.stub = gaia_pb2_grpc.GaiaServerStub(channel)\n else:\n raise Exception(\"We could not verify this account. 
Please contact Vorticity.\")\n\n def StatusCheck(self, token):\n request = gaia_pb2.StatusRequest()\n request.token = token\n response = self.stub.StatusCheck(request)\n return response.status\n\n def SanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.SanityCheck(chunks_generator)\n return response.status\n\n def rtmSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.rtmSanityCheck(chunks_generator)\n return response.status\n\n def eForwardSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.eForwardSanityCheck(chunks_generator)\n return response.status\n \n def eRTMSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.eRTMSanityCheck(chunks_generator)\n return response.status\n\n def Upload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.Upload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def rtmUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.rtmUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def eForwardUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.eForwardUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def eRTMUpload(self, file_name):\n print(\"Uploading...\")\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.eRTMUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def Execute(self, sent_token):\n print(\"Forward processing.\")\n start = time.time()\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.Execute(request)\n final_progress_value = show_progress(responses)\n end = time.time()\n process_time = end - start\n print(\"Processing time:\", \"{:.2f}\".format(process_time), 's')\n return final_progress_value\n\n def rtmExecute(self, sent_token):\n print(\"RTM processing.\")\n start = time.time()\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.rtmExecute(request)\n final_progress_value = show_progress(responses)\n end = time.time()\n process_time = end - start\n print(\"Processing time:\", \"{:.2f}\".format(process_time), 's')\n return final_progress_value\n\n def eForwardExecute(self, sent_token):\n print(\"Elastic forward processing.\")\n start = time.time()\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.eForwardExecute(request)\n final_progress_value = show_progress(responses)\n end = time.time()\n 
process_time = end - start\n print(\"Processing time:\", \"{:.2f}\".format(process_time), 's')\n return final_progress_value\n\n def eRTMExecute(self, sent_token):\n print(\"Elastic RTM processing.\") \n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n responses = self.stub.eRTMExecute(request)\n final_progress_value = show_progress(responses)\n return final_progress_value\n\n def Download(self, sent_token, out_file_name):\n print(\"Downloading results\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.Download(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def rtmDownload(self, sent_token, out_file_name):\n print(\"Downloading results\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.rtmDownload(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def eForwardDownload(self, sent_token, out_file_name):\n print(\"Downloading results\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.eForwardDownload(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def eRTMDownload(self, sent_token, out_file_name):\n print(\"downloading...\")\n start = time.time()\n request = gaia_pb2.DownloadRequest()\n request.token = sent_token\n response = self.stub.eRTMDownload(request)\n save_chunks_to_file(response, out_file_name)\n end = time.time()\n download_time = end - start\n print(\"Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_file_name)/download_time/1024/1024), 'MB/s' )\n\n def CleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.CleanUp(request)\n return response.status\n\n def rtmCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.rtmCleanUp(request)\n return response.status\n\n def eForwardCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.eForwardCleanUp(request)\n return response.status\n\n def eRTMCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.eRTMCleanUp(request)\n return response.status\n\n def BatchForwardSanityCheck(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.BatchForwardSanityCheck(chunks_generator)\n return response.status\n\n def BatchForwardStatus(self, token, filename):\n request = gaia_pb2.BatchStatusRequest()\n request.token = token\n request.filename = filename\n response = self.stub.BatchForwardStatus(request)\n return response\n\n def BatchForwardUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n 
response = self.stub.BatchForwardUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def BatchForwardInitExec(self, sent_token):\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n response = self.stub.BatchForwardInitExec(request)\n return response.status\n\n def BatchForwardDownload(self, sent_token, sent_filename, out_filename):\n #print(\"Downloading\", sent_filename)\n start = time.time()\n request = gaia_pb2.BatchDownloadRequest()\n request.token = sent_token\n request.filename = sent_filename\n responses = self.stub.BatchForwardDownload(request)\n size = 0\n with open(out_filename, 'wb') as f:\n for response in responses:\n f.write(response.buffer)\n size += sys.getsizeof(response.buffer)\n sys.stdout.write('\\r')\n sys.stdout.write(out_filename + ' - %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n\n #print()\n end = time.time()\n download_time = end - start\n print(\" Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_filename)/download_time/1024/1024), 'MB/s' )\n\n def BatchForwardCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = self.stub.BatchForwardCleanUp(request)\n return response.status\n\n def rUploadSanityCheck(self, sent_token, filename, filesize):\n request = gaia_pb2.RemoteUploadSanityRequest()\n request.token = sent_token\n request.filename = filename\n request.filesize = filesize\n response = self.stub.rUploadSanityCheck(request)\n return response.status\n\n def rUpload(self, file_name):\n start = time.time()\n chunks_generator = get_file_chunks(file_name)\n response = self.stub.rUpload(chunks_generator)\n end = time.time()\n upload_time = end - start\n print(\"Upload time:\", \"{:.2f}\".format(upload_time), 's', \n \"speed:\", \"{:.2f}\".format(os.path.getsize(file_name)/upload_time/1024/1024), 'MB/s')\n return response.length\n\n def rForwardUpload(self, file_name):\n chunks_generator = get_file_chunks_nv(file_name)\n response = self.stub.rForwardUpload(chunks_generator)\n return response\n\n def rForwardInitExec(self, sent_token):\n request = gaia_pb2.ExecuteRequest()\n request.token = sent_token\n response = self.stub.rForwardInitExec(request)\n return response.status\n\n def rForwardStatus(self, token, filename):\n request = gaia_pb2.BatchStatusRequest()\n request.token = token\n request.filename = filename\n response = self.stub.rForwardStatus(request)\n return response\n\n def rForwardDownload(self, sent_token, sent_filename, out_filename):\n start = time.time()\n request = gaia_pb2.BatchDownloadRequest()\n request.token = sent_token\n request.filename = sent_filename\n responses = self.stub.rForwardDownload(request)\n size = 0\n with open(out_filename, 'wb') as f:\n for response in responses:\n f.write(response.buffer)\n size += sys.getsizeof(response.buffer)\n sys.stdout.write('\\r')\n sys.stdout.write(out_filename + ' - %.1f MB' % (size/CHUNK_SIZE,))\n sys.stdout.flush()\n\n #print()\n end = time.time()\n download_time = end - start\n print(\" Download time:\", \"{:.2f}\".format(download_time), 's',\n \"speed:\", \"{:.2f}\".format(os.path.getsize(out_filename)/download_time/1024/1024), 'MB/s' )\n\n def rForwardCleanUp(self, sent_token):\n request = gaia_pb2.CleanUpRequest()\n request.token = sent_token\n response = 
self.stub.rForwardCleanUp(request)\n return response.status\n\n def rDelete(self, sent_token, filename):\n request = gaia_pb2.DeleteRequest()\n request.token = sent_token\n request.filename = filename\n response = self.stub.rDelete(request)\n return response.status\n\n def Reset(self, sent_token):\n request = gaia_pb2.ResetRequest()\n request.token = sent_token\n response = self.stub.Reset(request)\n return response.status\n\ndef reset_server():\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n status = client.Reset(token)\n if (status == codes.SUCCESS):\n print(\"Server reset successful!\")\n else:\n print(\"Error resetting server. Contact Vorticity.\")\n\n# Forward model operator\ndef f28(model, shot, shotxyz, recxxyyz, deltas):\n\n # temporal accuracy 2, spacial accuracy 8, no abc\n act = 2 \n acs = 8\n abc = 0\n cnum = 1 # num accelerator cards\n\n # no pml\n pmlw = 0\n pmla = 0\n\n # Validate that user input is usable\n validate.model(model)\n validate.shot(shot)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc, pmlw, pmla, cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(IN_FILE, model=np.square(model), shot=shot, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.SanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, receiver size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.Upload(IN_FILE)\n if (file_length != os.path.getsize(IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.Execute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.Download(token, OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. 
If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.CleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. Contact Vorticity.\")\n\n # return data to user\n shot_record = np.load(OUT_FILE)\n os.remove(IN_FILE)\n os.remove(OUT_FILE)\n os.remove(SANITY_FILE)\n\n return shot_record\n\n# Forward model operator with pml\ndef f28pml(model, shot, shotxyz, recxxyyz, deltas, pml):\n\n # temporal accuracy 2, spacial accuracy 8, pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 1 # num accelerator cards\n\n # Validate that user input is usable\n validate.model(model)\n validate.shot(shot)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1], # no pml\n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(IN_FILE, model=np.square(model), shot=shot, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.SanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, receiver size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.Upload(IN_FILE)\n if (file_length != os.path.getsize(IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.Execute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.Download(token, OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.CleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n shot_record = np.load(OUT_FILE)\n os.remove(IN_FILE)\n os.remove(OUT_FILE)\n os.remove(SANITY_FILE)\n\n return shot_record\n\n# Multi accelerator card forward model operator\ndef mf28pml(model, shot, shotxyz, recxxyyz, deltas, pml):\n\n # temporal accuracy 2, spacial accuracy 8, pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 2 # num accelerator cards\n\n # Validate that user input is usable\n validate.multicard_model(model, cnum)\n validate.shot(shot)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1], \n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(IN_FILE, model=np.square(model), shot=shot, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.SanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, receiver size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.Upload(IN_FILE)\n if (file_length != os.path.getsize(IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.Execute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.Download(token, OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.CleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n shot_record = np.load(OUT_FILE)\n os.remove(IN_FILE)\n os.remove(OUT_FILE)\n os.remove(SANITY_FILE)\n\n return shot_record\n\n# Acoustic RTM operator\ndef rtm28pml(model, shot, traces, shotxyz, recxxyyz, deltas, pml):\n \n # temporal accuracy 2, spacial accuracy 8, with pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 1 # number of accelerator cards to use\n\n # Validate that user input is usable\n validate.model(model)\n validate.shot(shot)\n validate.traces(traces, shot, model)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1],\n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(RTM_FILE, model=np.square(model), shot=shot, traces=traces, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.rtmSanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.rtmUpload(RTM_FILE)\n if (file_length != os.path.getsize(RTM_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.RTM_READY):\n final_progress_value = client.rtmExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.rtmDownload(token, UPDATE_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.rtmCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n update = np.load(UPDATE_FILE)\n os.remove(RTM_FILE)\n os.remove(UPDATE_FILE)\n os.remove(SANITY_FILE)\n\n return update\n\n# Acoustic multi-card RTM operator\ndef mrtm28pml(model, shot, traces, shotxyz, recxxyyz, deltas, pml):\n \n # temporal accuracy 2, spacial accuracy 8, with pml\n act = 2 \n acs = 8\n abc = 1\n cnum = 4 # number of accelerator cards to use\n\n # Validate that user input is usable\n validate.multicard_model(model, cnum)\n validate.shot(shot)\n validate.traces(traces, shot, model)\n validate.shotxyz(model, shotxyz)\n validate.recxxyyz(model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(model, pml)\n\n sanity_data = np.array([model.shape[0], model.shape[1], model.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n act, abc, cnum], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n act, acs, abc,\n pml[0], pml[1],\n cnum], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(RTM_FILE, model=np.square(model), shot=shot, traces=traces, config_int=config_int, config_float=config_float)\n np.save(SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.rtmSanityCheck(SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.rtmUpload(RTM_FILE)\n if (file_length != os.path.getsize(RTM_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.RTM_READY):\n final_progress_value = client.rtmExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.rtmDownload(token, UPDATE_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.rtmCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # return data to user\n update = np.load(UPDATE_FILE)\n os.remove(RTM_FILE)\n os.remove(UPDATE_FILE)\n os.remove(SANITY_FILE)\n\n return update\n\n\n\n# Elastic forward model operator\ndef ef18abc(vp, vs, rho, shot, shotxyz, recxxyyz, deltas, abc):\n\n # temporal accuracy 2, spacial accuracy 8, with sponge\n temportal_ac = 1\n spacial_ac = 8\n abc_type = 2\n\n # Validate that user input is usable\n validate.emodel(vp, vs, rho)\n validate.shot(shot)\n validate.shotxyz(vp, shotxyz)\n validate.recxxyyz(vp, recxxyyz)\n validate.deltas(deltas)\n validate.abc(vp, abc)\n\n sanity_data = np.array([vp.shape[0], vp.shape[1], vp.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n temportal_ac, abc_type], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n temportal_ac, spacial_ac, abc_type,\n abc[0], abc[1],\n ], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n\n # Save data to disk for transfer\n np.savez(EL_IN_FILE, vp=np.square(vp), vs=np.square(vs), rho=rho, shot=shot, config_int=config_int, config_float=config_float)\n np.save(EL_SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.eForwardSanityCheck(EL_SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources with the current Vorticity instance.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.eForwardUpload(EL_IN_FILE)\n if (file_length != os.path.getsize(EL_IN_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.EXEC_READY):\n final_progress_value = client.eForwardExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download vx, vy and vz records\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.eForwardDownload(token, EL_OUT_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\") \n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.eForwardCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n \n with np.load(EL_OUT_FILE) as results:\n vx_traces = results['vx']\n vy_traces = results['vy']\n vz_traces = results['vz']\n\n os.remove(EL_IN_FILE)\n os.remove(EL_OUT_FILE)\n os.remove(EL_SANITY_FILE)\n\n return vx_traces, vy_traces, vz_traces\n\n# Elastic RTM operator\ndef ertm18abc(vp, vs, rho, shot, vx, vy, vz, shotxyz, recxxyyz, deltas, abc):\n # temporal accuracy 2, spacial accuracy 8, with sponge\n temportal_ac = 1\n spacial_ac = 8\n abc_type = 2\n\n # Validate that user input is usable\n validate.emodel(vp, vs, rho)\n validate.shot(shot)\n validate.traces(vx, shot, vp)\n validate.traces(vy, shot, vp)\n validate.traces(vz, shot, vp)\n validate.shotxyz(vp, shotxyz)\n validate.recxxyyz(vp, recxxyyz)\n validate.deltas(deltas)\n validate.abc(vp, abc)\n\n sanity_data = np.array([vp.shape[0], vp.shape[1], vp.shape[2], shot.shape[0],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3],\n temportal_ac, abc_type], dtype=np.int32)\n\n config_int = np.array([shotxyz[0], shotxyz[1], shotxyz[2],\n recxxyyz[0], recxxyyz[1], recxxyyz[2], recxxyyz[3], recxxyyz[4],\n temportal_ac, spacial_ac, abc_type,\n abc[0], abc[1],\n ], dtype=np.int32)\n config_float = deltas\n\n print(\"Starting gaia process.\")\n np.savez(EL_RTM_FILE, vp=np.square(vp), vs=np.square(vs), rho=rho, shot=shot, vx=vx, vy=vy, vz=vz, config_int=config_int, config_float=config_float)\n np.save(EL_SANITY_FILE, sanity_data)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.eRTMSanityCheck(EL_SANITY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.eRTMUpload(EL_RTM_FILE)\n if (file_length != os.path.getsize(EL_RTM_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.StatusCheck(token)\n if (status == codes.RTM_READY):\n final_progress_value = client.eRTMExecute(token)\n if (final_progress_value != 1.0):\n raise Exception(\"Something went wrong. Try again in a bit and if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server not ready. Try again in a few minites.\")\n\n # Download shot_record\n status = client.StatusCheck(token)\n if (status == codes.DOWNLOAD_READY):\n client.eRTMDownload(token, EL_UPDATE_FILE)\n else:\n raise Exception(\"Server not ready. Try again in a few minites. If the problem persists, contact Vorticity.\")\n\n # Clean up remote server\n status = client.StatusCheck(token)\n if (status == codes.CLEANUP_READY):\n status = client.eRTMCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # Load update data\n with np.load(EL_UPDATE_FILE) as data:\n dvp = data['dvp']\n dvs = data['dvs']\n\n # remove all temp files\n os.remove(EL_RTM_FILE)\n os.remove(EL_UPDATE_FILE)\n os.remove(EL_SANITY_FILE)\n\n return dvp, dvs\n\n# Batch forward model operator\ndef batchf28pml(block, shotbox, sweep, shot, shotxyz, recxxyyz, deltas, pml, destination):\n # simulation parameters\n act = 2\n acs = 8 # spacial accuracy\n # absorbing boundary conditions\n # 0 - none, 1 - pml\n abc = 1\n cnum = 2 # number of accelerator cards to use\n\n # Validate that user input is usable\n validate.block(block)\n validate.shotbox(block, shotbox)\n validate.sweep(block, shotbox, sweep)\n validate.shot(shot)\n\n shotbox_nx = shotbox[0]\n shotbox_ny = shotbox[1]\n shotbox_nz = shotbox[2]\n\n ghost_model = np.empty((shotbox_nx, shotbox_ny, shotbox_nz))\n\n validate.shotxyz(ghost_model, shotxyz)\n validate.recxxyyz(ghost_model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(ghost_model, pml)\n\n nt = shot.shape[0]\n xt1 = recxxyyz[0]\n xt2 = recxxyyz[1]\n yt1 = recxxyyz[2]\n yt2 = recxxyyz[3]\n zt = recxxyyz[4]\n\n x_start = sweep[0]\n x_end = sweep[1]\n x_step = sweep[2]\n y_start = sweep[3]\n y_end = sweep[4]\n y_step = sweep[5]\n\n sim = np.array([act, acs, abc, cnum], dtype=np.int32)\n sanity_data = np.array([shotbox_nx, shotbox_ny, shotbox_nz, nt, xt1, xt2, yt1, yt2, act, abc, cnum])\n\n print(\"Starting gaia process.\")\n np.save(BF_SANILTY_FILE, sanity_data)\n np.savez(BF_FILE, model=block, shotbox=shotbox, sweep=sweep, shot=shot, shotxyz=shotxyz, recxxyyz=recxxyyz, deltas=deltas, sim=sim, pml=pml)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Do a quick sanity check to ensure simulation parameters are within server bounds\n status = client.BatchForwardSanityCheck(BF_SANILTY_FILE)\n if (status == codes.ERROR):\n raise Exception(\"This simulation will take too many resources. Try again with a lower resolution, trace size and/or timesteps.\")\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n file_length = client.BatchForwardUpload(BF_FILE)\n if (file_length != os.path.getsize(BF_FILE)):\n raise Exception(\"Something went wrong with data upload to server. Try again in a bit or if problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n \n # Now instigate execution\n status = client.BatchForwardInitExec(token)\n if (status == codes.ERROR):\n raise Exception(\"Something went wrong when instigating batch execution. 
Try again after resetting the server or if problem persists, contact Vorticity.\")\n\n y_offset = y_start\n x_offset = x_start\n\n while (y_offset <= y_end):\n while (x_offset <= x_end):\n filename = \"shot-\" + str(y_offset) + \"-\" + str(x_offset) + \".npy\"\n drop_point = destination + filename\n\n while(True):\n response = client.BatchForwardStatus(token, filename)\n sys.stdout.write('\\r')\n sys.stdout.write('Processing shot %d of %d | %.2f%%' % (response.shot, response.total, response.progress * 100,))\n sys.stdout.flush()\n if (response.progress == 1.0):\n break\n #time.sleep(0.02)\n \n print()\n while(True):\n response = client.BatchForwardStatus(token, filename)\n if (response.fileExists == True):\n break\n #time.sleep(0.02)\n\n client.BatchForwardDownload(token, filename, drop_point)\n \n if (x_step == 0):\n break\n x_offset += x_step\n \n if (y_step == 0):\n break\n x_offset = x_start\n y_offset += y_step\n\n status = client.BatchForwardCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. Contact Vorticity.\")\n\n # remove all temp files\n os.remove(BF_FILE)\n os.remove(BF_SANILTY_FILE)\n\n# remote upload operator\ndef remoteUpload(local_filename, remote_filename):\n\n with open(local_filename, 'rb') as fobj:\n version = np.lib.format.read_magic(fobj)\n if version[0] == 1:\n shape, fortran_order, dtype = np.lib.format.read_array_header_1_0(fobj)\n else:\n shape, fortran_order, dtype = np.lib.format.read_array_header_2_0(fobj)\n\n # Validate that user input is usable\n validate.remote_model(shape, dtype)\n\n print(\"Starting gaia process.\")\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n file_size = os.path.getsize(local_filename)\n sanity_status = client.rUploadSanityCheck(token, remote_filename, file_size)\n\n if (sanity_status == codes.ERROR):\n raise Exception(\"Unable to upload. Not enough free space on remote server.\")\n\n response = client.rUpload(local_filename)\n\n if (response != file_size):\n raise Exception(\"Something went wrong with data upload to server. 
Try a gaia reset or if problem persists, contact Vorticity.\")\n\n print(\"Process complete!\")\n\n# remote batch forward operator\ndef rbf28pml(modelfile, shotbox, sweep, shot, shotxyz, recxxyyz, deltas, pml, destination):\n # simulation parameters\n act = 2\n acs = 8 # spacial accuracy\n # absorbing boundary conditions\n # 0 - none, 1 - pml\n abc = 1\n cnum = 2 # number of accelerator cards to use\n\n validate.shot(shot)\n snx = shotbox[0]\n sny = shotbox[1]\n snz = shotbox[2]\n\n ghost_model = np.empty((snx, sny, snz))\n\n validate.shotxyz(ghost_model, shotxyz)\n validate.recxxyyz(ghost_model, recxxyyz)\n validate.deltas(deltas)\n validate.pml(ghost_model, pml)\n\n xs = shotxyz[0]\n ys = shotxyz[1]\n zs = shotxyz[2]\n\n nt = shot.shape[0]\n xt1 = recxxyyz[0]\n xt2 = recxxyyz[1]\n yt1 = recxxyyz[2]\n yt2 = recxxyyz[3]\n zt = recxxyyz[4]\n\n xsrt = sweep[0]\n xend = sweep[1]\n xstp = sweep[2]\n ysrt = sweep[3]\n yend = sweep[4]\n ystp = sweep[5]\n\n sim = np.array([act, acs, abc, cnum], dtype=np.int32)\n\n print(\"Starting gaia process.\")\n np.savez(RBF_SETUP_FILE, \n modelfile=modelfile, shotbox=shotbox, sweep=sweep, shot=shot, shotxyz=shotxyz, recxxyyz=recxxyyz, deltas=deltas, sim=sim, pml=pml)\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n # Check if server is ready for upload and if so upload file\n status = client.StatusCheck(token)\n if (status == codes.UPLOAD_READY):\n response = client.rForwardUpload(RBF_SETUP_FILE)\n if (response.status != codes.SUCCESS):\n raise Exception(\"Simulation was rejected by the server. Possible incorrect setup.\")\n\n if (response.length != os.path.getsize(RBF_SETUP_FILE)): \n raise Exception(\"Error uploading simulation. Reset server and try again. If problem persists, contact Vorticity.\")\n else:\n raise Exception(\"Server busy. Wait for the original task to complete.\")\n\n # Now instigate execution\n status = client.rForwardInitExec(token)\n if (status == codes.ERROR):\n raise Exception(\"Something went wrong when instigating execution. Try again after resetting the server or if problem persists, contact Vorticity.\")\n\n y_offset = ysrt\n x_offset = xsrt\n\n while (y_offset <= yend):\n while (x_offset <= xend):\n filename = \"shot-\" + str(x_offset + xs) + \"-\" + str(y_offset + ys) + \".npy\"\n drop_point = destination + filename\n\n while(True):\n response = client.rForwardStatus(token, filename)\n if (response.fileExists == True):\n break\n sys.stdout.write('\\r')\n sys.stdout.write('Processing shot %d of %d | %.2f%%' % (response.shot, response.total, response.progress * 100,))\n sys.stdout.flush()\n if (response.progress == 1.0):\n print()\n break\n\n while(True):\n response = client.rForwardStatus(token, filename)\n if (response.fileExists == True):\n break\n #time.sleep(0.02)\n\n client.rForwardDownload(token, filename, drop_point)\n\n if (xstp == 0):\n break\n x_offset += xstp\n \n if (ystp == 0):\n break\n x_offset = xsrt\n y_offset += ystp\n\n status = client.rForwardCleanUp(token)\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. 
Contact Vorticity.\")\n\n # remove all temp files\n os.remove(RBF_SETUP_FILE)\n\n# remote delte operator\ndef remoteDelete(remote_filename):\n print(\"Starting gaia process.\")\n\n # get the token for identification to server\n token = tokens.USER_TOKEN\n\n # Launch client\n client = GaiaClient(token)\n\n print(\"Deleting remote earth model.\")\n status = client.rDelete(token, remote_filename)\n\n if (status == codes.SUCCESS):\n print(\"Process complete!\")\n else:\n print(\"Process did not complete as intended. Contact Vorticity.\")", "# Description:\n# \n# This example uses Vorticity gaia API's mf28pml operator to run a forward model. \n# The operator takes a velocity model and returns a simulated shot record which \n# is then plotted using matplotlib.\n#\n# mf28pml allows for larger velocity models and faster solving than f28pml\n# \n# Input parameters for the operator is generated by the \n# function generate_test_data() and is as follows:\n#\n# model - 3D numpy array representing the velocity model\n# shot - 1D numpy array representing the shot profile spanning the all timesteps\n# shotxyz - Cartesian coordinates of the shot location\n# recxxyyz - Cartesian coordinates of the receiver locations\n# deltas - dx, dy, dz and dt for the simulation\n# pml - width and amplitude of the PML layer\n#\n# Output: simulated shot record in the form of a 3d numpy array of the format\n# shot_record[timestep, x_position, y_position]\n# \n# (C) Vorticity Inc. Mountain View, CA 2021\n# Licence: MIT\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport gaia\n\n# Plot results using matplotlib\ndef plot_results(shot_record):\n fig = plt.figure(figsize=(15, 15))\n scale = np.max(shot_record) / 5000.\n extent = [0, 1, 1, 0]\n plot = plt.imshow(shot_record, vmin=-scale, vmax=scale, cmap=cm.gray, extent=extent)\n plt.xlabel('X position')\n plt.ylabel('Time')\n plt.show()\n\n# Generate shot profile\ndef generate_ricker(nt, freq, dt):\n max_amplitude = 1000\n npt = nt * dt\n t = np.arange(-float(npt)/2, float(npt)/2, dt)\n # generate the short waveform\n rick1 = max_amplitude * (1 - t * t * freq**2 * np.pi**2) * np.exp(-t**2 * np.pi**2 * freq**2)\n # Overlay the short waveform over the full length of timesteps\n rick = np.zeros(nt, dtype=np.float32)\n rick[0: nt - (round(nt/2) - round(1/freq/dt) + 1)] = rick1[round(nt/2) - round(1/freq/dt) + 1: nt];\n return rick\n\ndef generate_test_data():\n # Earth model dimensions\n nx = 1001\n ny = 1001\n nz = 1601\n\n # Spacial discretization\n dx = 2.5\n dy = dx\n dz = dx\n\n # temporal discretization\n dt = 0.0004\n\n # number of timesteps\n nt = 2500\n\n # Absorbing boundaries\n pmlw = 50\n pmla = 100\n\n # Shot parameters\n freq = 30 # Frequency\n xs = round(nx/2) \n ys = round(ny/2)\n zs = 4\n \n # Receiver parameters\n xt1 = 104\n xt2 = (nx - 105)\n yt1 = round(ny/2)\n yt2 = round(ny/2)\n zt = 4\n\n # Earth model velocities\n c1 = 1500\n c2 = 2500\n\n # Build earth model\n model = np.full((nx, ny, nz), c1, dtype=np.float32) # Smooth model\n model[:, :, 151:] = c2 # Now insert step\n\n # Generate rest of the parameters\n shot = generate_ricker(nt, freq, dt)\n shotxyz = np.array([xs, ys, zs], dtype=np.int32)\n recxxyz = np.array([xt1, xt2, yt1, yt2, zt], dtype=np.int32)\n deltas = np.array([dx, dy, dz, dt], dtype=np.float32)\n pml = np.array([pmlw, pmla], dtype=np.int32)\n\n return model, shot, shotxyz, recxxyz, deltas, pml\n\nif __name__ == '__main__':\n\n # generate test data\n print(\"Generating test data.\")\n model, shot, 
shotxyz, recxxyz, deltas, pml = generate_test_data()\n\n # Call gaia function\n shot_record = gaia.mf28pml(model, shot, shotxyz, recxxyz, deltas, pml)\n \n # Plot results\n plot_results(shot_record[:, :, 0])\n\n # Save shot record for rtm later\n np.save(\"data/shot_record\", shot_record)" ]
[ [ "numpy.square", "numpy.lib.format.read_array_header_2_0", "numpy.savez", "numpy.lib.format.read_array_header_1_0", "numpy.lib.format.read_magic", "numpy.save", "numpy.load", "numpy.array", "numpy.empty" ], [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.figure", "numpy.save", "numpy.full", "numpy.max", "numpy.exp", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
klDen/flink
[ "a2c737891afde0c63c1a453b1ee164b80b6a702c", "a2c737891afde0c63c1a453b1ee164b80b6a702c" ]
[ "flink-python/pyflink/fn_execution/operations.py", "flink-python/pyflink/table/tests/test_pandas_conversion.py" ]
[ "################################################################################\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\nimport abc\nimport time\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import List, Tuple, Any, Dict\n\nfrom apache_beam.coders import PickleCoder\n\nfrom pyflink.datastream.state import ValueStateDescriptor, ValueState, ListStateDescriptor, \\\n ListState, MapStateDescriptor, MapState, ReducingStateDescriptor, ReducingState\nfrom pyflink.datastream import TimeDomain\nfrom pyflink.datastream.functions import RuntimeContext, TimerService, ProcessFunction, \\\n KeyedProcessFunction\nfrom pyflink.fn_execution import flink_fn_execution_pb2, operation_utils\nfrom pyflink.fn_execution.aggregate import extract_data_view_specs\nfrom pyflink.fn_execution.beam.beam_coders import DataViewFilterCoder\nfrom pyflink.fn_execution.operation_utils import extract_user_defined_aggregate_function\nfrom pyflink.fn_execution.state_impl import RemoteKeyedStateBackend\n\ntry:\n from pyflink.fn_execution.aggregate_fast import RowKeySelector, SimpleAggsHandleFunction, \\\n GroupAggFunction, DistinctViewDescriptor, SimpleTableAggsHandleFunction, \\\n GroupTableAggFunction\nexcept ImportError:\n from pyflink.fn_execution.aggregate_slow import RowKeySelector, SimpleAggsHandleFunction, \\\n GroupAggFunction, DistinctViewDescriptor, SimpleTableAggsHandleFunction,\\\n GroupTableAggFunction\n\nfrom pyflink.metrics.metricbase import GenericMetricGroup\nfrom pyflink.table import FunctionContext, Row\n\n\n# table operations\nSCALAR_FUNCTION_URN = \"flink:transform:scalar_function:v1\"\nTABLE_FUNCTION_URN = \"flink:transform:table_function:v1\"\nSTREAM_GROUP_AGGREGATE_URN = \"flink:transform:stream_group_aggregate:v1\"\nSTREAM_GROUP_TABLE_AGGREGATE_URN = \"flink:transform:stream_group_table_aggregate:v1\"\nPANDAS_AGGREGATE_FUNCTION_URN = \"flink:transform:aggregate_function:arrow:v1\"\nPANDAS_BATCH_OVER_WINDOW_AGGREGATE_FUNCTION_URN = \\\n \"flink:transform:batch_over_window_aggregate_function:arrow:v1\"\n\n# datastream operations\nDATA_STREAM_STATELESS_FUNCTION_URN = \"flink:transform:datastream_stateless_function:v1\"\nPROCESS_FUNCTION_URN = \"flink:transform:process_function:v1\"\nKEYED_PROCESS_FUNCTION_URN = \"flink:transform:keyed_process_function:v1\"\n\n\nclass Operation(abc.ABC):\n def __init__(self, spec):\n super(Operation, self).__init__()\n self.spec = spec\n self.func, self.user_defined_funcs = self.generate_func(self.spec.serialized_fn)\n if self.spec.serialized_fn.metric_enabled:\n self.base_metric_group = GenericMetricGroup(None, None)\n else:\n self.base_metric_group = None\n\n def open(self):\n for user_defined_func in 
self.user_defined_funcs:\n if hasattr(user_defined_func, 'open'):\n user_defined_func.open(FunctionContext(self.base_metric_group))\n\n def close(self):\n for user_defined_func in self.user_defined_funcs:\n if hasattr(user_defined_func, 'close'):\n user_defined_func.close()\n\n def finish(self):\n self._update_gauge(self.base_metric_group)\n\n def _update_gauge(self, base_metric_group):\n if base_metric_group is not None:\n for name in base_metric_group._flink_gauge:\n flink_gauge = base_metric_group._flink_gauge[name]\n beam_gauge = base_metric_group._beam_gauge[name]\n beam_gauge.set(flink_gauge())\n for sub_group in base_metric_group._sub_groups:\n self._update_gauge(sub_group)\n\n @abc.abstractmethod\n def generate_func(self, serialized_fn) -> Tuple:\n pass\n\n\nclass ScalarFunctionOperation(Operation):\n def __init__(self, spec):\n super(ScalarFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn):\n \"\"\"\n Generates a lambda function based on udfs.\n :param serialized_fn: serialized function which contains a list of the proto\n representation of the Python :class:`ScalarFunction`\n :return: the generated lambda function\n \"\"\"\n scalar_functions, variable_dict, user_defined_funcs = reduce(\n lambda x, y: (\n ','.join([x[0], y[0]]),\n dict(chain(x[1].items(), y[1].items())),\n x[2] + y[2]),\n [operation_utils.extract_user_defined_function(udf) for udf in serialized_fn.udfs])\n generate_func = eval('lambda value: [%s]' % scalar_functions, variable_dict)\n return generate_func, user_defined_funcs\n\n\nclass TableFunctionOperation(Operation):\n def __init__(self, spec):\n super(TableFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn):\n \"\"\"\n Generates a lambda function based on udtfs.\n :param serialized_fn: serialized function which contains the proto representation of\n the Python :class:`TableFunction`\n :return: the generated lambda function\n \"\"\"\n table_function, variable_dict, user_defined_funcs = \\\n operation_utils.extract_user_defined_function(serialized_fn.udfs[0])\n generate_func = eval('lambda value: %s' % table_function, variable_dict)\n return generate_func, user_defined_funcs\n\n\nclass PandasAggregateFunctionOperation(Operation):\n def __init__(self, spec):\n super(PandasAggregateFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn):\n pandas_functions, variable_dict, user_defined_funcs = reduce(\n lambda x, y: (\n ','.join([x[0], y[0]]),\n dict(chain(x[1].items(), y[1].items())),\n x[2] + y[2]),\n [operation_utils.extract_user_defined_function(udf, True)\n for udf in serialized_fn.udfs])\n variable_dict['wrap_pandas_result'] = operation_utils.wrap_pandas_result\n generate_func = eval('lambda value: wrap_pandas_result([%s])' %\n pandas_functions, variable_dict)\n return generate_func, user_defined_funcs\n\n\nclass PandasBatchOverWindowAggregateFunctionOperation(Operation):\n def __init__(self, spec):\n super(PandasBatchOverWindowAggregateFunctionOperation, self).__init__(spec)\n self.windows = [window for window in self.spec.serialized_fn.windows]\n # the index among all the bounded range over window\n self.bounded_range_window_index = [-1 for _ in range(len(self.windows))]\n # Whether the specified position window is a bounded range window.\n self.is_bounded_range_window = []\n window_types = flink_fn_execution_pb2.OverWindow\n\n bounded_range_window_nums = 0\n for i, window in enumerate(self.windows):\n window_type = window.window_type\n if (window_type is 
window_types.RANGE_UNBOUNDED_PRECEDING) or (\n window_type is window_types.RANGE_UNBOUNDED_FOLLOWING) or (\n window_type is window_types.RANGE_SLIDING):\n self.bounded_range_window_index[i] = bounded_range_window_nums\n self.is_bounded_range_window.append(True)\n bounded_range_window_nums += 1\n else:\n self.is_bounded_range_window.append(False)\n\n def generate_func(self, serialized_fn):\n user_defined_funcs = []\n self.window_indexes = []\n self.mapper = []\n for udf in serialized_fn.udfs:\n pandas_agg_function, variable_dict, user_defined_func, window_index = \\\n operation_utils.extract_over_window_user_defined_function(udf)\n user_defined_funcs.extend(user_defined_func)\n self.window_indexes.append(window_index)\n self.mapper.append(eval('lambda value: %s' % pandas_agg_function, variable_dict))\n return self.wrapped_over_window_function, user_defined_funcs\n\n def wrapped_over_window_function(self, boundaries_series):\n import pandas as pd\n OverWindow = flink_fn_execution_pb2.OverWindow\n input_series = boundaries_series[-1]\n # the row number of the arrow format data\n input_cnt = len(input_series[0])\n results = []\n # loop every agg func\n for i in range(len(self.window_indexes)):\n window_index = self.window_indexes[i]\n # the over window which the agg function belongs to\n window = self.windows[window_index]\n window_type = window.window_type\n func = self.mapper[i]\n result = []\n if self.is_bounded_range_window[window_index]:\n window_boundaries = boundaries_series[\n self.bounded_range_window_index[window_index]]\n if window_type is OverWindow.RANGE_UNBOUNDED_PRECEDING:\n # range unbounded preceding window\n for j in range(input_cnt):\n end = window_boundaries[j]\n series_slices = [s.iloc[:end] for s in input_series]\n result.append(func(series_slices))\n elif window_type is OverWindow.RANGE_UNBOUNDED_FOLLOWING:\n # range unbounded following window\n for j in range(input_cnt):\n start = window_boundaries[j]\n series_slices = [s.iloc[start:] for s in input_series]\n result.append(func(series_slices))\n else:\n # range sliding window\n for j in range(input_cnt):\n start = window_boundaries[j * 2]\n end = window_boundaries[j * 2 + 1]\n series_slices = [s.iloc[start:end] for s in input_series]\n result.append(func(series_slices))\n else:\n # unbounded range window or unbounded row window\n if (window_type is OverWindow.RANGE_UNBOUNDED) or (\n window_type is OverWindow.ROW_UNBOUNDED):\n series_slices = [s.iloc[:] for s in input_series]\n func_result = func(series_slices)\n result = [func_result for _ in range(input_cnt)]\n elif window_type is OverWindow.ROW_UNBOUNDED_PRECEDING:\n # row unbounded preceding window\n window_end = window.upper_boundary\n for j in range(input_cnt):\n end = min(j + window_end + 1, input_cnt)\n series_slices = [s.iloc[: end] for s in input_series]\n result.append(func(series_slices))\n elif window_type is OverWindow.ROW_UNBOUNDED_FOLLOWING:\n # row unbounded following window\n window_start = window.lower_boundary\n for j in range(input_cnt):\n start = max(j + window_start, 0)\n series_slices = [s.iloc[start: input_cnt] for s in input_series]\n result.append(func(series_slices))\n else:\n # row sliding window\n window_start = window.lower_boundary\n window_end = window.upper_boundary\n for j in range(input_cnt):\n start = max(j + window_start, 0)\n end = min(j + window_end + 1, input_cnt)\n series_slices = [s.iloc[start: end] for s in input_series]\n result.append(func(series_slices))\n results.append(pd.Series(result))\n return results\n\n\nclass 
StatefulFunctionOperation(Operation):\n\n def __init__(self, spec, keyed_state_backend):\n self.keyed_state_backend = keyed_state_backend\n super(StatefulFunctionOperation, self).__init__(spec)\n\n def finish(self):\n super().finish()\n if self.keyed_state_backend:\n self.keyed_state_backend.commit()\n\n\nTRIGGER_TIMER = 1\n\n\nclass AbstractStreamGroupAggregateOperation(StatefulFunctionOperation):\n\n def __init__(self, spec, keyed_state_backend):\n self.generate_update_before = spec.serialized_fn.generate_update_before\n self.grouping = [i for i in spec.serialized_fn.grouping]\n self.group_agg_function = None\n # If the upstream generates retract message, we need to add an additional count1() agg\n # to track current accumulated messages count. If all the messages are retracted, we need\n # to send a DELETE message to downstream.\n self.index_of_count_star = spec.serialized_fn.index_of_count_star\n self.count_star_inserted = spec.serialized_fn.count_star_inserted\n self.state_cache_size = spec.serialized_fn.state_cache_size\n self.state_cleaning_enabled = spec.serialized_fn.state_cleaning_enabled\n self.data_view_specs = extract_data_view_specs(spec.serialized_fn.udfs)\n super(AbstractStreamGroupAggregateOperation, self).__init__(spec, keyed_state_backend)\n\n def open(self):\n self.group_agg_function.open(FunctionContext(self.base_metric_group))\n\n def close(self):\n self.group_agg_function.close()\n\n def generate_func(self, serialized_fn):\n user_defined_aggs = []\n input_extractors = []\n filter_args = []\n # stores the indexes of the distinct views which the agg functions used\n distinct_indexes = []\n # stores the indexes of the functions which share the same distinct view\n # and the filter args of them\n distinct_info_dict = {}\n for i in range(len(serialized_fn.udfs)):\n user_defined_agg, input_extractor, filter_arg, distinct_index = \\\n extract_user_defined_aggregate_function(\n i, serialized_fn.udfs[i], distinct_info_dict)\n user_defined_aggs.append(user_defined_agg)\n input_extractors.append(input_extractor)\n filter_args.append(filter_arg)\n distinct_indexes.append(distinct_index)\n distinct_view_descriptors = {}\n for agg_index_list, filter_arg_list in distinct_info_dict.values():\n if -1 in filter_arg_list:\n # If there is a non-filter call, we don't need to check filter or not before\n # writing the distinct data view.\n filter_arg_list = []\n # use the agg index of the first function as the key of shared distinct view\n distinct_view_descriptors[agg_index_list[0]] = DistinctViewDescriptor(\n input_extractors[agg_index_list[0]], filter_arg_list)\n\n key_selector = RowKeySelector(self.grouping)\n if len(self.data_view_specs) > 0:\n state_value_coder = DataViewFilterCoder(self.data_view_specs)\n else:\n state_value_coder = PickleCoder()\n\n self.group_agg_function = self.create_process_function(\n user_defined_aggs, input_extractors, filter_args, distinct_indexes,\n distinct_view_descriptors, key_selector, state_value_coder)\n\n return self.process_element_or_timer, []\n\n def process_element_or_timer(self, input_datas: List[Tuple[int, Row, int, Row]]):\n # the structure of the input data:\n # [element_type, element(for process_element), timestamp(for timer), key(for timer)]\n # all the fields are nullable except the \"element_type\"\n for input_data in input_datas:\n if input_data[0] != TRIGGER_TIMER:\n self.group_agg_function.process_element(input_data[1])\n else:\n self.group_agg_function.on_timer(input_data[3])\n return self.group_agg_function.finish_bundle()\n\n 
@abc.abstractmethod\n def create_process_function(self, user_defined_aggs, input_extractors, filter_args,\n distinct_indexes, distinct_view_descriptors, key_selector,\n state_value_coder):\n pass\n\n\nclass StreamGroupAggregateOperation(AbstractStreamGroupAggregateOperation):\n\n def __init__(self, spec, keyed_state_backend):\n super(StreamGroupAggregateOperation, self).__init__(spec, keyed_state_backend)\n\n def create_process_function(self, user_defined_aggs, input_extractors, filter_args,\n distinct_indexes, distinct_view_descriptors, key_selector,\n state_value_coder):\n aggs_handler_function = SimpleAggsHandleFunction(\n user_defined_aggs,\n input_extractors,\n self.index_of_count_star,\n self.count_star_inserted,\n self.data_view_specs,\n filter_args,\n distinct_indexes,\n distinct_view_descriptors)\n\n return GroupAggFunction(\n aggs_handler_function,\n key_selector,\n self.keyed_state_backend,\n state_value_coder,\n self.generate_update_before,\n self.state_cleaning_enabled,\n self.index_of_count_star)\n\n\nclass StreamGroupTableAggregateOperation(AbstractStreamGroupAggregateOperation):\n def __init__(self, spec, keyed_state_backend):\n super(StreamGroupTableAggregateOperation, self).__init__(spec, keyed_state_backend)\n\n def create_process_function(self, user_defined_aggs, input_extractors, filter_args,\n distinct_indexes, distinct_view_descriptors, key_selector,\n state_value_coder):\n aggs_handler_function = SimpleTableAggsHandleFunction(\n user_defined_aggs,\n input_extractors,\n self.data_view_specs,\n filter_args,\n distinct_indexes,\n distinct_view_descriptors)\n return GroupTableAggFunction(\n aggs_handler_function,\n key_selector,\n self.keyed_state_backend,\n state_value_coder,\n self.generate_update_before,\n self.state_cleaning_enabled,\n self.index_of_count_star)\n\n\nclass DataStreamStatelessFunctionOperation(Operation):\n\n def __init__(self, spec):\n super(DataStreamStatelessFunctionOperation, self).__init__(spec)\n\n def open(self):\n for user_defined_func in self.user_defined_funcs:\n if hasattr(user_defined_func, 'open'):\n runtime_context = RuntimeContext(\n self.spec.serialized_fn.runtime_context.task_name,\n self.spec.serialized_fn.runtime_context.task_name_with_subtasks,\n self.spec.serialized_fn.runtime_context.number_of_parallel_subtasks,\n self.spec.serialized_fn.runtime_context.max_number_of_parallel_subtasks,\n self.spec.serialized_fn.runtime_context.index_of_this_subtask,\n self.spec.serialized_fn.runtime_context.attempt_number,\n {p.key: p.value for p in self.spec.serialized_fn.runtime_context.job_parameters}\n )\n user_defined_func.open(runtime_context)\n\n def generate_func(self, serialized_fn):\n func, user_defined_func = operation_utils.extract_data_stream_stateless_function(\n serialized_fn)\n return func, [user_defined_func]\n\n\nclass InternalRuntimeContext(RuntimeContext):\n\n def __init__(self,\n task_name: str,\n task_name_with_subtasks: str,\n number_of_parallel_subtasks: int,\n max_number_of_parallel_subtasks: int,\n index_of_this_subtask: int,\n attempt_number: int,\n job_parameters: Dict[str, str],\n keyed_state_backend: RemoteKeyedStateBackend):\n super(InternalRuntimeContext, self).__init__(\n task_name, task_name_with_subtasks, number_of_parallel_subtasks,\n max_number_of_parallel_subtasks, index_of_this_subtask, attempt_number,\n job_parameters)\n self._keyed_state_backend = keyed_state_backend\n\n def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:\n return 
self._keyed_state_backend.get_value_state(state_descriptor.name, PickleCoder())\n\n def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:\n return self._keyed_state_backend.get_list_state(state_descriptor.name, PickleCoder())\n\n def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:\n return self._keyed_state_backend.get_map_state(state_descriptor.name, PickleCoder(),\n PickleCoder())\n\n def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:\n return self._keyed_state_backend.get_reducing_state(\n state_descriptor.get_name(), PickleCoder(), state_descriptor.get_reduce_function())\n\n\nclass ProcessFunctionOperation(DataStreamStatelessFunctionOperation):\n\n def __init__(self, spec):\n self.timer_service = ProcessFunctionOperation.InternalTimerService()\n self.function_context = ProcessFunctionOperation.InternalProcessFunctionContext(\n self.timer_service)\n super(ProcessFunctionOperation, self).__init__(spec)\n\n def generate_func(self, serialized_fn) -> tuple:\n func, proc_func = operation_utils.extract_process_function(\n serialized_fn, self.function_context)\n return func, [proc_func]\n\n class InternalProcessFunctionContext(ProcessFunction.Context):\n \"\"\"\n Internal implementation of ProcessFunction.Context.\n \"\"\"\n\n def __init__(self, timer_service: TimerService):\n self._timer_service = timer_service\n self._timestamp = None\n\n def timer_service(self):\n return self._timer_service\n\n def timestamp(self) -> int:\n return self._timestamp\n\n def set_timestamp(self, ts: int):\n self._timestamp = ts\n\n class InternalTimerService(TimerService):\n \"\"\"\n Internal implementation of TimerService.\n \"\"\"\n def __init__(self):\n self._current_watermark = None\n\n def current_processing_time(self) -> int:\n return int(time.time() * 1000)\n\n def current_watermark(self):\n return self._current_watermark\n\n def set_current_watermark(self, wm):\n self._current_watermark = wm\n\n def register_processing_time_timer(self, t: int):\n raise Exception(\"Register timers is only supported on a keyed stream.\")\n\n def register_event_time_timer(self, t: int):\n raise Exception(\"Register timers is only supported on a keyed stream.\")\n\n\nclass KeyedProcessFunctionOperation(StatefulFunctionOperation):\n\n def __init__(self, spec, keyed_state_backend):\n self._collector = KeyedProcessFunctionOperation.InternalCollector()\n internal_timer_service = KeyedProcessFunctionOperation.InternalTimerService(\n self._collector, keyed_state_backend)\n self.function_context = KeyedProcessFunctionOperation.InternalKeyedProcessFunctionContext(\n internal_timer_service)\n self.on_timer_ctx = KeyedProcessFunctionOperation\\\n .InternalKeyedProcessFunctionOnTimerContext(internal_timer_service)\n super(KeyedProcessFunctionOperation, self).__init__(spec, keyed_state_backend)\n\n def generate_func(self, serialized_fn) -> Tuple:\n func, proc_func = operation_utils.extract_keyed_process_function(\n serialized_fn, self.function_context, self.on_timer_ctx, self._collector,\n self.keyed_state_backend)\n return func, [proc_func]\n\n def open(self):\n for user_defined_func in self.user_defined_funcs:\n if hasattr(user_defined_func, 'open'):\n runtime_context = InternalRuntimeContext(\n self.spec.serialized_fn.runtime_context.task_name,\n self.spec.serialized_fn.runtime_context.task_name_with_subtasks,\n self.spec.serialized_fn.runtime_context.number_of_parallel_subtasks,\n 
self.spec.serialized_fn.runtime_context.max_number_of_parallel_subtasks,\n self.spec.serialized_fn.runtime_context.index_of_this_subtask,\n self.spec.serialized_fn.runtime_context.attempt_number,\n {p.key: p.value for p in\n self.spec.serialized_fn.runtime_context.job_parameters},\n self.keyed_state_backend)\n user_defined_func.open(runtime_context)\n\n class InternalCollector(object):\n \"\"\"\n Internal implementation of the Collector. It uses a buffer list to store data to be emitted.\n There will be a header flag for each data type. 0 means it is a proc time timer registering\n request, while 1 means it is an event time timer and 2 means it is a normal data. When\n registering a timer, it must take along with the corresponding key for it.\n \"\"\"\n\n def __init__(self):\n self.buf = []\n\n def collect_reg_proc_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.REGISTER_PROC_TIMER.value,\n a, key, None))\n\n def collect_reg_event_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.REGISTER_EVENT_TIMER.value,\n a, key, None))\n\n def collect_del_proc_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.DEL_PROC_TIMER.value,\n a, key, None))\n\n def collect_del_event_timer(self, a: Any, key: Any):\n self.buf.append(\n (operation_utils.KeyedProcessFunctionOutputFlag.DEL_EVENT_TIMER.value,\n a, key, None))\n\n def collect(self, a: Any):\n self.buf.append((operation_utils.KeyedProcessFunctionOutputFlag.NORMAL_DATA.value, a))\n\n def clear(self):\n self.buf.clear()\n\n class InternalKeyedProcessFunctionOnTimerContext(KeyedProcessFunction.OnTimerContext):\n \"\"\"\n Internal implementation of ProcessFunction.OnTimerContext.\n \"\"\"\n\n def __init__(self, timer_service: TimerService):\n self._timer_service = timer_service\n self._time_domain = None\n self._timestamp = None\n self._current_key = None\n\n def get_current_key(self):\n return self._current_key\n\n def set_current_key(self, current_key):\n self._current_key = current_key\n\n def timer_service(self) -> TimerService:\n return self._timer_service\n\n def timestamp(self) -> int:\n return self._timestamp\n\n def set_timestamp(self, ts: int):\n self._timestamp = ts\n\n def time_domain(self) -> TimeDomain:\n return self._time_domain\n\n def set_time_domain(self, td: TimeDomain):\n self._time_domain = td\n\n class InternalKeyedProcessFunctionContext(KeyedProcessFunction.Context):\n \"\"\"\n Internal implementation of KeyedProcessFunction.Context.\n \"\"\"\n\n def __init__(self, timer_service: TimerService):\n self._timer_service = timer_service\n self._timestamp = None\n self._current_key = None\n\n def get_current_key(self):\n return self._current_key\n\n def set_current_key(self, current_key):\n self._current_key = current_key\n\n def timer_service(self) -> TimerService:\n return self._timer_service\n\n def timestamp(self) -> int:\n return self._timestamp\n\n def set_timestamp(self, ts: int):\n self._timestamp = ts\n\n class InternalTimerService(TimerService):\n \"\"\"\n Internal implementation of TimerService.\n \"\"\"\n\n def __init__(self, collector, keyed_state_backend):\n self._collector = collector\n self._keyed_state_backend = keyed_state_backend\n self._current_watermark = None\n\n def current_processing_time(self) -> int:\n return int(time.time() * 1000)\n\n def current_watermark(self) -> int:\n return self._current_watermark\n\n def set_current_watermark(self, wm):\n 
self._current_watermark = wm\n\n def register_processing_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_reg_proc_timer(t, current_key)\n\n def register_event_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_reg_event_timer(t, current_key)\n\n def delete_processing_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_del_proc_timer(t, current_key)\n\n def delete_event_time_timer(self, t: int):\n current_key = self._keyed_state_backend.get_current_key()\n self._collector.collect_del_event_timer(t, current_key)\n", "################################################################################\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\nimport datetime\nimport decimal\n\nfrom pandas.util.testing import assert_frame_equal\n\nfrom pyflink.common import Row\nfrom pyflink.table.types import DataTypes\nfrom pyflink.testing import source_sink_utils\nfrom pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \\\n PyFlinkBlinkStreamTableTestCase, PyFlinkOldStreamTableTestCase\n\n\nclass PandasConversionTestBase(object):\n\n @classmethod\n def setUpClass(cls):\n super(PandasConversionTestBase, cls).setUpClass()\n cls.data = [(1, 1, 1, 1, True, 1.1, 1.2, 'hello', bytearray(b\"aaa\"),\n decimal.Decimal('1000000000000000000.01'), datetime.date(2014, 9, 13),\n datetime.time(hour=1, minute=0, second=1),\n datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],\n Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),\n d=[1, 2])),\n (1, 2, 2, 2, False, 2.1, 2.2, 'world', bytearray(b\"bbb\"),\n decimal.Decimal('1000000000000000000.02'), datetime.date(2014, 9, 13),\n datetime.time(hour=1, minute=0, second=1),\n datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],\n Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),\n d=[1, 2]))]\n cls.data_type = DataTypes.ROW(\n [DataTypes.FIELD(\"f1\", DataTypes.TINYINT()),\n DataTypes.FIELD(\"f2\", DataTypes.SMALLINT()),\n DataTypes.FIELD(\"f3\", DataTypes.INT()),\n DataTypes.FIELD(\"f4\", DataTypes.BIGINT()),\n DataTypes.FIELD(\"f5\", DataTypes.BOOLEAN()),\n DataTypes.FIELD(\"f6\", DataTypes.FLOAT()),\n DataTypes.FIELD(\"f7\", DataTypes.DOUBLE()),\n DataTypes.FIELD(\"f8\", DataTypes.STRING()),\n DataTypes.FIELD(\"f9\", DataTypes.BYTES()),\n DataTypes.FIELD(\"f10\", DataTypes.DECIMAL(38, 18)),\n DataTypes.FIELD(\"f11\", DataTypes.DATE()),\n DataTypes.FIELD(\"f12\", DataTypes.TIME()),\n DataTypes.FIELD(\"f13\", DataTypes.TIMESTAMP(3)),\n DataTypes.FIELD(\"f14\", 
DataTypes.ARRAY(DataTypes.STRING())),\n DataTypes.FIELD(\"f15\", DataTypes.ROW(\n [DataTypes.FIELD(\"a\", DataTypes.INT()),\n DataTypes.FIELD(\"b\", DataTypes.STRING()),\n DataTypes.FIELD(\"c\", DataTypes.TIMESTAMP(3)),\n DataTypes.FIELD(\"d\", DataTypes.ARRAY(DataTypes.INT()))]))], False)\n cls.pdf = cls.create_pandas_data_frame()\n\n @classmethod\n def create_pandas_data_frame(cls):\n data_dict = {}\n for j, name in enumerate(cls.data_type.names):\n data_dict[name] = [cls.data[i][j] for i in range(len(cls.data))]\n # need convert to numpy types\n import numpy as np\n data_dict[\"f1\"] = np.int8(data_dict[\"f1\"])\n data_dict[\"f2\"] = np.int16(data_dict[\"f2\"])\n data_dict[\"f3\"] = np.int32(data_dict[\"f3\"])\n data_dict[\"f4\"] = np.int64(data_dict[\"f4\"])\n data_dict[\"f6\"] = np.float32(data_dict[\"f6\"])\n data_dict[\"f7\"] = np.float64(data_dict[\"f7\"])\n data_dict[\"f15\"] = [row.as_dict() for row in data_dict[\"f15\"]]\n import pandas as pd\n return pd.DataFrame(data=data_dict,\n index=[2., 3.],\n columns=['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',\n 'f10', 'f11', 'f12', 'f13', 'f14', 'f15'])\n\n\nclass PandasConversionTests(PandasConversionTestBase):\n\n def test_from_pandas_with_incorrect_schema(self):\n fields = self.data_type.fields.copy()\n fields[0], fields[7] = fields[7], fields[0] # swap str with tinyint\n wrong_schema = DataTypes.ROW(fields) # should be DataTypes.STRING()\n with self.assertRaisesRegex(Exception, \"Expected a string.*got int8\"):\n self.t_env.from_pandas(self.pdf, schema=wrong_schema)\n\n def test_from_pandas_with_names(self):\n # skip decimal as currently only decimal(38, 18) is supported\n pdf = self.pdf.drop(['f10', 'f11', 'f12', 'f13', 'f14', 'f15'], axis=1)\n new_names = list(map(str, range(len(pdf.columns))))\n table = self.t_env.from_pandas(pdf, schema=new_names)\n self.assertEqual(new_names, table.get_schema().get_field_names())\n table = self.t_env.from_pandas(pdf, schema=tuple(new_names))\n self.assertEqual(new_names, table.get_schema().get_field_names())\n\n def test_from_pandas_with_types(self):\n new_types = self.data_type.field_types()\n new_types[0] = DataTypes.BIGINT()\n table = self.t_env.from_pandas(self.pdf, schema=new_types)\n self.assertEqual(new_types, table.get_schema().get_field_data_types())\n table = self.t_env.from_pandas(self.pdf, schema=tuple(new_types))\n self.assertEqual(new_types, table.get_schema().get_field_data_types())\n\n\nclass PandasConversionITTests(PandasConversionTestBase):\n\n def test_from_pandas(self):\n table = self.t_env.from_pandas(self.pdf, self.data_type, 5)\n self.assertEqual(self.data_type, table.get_schema().to_row_data_type())\n\n table = table.filter(table.f2 < 2)\n table_sink = source_sink_utils.TestAppendSink(\n self.data_type.field_names(),\n self.data_type.field_types())\n self.t_env.register_table_sink(\"Results\", table_sink)\n table.execute_insert(\"Results\").wait()\n actual = source_sink_utils.results()\n self.assert_equals(actual,\n [\"+I[1, 1, 1, 1, true, 1.1, 1.2, hello, [97, 97, 97], \"\n \"1000000000000000000.010000000000000000, 2014-09-13, 01:00:01, \"\n \"1970-01-01 00:00:00.123, [hello, 中文], +I[1, hello, \"\n \"1970-01-01 00:00:00.123, [1, 2]]]\"])\n\n def test_to_pandas(self):\n table = self.t_env.from_pandas(self.pdf, self.data_type)\n result_pdf = table.to_pandas()\n result_pdf.index = self.pdf.index\n self.assertEqual(2, len(result_pdf))\n assert_frame_equal(self.pdf, result_pdf)\n\n def test_empty_to_pandas(self):\n table = self.t_env.from_pandas(self.pdf, 
self.data_type)\n pdf = table.filter(table.f1 < 0).to_pandas()\n self.assertTrue(pdf.empty)\n\n def test_to_pandas_for_retract_table(self):\n table = self.t_env.from_pandas(self.pdf, self.data_type)\n result_pdf = table.group_by(table.f1).select(table.f2.max.alias('f2')).to_pandas()\n import pandas as pd\n import numpy as np\n assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int16([2])}))\n\n result_pdf = table.group_by(\"f2\").select(\"max(f1) as f2\").to_pandas()\n assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int8([1, 1])}))\n\n\nclass StreamPandasConversionTests(PandasConversionITTests,\n PyFlinkOldStreamTableTestCase):\n pass\n\n\nclass BlinkBatchPandasConversionTests(PandasConversionTests,\n PandasConversionITTests,\n PyFlinkBlinkBatchTableTestCase):\n pass\n\n\nclass BlinkStreamPandasConversionTests(PandasConversionITTests,\n PyFlinkBlinkStreamTableTestCase):\n def test_to_pandas_with_event_time(self):\n self.t_env.get_config().get_configuration().set_string(\"parallelism.default\", \"1\")\n # create source file path\n import tempfile\n import os\n tmp_dir = tempfile.gettempdir()\n data = [\n '2018-03-11 03:10:00',\n '2018-03-11 03:10:00',\n '2018-03-11 03:10:00',\n '2018-03-11 03:40:00',\n '2018-03-11 04:20:00',\n '2018-03-11 03:30:00'\n ]\n source_path = tmp_dir + '/test_to_pandas_with_event_time.csv'\n with open(source_path, 'w') as fd:\n for ele in data:\n fd.write(ele + '\\n')\n\n self.t_env.get_config().get_configuration().set_string(\n \"pipeline.time-characteristic\", \"EventTime\")\n\n source_table = \"\"\"\n create table source_table(\n rowtime TIMESTAMP(3),\n WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE\n ) with(\n 'connector.type' = 'filesystem',\n 'format.type' = 'csv',\n 'connector.path' = '%s',\n 'format.ignore-first-line' = 'false',\n 'format.field-delimiter' = ','\n )\n \"\"\" % source_path\n self.t_env.execute_sql(source_table)\n t = self.t_env.from_path(\"source_table\")\n result_pdf = t.to_pandas()\n import pandas as pd\n os.remove(source_path)\n assert_frame_equal(result_pdf, pd.DataFrame(\n data={\"rowtime\": [\n datetime.datetime(2018, 3, 11, 3, 10),\n datetime.datetime(2018, 3, 11, 3, 10),\n datetime.datetime(2018, 3, 11, 3, 10),\n datetime.datetime(2018, 3, 11, 3, 40),\n datetime.datetime(2018, 3, 11, 4, 20),\n datetime.datetime(2018, 3, 11, 3, 30),\n ]}))\n" ]
[ [ "pandas.Series" ], [ "numpy.int32", "numpy.int8", "pandas.DataFrame", "numpy.int16", "pandas.util.testing.assert_frame_equal", "numpy.int64", "numpy.float64", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ggoh29/Simplicial-neural-network-benchmark
[ "9a12bcd054251790d85e3971f5473dcffaa5664b", "9a12bcd054251790d85e3971f5473dcffaa5664b" ]
[ "planetoid_dgi_benchmark.py", "orientation_flow_benchmark.py" ]
[ "from Planetoid.PlanetoidDataset import PlanetoidSCDataset\nfrom models import planetoid_GCN, planetoid_GAT, planetoid_SCN, planetoid_SCConv, planetoid_SAN, planetoid_SAT\nimport torch.nn as nn\nimport torch\nfrom Planetoid.DGI import DGI\nfrom Planetoid.logreg import LogReg\nfrom constants import DEVICE\n\n2708, 79\ndataset = 'fake'\ndataset_features_dct = {'Cora' : 1433, 'CiteSeer' : 3703, 'PubMed' : 500, 'fake' : 2708}\ndataset_classes_dct = {'Cora' : 7, 'CiteSeer' : 6, 'PubMed' : 3 , 'fake' : 3}\ninput_size = dataset_features_dct[dataset]\noutput_size = 512\nnb_epochs = 200\ntest_epochs = 50\nlr = 0.001\nl2_coef = 0.0\npatience = 20\n\nnn_mod = planetoid_GCN\n# nn_mod = planetoid_GAT\n# nn_mod = planetoid_SCN\n# nn_mod = planetoid_SCConv\n# nn_mod = planetoid_SAT\n# nn_mod = planetoid_SAN\n\nprocessor_type = nn_mod[0]\nmodel = nn_mod[1]\n\ndgi = DGI(input_size, output_size, model)\noptimiser = torch.optim.Adam(dgi.parameters(), lr=lr, weight_decay=l2_coef)\nb_xent = nn.BCEWithLogitsLoss()\nxent = nn.CrossEntropyLoss()\n\nif __name__ == \"__main__\":\n\n data = PlanetoidSCDataset('./data', dataset, processor_type)\n data_full, b1, b2 = data.get_full()\n\n cnt_wait = 0\n best = 1e9\n best_t = 0\n bl = False\n b1 = b1.to(DEVICE)\n b2 = b2.to(DEVICE)\n for epoch in range(nb_epochs):\n dgi.train()\n optimiser.zero_grad()\n\n nb_nodes = data_full.X0.shape[0]\n lbl_1 = torch.ones(1, nb_nodes)\n lbl_2 = torch.zeros(1, nb_nodes)\n\n lbl = torch.cat((lbl_1, lbl_2), 1).to(DEVICE)\n\n logits = dgi(data_full, b1, b2, processor_type)\n\n loss = b_xent(logits, lbl)\n\n print('Loss:', loss)\n\n if loss < best:\n best = loss\n best_t = epoch\n cnt_wait = 0\n torch.save(dgi.state_dict(), f'./data/{model.__name__}_dgi.pkl')\n if epoch != 0:\n bl = True\n else:\n if bl:\n cnt_wait += 1\n\n if cnt_wait == patience:\n print('Early stopping!')\n break\n\n loss.backward()\n optimiser.step()\n\n print('Loading {}th epoch'.format(best_t))\n dgi.load_state_dict(torch.load(f'./data/{model.__name__}_dgi.pkl'))\n\n embeds, _ = dgi.embed(data_full, b1, b2)\n # embeds = data_full.X0.to(DEVICE)\n # output_size = 79\n # with open(\"./embeddings.py\", 'w') as f:\n # f.write(f'embeddings = {embeds.tolist()}')\n # with open(\"./labels.py\", 'w') as f:\n # f.write(f'labels = {data.get_labels().tolist()}')\n train_embs = data.get_train_embeds(embeds)\n val_embs = data.get_val_embeds(embeds)\n test_embs = data.get_test_embeds(embeds)\n\n train_lbls = data.get_train_labels().to(DEVICE)\n x_unique = train_lbls.unique(sorted=True)\n x_unique_count = torch.stack([(train_lbls == x_u).sum() for x_u in x_unique])\n val_lbls = data.get_val_labels().to(DEVICE)\n test_lbls = data.get_test_labels().to(DEVICE)\n\n tot = torch.zeros(1).to(DEVICE)\n\n accs = []\n\n for _ in range(test_epochs):\n log = LogReg(output_size, dataset_classes_dct[dataset])\n opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)\n log.to(DEVICE)\n\n pat_steps = 0\n best_acc = torch.zeros(1)\n best_acc = best_acc.to(DEVICE)\n\n for _ in range(100):\n log.train()\n opt.zero_grad()\n\n logits = log(train_embs)\n loss = xent(logits, train_lbls)\n\n loss.backward()\n opt.step()\n\n logits = log(test_embs)\n preds = torch.argmax(logits, dim=1)\n acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]\n accs.append(acc * 100)\n print(model.__name__)\n print(acc)\n tot += acc\n\n print('Average accuracy:', tot / test_epochs)\n\n accs = torch.stack(accs)\n print(accs.mean())\n print(accs.std())\n", "import torch\nfrom OrientationFlow.FlowDataset 
import FlowSCDataset\nfrom models import flow_SAT, flow_SCN, flow_SCConv, flow_SAN\nfrom torch.utils.data import DataLoader\nfrom constants import DEVICE\n\n\ninput_size = 1\noutput_size = 2\nnb_epochs = 100\nlr = 0.001\nbatch_size = 4\n\n# f = torch.nn.functional.relu\nf = torch.nn.Tanh()\n# f = torch.nn.Identity()\n\n# nn_mod = flow_SAT\n# nn_mod = flow_SAN\nnn_mod = flow_SCN\n# nn_mod = flow_SCConv\n\nprocessor_type = nn_mod[0]\nmodel = nn_mod[1]\n\nmodel = model(input_size, input_size, input_size, output_size, f=f).to(DEVICE)\noptimiser = torch.optim.Adam(model.parameters(), lr=lr)\nloss_f = torch.nn.CrossEntropyLoss()\n\n\nif __name__ == \"__main__\":\n data = FlowSCDataset('./data', processor_type)\n train_dataset, test_dataset = data.get_val_train_split()\n\n train_dataset = DataLoader(train_dataset, batch_size=batch_size, collate_fn=processor_type.batch, num_workers=8,\n shuffle=True, pin_memory=True)\n test_dataset = DataLoader(test_dataset, batch_size=batch_size, collate_fn=processor_type.batch, num_workers=8,\n shuffle=True, pin_memory=True)\n\n best_acc = 0\n for j in range(nb_epochs):\n training_acc, i = 0, 0\n model.train()\n for simplicialComplex, train_labels in train_dataset:\n simplicialComplex = processor_type.clean_features(simplicialComplex)\n simplicialComplex = processor_type.repair(simplicialComplex)\n simplicialComplex.to_device()\n train_labels = train_labels.to(DEVICE)\n optimiser.zero_grad()\n prediction = model(simplicialComplex)\n loss = loss_f(prediction, train_labels)\n loss.backward()\n optimiser.step()\n\n train_acc = (torch.argmax(prediction, 1).flatten() == train_labels).type(torch.float).mean().item()\n i += 1\n training_acc += (train_acc - training_acc) / i\n if training_acc > best_acc:\n torch.save(model.state_dict(), f'./data/{model.__class__.__name__}_flow.pkl')\n best_acc = training_acc\n\n print(f\"Training accuracy of {training_acc:.4f} for epoch {j}\")\n\n model.load_state_dict(torch.load(f'./data/{model.__class__.__name__}_flow.pkl'))\n model.eval()\n testing_acc = 0\n i = 0\n with torch.no_grad():\n for simplicialComplex, test_labels in test_dataset:\n simplicialComplex = processor_type.clean_features(simplicialComplex)\n simplicialComplex = processor_type.repair(simplicialComplex)\n simplicialComplex.to_device()\n test_labels = test_labels.to(DEVICE)\n prediction = model(simplicialComplex)\n\n test_acc = (torch.argmax(prediction, 1).flatten() == test_labels).type(torch.float).mean().item()\n i += 1\n testing_acc += (test_acc - testing_acc) / i\n\n print(f\"Test accuracy of {testing_acc:.4f}\")\n\n\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.ones", "torch.zeros", "torch.load", "torch.cat", "torch.sum", "torch.nn.BCEWithLogitsLoss", "torch.stack", "torch.argmax" ], [ "torch.nn.CrossEntropyLoss", "torch.load", "torch.utils.data.DataLoader", "torch.nn.Tanh", "torch.no_grad", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JohnLauFoo/clc_packages_Yu
[ "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991", "259f01d9b5c02154ce258734d519ae8995cd0991" ]
[ "matplotlib-3.4.3/matplotlib-3.4.3/examples/mplot3d/lines3d.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/hat_graph.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/misc/load_converter.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/span_regions.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/stairs_demo.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/userdemo/colormap_normalizations_symlognorm.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/user_interfaces/web_application_server_sgskip.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/timeline.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/ticks_and_spines/spines.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/shapes_and_collections/path_patch.py", "matplotlib-3.4.3/matplotlib-3.4.3/examples/axes_grid1/simple_axes_divider1.py" ]
[ "\"\"\"\n================\nParametric Curve\n================\n\nThis example demonstrates plotting a parametric curve in 3D.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nax = plt.figure().add_subplot(projection='3d')\n\n# Prepare arrays x, y, z\ntheta = np.linspace(-4 * np.pi, 4 * np.pi, 100)\nz = np.linspace(-2, 2, 100)\nr = z**2 + 1\nx = r * np.sin(theta)\ny = r * np.cos(theta)\n\nax.plot(x, y, z, label='parametric curve')\nax.legend()\n\nplt.show()\n", "\"\"\"\n=========\nHat graph\n=========\nThis example shows how to create a `hat graph`_ and how to annotate it with\nlabels.\n\n.. _hat graph: https://doi.org/10.1186/s41235-019-0182-3\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef hat_graph(ax, xlabels, values, group_labels):\n \"\"\"\n Create a hat graph.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n The Axes to plot into.\n xlabels : list of str\n The category names to be displayed on the x-axis.\n values : (M, N) array-like\n The data values.\n Rows are the groups (len(group_labels) == M).\n Columns are the categories (len(xlabels) == N).\n group_labels : list of str\n The group labels displayed in the legend.\n \"\"\"\n\n def label_bars(heights, rects):\n \"\"\"Attach a text label on top of each bar.\"\"\"\n for height, rect in zip(heights, rects):\n ax.annotate(f'{height}',\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 4), # 4 points vertical offset.\n textcoords='offset points',\n ha='center', va='bottom')\n\n values = np.asarray(values)\n x = np.arange(values.shape[1])\n ax.set_xticks(x)\n ax.set_xticklabels(xlabels)\n spacing = 0.3 # spacing between hat groups\n width = (1 - spacing) / values.shape[0]\n heights0 = values[0]\n for i, (heights, group_label) in enumerate(zip(values, group_labels)):\n style = {'fill': False} if i == 0 else {'edgecolor': 'black'}\n rects = ax.bar(x - spacing/2 + i * width, heights - heights0,\n width, bottom=heights0, label=group_label, **style)\n label_bars(heights, rects)\n\n\n# initialise labels and a numpy array make sure you have\n# N labels of N number of values in the array\nxlabels = ['I', 'II', 'III', 'IV', 'V']\nplayerA = np.array([5, 15, 22, 20, 25])\nplayerB = np.array([25, 32, 34, 30, 27])\n\nfig, ax = plt.subplots()\nhat_graph(ax, xlabels, [playerA, playerB], ['Player A', 'Player B'])\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_xlabel('Games')\nax.set_ylabel('Score')\nax.set_ylim(0, 60)\nax.set_title('Scores by number of game and players')\nax.legend()\n\nfig.tight_layout()\nplt.show()\n#############################################################################\n#\n# .. 
admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `matplotlib.axes.Axes.bar` / `matplotlib.pyplot.bar`\n# - `matplotlib.axes.Axes.annotate` / `matplotlib.pyplot.annotate`\n", "\"\"\"\n==============\nLoad converter\n==============\n\nThis example demonstrates passing a custom converter to `numpy.genfromtxt` to\nextract dates from a CSV file.\n\"\"\"\n\nimport dateutil.parser\nfrom matplotlib import cbook\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndatafile = cbook.get_sample_data('msft.csv', asfileobj=False)\nprint('loading', datafile)\n\ndata = np.genfromtxt(\n datafile, delimiter=',', names=True,\n dtype=None, converters={0: dateutil.parser.parse})\n\nfig, ax = plt.subplots()\nax.plot(data['Date'], data['High'], '-')\nfig.autofmt_xdate()\nplt.show()\n", "\"\"\"\n================\nUsing span_where\n================\n\nIllustrate some helper functions for shading regions where a logical\nmask is True.\n\nSee `matplotlib.collections.BrokenBarHCollection.span_where`.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.collections as collections\n\n\nt = np.arange(0.0, 2, 0.01)\ns1 = np.sin(2*np.pi*t)\ns2 = 1.2*np.sin(4*np.pi*t)\n\n\nfig, ax = plt.subplots()\nax.set_title('using span_where')\nax.plot(t, s1, color='black')\nax.axhline(0, color='black', lw=2)\n\ncollection = collections.BrokenBarHCollection.span_where(\n t, ymin=0, ymax=1, where=s1 > 0, facecolor='green', alpha=0.5)\nax.add_collection(collection)\n\ncollection = collections.BrokenBarHCollection.span_where(\n t, ymin=-1, ymax=0, where=s1 < 0, facecolor='red', alpha=0.5)\nax.add_collection(collection)\n\n\nplt.show()\n\n\n#############################################################################\n#\n# .. admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `matplotlib.collections.BrokenBarHCollection`\n# - `matplotlib.collections.BrokenBarHCollection.span_where`\n# - `matplotlib.axes.Axes.add_collection`\n# - `matplotlib.axes.Axes.axhline`\n", "\"\"\"\n===========\nStairs Demo\n===========\n\nThis example demonstrates the use of `~.matplotlib.pyplot.stairs` for stepwise\nconstant functions. 
A common use case is histogram and histogram-like data\nvisualization.\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import StepPatch\n\nnp.random.seed(0)\nh, edges = np.histogram(np.random.normal(5, 3, 5000),\n bins=np.linspace(0, 10, 20))\n\nfig, axs = plt.subplots(3, 1, figsize=(7, 15))\naxs[0].stairs(h, edges, label='Simple histogram')\naxs[0].stairs(h, edges + 5, baseline=50, label='Modified baseline')\naxs[0].stairs(h, edges + 10, baseline=None, label='No edges')\naxs[0].set_title(\"Step Histograms\")\n\naxs[1].stairs(np.arange(1, 6, 1), fill=True,\n label='Filled histogram\\nw/ automatic edges')\naxs[1].stairs(np.arange(1, 6, 1)*0.3, np.arange(2, 8, 1),\n orientation='horizontal', hatch='//',\n label='Hatched histogram\\nw/ horizontal orientation')\naxs[1].set_title(\"Filled histogram\")\n\npatch = StepPatch(values=[1, 2, 3, 2, 1],\n edges=range(1, 7),\n label=('Patch derived underlying object\\n'\n 'with default edge/facecolor behaviour'))\naxs[2].add_patch(patch)\naxs[2].set_xlim(0, 7)\naxs[2].set_ylim(-1, 5)\naxs[2].set_title(\"StepPatch artist\")\n\nfor ax in axs:\n ax.legend()\nplt.show()\n\n#############################################################################\n# *baseline* can take an array to allow for stacked histogram plots\nA = [[0, 0, 0],\n [1, 2, 3],\n [2, 4, 6],\n [3, 6, 9]]\n\nfor i in range(len(A) - 1):\n plt.stairs(A[i+1], baseline=A[i], fill=True)\n\n#############################################################################\n# Comparison of `.pyplot.step` and `.pyplot.stairs`\n# -------------------------------------------------\n#\n# `.pyplot.step` defines the positions of the steps as single values. The steps\n# extend left/right/both ways from these reference values depending on the\n# parameter *where*. The number of *x* and *y* values is the same.\n#\n# In contrast, `.pyplot.stairs` defines the positions of the steps via their\n# bounds *edges*, which is one element longer than the step values.\n\nbins = np.arange(14)\ncenters = bins[:-1] + np.diff(bins) / 2\ny = np.sin(centers / 2)\n\nplt.step(bins[:-1], y, where='post', label='step(where=\"post\")')\nplt.plot(bins[:-1], y, 'o--', color='grey', alpha=0.3)\n\nplt.stairs(y - 1, bins, baseline=None, label='stairs()')\nplt.plot(centers, y - 1, 'o--', color='grey', alpha=0.3)\nplt.plot(np.repeat(bins, 2), np.hstack([y[0], np.repeat(y, 2), y[-1]]) - 1,\n 'o', color='red', alpha=0.2)\n\nplt.legend()\nplt.title('step() vs. stairs()')\nplt.show()\n\n#############################################################################\n#\n# .. admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `matplotlib.axes.Axes.stairs` / `matplotlib.pyplot.stairs`\n# - `matplotlib.patches.StepPatch`\n", "\"\"\"\n==================================\nColormap Normalizations Symlognorm\n==================================\n\nDemonstration of using norm to map colormaps onto data in non-linear ways.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n\n\"\"\"\nSymLogNorm: two humps, one negative and one positive, The positive\nwith 5-times the amplitude. Linearly, you cannot see detail in the\nnegative hump. 
Here we logarithmically scale the positive and\nnegative data separately.\n\nNote that colorbar labels do not come out looking very good.\n\"\"\"\n\nN = 100\nX, Y = np.mgrid[-3:3:complex(0, N), -2:2:complex(0, N)]\nZ1 = np.exp(-X**2 - Y**2)\nZ2 = np.exp(-(X - 1)**2 - (Y - 1)**2)\nZ = (Z1 - Z2) * 2\n\nfig, ax = plt.subplots(2, 1)\n\npcm = ax[0].pcolormesh(X, Y, Z,\n norm=colors.SymLogNorm(linthresh=0.03, linscale=0.03,\n vmin=-1.0, vmax=1.0, base=10),\n cmap='RdBu_r', shading='nearest')\nfig.colorbar(pcm, ax=ax[0], extend='both')\n\npcm = ax[1].pcolormesh(X, Y, Z, cmap='RdBu_r', vmin=-np.max(Z),\n shading='nearest')\nfig.colorbar(pcm, ax=ax[1], extend='both')\n\nplt.show()\n", "\"\"\"\n=============================================\nEmbedding in a web application server (Flask)\n=============================================\n\nWhen using Matplotlib in a web server it is strongly recommended to not use\npyplot (pyplot maintains references to the opened figures to make\n`~.matplotlib.pyplot.show` work, but this will cause memory leaks unless the\nfigures are properly closed).\n\nSince Matplotlib 3.1, one can directly create figures using the `.Figure`\nconstructor and save them to in-memory buffers. In older versions, it was\nnecessary to explicitly instantiate an Agg canvas (see e.g.\n:doc:`/gallery/user_interfaces/canvasagg`).\n\nThe following example uses Flask_, but other frameworks work similarly:\n\n.. _Flask: https://flask.palletsprojects.com\n\n\"\"\"\n\nimport base64\nfrom io import BytesIO\n\nfrom flask import Flask\nfrom matplotlib.figure import Figure\n\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef hello():\n # Generate the figure **without using pyplot**.\n fig = Figure()\n ax = fig.subplots()\n ax.plot([1, 2])\n # Save it to a temporary buffer.\n buf = BytesIO()\n fig.savefig(buf, format=\"png\")\n # Embed the result in the html output.\n data = base64.b64encode(buf.getbuffer()).decode(\"ascii\")\n return f\"<img src='data:image/png;base64,{data}'/>\"\n\n#############################################################################\n#\n# Since the above code is a Flask application, it should be run using the\n# `flask command-line tool <https://flask.palletsprojects.com/en/master/cli/>`_\n# Assuming that the working directory contains this script:\n#\n# Unix-like systems\n#\n# .. code-block:: console\n#\n# FLASK_APP=web_application_server_sgskip flask run\n#\n# Windows\n#\n# .. code-block:: console\n#\n# set FLASK_APP=web_application_server_sgskip\n# flask run\n#\n#\n# Clickable images for HTML\n# -------------------------\n#\n# Andrew Dalke of `Dalke Scientific <http://www.dalkescientific.com>`_\n# has written a nice `article\n# <http://www.dalkescientific.com/writings/diary/archive/2005/04/24/interactive_html.html>`_\n# on how to make html click maps with Matplotlib agg PNGs. We would\n# also like to add this functionality to SVG. If you are interested in\n# contributing to these efforts that would be great.\n", "\"\"\"\n===============================================\nCreating a timeline with lines, dates, and text\n===============================================\n\nHow to create a simple timeline using Matplotlib release dates.\n\nTimelines can be created with a collection of dates and text. In this example,\nwe show how to create a simple timeline using the dates for recent releases\nof Matplotlib. 
First, we'll pull the data from GitHub.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.dates as mdates\nfrom datetime import datetime\n\ntry:\n # Try to fetch a list of Matplotlib releases and their dates\n # from https://api.github.com/repos/matplotlib/matplotlib/releases\n import urllib.request\n import json\n\n url = 'https://api.github.com/repos/matplotlib/matplotlib/releases'\n url += '?per_page=100'\n data = json.loads(urllib.request.urlopen(url, timeout=.4).read().decode())\n\n dates = []\n names = []\n for item in data:\n if 'rc' not in item['tag_name'] and 'b' not in item['tag_name']:\n dates.append(item['published_at'].split(\"T\")[0])\n names.append(item['tag_name'])\n # Convert date strings (e.g. 2014-10-18) to datetime\n dates = [datetime.strptime(d, \"%Y-%m-%d\") for d in dates]\n\nexcept Exception:\n # In case the above fails, e.g. because of missing internet connection\n # use the following lists as fallback.\n names = ['v2.2.4', 'v3.0.3', 'v3.0.2', 'v3.0.1', 'v3.0.0', 'v2.2.3',\n 'v2.2.2', 'v2.2.1', 'v2.2.0', 'v2.1.2', 'v2.1.1', 'v2.1.0',\n 'v2.0.2', 'v2.0.1', 'v2.0.0', 'v1.5.3', 'v1.5.2', 'v1.5.1',\n 'v1.5.0', 'v1.4.3', 'v1.4.2', 'v1.4.1', 'v1.4.0']\n\n dates = ['2019-02-26', '2019-02-26', '2018-11-10', '2018-11-10',\n '2018-09-18', '2018-08-10', '2018-03-17', '2018-03-16',\n '2018-03-06', '2018-01-18', '2017-12-10', '2017-10-07',\n '2017-05-10', '2017-05-02', '2017-01-17', '2016-09-09',\n '2016-07-03', '2016-01-10', '2015-10-29', '2015-02-16',\n '2014-10-26', '2014-10-18', '2014-08-26']\n\n # Convert date strings (e.g. 2014-10-18) to datetime\n dates = [datetime.strptime(d, \"%Y-%m-%d\") for d in dates]\n\n##############################################################################\n# Next, we'll create a stem plot with some variation in levels as to\n# distinguish even close-by events. We add markers on the baseline for visual\n# emphasis on the one-dimensional nature of the time line.\n#\n# For each event, we add a text label via `~.Axes.annotate`, which is offset\n# in units of points from the tip of the event line.\n#\n# Note that Matplotlib will automatically plot datetime inputs.\n\n\n# Choose some nice levels\nlevels = np.tile([-5, 5, -3, 3, -1, 1],\n int(np.ceil(len(dates)/6)))[:len(dates)]\n\n# Create figure and plot a stem plot with the date\nfig, ax = plt.subplots(figsize=(8.8, 4), constrained_layout=True)\nax.set(title=\"Matplotlib release dates\")\n\nax.vlines(dates, 0, levels, color=\"tab:red\") # The vertical stems.\nax.plot(dates, np.zeros_like(dates), \"-o\",\n color=\"k\", markerfacecolor=\"w\") # Baseline and markers on it.\n\n# annotate lines\nfor d, l, r in zip(dates, levels, names):\n ax.annotate(r, xy=(d, l),\n xytext=(-3, np.sign(l)*3), textcoords=\"offset points\",\n horizontalalignment=\"right\",\n verticalalignment=\"bottom\" if l > 0 else \"top\")\n\n# format xaxis with 4 month intervals\nax.xaxis.set_major_locator(mdates.MonthLocator(interval=4))\nax.xaxis.set_major_formatter(mdates.DateFormatter(\"%b %Y\"))\nplt.setp(ax.get_xticklabels(), rotation=30, ha=\"right\")\n\n# remove y axis and spines\nax.yaxis.set_visible(False)\nax.spines[[\"left\", \"top\", \"right\"]].set_visible(False)\n\nax.margins(y=0.1)\nplt.show()\n\n\n#############################################################################\n#\n# .. 
admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `matplotlib.axes.Axes.annotate`\n# - `matplotlib.axes.Axes.vlines`\n# - `matplotlib.axis.Axis.set_major_locator`\n# - `matplotlib.axis.Axis.set_major_formatter`\n# - `matplotlib.dates.MonthLocator`\n# - `matplotlib.dates.DateFormatter`\n", "\"\"\"\n======\nSpines\n======\n\nThis demo compares:\n\n- normal axes, with spines on all four sides;\n- an axes with spines only on the left and bottom;\n- an axes using custom bounds to limit the extent of the spine.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nx = np.linspace(0, 2 * np.pi, 100)\ny = 2 * np.sin(x)\n\n# Constrained layout makes sure the labels don't overlap the axes.\nfig, (ax0, ax1, ax2) = plt.subplots(nrows=3, constrained_layout=True)\n\nax0.plot(x, y)\nax0.set_title('normal spines')\n\nax1.plot(x, y)\nax1.set_title('bottom-left spines')\n\n# Hide the right and top spines\nax1.spines.right.set_visible(False)\nax1.spines.top.set_visible(False)\n# Only show ticks on the left and bottom spines\nax1.yaxis.set_ticks_position('left')\nax1.xaxis.set_ticks_position('bottom')\n\nax2.plot(x, y)\n\n# Only draw spine between the y-ticks\nax2.spines.left.set_bounds(-1, 1)\n# Hide the right and top spines\nax2.spines.right.set_visible(False)\nax2.spines.top.set_visible(False)\n# Only show ticks on the left and bottom spines\nax2.yaxis.set_ticks_position('left')\nax2.xaxis.set_ticks_position('bottom')\n\nplt.show()\n", "r\"\"\"\n================\nPathPatch object\n================\n\nThis example shows how to create `~.path.Path` and `~.patches.PathPatch`\nobjects through Matplotlib's API.\n\"\"\"\n\nimport matplotlib.path as mpath\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\n\nPath = mpath.Path\npath_data = [\n (Path.MOVETO, (1.58, -2.57)),\n (Path.CURVE4, (0.35, -1.1)),\n (Path.CURVE4, (-1.75, 2.0)),\n (Path.CURVE4, (0.375, 2.0)),\n (Path.LINETO, (0.85, 1.15)),\n (Path.CURVE4, (2.2, 3.2)),\n (Path.CURVE4, (3, 0.05)),\n (Path.CURVE4, (2.0, -0.5)),\n (Path.CLOSEPOLY, (1.58, -2.57)),\n ]\ncodes, verts = zip(*path_data)\npath = mpath.Path(verts, codes)\npatch = mpatches.PathPatch(path, facecolor='r', alpha=0.5)\nax.add_patch(patch)\n\n# plot control points and connecting lines\nx, y = zip(*path.vertices)\nline, = ax.plot(x, y, 'go-')\n\nax.grid()\nax.axis('equal')\nplt.show()\n\n#############################################################################\n#\n# .. 
admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `matplotlib.path`\n# - `matplotlib.path.Path`\n# - `matplotlib.patches`\n# - `matplotlib.patches.PathPatch`\n# - `matplotlib.axes.Axes.add_patch`\n", "\"\"\"\n=====================\nSimple Axes Divider 1\n=====================\n\n\"\"\"\n\nfrom mpl_toolkits.axes_grid1 import Size, Divider\nimport matplotlib.pyplot as plt\n\n\n##############################################################################\n# Fixed axes sizes; fixed paddings.\n\nfig = plt.figure(figsize=(6, 6))\n\n# Sizes are in inches.\nhoriz = [Size.Fixed(1.), Size.Fixed(.5), Size.Fixed(1.5), Size.Fixed(.5)]\nvert = [Size.Fixed(1.5), Size.Fixed(.5), Size.Fixed(1.)]\n\nrect = (0.1, 0.1, 0.8, 0.8)\n# Divide the axes rectangle into a grid with sizes specified by horiz * vert.\ndivider = Divider(fig, rect, horiz, vert, aspect=False)\n\n# The rect parameter will actually be ignored and overridden by axes_locator.\nax1 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=0, ny=0))\nax2 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=0, ny=2))\nax3 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=2, ny=2))\nax4 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=2, nx1=4, ny=0))\n\nfor ax in fig.axes:\n ax.tick_params(labelbottom=False, labelleft=False)\n\n##############################################################################\n# Axes sizes that scale with the figure size; fixed paddings.\n\nfig = plt.figure(figsize=(6, 6))\n\nhoriz = [Size.Scaled(1.5), Size.Fixed(.5), Size.Scaled(1.), Size.Scaled(.5)]\nvert = [Size.Scaled(1.), Size.Fixed(.5), Size.Scaled(1.5)]\n\nrect = (0.1, 0.1, 0.8, 0.8)\n# Divide the axes rectangle into a grid with sizes specified by horiz * vert.\ndivider = Divider(fig, rect, horiz, vert, aspect=False)\n\n# The rect parameter will actually be ignored and overridden by axes_locator.\nax1 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=0, ny=0))\nax2 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=0, ny=2))\nax3 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=2, ny=2))\nax4 = fig.add_axes(rect, axes_locator=divider.new_locator(nx=2, nx1=4, ny=0))\n\nfor ax in fig.axes:\n ax.tick_params(labelbottom=False, labelleft=False)\n\nplt.show()\n" ]
[ [ "numpy.linspace", "numpy.cos", "numpy.sin", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.asarray", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.array", "matplotlib.pyplot.show" ], [ "matplotlib.cbook.get_sample_data", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "numpy.genfromtxt" ], [ "numpy.arange", "matplotlib.pyplot.subplots", "numpy.sin", "matplotlib.collections.BrokenBarHCollection.span_where", "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.legend", "numpy.random.seed", "matplotlib.pyplot.title", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.step", "matplotlib.pyplot.subplots", "numpy.sin", "matplotlib.pyplot.plot", "numpy.random.normal", "numpy.diff", "matplotlib.pyplot.stairs", "numpy.repeat", "matplotlib.pyplot.show" ], [ "matplotlib.colors.SymLogNorm", "matplotlib.pyplot.subplots", "numpy.max", "numpy.exp", "matplotlib.pyplot.show" ], [ "matplotlib.figure.Figure" ], [ "matplotlib.dates.DateFormatter", "matplotlib.pyplot.subplots", "numpy.sign", "numpy.zeros_like", "matplotlib.pyplot.show", "matplotlib.dates.MonthLocator" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "numpy.linspace", "numpy.sin" ], [ "matplotlib.pyplot.show", "matplotlib.path.Path", "matplotlib.pyplot.subplots", "matplotlib.patches.PathPatch" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AhmetCanSolak/aydin
[ "e8bc81ee88c96e0f34986df30a63c96468a45f70", "e8bc81ee88c96e0f34986df30a63c96468a45f70", "e8bc81ee88c96e0f34986df30a63c96468a45f70", "e8bc81ee88c96e0f34986df30a63c96468a45f70" ]
[ "aydin/it/demo/n2s/nn/2D_generic.py", "aydin/it/demo/n2s/cnn/2D_camera_small.py", "aydin/analysis/camera_simulation.py", "aydin/util/crop/super_fast_rep_crop.py" ]
[ "# flake8: noqa\nimport os\nimport time\n\nimport numpy\nimport numpy as np\nfrom skimage.data import camera\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\nfrom skimage.metrics import structural_similarity as ssim\n\nfrom aydin.features.standard_features import StandardFeatureGenerator\nfrom aydin.io.datasets import newyork, pollen, normalise, add_noise, lizard, characters\nfrom aydin.it.fgr import ImageTranslatorFGR\nfrom aydin.regression.perceptron import PerceptronRegressor\n\n\"\"\"\n Demo for self-supervised denoising using camera image with synthetic noise\n\"\"\"\n\n\ndef demo(image, name):\n image = normalise(image.astype(np.float32))\n noisy = add_noise(image)\n # noisy=image\n\n start_time = time.time()\n\n generator = StandardFeatureGenerator()\n regressor = PerceptronRegressor()\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy)\n\n elapsedtime = time.time() - start_time\n print(f\"time elapsed: {elapsedtime} s\")\n\n start = time.time()\n denoised = it.translate(noisy)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n denoised = numpy.clip(denoised, 0, 1)\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\"denoised:\", psnr_denoised, ssim_denoised)\n\n import matplotlib.pyplot as plt\n\n plt.figure(figsize=(2.7 * 5, 5), dpi=300)\n plt.subplot(1, 3, 1)\n plt.imshow(normalise(noisy), cmap='gray')\n plt.axis('off')\n plt.title(f'Noisy \\nPSNR: {psnr_noisy:.3f}, SSIM: {ssim_noisy:.3f}')\n plt.subplot(1, 3, 2)\n plt.imshow(normalise(denoised), cmap='gray')\n plt.axis('off')\n plt.title(f'Denoised \\nPSNR: {psnr_denoised:.3f}, SSIM: {ssim_denoised:.3f}')\n plt.subplot(1, 3, 3)\n plt.imshow(normalise(image), cmap='gray')\n plt.axis('off')\n plt.title('Original')\n plt.subplots_adjust(left=0.01, right=0.99, top=0.95, bottom=0.01, hspace=0.1)\n os.makedirs(\"../../../demo_results\", exist_ok=True)\n plt.savefig(f'../../demo_results/n2s_nn_2D_{name}.png')\n\n plt.clf()\n plt.plot(regressor.loss_history[0]['training'], 'r')\n plt.plot(regressor.loss_history[0]['validation'], 'b')\n plt.legend(['training', 'validation'])\n plt.show()\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(normalise(image), name='image')\n viewer.add_image(normalise(noisy), name='noisy')\n viewer.add_image(normalise(denoised), name='denoised')\n\n\nif __name__ == \"__main__\":\n camera_image = camera()\n demo(camera_image, \"camera\")\n lizard_image = lizard()\n demo(lizard_image, \"lizard\")\n pollen_image = pollen()\n demo(pollen_image, \"pollen\")\n newyork_image = newyork()\n demo(newyork_image, \"newyork\")\n characters_image = characters()\n demo(characters_image, \"characters\")\n", "# flake8: noqa\nimport time\n\nimport numpy\nfrom skimage.data import camera\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\nfrom skimage.metrics import structural_similarity as ssim\n\nfrom aydin.io.datasets import normalise, add_noise\nfrom aydin.it.cnn import ImageTranslatorCNN\n\n\ndef demo(image, max_epochs=4, image_width=200):\n \"\"\"\n Demo for self-supervised denoising using camera image with synthetic noise\n \"\"\"\n\n image = normalise(image)\n H0, W0 = (numpy.array(image.shape) - image_width) // 2\n image = image[H0 : H0 + image_width, W0 : W0 + 
image_width]\n noisy = add_noise(image)\n\n # CNN based Image translation:\n # input_dim only includes H, W, C; number of images is not included\n it = ImageTranslatorCNN(\n training_architecture='random',\n nb_unet_levels=3,\n batch_norm=None, # 'instance',\n max_epochs=max_epochs,\n )\n\n start = time.time()\n # total_num_patches decides how many tiling batches to train.\n it.train(noisy, noisy)\n stop = time.time()\n print(f\"Training: elapsed time: {stop - start} \")\n\n # in case of batching we have to do this:\n start = time.time()\n denoised_inf = it.translate(noisy, tile_size=image_width)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy.reshape(image.shape), 0, 1)\n denoised_inf = numpy.clip(denoised_inf, 0, 1)\n print(\"noisy :\", psnr(image, noisy), ssim(noisy, image))\n print(\"denoised_inf:\", psnr(image, denoised_inf), ssim(denoised_inf, image))\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(normalise(image), name='image')\n viewer.add_image(normalise(noisy), name='noisy')\n viewer.add_image(normalise(denoised_inf), name='denoised_inf')\n\n\nif __name__ == \"__main__\":\n camera_image = camera()\n demo(camera_image)\n", "# Function to add camera noise\nfrom typing import Optional\n\nimport numpy\nfrom numpy.random import RandomState\n\n\n# from numpy.typing import ArrayLike\n\n\ndef simulate_camera_image(\n photons_per_second,\n exposure_time_s: float = 0.20,\n quantum_efficiency: float = 0.82,\n gain: float = 2.22,\n gain_sigma: float = 0.02,\n gain_column_sigma: float = 0.01,\n offset_mean: float = 1.63,\n offset_sigma: float = 1,\n dark_current: float = 0.04,\n dark_current_sigma: float = 0.01,\n dark_current_column_sigma: float = 0.01,\n min_exposure_dark_current: float = 0.001,\n num_hot_pixels: int = 8,\n num_cold_pixels: int = 8,\n probability_cosmic_ray: float = 1e-6,\n bitdepth: int = 12,\n baseline: int = 100,\n shot_rnd: Optional[RandomState] = None,\n camera_rnd: Optional[RandomState] = None,\n dtype=numpy.int32,\n):\n \"\"\"\n Realistic noise simulation for scientific cameras.\n Adapted from Kyle Douglas blog: http://kmdouglass.github.io/posts/modeling-noise-for-image-simulations/\n With additional ideas from: https://mwcraig.github.io/ccd-as-book/01-03-Construction-of-an-artificial-but-realistic-image.html\n\n\n Parameters\n ----------\n photons_per_second :\n Image representing the number of photons received on the camera per pixel per second\n exposure_time_s :\n Exposure time in seconds\n quantum_efficiency :\n Quantum efficiency - i.e. 
conversion factor between photons and electrons\n gain :\n Conversion factor between electrons and Analog-Digital-Units (ADU)\n gain_sigma :\n Unfortunately, not all gains are identical across the camera pixels,\n This parameter controls the spread of the gain.\n gain_column_sigma :\n And often each column of the detector has its own electronics that induce another source of column-dependent noise.\n This parameter controls the additional spread of the gain per column.\n offset_mean :\n Pixel amplification offset noise mean value.\n offset_sigma :\n Pixel amplification offset noise sigma value.\n dark_current :\n Dark current, in electrons per pixel per second, which is the way manufacturers typically\n report it.\n dark_current_sigma :\n Unfortunately, the dark current is not identical for each and every pixel.\n This parameter controls the spread of the dark current.\n dark_current_column_sigma :\n And often each column of the detector has its own electronics that induce another source of column-dependent noise.\n This parameter controls the additional spread of the dark current per column.\n min_exposure_dark_current:\n Minimal exposure for the purpose of dark photons. The effects of the dark current do not completely vanish for very short exposures...\n num_hot_pixels:\n Number of hot pixels.\n num_cold_pixels:\n Number of cold pixels.\n probability_cosmic_ray:\n Probability per pixel per second that a cosmic ray will hit a camera pixel.\n bitdepth :\n Bit depth of each pixel fo the camera\n baseline :\n Baseline value for camera\n shot_rnd :\n Random state for each image (time dependent)\n camera_rnd :\n Random state for each camera (time indedependent, camera instance dependent)\n dtype :\n Integral dtype to return image in\n\n Returns\n -------\n\n \"\"\"\n\n if shot_rnd is None:\n shot_rnd = numpy.random.RandomState(seed=None)\n\n if camera_rnd is None:\n camera_rnd = numpy.random.RandomState(seed=42)\n\n # Gain image:\n gain_image = gain * numpy.ones_like(photons_per_second, dtype=numpy.float32)\n\n # Unfortunately the gain is a bit different for each pixel:\n if gain_sigma > 0:\n gain_image += _normal(\n camera_rnd, scale=gain_sigma, size=photons_per_second.shape\n )\n\n # And often each column of the detector has its own electronics that induce another source of column-dependent noise:\n if gain_column_sigma > 0:\n gain_image += _normal(\n camera_rnd, scale=gain_column_sigma, size=photons_per_second.shape[1:]\n )[numpy.newaxis, :]\n\n # Clip gain:\n gain_image = numpy.clip(gain_image, a_min=0, a_max=None, out=gain_image)\n\n # Readout offset noise:\n offset_image = offset_mean + _normal(\n shot_rnd, scale=offset_sigma, size=photons_per_second.shape\n )\n\n # Dark current image:\n dark_current_image = dark_current * numpy.ones_like(\n photons_per_second, dtype=numpy.float32\n )\n\n # Unfortunately the dark current is a bit different for each pixel:\n if dark_current_sigma > 0:\n dark_current_image += _normal(\n camera_rnd, scale=dark_current_sigma, size=photons_per_second.shape\n )\n\n # And often each column of the detector has its own electronics that induce another source of column-dependent noise:\n if dark_current_column_sigma > 0:\n dark_current_image += _normal(\n camera_rnd,\n scale=dark_current_column_sigma,\n size=photons_per_second.shape[1:],\n )[numpy.newaxis, :]\n\n # Add shot noise\n photons = _poisson(\n shot_rnd, photons_per_second * exposure_time_s, size=photons_per_second.shape\n )\n\n # Converts from photons to electrons:\n electrons = quantum_efficiency * 
photons\n\n # Epsilon value for clipping lowgain:\n epsilon = 1e-6\n\n # dark current electrons:\n dark_electrons = _poisson(\n shot_rnd,\n numpy.clip(dark_current_image, a_min=epsilon, a_max=None)\n * max(min_exposure_dark_current, exposure_time_s),\n size=photons.shape,\n )\n\n # Cosmic rays (lol):\n if probability_cosmic_ray > 0:\n num_of_rays = exposure_time_s * probability_cosmic_ray * electrons.size\n effective_num_of_rays = shot_rnd.poisson(num_of_rays)\n\n y_max, x_max = electrons.shape\n ray_x = camera_rnd.randint(0, x_max, size=effective_num_of_rays)\n ray_y = camera_rnd.randint(0, y_max, size=effective_num_of_rays)\n dark_electrons[tuple([ray_y, ray_x])] += int(gain * 16)\n\n # Some pixels are hot:\n if num_hot_pixels > 0:\n y_max, x_max = dark_electrons.shape\n hot_x = camera_rnd.randint(0, x_max, size=num_hot_pixels)\n hot_y = camera_rnd.randint(0, y_max, size=num_hot_pixels)\n dark_electrons[tuple([hot_y, hot_x])] *= min(16, 2 ** (bitdepth - 2))\n\n # Some pixels are cold:\n if num_cold_pixels > 0:\n y_max, x_max = electrons.shape\n cold_x = camera_rnd.randint(0, x_max, size=num_cold_pixels)\n cold_y = camera_rnd.randint(0, y_max, size=num_cold_pixels)\n electrons[tuple([cold_y, cold_x])] /= min(16, 2 ** (bitdepth - 2))\n\n # Add dark current\n all_electrons = dark_electrons + electrons\n\n # max ADU:\n max_adu = numpy.int(2**bitdepth - 1)\n\n # Convert to discrete numbers (ADU):\n adu = (all_electrons * gain_image + offset_image).astype(dtype)\n\n # Add baseline:\n adu += baseline\n\n # Models pixel saturation:\n adu[adu > max_adu] = max_adu\n\n return adu\n\n\ndef _poisson(rnd: RandomState, lam, size):\n return rnd.poisson(lam=lam, size=size)\n\n\ndef _normal(rnd: RandomState, scale, size):\n return rnd.normal(scale=scale, size=size)\n", "from memoization.memoization import cached\nfrom numpy.typing import ArrayLike\nfrom scipy.ndimage import zoom\n\nfrom aydin.util.crop.rep_crop import representative_crop\nfrom aydin.util.log.log import lprint, lsection\n\n\n@cached(ttl=10, max_size=5)\ndef super_fast_representative_crop(\n image: ArrayLike,\n crop_size: int,\n min_length: int = 8,\n search_mode: str = 'systematic',\n granularity_factor: int = 3,\n return_slice: bool = False,\n min_scaling_factor: int = 2,\n *args,\n **kwargs,\n):\n \"\"\"\n\n Parameters\n ----------\n Same parameters as 'representative_crop' with the addition of:\n\n min_scaling_factor: int\n Minimal downscaling factor per axis.\n\n\n Returns\n -------\n Most representative crop, and if return_slice is True the actual slice object too.\n\n \"\"\"\n with lsection(f\"Super fast cropping image of size: {image.shape}\"):\n\n # Compute downscale facto per dimension:\n def _downscale(length):\n return min(max(min_scaling_factor, length // 256), min_length)\n\n downscale_factor = tuple(\n _downscale(s) if s >= min_length else min_length // 2 for s in image.shape\n )\n lprint(f\"Scaling by factors: {downscale_factor}\")\n\n # Compute zoom factor\n zoom_per_axis = tuple(\n 1.0 / d if s > d else 1 for d, s in zip(downscale_factor, image.shape)\n )\n lprint(f\"zoom_per_axis: {zoom_per_axis}\")\n\n # Downsample image:\n with lsection(f\"Downscaling image of shape: {image.shape}...\"):\n image_d = zoom(image, zoom=zoom_per_axis, prefilter=False, order=0)\n\n # Compute overall zoom factor:\n overall_zoom = image_d.size / image.size\n\n # Compute the scaled-down crop_size:\n crop_size = int(crop_size * overall_zoom)\n\n # Delegate cropping:\n _, slice_ = representative_crop(\n image_d,\n crop_size=crop_size,\n 
search_mode=search_mode,\n granularity_factor=granularity_factor,\n min_length=min_length,\n return_slice=True,\n *args,\n **kwargs,\n )\n\n # Normalise Slice:\n # Upscale slice:\n slice_ = tuple(\n slice(\n 0 if sl.start is None else sl.start,\n s if sl.stop is None else sl.stop,\n 1,\n )\n for sl, s in zip(slice_, image_d.shape)\n )\n\n # Upscale slice:\n slice_ = tuple(\n slice(sl.start * s, sl.stop * s, 1)\n for sl, s in zip(slice_, downscale_factor)\n )\n\n # Clip slice to dimensions of image:\n slice_ = tuple(\n slice(max(sl.start, 0), min(sl.stop, s), 1)\n for sl, s in zip(slice_, image.shape)\n )\n\n # Crop Image:\n crop = image[slice_]\n\n # Returns:\n if return_slice:\n # Return slice if requested:\n return crop, slice_\n else:\n return crop\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.clip", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.clf", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.array", "numpy.clip" ], [ "numpy.int", "numpy.random.RandomState", "numpy.ones_like", "numpy.clip" ], [ "scipy.ndimage.zoom" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
xhuohai/kendryte-model-compiler
[ "b6ef72b5db83a3b421046150ff3e77843c2be5bb", "b6ef72b5db83a3b421046150ff3e77843c2be5bb" ]
[ "layer_list_to_darknet.py", "h5_converter.py" ]
[ "'''\n * Copyright 2018 Canaan Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n '''\n\nimport tensor_list_to_layer_list\nimport numpy\n\n\ndef gen_config_file(layers):\n ret = []\n for layer in layers:\n assert (isinstance(layer, tensor_list_to_layer_list.LayerBase))\n ret.append('[' + layer.name + ']')\n for k, v in layer.config.items():\n ret.append(str(k) + '=' + str(v))\n ret.append('')\n\n return '\\n'.join(ret)\n\n\ndef gen_weights(layers):\n ret = [numpy.array([0, 2, 0, 0], 'int32').tobytes()] # header\n\n for layer in layers:\n assert (isinstance(layer, tensor_list_to_layer_list.LayerBase))\n if type(layer) in (\n tensor_list_to_layer_list.LayerNet,\n tensor_list_to_layer_list.LayerPool\n ):\n pass\n elif isinstance(layer, tensor_list_to_layer_list.LayerConvolutional) or \\\n isinstance(layer, tensor_list_to_layer_list.LayerDepthwiseConvolutional):\n if str(layer.config['batch_normalize']) != '0':\n gamma = numpy.array(layer.batch_normalize_gamma, 'float32')\n beta = numpy.array(layer.batch_normalize_beta, 'float32')\n bias = numpy.array(layer.batch_normalize_moving_mean, 'float32')\n if layer.bias is not None:\n bias = bias - numpy.array(layer.bias, 'float32')\n variance = numpy.array(layer.batch_normalize_moving_variance, 'float32')\n\n ret.append(beta.tobytes())\n ret.append(gamma.tobytes())\n ret.append(bias.tobytes())\n ret.append(variance.tobytes())\n else:\n bias = numpy.array(layer.bias, 'float32')\n ret.append(bias.tobytes())\n\n weights = numpy.array(layer.weights, 'float32')\n weights_trans = numpy.transpose(weights, [3, 2, 0, 1])\n ret.append(weights_trans.tobytes())\n else:\n print('unknown layer:', layer.name, type(layer))\n\n return b''.join(ret)\n", "# coding=utf-8\r\n'''\r\n * Copyright 2018 Canaan Inc.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n '''\r\n \r\nimport keras.models\r\nimport tensorflow as tf\r\nimport tempfile\r\nfrom keras import backend as K\r\nfrom tensorflow.python.framework import graph_io\r\n\r\n\r\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\r\n \"\"\"\r\n Freezes the state of a session into a prunned computation graph.\r\n\r\n Creates a new computation graph where variable nodes are replaced by\r\n constants taking their current value in the session. 
The new graph will be\r\n prunned so subgraphs that are not neccesary to compute the requested\r\n outputs are removed.\r\n @param session The TensorFlow session to be frozen.\r\n @param keep_var_names A list of variable names that should not be frozen,\r\n or None to freeze all the variables in the graph.\r\n @param output_names Names of the relevant graph outputs.\r\n @param clear_devices Remove the device directives from the graph for better portability.\r\n @return The frozen graph definition.\r\n \"\"\"\r\n from tensorflow.python.framework.graph_util import convert_variables_to_constants\r\n graph = session.graph\r\n with graph.as_default():\r\n freeze_var_names = None #list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\r\n output_names = output_names or []\r\n # output_names += [v.op.name for v in tf.global_variables()]\r\n input_graph_def = graph.as_graph_def()\r\n if clear_devices:\r\n for node in input_graph_def.node:\r\n node.device = \"\"\r\n frozen_graph = convert_variables_to_constants(session, input_graph_def,\r\n output_names, freeze_var_names)\r\n return frozen_graph\r\n\r\n\r\ndef convert(h5_in):\r\n pb_out = tempfile.mktemp('.pb')\r\n *pb_path_list, pb_name = pb_out.split('/')\r\n pb_path = '/'.join(pb_path_list)\r\n\r\n K.set_learning_phase(0)\r\n net_model = keras.models.load_model(h5_in, custom_objects={'tf': tf})\r\n\r\n frozen_graph = freeze_session(K.get_session(), output_names=[net_model.output.op.name])\r\n graph_io.write_graph(frozen_graph, pb_path, pb_name, as_text=False)\r\n tf.reset_default_graph()\r\n return pb_out\r\n" ]
[ [ "numpy.array", "numpy.transpose" ], [ "tensorflow.reset_default_graph", "tensorflow.python.framework.graph_io.write_graph", "tensorflow.python.framework.graph_util.convert_variables_to_constants" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "1.2" ] } ]
837477/COMTRIS_AI
[ "2cb49a9a9c5de785d6b1a864abf8d5eeb6db3302" ]
[ "src/comtris.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom pymongo import MongoClient\n\n\nclass Net(nn.Module):\n def __init__(self, D_in, D_out):\n super(Net,self).__init__()\n self.layer_1 = nn.Linear(D_in, D_out*2)\n self.layer_out = nn.Linear(D_out*2, D_out)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.layer_1(x)\n x = self.relu(x)\n x = self.layer_out(x) \n return x\n\n\nclass Comtris():\n def __init__(self):\n self.db = MongoClient(os.environ['COMTRIS_MONGODB_URI'])['COMTRIS']\n self.model = {\n \"CPU\": torch.load(\"./model/CPU\"),\n \"VGA\": torch.load(\"./model/VGA\"),\n \"M/B\": torch.load(\"./model/MB\"),\n \"RAM\": torch.load(\"./model/RAM\"),\n \"SSD\": torch.load(\"./model/SSD\"),\n \"POWER\": torch.load(\"./model/POWER\"),\n }\n for part in self.model:\n self.model[part].eval() \n self.index_dict = {\n \"CPU\": self.db['master_config'].find_one({\"key\": \"CPU_dict\"})['value'],\n \"VGA\": self.db['master_config'].find_one({\"key\": \"VGA_dict\"})['value'],\n \"M/B\": self.db['master_config'].find_one({\"key\": \"M/B_dict\"})['value'],\n \"RAM\": self.db['master_config'].find_one({\"key\": \"RAM_dict\"})['value'],\n \"SSD\": self.db['master_config'].find_one({\"key\": \"SSD_dict\"})['value'],\n \"POWER\": self.db['master_config'].find_one({\"key\": \"POWER_dict\"})['value']\n }\n self.part_needs = self.db['master_config'].find_one({\"key\": \"needs\"})['value']\n self.index = {}\n for part in self.index_dict:\n for p_i in self.index_dict[part][\"part_to_index\"]:\n self.index_dict[part][\"part_to_index\"][p_i] = int(self.index_dict[part][\"part_to_index\"][p_i])\n self.index.update(self.index_dict[part][\"part_to_index\"])\n \n def part(self):\n part = {\n \"CPU\": list(self.index_dict['CPU']['part_to_index'].keys()),\n \"VGA\": list(self.index_dict['VGA']['part_to_index'].keys()),\n \"M/B\": list(self.index_dict['M/B']['part_to_index'].keys()),\n \"RAM\": list(self.index_dict['RAM']['part_to_index'].keys()),\n \"SSD\": list(self.index_dict['SSD']['part_to_index'].keys()),\n \"POWER\": list(self.index_dict['POWER']['part_to_index'].keys())\n }\n return part\n \n def needs(self):\n return self.part_needs\n\n def prediction(self, parts, target):\n # 예측 데이터 개수 확인\n if len(parts) != len(self.part_needs[target]):\n return False\n \n if target not in {\"CPU\", \"VGA\", \"M/B\", \"RAM\", \"SSD\", \"POWER\"}:\n return False\n \n # 예측 데이터 가공\n x = []\n for part in parts:\n x.append(self.index[part])\n x = torch.FloatTensor(x)\n \n # 예측 값 추출\n y = list(self.model[target](x))\n y = y.index(max(y))\n result = self.index_dict[target]['index_to_part'][str(y)]\n \n return result\n\n\nif __name__ == \"__main__\":\n CT = Comtris()\n\n # 순서 매우 중요!!\n # [\"AMD 3100\", \"ASROCK A320M\", \"ASROCK RX570\", \"3200 8G\", \"500GB\", \"600W\"]\n # [CPU, M/B, VGA, RAM, SSD, POWER]\n\n needs = CT.needs()\n part = CT.part()\n # CPU TEST\n '''\n for i in range(5):\n x = []\n for p in part:\n if p not in needs['CPU']:\n continue\n x.append(np.random.choice(part[p]))\n result = CT.prediction(x, \"CPU\")\n print(x)\n print(result)\n print(\"#\" * 100)\n # VGA TEST\n for i in range(5):\n x = []\n for p in part:\n if p not in needs['VGA']:\n continue\n x.append(np.random.choice(part[p]))\n result = CT.prediction(x, \"VGA\")\n print(x)\n print(result)\n print(\"#\" * 100)\n '''\n\n result = CT.prediction([\"GTX1660SUPER ASUS\", \"A320 ASUS\", \"3200 16GB\", \"1TB\", \"600W\"], \"CPU\")\n print(result)\n" ]
[ [ "torch.nn.Linear", "torch.nn.ReLU", "torch.FloatTensor", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tum-db/partitioned-filters
[ "56c20102715a442cbec9ecb732d41de15b31c828" ]
[ "python/benchmark_plotter/latexify.py" ]
[ "import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator, FuncFormatter\nimport numpy as np\n\n\ndef latexify(fig_width=None, fig_height=None, columns=1):\n \"\"\"Set up matplotlib's RC params for LaTeX plotting.\n Call this before plotting a figure.\n\n Parameters\n ----------\n fig_width : float, optional, inches\n fig_height : float, optional, inches\n columns : {1, 2}\n \"\"\"\n\n # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples\n\n # Width and max height in inches for IEEE journals taken from\n # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf\n\n assert (columns in [1, 2])\n\n if fig_width is None:\n fig_width = 3.39 if columns == 1 else 6.9 # width in inches\n\n if fig_height is None:\n golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio\n fig_height = fig_width * golden_mean # height in inches\n\n MAX_HEIGHT_INCHES = 32.0\n if fig_height > MAX_HEIGHT_INCHES:\n print(\"WARNING: fig_height too large:\" + fig_height +\n \"so will reduce to\" + MAX_HEIGHT_INCHES + \"inches.\")\n fig_height = MAX_HEIGHT_INCHES\n\n params = {'backend': 'ps',\n 'pgf.rcfonts': False,\n 'axes.labelsize': 8, # fontsize for x and y labels (was 10)\n 'axes.titlesize': 8,\n 'font.size': 8, # was 10\n 'legend.fontsize': 6, # was 10\n 'legend.handlelength': 1.5,\n 'legend.handletextpad': 0.3,\n 'legend.labelspacing': 0.3, # was 0.1\n 'legend.columnspacing': 0.3,\n 'legend.borderpad': 0.3,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'axes.labelpad': 1,\n 'axes.titlepad': 2,\n 'text.usetex': True,\n 'figure.figsize': [fig_width, fig_height],\n 'font.family': 'serif',\n 'text.latex.preamble': r'\\usepackage{amssymb} \\usepackage{ifsym}'\n }\n\n matplotlib.rcParams.update(params)\n\n\ndef format_axes(ax):\n spine_color = 'black'\n for spine in ['top', 'right']:\n ax.spines[spine].set_visible(False)\n\n for spine in ['left', 'bottom']:\n ax.spines[spine].set_color(spine_color)\n ax.spines[spine].set_linewidth(0.5)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for axis in [ax.xaxis, ax.yaxis]:\n axis.set_tick_params(direction='out', color=spine_color)\n\n ax.yaxis.set_minor_locator(AutoMinorLocator(n=2))\n ax.yaxis.grid(True)\n ax.yaxis.grid(b=True, which='minor', linestyle=':')\n ax.tick_params(axis='both', which='major', pad=0.5)\n\n return ax\n\n\ndef barAxes(ax):\n ax.set_axisbelow(True)\n\n\ndef cm2inch(value):\n return value / 2.54\n\n\ndef reorderLegend(ax=None, order=None, unique=False):\n if ax is None: ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) # sort both labels and handles by labels\n if order is not None: # Sort according to a given list (not necessarily complete)\n keys = dict(zip(order, range(len(order))))\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t, keys=keys: keys.get(t[0], np.inf)))\n if unique:\n labels, handles = zip(*unique_everseen(zip(labels, handles), key=labels)) # Keep only the first of each handle\n return handles, labels\n\n\ndef unique_everseen(seq, key=None):\n seen = set()\n seen_add = seen.add\n return [x for x, k in zip(seq, key) if not (k in seen or seen_add(k))]\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.ticker.AutoMinorLocator", "matplotlib.rcParams.update", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
parneetk/PyTorch-Style-Transfer
[ "f38ec4b1cd57cee4304787b054a6e6c9ce3b00ff" ]
[ "experiments/net/mynn.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Hang Zhang\n## ECE Department, Rutgers University\n## Email: [email protected]\n## Copyright (c) 2017\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nclass MultConst(nn.Module):\n\tdef forward(self, input):\n\t\treturn 255*input\n\n\nclass GramMatrix(nn.Module):\n\tdef forward(self, y):\n\t\t(b, ch, h, w) = y.size()\n\t\tfeatures = y.view(b, ch, w * h)\n\t\tfeatures_t = features.transpose(1, 2)\n\t\tgram = features.bmm(features_t) / (ch * h * w)\n\t\treturn gram\n\t\n\nclass InstanceNormalization(nn.Module):\n\t\"\"\"InstanceNormalization\n\tImproves convergence of neural-style.\n\tref: https://arxiv.org/pdf/1607.08022.pdf\n\t\"\"\"\n\n\tdef __init__(self, dim, eps=1e-5):\n\t\tsuper(InstanceNormalization, self).__init__()\n\t\tself.weight = nn.Parameter(torch.FloatTensor(dim))\n\t\tself.bias = nn.Parameter(torch.FloatTensor(dim))\n\t\tself.eps = eps\n\t\tself._reset_parameters()\n\n\tdef _reset_parameters(self):\n\t\tself.weight.data.uniform_()\n\t\tself.bias.data.zero_()\n\n\tdef forward(self, x):\n\t\tn = x.size(2) * x.size(3)\n\t\tt = x.view(x.size(0), x.size(1), n)\n\t\tmean = torch.mean(t, 2).unsqueeze(2).expand_as(x)\n\t\t# Calculate the biased var. torch.var returns unbiased var\n\t\tvar = torch.var(t, 2).unsqueeze(2).expand_as(x) * ((n - 1) / float(n))\n\t\tscale_broadcast = self.weight.unsqueeze(1).unsqueeze(1).unsqueeze(0)\n\t\tscale_broadcast = scale_broadcast.expand_as(x)\n\t\tshift_broadcast = self.bias.unsqueeze(1).unsqueeze(1).unsqueeze(0)\n\t\tshift_broadcast = shift_broadcast.expand_as(x)\n\t\tout = (x - mean) / torch.sqrt(var + self.eps)\n\t\tout = out * scale_broadcast + shift_broadcast\n\t\treturn out\n\n\nclass Basicblock(nn.Module):\n\tdef __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):\n\t\tsuper(Basicblock, self).__init__()\n\t\tself.downsample = downsample\n\t\tif self.downsample is not None:\n\t\t\tself.residual_layer = nn.Conv2d(inplanes, planes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=stride)\n\t\tconv_block=[]\n\t\tconv_block+=[norm_layer(inplanes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tConvLayer(inplanes, planes, kernel_size=3, stride=stride),\n\t\t\t\t\t\t\t\tnorm_layer(planes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tConvLayer(planes, planes, kernel_size=3, stride=1),\n\t\t\t\t\t\t\t\tnorm_layer(planes)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\t\n\tdef forward(self, input):\n\t\tif self.downsample is not None:\n\t\t\tresidual = self.residual_layer(input)\n\t\telse:\n\t\t\tresidual = input\n\t\treturn residual + self.conv_block(input)\n\t\t\t\n\nclass UpBasicblock(nn.Module):\n\t\"\"\" Up-sample residual block (from MSG-Net paper)\n\tEnables passing identity all the way through the generator\n\tref https://arxiv.org/abs/1703.06953\n\t\"\"\"\n\tdef __init__(self, inplanes, planes, stride=2, norm_layer=nn.BatchNorm2d):\n\t\tsuper(UpBasicblock, self).__init__()\n\t\tself.residual_layer = UpsampleConvLayer(inplanes, planes,\n \t\t\t \t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=1, 
upsample=stride)\n\t\tconv_block=[]\n\t\tconv_block+=[norm_layer(inplanes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tUpsampleConvLayer(inplanes, planes, kernel_size=3, stride=1, upsample=stride),\n\t\t\t\t\t\t\t\tnorm_layer(planes),\n\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\tConvLayer(planes, planes, kernel_size=3, stride=1)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\t\n\tdef forward(self, input):\n\t\treturn self.residual_layer(input) + self.conv_block(input)\n\n\nclass Bottleneck(nn.Module):\n\t\"\"\" Pre-activation residual block\n\tIdentity Mapping in Deep Residual Networks\n\tref https://arxiv.org/abs/1603.05027\n\t\"\"\"\n\tdef __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):\n\t\tsuper(Bottleneck, self).__init__()\n\t\tself.expansion = 4\n\t\tself.downsample = downsample\n\t\tif self.downsample is not None:\n\t\t\tself.residual_layer = nn.Conv2d(inplanes, planes * self.expansion,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=stride)\n\t\tconv_block = []\n\t\tconv_block += [norm_layer(inplanes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(inplanes, planes, kernel_size=1, stride=1)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tConvLayer(planes, planes, kernel_size=3, stride=stride)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(planes, planes * self.expansion, kernel_size=1, stride=1)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\t\t\n\tdef forward(self, x):\n\t\tif self.downsample is not None:\n\t\t\tresidual = self.residual_layer(x)\n\t\telse:\n\t\t\tresidual = x\n\t\treturn residual + self.conv_block(x)\n\n\nclass UpBottleneck(nn.Module):\n\t\"\"\" Up-sample residual block (from MSG-Net paper)\n\tEnables passing identity all the way through the generator\n\tref https://arxiv.org/abs/1703.06953\n\t\"\"\"\n\tdef __init__(self, inplanes, planes, stride=2, norm_layer=nn.BatchNorm2d):\n\t\tsuper(UpBottleneck, self).__init__()\n\t\tself.expansion = 4\n\t\tself.residual_layer = UpsampleConvLayer(inplanes, planes * self.expansion,\n \t\t\t \t\t\t\t\t\t\t\t\t\tkernel_size=1, stride=1, upsample=stride)\n\t\tconv_block = []\n\t\tconv_block += [norm_layer(inplanes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(inplanes, planes, kernel_size=1, stride=1)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tUpsampleConvLayer(planes, planes, kernel_size=3, stride=1, upsample=stride)]\n\t\tconv_block += [norm_layer(planes),\n\t\t\t\t\t\t\t\t\tnn.ReLU(inplace=True),\n\t\t\t\t\t\t\t\t\tnn.Conv2d(planes, planes * self.expansion, kernel_size=1, stride=1)]\n\t\tself.conv_block = nn.Sequential(*conv_block)\n\n\tdef forward(self, x):\n\t\treturn self.residual_layer(x) + self.conv_block(x)\n\n\nclass ConvLayer(torch.nn.Module):\n\tdef __init__(self, in_channels, out_channels, kernel_size, stride):\n\t\tsuper(ConvLayer, self).__init__()\n\t\treflection_padding = int(np.floor(kernel_size / 2))\n\t\tself.reflection_pad = nn.ReflectionPad2d(reflection_padding)\n\t\tself.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n\n\tdef forward(self, x):\n\t\tout = self.reflection_pad(x)\n\t\tout = self.conv2d(out)\n\t\treturn out\n\nclass UpsampleConvLayer(torch.nn.Module):\n\t\"\"\"UpsampleConvLayer\n\tUpsamples the input and then does a convolution. 
This method gives better results\n\tcompared to ConvTranspose2d.\n\tref: http://distill.pub/2016/deconv-checkerboard/\n\t\"\"\"\n\n\tdef __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):\n\t\tsuper(UpsampleConvLayer, self).__init__()\n\t\tself.upsample = upsample\n\t\tif upsample:\n\t\t\tself.upsample_layer = torch.nn.UpsamplingNearest2d(scale_factor=upsample)\n\t\tself.reflection_padding = int(np.floor(kernel_size / 2))\n\t\tif self.reflection_padding != 0:\n\t\t\tself.reflection_pad = nn.ReflectionPad2d(self.reflection_padding)\n\t\tself.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n\n\tdef forward(self, x):\n\t\tif self.upsample:\n\t\t\tx = self.upsample_layer(x)\n\t\tif self.reflection_padding != 0:\n\t\t\tx = self.reflection_pad(x)\n\t\tout = self.conv2d(x)\n\t\treturn out\n\n" ]
[ [ "torch.nn.Sequential", "torch.mean", "torch.nn.ReflectionPad2d", "torch.nn.UpsamplingNearest2d", "torch.sqrt", "torch.nn.Conv2d", "torch.FloatTensor", "numpy.floor", "torch.var", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dennisbappert/sod-using-vit
[ "24ed0692d8eb09adf2f74e69a132f267a4137b68" ]
[ "train.py" ]
[ "import datetime\nimport os\nimport random\nimport time\nimport warnings\n\nimport hydra\nimport torch\nfrom hydra.utils import instantiate\nfrom omegaconf import DictConfig, OmegaConf\nfrom torch.cuda.amp import autocast, GradScaler\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, WeightedRandomSampler\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom lib.data import ToDeviceFunction, PrefetchLoader\nfrom lib.utils import print_torch_setup, mkdir, save_on_master, MetricLogger, flatten_dict, SmoothedValue, torchvision\nfrom lib.utils.denormalize import denormalize\nfrom lib.utils.smoothing import gaussian_blur\n\n\[email protected](config_path='conf', config_name='config')\ndef main(cfg: DictConfig) -> None:\n if cfg.trainer.print_torch_setup is True:\n print_torch_setup()\n\n if cfg.trainer.seed is not None:\n random.seed(cfg.trainer.seed)\n torch.manual_seed(cfg.trainer.seed)\n torch.backends.cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n assert torch.cuda.is_available(), 'This code requires a GPU to train'\n torch.backends.cudnn.benchmark = True\n assert cfg.trainer.output_dir, 'You need to specify an output directory'\n\n mkdir(cfg.trainer.output_dir)\n experiment_name = time.strftime(\"%Y%m%d-%H%M%S\")\n print(f'The current experiment will be tracked as {experiment_name}')\n output_dir = os.path.join(cfg.trainer.output_dir, experiment_name)\n print(f'Results will be saved in {output_dir}')\n writer = SummaryWriter(output_dir)\n\n # this is just a workaround for now\n # hparams logging to a file and as text into tensorboard\n # it is certainly not perfect... 
:/\n hparams = flatten_dict(OmegaConf.to_container(cfg, resolve=True))\n hparams_as_str = [str(k) + ' >>> ' + str(v) + '\\n' for k, v in hparams.items()]\n # TODO: this seems to not work properly!\n # writer.add_hparams(hparams, metric_dict={'acc': 1}, run_name=experiment_name)\n with open(os.path.join(output_dir, 'hparams.txt'), 'w', encoding='utf-8') as hparams_file:\n for line in hparams_as_str:\n hparams_file.write(line)\n writer.add_text('hparams', '\\r\\n'.join(hparams_as_str), global_step=0)\n\n device = torch.device(cfg.trainer.device)\n assert device.type == 'cuda', 'Only GPU based training is supported'\n\n dataset = instantiate(cfg.dataset.train)\n\n assert cfg.dataset.val_split is not None, 'Handling a separate validation set is not implemented as of now!'\n train_size = int((1 - cfg.dataset.val_split) * len(dataset))\n val_size = len(dataset) - train_size\n train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])\n\n train_sampler_weights = dataset.make_weights_for_dataset_sampling(train_dataset)\n sampler = WeightedRandomSampler(train_sampler_weights, num_samples=cfg.dataset.train_samples_per_epoch,\n replacement=True)\n train_collate_fn = dataset.get_collate_fn(mode='train', channels_last=cfg.trainer.channels_last)\n train_dataloader = instantiate(cfg.dataloader.train,\n dataset=train_dataset,\n collate_fn=train_collate_fn,\n sampler=sampler)\n\n val_collate_fn = dataset.get_collate_fn(mode='val', channels_last=cfg.trainer.channels_last)\n val_dataloader = instantiate(cfg.dataloader.val,\n dataset=val_dataset,\n collate_fn=val_collate_fn)\n\n # this handler moves a batch to the GPU as uint8, casts it to a float after transferring it\n # and normalizes the images\n to_device_handler = ToDeviceFunction(device=device, mean=cfg.dataset.mean, std=cfg.dataset.std)\n\n # the prefetch loader prefetches the next batch onto the GPU which makes up a couple\n # of percent in the training loop\n train_dataloader = PrefetchLoader(loader=train_dataloader,\n to_device_handler=to_device_handler)\n\n # val_dataloader = PrefetchLoader(loader=val_dataloader,\n # to_device_handler=to_device_handler)\n\n model = instantiate(cfg.models.model,\n device=device\n ).to(device)\n\n if cfg.trainer.channels_last is True:\n model = model.to(memory_format=torch.channels_last)\n\n if cfg.trainer.anomaly_detection is True:\n torch.autograd.set_detect_anomaly(mode=True)\n\n params_to_optimize = [\n {\"params\": [p for p in model.parameters()\n if p.requires_grad]}\n ]\n\n optimizer = instantiate(cfg.optimizer, params_to_optimize)\n\n scaler = GradScaler(enabled=cfg.trainer.amp)\n\n if cfg.trainer.resume is not None:\n if os.path.isfile(cfg.trainer.resume):\n print(\"Trying to load checkpoint '{}'\".format(cfg.trainer.resume))\n\n if cfg.trainer.from_u2net_checkpoint is True:\n checkpoint = torch.load(cfg.trainer.resume, map_location=device)\n model.load_state_dict(checkpoint)\n else:\n checkpoint = torch.load(cfg.trainer.resume, map_location=device)\n model.load_state_dict(checkpoint['model'])\n\n if cfg.trainer.weights_only is False:\n cfg.trainer.start_epoch = checkpoint['epoch']\n optimizer.load_state_dict(checkpoint['optimizer'])\n scaler.load_state_dict(checkpoint['scaler'])\n\n print(f'Loaded checkpoint {cfg.trainer.resume}. 
Resuming training at epoch {cfg.trainer.start_epoch}')\n else:\n warnings.warn(f'Checkpoint f{cfg.trainer.resume} not found!')\n\n print(\"Start training...\")\n start_time = time.time()\n\n if cfg.trainer.dry_run is True:\n print(\"Doing dry run, running val on train dataset...\")\n # validate_one_epoch(writer, model, train_dataloader, device, 0, cfg.trainer.print_freq)\n return\n\n for epoch in range(cfg.trainer.start_epoch, cfg.trainer.epochs):\n train_one_epoch(writer, device, model, optimizer, scaler, train_dataloader, epoch, cfg)\n # validate_one_epoch(writer, model, val_dataloader, epoch, cfg)\n\n checkpoint = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scaler': scaler.state_dict(),\n 'epoch': epoch,\n 'cfg': cfg}\n save_on_master(\n checkpoint,\n os.path.join(output_dir, 'model_{}.pth'.format(epoch)))\n save_on_master(\n checkpoint,\n os.path.join(output_dir, 'checkpoint.pth'))\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\ndef create_metric_logger(train, epoch, writer):\n if train:\n prefix = 'train'\n else:\n prefix = 'val'\n\n metric_logger = MetricLogger(epoch=epoch, delimiter=\" \", writer=writer, experiment_prefix=prefix)\n\n if train:\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'), log=False)\n metric_logger.add_meter('samples/s', SmoothedValue(window_size=10, fmt='{value}'), log=True, log_value='median',\n title='samples per second')\n metric_logger.add_meter('loss', SmoothedValue(), log=True, log_value='global_avg',\n title='loss')\n\n return metric_logger\n\n\ndef criterion(aux, y, metadata, device):\n # aux ^= [d0, d1, d2, d3, d4, d5, d6]\n\n def masked_l1_loss(y_hat, y, mask):\n loss = F.l1_loss(y_hat, y, reduction='none')\n loss = (loss * mask.float()).sum()\n non_zero_elements = mask.sum()\n return loss / non_zero_elements\n\n mask = y[:, 0]\n smoothed_mask = gaussian_blur(\n mask.unsqueeze(dim=1), (9, 9), (2.5, 2.5)).squeeze(dim=1)\n unknown_mask = y[:, 1]\n\n l1_mask = torch.ones(mask.shape, device=device)\n l1_details_mask = torch.zeros(mask.shape, device=device)\n\n # i synthesised some detailed masks using pymatting.github.io\n # by synthesising trimaps from segmentation masks and use these\n # in an additional loss to let the model learn the unknown areas\n # between foreground and background. 
this is not perfect as the generated\n # trimaps and masks are not super accurate, but it seems to go in the right\n # direction.\n detailed_masks = [x['detailed_masks'] for x in metadata]\n for idx, detailed_mask in enumerate(detailed_masks):\n if not detailed_mask:\n l1_mask[idx] = l1_mask[idx] - unknown_mask[idx]\n else:\n l1_details_mask[idx] = unknown_mask[idx]\n\n loss = 0\n for output in aux:\n loss += 2 * masked_l1_loss(output, mask, l1_mask)\n # this loss should give some learning signals to focus on unknown areas\n loss += 3 * masked_l1_loss(output, mask, l1_details_mask)\n # i'm not quite sure if this loss gives the right incentive, the idea\n # is to blur the segmentation mask a bit to reduce background bleeding\n # caused by bad labels, preliminary results seem to be quite ok.\n loss += F.mse_loss(output, smoothed_mask)\n\n aux = {\n 'l1_mask': l1_mask,\n 'l1_detailed_mask': l1_details_mask,\n 'mask': mask,\n 'smoothed_mask': smoothed_mask\n }\n\n return loss, aux\n\n\ndef train_one_epoch(writer, device, model, optimizer, scaler, data_loader, epoch, cfg):\n model.train()\n\n metric_logger = create_metric_logger(train=True, epoch=epoch, writer=writer)\n\n for x, y, metadata in metric_logger.log_every(data_loader, cfg.trainer.print_freq):\n start_time = time.time()\n\n with autocast(enabled=cfg.trainer.amp):\n y_hat, aux_outputs = model(x)\n loss, aux = criterion(aux_outputs, y, metadata, device)\n\n optimizer.zero_grad()\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n metric_logger.update(\n loss=loss.item(),\n lr=optimizer.param_groups[0][\"lr\"])\n\n metric_logger.meters['samples/s'].update(x.size(0) / (time.time() - start_time))\n\n if random.random() < .1:\n sample = denormalize(x[:4], mean=cfg.dataset.mean, std=cfg.dataset.std)\n sample_foreground = y_hat[:4].unsqueeze(dim=1).repeat(1,3,1, 1) * sample\n\n writer.add_image(\n f'train-metrics/sample',\n torchvision.utils.make_grid(\n [torchvision.utils.make_grid(sample, nrow=4),\n torchvision.utils.make_grid(sample_foreground),\n torchvision.utils.make_grid(y_hat[:4].unsqueeze(dim=1), nrow=4)], nrow=1),\n metric_logger.global_step)\n\n writer.add_image(\n f'train-metrics/loss insights',\n torchvision.utils.make_grid(\n [torchvision.utils.make_grid(aux['l1_mask'][:4].unsqueeze(dim=1), nrow=4),\n torchvision.utils.make_grid(aux['l1_detailed_mask'][:4].unsqueeze(dim=1), nrow=4),\n torchvision.utils.make_grid(aux['smoothed_mask'][:4].unsqueeze(dim=1), nrow=4),\n torchvision.utils.make_grid(aux['mask'][:4].unsqueeze(dim=1), nrow=4)], nrow=1),\n metric_logger.global_step)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.ones", "torch.autograd.set_detect_anomaly", "torch.zeros", "torch.nn.functional.l1_loss", "torch.manual_seed", "torch.load", "torch.utils.data.WeightedRandomSampler", "torch.cuda.amp.autocast", "torch.nn.functional.mse_loss", "torch.utils.data.random_split", "torch.cuda.amp.GradScaler", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
npabon/ProDy
[ "390322d9b7688809f91656bc1cadfdb66cd0a9b3" ]
[ "lib/prody/atomic/atom.py" ]
[ "# -*- coding: utf-8 -*-\n# ProDy: A Python Package for Protein Dynamics Analysis\n# \n# Copyright (C) 2010-2012 Ahmet Bakan\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n\n\"\"\"This module defines classes to handle individual atoms.\"\"\"\n\n__author__ = 'Ahmet Bakan'\n__copyright__ = 'Copyright (C) 2010-2012 Ahmet Bakan'\n\nimport numpy as np\n\nfrom . import flags\nfrom .fields import ATOMIC_FIELDS, READONLY\nfrom .fields import wrapGetMethod, wrapSetMethod\nfrom .pointer import AtomPointer\nfrom .bond import Bond\n\n__all__ = ['Atom']\n\n\nclass Atom(AtomPointer):\n \n \"\"\"A class for handling individual atoms in an :class:`.AtomGroup`.\"\"\"\n \n __slots__ = ['_ag', '_acsi', '_index']\n \n def __init__(self, ag, index, acsi):\n AtomPointer.__init__(self, ag, acsi)\n self._index = int(index)\n \n def __repr__(self):\n\n n_csets = self._ag.numCoordsets()\n if n_csets == 1:\n return '<Atom: {0} from {1} (index {2})>'.format(\n self.getName(), self._ag.getTitle(), self._index)\n elif n_csets > 1:\n return ('<Atom: {0} from {1} (index {2}; active #{3} of '\n '{4} coordsets)>').format(self.getName(), \n self._ag.getTitle(), self._index, self.getACSIndex(), \n n_csets)\n else:\n return ('<Atom: {0} from {1} (index {2}; no coordinates)>'\n ).format(self.getName(), self._ag.getTitle(), self._index)\n\n def __str__(self):\n\n return 'Atom {0} (index {1})'.format(self.getName(), self._index)\n\n def __len__(self):\n \n return 1\n \n def __int__(self):\n \n return self._index\n \n def numAtoms(self, flag=None):\n \"\"\"Return number of atoms, or number of atoms with given *flag*.\"\"\"\n \n return len(self._getSubset(flag)) if flag else 1\n \n def getIndex(self):\n \"\"\"Return index of the atom.\"\"\"\n \n return self._index\n \n def getIndices(self):\n \"\"\"Return index of the atom in an :class:`numpy.ndarray`.\"\"\"\n \n return np.array([self._index])\n \n _getIndices = getIndices\n \n def iterAtoms(self):\n \"\"\"Yield atoms.\"\"\"\n\n yield Atom(ag=self._ag, index=self._index, acsi=self.getACSIndex())\n\n __iter__ = iterAtoms\n \n def getCoords(self):\n \"\"\"Return a copy of coordinates of the atom from the active coordinate \n set.\"\"\"\n \n if self._ag._coords is not None:\n return self._ag._coords[self.getACSIndex(), self._index].copy()\n \n def _getCoords(self):\n \"\"\"Return a view of coordinates of the atom from the active coordinate \n set.\"\"\"\n \n if self._ag._coords is not None:\n return self._ag._coords[self.getACSIndex(), self._index]\n \n def setCoords(self, coords):\n \"\"\"Set coordinates of the atom in the active coordinate set.\"\"\"\n \n acsi = self.getACSIndex()\n self._ag._coords[acsi, self._index] = coords\n self._ag._setTimeStamp(acsi)\n \n def getCoordsets(self, indices=None):\n \"\"\"Return a copy of coordinate set(s) at given *indices*.\"\"\"\n \n if self._ag._coords is None:\n return None\n \n if indices is None:\n return self._ag._coords[:, self._index].copy()\n 
\n if isinstance(indices, (int, slice)):\n return self._ag._coords[indices, self._index].copy()\n \n if isinstance(indices, (list, np.ndarray)):\n return self._ag._coords[indices, self._index]\n \n raise IndexError('indices must be an integer, a list/array of '\n 'integers, a slice, or None')\n \n def _getCoordsets(self, indices=None): \n \"\"\"Return a view of coordinate set(s) at given *indices*.\"\"\"\n \n if self._ag._coords is None:\n return None\n \n if indices is None:\n indices = slice(None)\n\n return self._ag._coords[indices, self._index]\n\n def iterCoordsets(self):\n \"\"\"Yield copies of coordinate sets.\"\"\"\n \n for i in range(self.numCoordsets()):\n yield self._ag._coords[i, self._index].copy()\n\n def _iterCoordsets(self):\n \"\"\"Yield views of coordinate sets.\"\"\"\n \n for i in range(self.numCoordsets()):\n yield self._ag._coords[i, self._index]\n \n def getData(self, label):\n \"\"\"Return a copy of data associated with *label*, if it is present.\"\"\"\n \n try:\n data = self._ag._getData(label)\n except KeyError:\n pass\n else:\n if data.ndim > 1:\n return data[self._index]\n else:\n return data[self._index].copy()\n \n _getData = getData\n \n def setData(self, label, data):\n \"\"\"Update *data* associated with *label*.\n \n :raise AttributeError: when *label* is not in use or read-only\"\"\"\n \n if label in READONLY:\n raise AttributeError('{0} is read-only'.format(repr(label)))\n if label in ATOMIC_FIELDS:\n getattr(self, 'set' + ATOMIC_FIELDS[label].meth)(data)\n else:\n try:\n self._ag._data[label][self._index] = data \n except KeyError:\n raise AttributeError('data with label {0} must be set for'\n ' AtomGroup first'.format(repr(label)))\n \n def getFlag(self, label):\n \"\"\"Return atom flag.\"\"\"\n \n return self._ag._getFlags(label)[self._index]\n \n def setFlag(self, label, value):\n \"\"\"Update flag associated with *label*.\n \n :raise AttributeError: when *label* is not in use or read-only\"\"\"\n \n if label in flags.PLANTERS:\n raise AttributeError('flag {0} cannot be changed by user'\n .format(repr(label)))\n flags = self._ag._getFlags(label)\n if flags is None:\n raise AttributeError('flags with label {0} must be set for '\n 'AtomGroup first'.format(repr(label)))\n flags[self._index] = value\n \n def getSelstr(self):\n \"\"\"Return selection string that will select this atom.\"\"\"\n \n return 'index {0}'.format(self._index)\n\n def numBonds(self):\n \"\"\"Return number of bonds formed by this atom. Bonds must be set first\n using :meth:`.AtomGroup.setBonds`.\"\"\"\n \n numbonds = self._ag._data.get('numbonds')\n if numbonds is not None:\n return numbonds[self._index]\n \n def iterBonds(self):\n \"\"\"Yield bonds formed by the atom. Use :meth:`setBonds` for setting\n bonds.\"\"\"\n \n ag = self._ag\n acsi = self.getACSIndex()\n for bond in self._iterBonds():\n yield Bond(ag, bond, acsi) \n\n def _iterBonds(self):\n \"\"\"Yield pairs of bonded atom indices.\"\"\"\n\n ag = self._ag\n if ag._bmap is None:\n raise ValueError('bonds are not set, use `AtomGroup.setBonds`')\n \n this = self._index\n for other in ag._bmap[this]:\n if other == -1:\n break\n yield this, other \n \n def iterBonded(self):\n \"\"\"Yield bonded atoms. 
Use :meth:`setBonds` for setting bonds.\"\"\"\n \n ag = self._ag\n if ag._bmap is None:\n raise ValueError('bonds are not set, use `AtomGroup.setBonds`')\n \n acsi = self.getACSIndex()\n this = self._index\n for other in self._ag._bmap[this]:\n if other == -1:\n break\n yield Atom(ag, other, acsi)\n\n\nfor fname, field in ATOMIC_FIELDS.items():\n \n if field.private:\n continue\n \n meth = field.meth\n getMeth = 'get' + meth\n setMeth = 'set' + meth\n # Define public method for retrieving a copy of data array\n def getData(self, meth=field.meth_pl, call=field.call):\n data = getattr(self._ag, '_get' + meth)()\n if data is not None:\n return data[self._index] \n getData = wrapGetMethod(getData)\n getData.__name__ = getMeth\n getData.__doc__ = field.getDocstr('get', False)\n setattr(Atom, getMeth, getData)\n setattr(Atom, '_' + getMeth, getData)\n \n if field.readonly:\n continue\n \n # Define public method for setting values in data array\n def setData(self, value, var=fname, none=field.none):\n array = self._ag._data[var]\n if array is None:\n raise AttributeError('attribute of the AtomGroup is '\n 'not set')\n array[self._index] = value\n if none: self._ag._none(none)\n setData = wrapSetMethod(setData)\n setData.__name__ = setMeth \n setData.__doc__ = field.getDocstr('set', False)\n setattr(Atom, setMeth, setData)\n\ndel getData\ndel setData\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
em3ndez/gretel-synthetics
[ "7d9f433a741469860c6ec3aadf76da02036671c4" ]
[ "src/gretel_synthetics/batch.py" ]
[ "\"\"\"\nThis module allows automatic splitting of a DataFrame\ninto smaller DataFrames (by clusters of columns) and doing\nmodel training and text generation on each sub-DF independently.\n\nThen we can concat each sub-DF back into one final synthetic dataset.\n\nFor example usage, please see our Jupyter Notebook.\n\"\"\"\nimport abc\nimport glob\nimport gzip\nimport io\nimport json\nimport logging\nimport shutil\nimport tempfile\nimport threading\nimport time\n\nfrom copy import deepcopy\nfrom dataclasses import dataclass, field\nfrom itertools import zip_longest\nfrom math import ceil\nfrom pathlib import Path\nfrom typing import Callable, Dict\nfrom typing import Iterator as IteratorType\nfrom typing import List, Optional, Type, Union\n\nimport cloudpickle\nimport gretel_synthetics.const as const\nimport numpy as np\nimport pandas as pd\n\nfrom gretel_synthetics.config import (\n BaseConfig,\n config_from_model_dir,\n CONFIG_MAP,\n LocalConfig,\n)\nfrom gretel_synthetics.errors import TooManyInvalidError\nfrom gretel_synthetics.generate import generate_text, GenText, SeedingGenerator\nfrom gretel_synthetics.tokenizers import BaseTokenizerTrainer\nfrom gretel_synthetics.train import train\nfrom tqdm.auto import tqdm\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nMAX_INVALID = 1000\nBATCH_SIZE = 15\nFIELD_DELIM = \"field_delimiter\"\nGEN_LINES = \"gen_lines\"\nREAD = \"read\"\nWRITE = \"write\"\nHEADER_FILE = \"headers.json\"\nORIG_HEADERS = \"original_headers.json\"\nCHECKPOINT_DIR = \"checkpoint_dir\"\nCONFIG_FILE = \"model_params.json\"\nTRAIN_FILE = \"train.csv\"\nPATH_HOLDER = \"___path_holder___\"\nFILE = \"file\"\nMEMORY = \"memory\"\n\n\n@dataclass\nclass GenerationSummary:\n \"\"\"A class to capture the summary data after synthetic data is generated.\"\"\"\n\n valid_lines: int = 0\n invalid_lines: int = 0\n is_valid: bool = False\n\n\nclass _BatchEpochCallback:\n \"\"\"\n Wrapper class to take a user supplied callback and inject the batch number. The batch number\n is then available in the EpochState object when it is supplied to the callback.\n \"\"\"\n\n def __init__(self, user_callback: callable, batch_number: int):\n self._batch_number = batch_number\n self._user_callback = user_callback\n\n def callback(self, epoch_state):\n epoch_state.batch = self._batch_number\n self._user_callback(epoch_state)\n\n\n@dataclass\nclass Batch:\n \"\"\"A representation of a synthetic data workflow. It should not be used\n directly. This object is created automatically by the primary batch handler,\n such as ``DataFrameBatch``. 
This class holds all of the necessary information\n for training, data generation and DataFrame re-assembly.\n \"\"\"\n\n checkpoint_dir: str\n input_data_path: str\n headers: List[str]\n config: LocalConfig\n gen_data_count: int = 0\n\n training_df: Type[pd.DataFrame] = field(default_factory=lambda: None, init=False)\n gen_data_stream: io.StringIO = field(default_factory=io.StringIO, init=False)\n gen_data_invalid: List[GenText] = field(default_factory=list, init=False)\n validator: Callable = field(default_factory=lambda: None, init=False)\n\n def __post_init__(self):\n self.reset_gen_data()\n\n @property\n def synthetic_df(self) -> pd.DataFrame:\n \"\"\"Get a DataFrame constructed from the generated lines\"\"\"\n if not self.gen_data_stream.getvalue(): # pragma: no cover\n return pd.DataFrame()\n self.gen_data_stream.seek(0)\n return pd.read_csv(self.gen_data_stream, sep=self.config.field_delimiter)\n\n def set_validator(self, fn: Callable, save=True):\n \"\"\"Assign a validation callable to this batch. Optionally\n pickling and saving the validator for loading later\n \"\"\"\n self.validator = fn\n if save:\n p = Path(self.checkpoint_dir) / \"validator.p.gz\"\n with gzip.open(p, \"w\") as fout:\n fout.write(cloudpickle.dumps(fn))\n\n def load_validator_from_file(self):\n \"\"\"Load a saved validation object if it exists\"\"\"\n p = Path(self.checkpoint_dir) / \"validator.p.gz\"\n if p.exists():\n with gzip.open(p, \"r\") as fin:\n self.validator = cloudpickle.loads(fin.read())\n\n def reset_gen_data(self):\n \"\"\"Reset all objects that accumulate or track synthetic\n data generation\n \"\"\"\n self.gen_data_invalid = []\n self.gen_data_stream = io.StringIO()\n self.gen_data_stream.write(\n self.config.field_delimiter.join(self.headers) + \"\\n\"\n )\n self.gen_data_count = 0\n\n def add_valid_data(self, data: GenText):\n \"\"\"Take a ``gen_text`` object and add the generated\n line to the generated data stream\n \"\"\"\n self.gen_data_stream.write(data.text + \"\\n\")\n self.gen_data_count += 1\n\n def _basic_validator(self, raw_line: str): # pragma: no cover\n return len(raw_line.split(self.config.field_delimiter)) == len(self.headers)\n\n def get_validator(self):\n \"\"\"If a custom validator is set, we return that. 
Otherwise,\n we return the built-in validator, which simply checks if a generated\n line has the right number of values based on the number of headers\n for this batch.\n\n This at least makes sure the resulting DataFrame will be the right\n shape\n \"\"\"\n if self.validator is not None:\n return self.validator\n\n return self._basic_validator\n\n\ndef _create_batch_from_dir(batch_dir: str):\n path = Path(batch_dir)\n if not path.is_dir(): # pragma: no cover\n raise ValueError(\"%s is not a directory\" % batch_dir)\n\n if not (path / HEADER_FILE).is_file(): # pragma: no cover\n raise ValueError(\"missing headers\")\n headers = json.loads(open(path / HEADER_FILE).read())\n\n if not (path / CONFIG_FILE).is_file(): # pragma: no cover\n raise ValueError(\"missing model param file\")\n\n config = config_from_model_dir(batch_dir)\n\n # training path can be empty, since we will not need access\n # to training data simply for read-only data generation\n train_path = \"\"\n\n # Wrap the user supplied callback with a _BatchEpochCallback so we have the batch number too.\n if config.epoch_callback is not None:\n batch_count = int(Path(batch_dir).name.split(\"_\")[-1])\n config.epoch_callback = _BatchEpochCallback(\n config.epoch_callback, batch_count\n ).callback\n\n batch = Batch(\n checkpoint_dir=batch_dir,\n input_data_path=train_path,\n headers=headers,\n config=config,\n )\n\n batch.load_validator_from_file()\n\n return batch\n\n\ndef _crawl_checkpoint_for_batches(checkpoint_dir: str):\n logger.info(\"Looking for and loading batch data...\")\n matching_dirs = glob.glob(str(Path(checkpoint_dir) / \"batch_*\"))\n if not matching_dirs:\n raise ValueError(\n \"checkpoint directory does not exist or does not contain batch data\"\n )\n\n batches = []\n for batch_dir in matching_dirs:\n idx = int(Path(batch_dir).name.split(\"_\")[-1])\n batches.append((idx, _create_batch_from_dir(batch_dir)))\n\n logger.info(\"Found and loaded %d batches\", len(batches))\n return dict(sorted(batches, key=lambda b: b[0]))\n\n\ndef _build_batch_dirs(\n base_ckpoint: str, headers: List[List[str]], config: dict\n) -> dict:\n \"\"\"Return a mapping of batch number => ``Batch`` object\"\"\"\n out = {}\n logger.info(\"Creating directory structure for batch jobs...\")\n base_path = Path(config[\"checkpoint_dir\"])\n if not base_path.is_dir():\n base_path.mkdir()\n for i, headers in enumerate(headers):\n ckpoint = Path(base_ckpoint) / f\"batch_{i}\"\n if not ckpoint.is_dir():\n ckpoint.mkdir()\n checkpoint_dir = str(ckpoint)\n input_data_path = str(ckpoint / \"train.csv\")\n new_config = deepcopy(config)\n new_config.update(\n {\"checkpoint_dir\": checkpoint_dir, \"input_data_path\": input_data_path}\n )\n\n # Determine what BaseConfig subclass to use, if the config template does\n # not have a model type then we'll default to using a LocalConfig which gives\n # us backwards compat to 0.14.0\n config_class_str = new_config.get(const.MODEL_TYPE, None)\n if config_class_str is None:\n config_class = LocalConfig\n else:\n config_class = CONFIG_MAP[config_class_str]\n\n # Wrap the user supplied callback with a _BatchEpochCallback so we have the batch number too.\n if new_config.get(\"epoch_callback\") is not None:\n new_config[\"epoch_callback\"] = _BatchEpochCallback(\n new_config.get(\"epoch_callback\"), i\n ).callback\n\n out[i] = Batch(\n checkpoint_dir=checkpoint_dir,\n input_data_path=input_data_path,\n headers=headers,\n config=config_class(**new_config),\n )\n # try and load any previously saved validators\n 
out[i].load_validator_from_file()\n\n # we write the headers out as well incase we load these\n # batches back in via \"read\" mode only later...it's the only\n # way to get the header names back\n with open(ckpoint / HEADER_FILE, \"w\") as fout:\n fout.write(json.dumps(headers))\n\n return out\n\n\ndef _validate_batch_seed_values(\n batch: Batch, seed_values: Union[dict, List[dict]]\n) -> Union[str, List[str]]:\n \"\"\"Validate that seed values line up with the first N columns in a batch. Also construct\n an appropiate seed string based on the values in the batch\n \"\"\"\n ret_str = True\n if isinstance(seed_values, dict):\n seed_values = [seed_values]\n elif isinstance(seed_values, list):\n ret_str = False\n else:\n raise TypeError(\"seed_values should be a dict or list of dicts\")\n\n seed_strings = []\n\n for seed in seed_values:\n if len(seed) > len(batch.headers):\n raise RuntimeError(\n \"The number of seed fields is greater than the number of columns in the first batch\"\n )\n\n headers_to_seed = batch.headers[: len(seed)]\n tmp = []\n for header in headers_to_seed:\n value = seed.get(header)\n if value is None:\n raise RuntimeError(\n f\"The header: {header} is not in the seed values mapping\"\n ) # noqa\n tmp.append(str(value))\n\n seed_strings.append(\n batch.config.field_delimiter.join(tmp) + batch.config.field_delimiter\n )\n\n if ret_str:\n return seed_strings[0]\n else:\n return seed_strings\n\n\nclass _BufferedRecords(abc.ABC):\n \"\"\"Base class for all buffers used when\n generating records\n \"\"\"\n\n @abc.abstractmethod\n def add(self, record: dict):\n ...\n\n @abc.abstractmethod\n def get_records(self):\n ...\n\n def cleanup(self):\n pass\n\n\nclass _BufferedDicts(_BufferedRecords):\n\n _records: List[dict]\n\n def __init__(self):\n self._records = []\n\n def add(self, record: dict):\n self._records.append(record)\n\n def get_records(self):\n return self._records\n\n\nclass _BufferedDataFrame(_BufferedRecords):\n \"\"\"Buffer dictionaries into a memory or file, then\n load it as a DataFrame and set the column order\n based on the provided list. 
This allows\n datatypes to be inferred as if the values were\n being read from a CSV on disk.\n\n NOTE: The cleanup() method must be called when done\n with this class.\n \"\"\"\n\n def __init__(self, delim: str, columns: List[str], method: str = FILE):\n self.delim = delim\n self.columns = columns\n self.headers_set = False\n self.method = method\n\n # Create our actual buffer file-like object\n if self.method == FILE:\n self.buffer = tempfile.TemporaryFile(mode=\"w+\")\n elif self.method == MEMORY:\n self.buffer = io.StringIO()\n else:\n raise ValueError(\"Invalid method\")\n\n def add(self, record: dict):\n # write the columns names into the buffer, we\n # use the first dict to specify the order and\n # assume subsequent dicts have the same order\n if not self.headers_set:\n _columns = self.delim.join(record.keys())\n self.buffer.write(_columns + \"\\n\")\n self.headers_set = True\n _row = self.delim.join(record.values())\n self.buffer.write(_row + \"\\n\")\n\n @property\n def df(self) -> pd.DataFrame:\n self.buffer.seek(0)\n return pd.read_csv(self.buffer, sep=self.delim)[self.columns]\n\n def get_records(self) -> pd.DataFrame:\n return self.df\n\n def cleanup(self):\n if self.method == FILE:\n self.buffer.close()\n\n\n@dataclass\nclass GenerationProgress:\n \"\"\"\n This class should not have to be used directly.\n\n It is used to communicate the current progress of record generation.\n\n When a callback function is passed to the ``RecordFactory.generate_all()`` method,\n each time the callback is called an instance of this class will be passed\n as the single argument::\n\n def my_callback(data: GenerationProgress):\n ...\n\n factory: RecordFactory\n df = factory.generate_all(output=\"df\", callback=my_callback)\n\n This class is used to periodically communicate progress of generation to the user,\n through a callback that can be passed to ``RecordFactory.generate_all()`` method.\n \"\"\"\n\n current_valid_count: int = 0\n \"\"\"The number of valid lines/records that\n were generated so far.\n \"\"\"\n\n current_invalid_count: int = 0\n \"\"\"The number of invalid lines/records that\n were generated so far.\n \"\"\"\n\n new_valid_count: int = 0\n \"\"\"The number of new valid lines/records that\n were generated since the last progress callback.\n \"\"\"\n\n new_invalid_count: int = 0\n \"\"\"The number of new valid lines/records that\n were generated since the last progress callback.\n \"\"\"\n\n completion_percent: float = 0.0\n \"\"\"The percentage of valid lines/records that have been generated.\"\"\"\n\n timestamp: float = field(default_factory=time.time)\n \"\"\"The timestamp from when the information in this object has been captured.\"\"\"\n\n\nclass _GenerationCallback:\n \"\"\"\n Wrapper around a callback function that is sending progress updates only once\n per configured time period (``update_interval``).\n\n Args:\n callback_fn: Callback function to be invoked with current progress.\n update_interval: Number of seconds to wait between sending progress update.\n \"\"\"\n\n def __init__(self, callback_fn: callable, update_interval: int = 30):\n self._callback_fn = callback_fn\n self._update_interval = update_interval\n\n self._last_update_time = int(time.monotonic())\n self._last_progress = GenerationProgress()\n\n def update_progress(\n self,\n num_lines: int,\n valid_count: int,\n invalid_count: int,\n *,\n force_update=False,\n ):\n\n \"\"\"\n Method that's being called from the generator with a progress update.\n\n Args:\n num_lines: Total number of lines to be 
generated.\n valid_count: Number of valid lines that were generated so far.\n invalid_count: Number of invalid lines that were generated so far.\n final_update:\n Is this the final update? It is ``True`` when sending last update, after the\n whole generation was complete.\n \"\"\"\n now = int(time.monotonic())\n\n if now - self._last_update_time >= self._update_interval or force_update:\n current_progress = GenerationProgress(\n current_valid_count=valid_count,\n current_invalid_count=invalid_count,\n new_valid_count=valid_count - self._last_progress.current_valid_count,\n new_invalid_count=invalid_count\n - self._last_progress.current_invalid_count,\n completion_percent=0\n if num_lines == 0\n else round(valid_count / num_lines * 100, 2),\n )\n\n self._callback_fn(current_progress)\n self._last_update_time = now\n self._last_progress = current_progress\n\n\n@dataclass\nclass _FactoryCounter:\n num_lines: int = 0\n \"\"\"The target number of lines to generate when\n iterating or generating all records.\n \"\"\"\n\n max_invalid: int = MAX_INVALID\n \"\"\"The number of max invalid lines to tolerate before\n stopping generation and raising a ``RunTimeError.``\n \"\"\"\n\n valid_count: int = 0\n \"\"\"The number of valid records / lines that have been generated\n \"\"\"\n\n invalid_count: int = 0\n \"\"\"The number of invalid records / lines that were generated\n \"\"\"\n\n\ndef _threading_generation_callback(\n counter: _FactoryCounter, callback: _GenerationCallback, event: threading.Event\n):\n while not event.is_set():\n try:\n callback.update_progress(\n counter.num_lines, counter.valid_count, counter.invalid_count\n )\n except Exception:\n event.set()\n break\n time.sleep(1)\n\n\nclass RecordFactory:\n \"\"\"A stateful factory that can be used to generate and validate entire\n records, regardless of the number of underlying header clusters that were\n used to build multiple sub-models.\n\n Instances of this class should be created by calling the appropiate method\n of the ``DataFrameBatch`` instance. This class should not have to\n be used directly. You should be able to create an instance like so::\n\n factory = batcher.create_record_factory(num_lines=50)\n\n The class is init'd with default capacity and limits as specified\n by the ``num_lines`` and ``max_invalid`` attributes. At any time,\n you can inspect the state of the instance by doing::\n\n factory.summary\n\n The factory instance can be used one of two ways: buffered or unbuffered.\n\n For unbuffered mode, the entire instance can be used as an iterator to\n create synthetic records. Each record will be a dictionary.\n\n NOTE:\n All values in the generated dictionaries will be strings.\n\n The ``valid_count`` and ``invalid_count`` counters will update as\n records are generated.\n\n When creating the record factory, you may also provide an entire\n record validator::\n\n def validator(rec: dict):\n ...\n\n factory = batcher.create_record_factory(num_lines=50, validator=validator)\n\n Each generated record dict will be passed to the validator. This validator may either\n return False or raise an exception to mark a record as invalid.\n\n At any point, you may reset the state of the factory by calling::\n\n factory.reset()\n\n This will reset all counters and allow you to keep generating records.\n\n Finally, you can generate records in buffered mode, where generated records\n will be buffered in memory and returned as one collection. 
By default, a list\n of dicts will be returned::\n\n factory.generate_all()\n\n You may request the records to be returned as a DataFrame. The dtypes will\n be inferred as if you were reading the data from a CSV::\n\n factory.generate_all(output=\"df\")\n\n NOTE:\n When using ``generate_all``, the factory states will be reset automatically.\n \"\"\"\n\n validator: Callable\n \"\"\"An optional callable that will receive a fully constructed record for one\n final validation before returning or yielding a single record. Records that\n do not pass this validation will also increment the ``invalid_count.``\n \"\"\"\n\n _batches: Dict[int, Batch]\n _header_list: List[str]\n _seed_fields: Union[str, List[str]]\n _record_generator: IteratorType[dict]\n _delimiter: str\n _parallelism: int\n _counter = _FactoryCounter\n _invalid_cache_size: int\n _thread_event: threading.Event = None\n\n invalid_cache: List[dict]\n\n def __init__(\n self,\n *,\n num_lines: int,\n batches: dict,\n header_list: list,\n delimiter: str,\n seed_fields: Union[dict, list] = None,\n max_invalid=MAX_INVALID,\n validator: Optional[Callable] = None,\n parallelism: int = 4,\n invalid_cache_size: int = 100,\n ):\n self._counter = _FactoryCounter()\n self._counter.num_lines = num_lines\n self.max_invalid = max_invalid\n self._batches = batches\n self._header_list = header_list\n self._seed_fields = seed_fields\n self._delimiter = delimiter\n self._parallelism = parallelism\n self.validator = validator\n self._invalid_cache_size = invalid_cache_size\n self.reset()\n\n if self._seed_fields is not None:\n self._seed_fields = _validate_batch_seed_values(\n self._batches[0], self._seed_fields\n )\n\n if isinstance(self._seed_fields, list):\n logger.info(\n \"Adjusting num_lines and parallelism because seed_fields is a list, will only target %d lines\",\n len(self._seed_fields),\n ) # noqa\n self._parallelism = 1\n self._counter.num_lines = len(self._seed_fields)\n\n def _cache_invalid(self, line: GenText):\n self.invalid_cache.append(line.as_dict())\n self.invalid_cache = self.invalid_cache[: self._invalid_cache_size]\n\n def _get_record(self) -> IteratorType[dict]:\n # our actual batch line generators\n generators = []\n\n # if we have a list of seed fields, we do special\n # handling to create the proper generator\n seed_generator = None # assume no seeds to start\n if isinstance(self._seed_fields, list):\n seed_generator = SeedingGenerator(\n self._batches[0].config,\n seed_list=self._seed_fields,\n line_validator=self._batches[0].get_validator(),\n max_invalid=self.max_invalid * 10000,\n )\n generators.append((self._batches[0], seed_generator))\n\n for idx, batch in self._batches.items():\n start_string = None\n if idx == 0 and seed_generator:\n # We've already added the first batch's generator to the list\n # so we just continue on to the next one\n continue\n if idx == 0:\n # In the event we have seeds that aren't a list, (i.e. static seeds)\n start_string = self._seed_fields\n generators.append(\n (\n batch,\n # We seed the low level API with much higher limits on\n # valid / invalid generation because we will enforce\n # those limits in this high level instance.\n generate_text(\n batch.config,\n line_validator=batch.get_validator(),\n max_invalid=self.max_invalid * 10000,\n num_lines=self._counter.num_lines * 10000,\n start_string=start_string,\n parallelism=self._parallelism,\n ),\n )\n )\n\n # At this point, we've created our list of generators. 
Below here\n # is what gets run on every next() call, which tries to construct\n # a full record from all the underlying batches.\n\n # keep looping as long as our target line count is less than\n # our total line count\n while self._counter.valid_count < self._counter.num_lines:\n # loop over each batch line generater and attempt\n # to construct a full line, we'll only count a\n # full line once we get through each generator\n\n # if we are using a watchdog thread to monitor generation\n # and it throws an exception, a threading event will be set\n # that signals generation should stop\n if self._thread_event and self._thread_event.is_set():\n break\n\n if self._counter.invalid_count >= self.max_invalid:\n raise RuntimeError(\"Invalid record count exceeded during generation\")\n\n seed_cache = None\n if seed_generator:\n # If we're using a seeding generator (from a list of seeds)\n # we cache the next seed we are about to use to generate\n # the next record.\n seed_cache = seed_generator.settings.start_string[0]\n\n record = {}\n batch: Batch\n for batch, gen in generators:\n while True:\n\n # see above usage for watchdog thread exception handling\n if self._thread_event and self._thread_event.is_set():\n break\n\n line = next(gen) # type: GenText\n if line.valid is False:\n self._cache_invalid(line)\n self._counter.invalid_count += 1\n if self._counter.invalid_count > self.max_invalid:\n raise RuntimeError(\n \"Invalid record count exceeded during generation\"\n )\n continue\n partial_rec = dict(\n zip_longest(batch.headers, line.values_as_list(), fillvalue=\"\")\n )\n record.update(partial_rec)\n break\n\n # Do a final validation, if configured, on the fully constructed\n # record, if this validation fails, we'll still increment our\n # invalid count.\n\n valid = True # assume we have a valid record\n\n if self.validator is not None:\n try:\n _valid = self.validator(record)\n if _valid is False:\n valid = False\n except Exception:\n valid = False\n\n if not valid:\n self._counter.invalid_count += 1\n if seed_cache:\n seed_generator.settings.start_string.insert(0, seed_cache)\n continue # back to the while start\n\n self._counter.valid_count += 1\n yield record\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self._record_generator)\n\n def reset(self):\n self._counter.valid_count = 0\n self._counter.invalid_count = 0\n self._record_generator = self._get_record()\n self._thread_event = None\n self.invalid_cache = []\n\n def generate_all(\n self,\n output: Optional[str] = None,\n callback: Optional[callable] = None,\n callback_interval: int = 30,\n callback_threading: bool = False,\n ):\n \"\"\"Attempt to generate the full number of records that was set when\n creating the ``RecordFactory.`` This method will create a buffer\n that holds all records and then returns the the buffer once\n generation is complete.\n\n Args:\n output: How the records should be returned. If ``None``, which is the\n default, then a list of record dicts will be returned. Other options\n that are supported are: 'df' for a DataFrame.\n callback: An optional callable that will periodically be called with\n a ``GenerationProgress`` instance as the single argument while\n records are being generated.\n callback_interval: If using a callback, the minimum number of seconds that\n should occur between callbacks.\n callback_threading: If enabled, a watchdog thread will be used to execute\n the callback. This will ensure that the callback is called regardless\n of invalid or valid counts. 
If callback threading is disabled, the callback\n will only be called after valid records are generated. If the callback\n raises and exception, then a threading event will be set which will trigger\n the stopping of generation.\n\n Returns:\n Generated records in an object that is dependent on the ``output`` param. By default\n this will be a list of dicts.\n \"\"\"\n progress_callback = None\n if callback:\n progress_callback = _GenerationCallback(callback, callback_interval)\n\n self.reset()\n if output is not None and output not in (\"df\",):\n raise ValueError(\"invalid output type\")\n\n _iter = tqdm(self._record_generator, total=self._counter.num_lines)\n\n buffer = None # type: _BufferedRecords\n\n if output == \"df\":\n buffer = _BufferedDataFrame(self._delimiter, self._header_list)\n\n if not buffer:\n buffer = _BufferedDicts()\n\n callback_thread = None\n if callback_threading:\n if not progress_callback:\n raise ValueError(\n \"Cannot use callback_threading without a progress callback\"\n )\n self._thread_event = threading.Event()\n callback_thread = threading.Thread(\n target=_threading_generation_callback,\n args=(self._counter, progress_callback, self._thread_event),\n )\n callback_thread.start()\n\n try:\n for rec in _iter:\n # NOTE: This iterator will block while no records are being\n # succesfully generated. If callbacks need to occur in this\n # situation, ensure the callback threading option is enabled\n #\n # If threading is enabled, and the callback encounters an exception,\n # a threading event will be set and the generator will break out of its\n # loop and generation will cease.\n buffer.add(rec)\n\n if progress_callback and not callback_threading:\n progress_callback.update_progress(\n self._counter.num_lines,\n self._counter.valid_count,\n self._counter.invalid_count,\n )\n\n except (RuntimeError, StopIteration) as err:\n logger.warning(\n f\"Runtime error on iteration, returning current buffer, {str(err)}\"\n )\n finally:\n if callback_threading:\n self._thread_event.set()\n callback_thread.join()\n\n # send final progress update\n if progress_callback:\n progress_callback.update_progress(\n self._counter.num_lines,\n self._counter.valid_count,\n self._counter.invalid_count,\n force_update=True,\n )\n\n out_records = buffer.get_records()\n buffer.cleanup()\n return out_records\n\n @property\n def summary(self):\n return {\n \"num_lines\": self._counter.num_lines,\n \"max_invalid\": self._counter.max_invalid,\n \"valid_count\": self._counter.valid_count,\n \"invalid_count\": self._counter.invalid_count,\n }\n\n\nclass DataFrameBatch:\n \"\"\"Create a multi-batch trainer / generator. When created, the directory\n structure to store models and training data will automatically be created.\n The directory structure will be created under the \"checkpoint_dir\" location\n provided in the ``config`` template. 
There will be one directory per batch,\n where each directory will be called \"batch_N\" where N is the batch number, starting\n from 0.\n\n Training and generating can happen per-batch or we can loop over all batches to\n do both train / generation functions.\n\n Example:\n When creating this object, you must explicitly create the training data\n from the input DataFrame before training models::\n\n my_batch = DataFrameBatch(df=my_df, config=my_config)\n my_batch.create_training_data()\n my_batch.train_all_batches()\n\n Args:\n df: The input, source DataFrame\n batch_size: If ``batch_headers`` is not provided we automatically break up\n the number of columns in the source DataFrame into batches of N columns.\n batch_headers: A list of lists of strings can be provided which will control\n the number of batches. The number of inner lists is the number of batches, and each\n inner list represents the columns that belong to that batch\n config: A template training config to use, this will be used as kwargs for each Batch's\n synthetic configuration. This may also be a sucblass of ``BaseConfig``. If this is used,\n you can set the ``input_data_path`` param to the constant ``PATH_HOLDER`` as it does not\n really matter\n tokenizer_class: An optional ``BaseTokenizerTrainer`` subclass. If not provided the default\n tokenizer will be used for the underlying ML engine.\n\n NOTE:\n When providing a config, the source of training data is not necessary, only the\n ``checkpoint_dir`` is needed. Each batch will control its input training data path\n after it creates the training dataset.\n \"\"\"\n\n batches: Dict[int, Batch]\n \"\"\"A mapping of ``Batch`` objects to a batch number. The batch number (key)\n increments from 0..N where N is the number of batches being used.\n \"\"\"\n\n batch_size: int\n \"\"\"The max number of columns allowed for a single DF batch\n \"\"\"\n\n # NOTE: Allowing a dict is for backwards compat\n config: Union[dict, BaseConfig]\n \"\"\"The template config that will be used for all batches. If a dict\n is provided we default to a TensorFlowConfig.\n \"\"\"\n\n mode: Union[WRITE, READ]\n\n master_header_list: List[str]\n \"\"\"During training, this is the original column order. When reading from\n disk, we concatenate all headers from all batches together. This list is not\n guaranteed to preserve the original header order.\n \"\"\"\n\n original_headers: List[str]\n \"\"\"Stores the original header list / order from the original training data that was used.\n This is written out to the model directory during training and loaded back in when\n using read-only mode.\n \"\"\"\n\n def __init__(\n self,\n *,\n df: pd.DataFrame = None,\n batch_size: int = BATCH_SIZE,\n batch_headers: List[List[str]] = None,\n config: Union[dict, BaseConfig] = None,\n tokenizer: BaseTokenizerTrainer = None,\n mode: str = WRITE,\n checkpoint_dir: str = None,\n ):\n\n if mode not in (WRITE, READ): # pragma: no cover\n raise ValueError(\"mode must be read or write\")\n\n self.mode = mode\n\n # If the config was a subclass of BaseConfig, then we convert\n # it to a dict and utilize that dict as our template. 
We do this\n # because when we re-create the batches we want to utilize the\n # Config constructors to set some attrs for us\n if isinstance(config, BaseConfig):\n config = config.as_dict()\n\n self.tokenizer = tokenizer\n\n self.original_headers = None\n\n if self.mode == READ:\n if isinstance(config, dict):\n _ckpoint_dir = config.get(\"checkpoint_dir\")\n else:\n _ckpoint_dir = checkpoint_dir\n\n if _ckpoint_dir is None:\n raise ValueError(\"checkpoint_dir required for read mode\")\n else:\n self._read_checkpoint_dir = _ckpoint_dir\n\n if self.mode == WRITE:\n if not config:\n raise ValueError(\"config is required!\")\n\n checkpoint_path = Path(config[CHECKPOINT_DIR])\n overwrite = config.get(\"overwrite\", False)\n if (\n not overwrite\n and checkpoint_path.is_dir()\n and any(checkpoint_path.iterdir())\n ):\n raise RuntimeError(\n \"checkpoint_dir already exists and is non-empty, set overwrite on config or remove model directory!\"\n ) # noqa\n\n if overwrite and checkpoint_path.is_dir():\n shutil.rmtree(checkpoint_path)\n\n if not isinstance(df, pd.DataFrame):\n raise ValueError(\"df must be a DataFrame in write mode\")\n\n if FIELD_DELIM not in config:\n raise ValueError(\"field_delimiter must be in config\")\n\n if GEN_LINES not in config:\n config[GEN_LINES] = df.shape[0]\n\n self._source_df = df\n self.batch_size = batch_size\n self.config = config\n self._source_df.fillna(\"\", inplace=True)\n self.master_header_list = list(self._source_df.columns)\n\n if not batch_headers:\n self.batch_headers = self._create_header_batches()\n else: # pragma: no cover\n self.batch_headers = batch_headers\n\n self.batches = _build_batch_dirs(\n self.config[\"checkpoint_dir\"], self.batch_headers, self.config\n )\n\n # Preserve the original order of the DF headers\n self.original_headers = list(self._source_df)\n with open(Path(self.config[CHECKPOINT_DIR]) / ORIG_HEADERS, \"w\") as fout:\n fout.write(json.dumps(list(self.original_headers)))\n else:\n self.batches = _crawl_checkpoint_for_batches(self._read_checkpoint_dir)\n self.master_header_list = []\n for batch in self.batches.values():\n self.master_header_list.extend(batch.headers)\n\n try:\n self.original_headers = json.loads(\n open(Path(self._read_checkpoint_dir) / ORIG_HEADERS).read()\n )\n except FileNotFoundError:\n self.original_headers = None\n\n logger.info(\"Validating underlying models exist via generation test...\")\n try:\n self.generate_all_batch_lines(parallelism=1, num_lines=1)\n except Exception as err:\n raise RuntimeError(\n \"Error testing generation during model load\"\n ) from err\n\n def _create_header_batches(self):\n num_batches = ceil(len(self._source_df.columns) / self.batch_size)\n tmp = np.array_split(list(self._source_df.columns), num_batches)\n return [list(row) for row in tmp]\n\n def create_training_data(self):\n \"\"\"Split the original DataFrame into N smaller DataFrames. 
Each\n smaller DataFrame will have the same number of rows, but a subset\n of the columns from the original DataFrame.\n\n This method iterates over each ``Batch`` object and assigns\n a smaller training DataFrame to the ``training_df`` attribute\n of the object.\n\n Finally, a training CSV is written to disk in the specific\n batch directory\n \"\"\"\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n for i, batch in self.batches.items():\n logger.info(f\"Generating training DF and CSV for batch {i}\")\n out_df = self._source_df[batch.headers]\n batch.training_df = out_df.copy(deep=True)\n out_df.to_csv(\n batch.input_data_path,\n header=False,\n index=False,\n sep=self.config[FIELD_DELIM],\n )\n\n def train_batch(self, batch_idx: int):\n \"\"\"Train a model for a single batch. All model information will\n be written into that batch's directory.\n\n Args:\n batch_idx: The index of the batch, from the ``batches`` dictionary\n \"\"\"\n if self.tokenizer is not None:\n _tokenizer = deepcopy(self.tokenizer)\n _tokenizer.config = self.batches[batch_idx].config\n else:\n _tokenizer = None\n\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n try:\n train(self.batches[batch_idx].config, _tokenizer)\n except KeyError:\n raise ValueError(\"batch_idx is invalid\")\n\n def train_all_batches(self):\n \"\"\"Train a model for each batch.\"\"\"\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n for idx in self.batches.keys():\n self.train_batch(idx)\n\n def set_batch_validator(self, batch_idx: int, validator: Callable):\n \"\"\"Set a validator for a specific batch. If a validator is configured\n for a batch, each generated record from that batch will be sent\n to the validator.\n\n Args:\n batch_idx: The batch number .\n validator: A callable that should take exactly one argument,\n which will be the raw line generated from the ``generate_text``\n function.\n \"\"\"\n if self.mode == READ: # pragma: no cover\n raise RuntimeError(\"Method cannot be used in read-only mode\")\n if not callable(validator):\n raise ValueError(\"validator must be callable!\")\n try:\n self.batches[batch_idx].set_validator(validator)\n except KeyError:\n raise ValueError(\"invalid batch number!\")\n\n def generate_batch_lines(\n self,\n batch_idx: int,\n max_invalid=MAX_INVALID,\n raise_on_exceed_invalid: bool = False,\n num_lines: int = None,\n seed_fields: Union[dict, List[dict]] = None,\n parallelism: int = 0,\n ) -> GenerationSummary:\n \"\"\"Generate lines for a single batch. Lines generated are added\n to the underlying ``Batch`` object for each batch. The lines\n can be accessed after generation and re-assembled into a DataFrame.\n\n Args:\n batch_idx: The batch number\n max_invalid: The max number of invalid lines that can be generated, if\n this is exceeded, generation will stop\n raise_on_exceed_invalid: If true and if the number of lines generated exceeds the ``max_invalid``\n amount, we will re-raise the error thrown by the generation module which will interrupt\n the running process. Otherwise, we will not raise the caught exception and just return ``False``\n indicating that the batch failed to generate all lines.\n num_lines: The number of lines to generate, if ``None``, then we use the number from the\n batch's config\n seed_fields: A dictionary that maps field/column names to initial seed values for those columns. 
This seed\n will only apply to the first batch that gets trained and generated. Additionally, the fields provided\n in the mapping MUST exist at the front of the first batch.\n\n NOTE:\n This param may also be a list of dicts. If this is the case, then ``num_lines`` will automatically\n be set to the list length downstream, and a 1:1 ratio will be used for generating valid lines for\n each prefix.\n parallelism: The number of concurrent workers to use. ``1`` (the default) disables parallelization,\n while a non-positive value means \"number of CPUs + x\" (i.e., use ``0`` for using as many workers\n as there are CPUs). A floating-point value is interpreted as a fraction of the available CPUs,\n rounded down.\n \"\"\"\n try:\n batch = self.batches[batch_idx]\n except KeyError: # pragma: no cover\n raise ValueError(\"invalid batch index\")\n\n seed_string = None\n\n # If we are on batch 0 and we have seed values, we want to validate that\n # the seed values line up properly with the first N columns.\n if batch_idx == 0 and seed_fields is not None:\n seed_string = _validate_batch_seed_values(batch, seed_fields)\n\n batch: Batch\n batch.reset_gen_data()\n validator = batch.get_validator()\n if num_lines is None:\n num_lines = batch.config.gen_lines\n\n if isinstance(seed_fields, list):\n num_lines = len(seed_fields)\n\n t = tqdm(total=num_lines, desc=\"Valid record count \")\n t2 = tqdm(total=max_invalid, desc=\"Invalid record count \")\n line: GenText\n summary = GenerationSummary()\n try:\n for line in generate_text(\n batch.config,\n line_validator=validator,\n max_invalid=max_invalid,\n num_lines=num_lines,\n start_string=seed_string,\n parallelism=parallelism,\n ):\n if line.valid is None or line.valid is True:\n batch.add_valid_data(line)\n t.update(1)\n summary.valid_lines += 1\n else:\n t2.update(1)\n batch.gen_data_invalid.append(line)\n summary.invalid_lines += 1\n except TooManyInvalidError:\n if raise_on_exceed_invalid:\n raise\n else:\n return summary\n t.close()\n t2.close()\n summary.is_valid = batch.gen_data_count >= num_lines\n return summary\n\n def create_record_factory(\n self,\n *,\n num_lines: int,\n max_invalid: int = MAX_INVALID,\n validator: Callable = None,\n seed_fields: Union[dict, List[dict]] = None,\n parallellism: int = 4,\n **kwargs,\n ) -> RecordFactory:\n if validator is not None:\n if not callable(validator):\n raise ValueError(\"validator must be callable\")\n return RecordFactory(\n num_lines=num_lines,\n batches=self.batches,\n delimiter=self.batches[0].config.field_delimiter,\n header_list=self.original_headers or self.master_header_list,\n seed_fields=seed_fields,\n max_invalid=max_invalid,\n validator=validator,\n parallelism=parallellism,\n **kwargs,\n )\n\n def generate_all_batch_lines(\n self,\n max_invalid=MAX_INVALID,\n raise_on_failed_batch: bool = False,\n num_lines: int = None,\n seed_fields: Union[dict, List[dict]] = None,\n parallelism: int = 0,\n ) -> Dict[int, GenerationSummary]:\n \"\"\"Generate synthetic lines for all batches. Lines for each batch\n are added to the individual ``Batch`` objects. Once generateion is\n done, you may re-assemble the dataset into a DataFrame.\n\n Example::\n\n my_batch.generate_all_batch_lines()\n # Wait for all generation to complete\n synthetic_df = my_batch.batches_to_df()\n\n Args:\n max_invalid: The number of invalid lines, per batch. 
If this number\n is exceeded for any batch, generation will stop.\n raise_on_failed_batch: If True, then an exception will be raised if any single batch\n fails to generate the requested number of lines. If False, then the failed batch\n will be set to ``False`` in the result dictionary from this method.\n num_lines: The number of lines to create from each batch. If ``None`` then the value\n from the config template will be used.\n\n NOTE:\n Will be overridden / ignored if ``seed_fields`` is a list. Will be set to the len of the list.\n seed_fields: A dictionary that maps field/column names to initial seed values for those columns. This seed\n will only apply to the first batch that gets trained and generated. Additionally, the fields provided\n in the mapping MUST exist at the front of the first batch.\n\n NOTE:\n This param may also be a list of dicts. If this is the case, then ``num_lines`` will automatically\n be set to the list length downstream, and a 1:1 ratio will be used for generating valid lines for\n each prefix.\n parallelism: The number of concurrent workers to use. ``1`` (the default) disables parallelization,\n while a non-positive value means \"number of CPUs + x\" (i.e., use ``0`` for using as many workers\n as there are CPUs). A floating-point value is interpreted as a fraction of the available CPUs,\n rounded down.\n\n Returns:\n A dictionary of batch number to a dictionary that reports the number of valid, invalid lines and bool value\n that shows if each batch was able to generate the full number of requested lines::\n\n {\n 0: GenerationSummary(valid_lines=1000, invalid_lines=10, is_valid=True),\n 1: GenerationSummary(valid_lines=500, invalid_lines=5, is_valid=True)\n }\n \"\"\"\n batch_status = {}\n for idx in self.batches.keys():\n batch_status[idx] = self.generate_batch_lines(\n idx,\n max_invalid=max_invalid,\n raise_on_exceed_invalid=raise_on_failed_batch,\n num_lines=num_lines,\n seed_fields=seed_fields,\n parallelism=parallelism,\n )\n return batch_status\n\n def batch_to_df(self, batch_idx: int) -> pd.DataFrame: # pragma: no cover\n \"\"\"Extract a synthetic data DataFrame from a single batch.\n\n Args:\n batch_idx: The batch number\n\n Returns:\n A DataFrame with synthetic data\n \"\"\"\n try:\n return self.batches[batch_idx].synthetic_df\n except KeyError:\n raise ValueError(\"batch_idx is invalid!\")\n\n def batches_to_df(self) -> pd.DataFrame:\n \"\"\"Convert all batches to a single synthetic data DataFrame.\n\n Returns:\n A single DataFrame that is the concatenation of all the\n batch DataFrames.\n \"\"\"\n batch_iter = iter(self.batches.values())\n base_batch = next(batch_iter)\n accum_df = base_batch.synthetic_df\n\n for batch in batch_iter:\n accum_df = pd.concat([accum_df, batch.synthetic_df], axis=1)\n\n return accum_df[self.original_headers or self.master_header_list]\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
weiwei1115/models
[ "14c3209118b2cadcce9a8f66b760c9cddb3a02ad", "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "14c3209118b2cadcce9a8f66b760c9cddb3a02ad", "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3" ]
[ "PaddleNLP/examples/text_generation/vae-seq2seq/model.py", "PaddleNLP/examples/text_matching/simnet/utils.py", "PaddleCV/3d_vision/PointRCNN/train.py", "PaddleNLP/examples/language_model/elmo/base.py", "PaddleNLP/legacy/dialogue_domain_classification/run_classifier.py", "dygraph/similarity_net/run_classifier.py", "dygraph/slowfast/eval.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nimport paddle.nn.initializer as I\n\n\nclass CrossEntropyWithKL(nn.Layer):\n \"\"\"\n backward_loss = kl_loss * kl_weight + cross_entropy_loss\n \"\"\"\n\n def __init__(self, base_kl_weight, anneal_r):\n super(CrossEntropyWithKL, self).__init__()\n self.kl_weight = base_kl_weight\n self.anneal_r = anneal_r\n self.loss = 0.0\n self.kl_loss = 0.0\n self.rec_loss = 0.0\n\n def update_kl_weight(self):\n self.kl_weight = min(1.0, self.kl_weight + self.anneal_r)\n\n def forward(self, kl_loss, dec_output, trg_mask, label):\n self.update_kl_weight()\n self.kl_loss = kl_loss\n\n rec_loss = F.softmax_with_cross_entropy(\n logits=dec_output, label=label, soft_label=False)\n\n rec_loss = paddle.squeeze(rec_loss, axis=[2])\n rec_loss = rec_loss * trg_mask\n rec_loss = paddle.mean(rec_loss, axis=[0])\n rec_loss = paddle.sum(rec_loss)\n self.rec_loss = rec_loss\n\n self.loss = self.kl_loss * self.kl_weight + self.rec_loss\n return self.loss\n\n\nclass Perplexity(paddle.metric.Metric):\n def __init__(self, name='ppl', reset_freq=100, *args, **kwargs):\n self.cross_entropy = kwargs.pop('loss')\n super(Perplexity, self).__init__(*args, **kwargs)\n self._name = name\n self.total_ce = 0\n self.word_count = 0\n self.reset_freq = reset_freq\n self.batch_size = 0\n\n def update(self, kl_loss, dec_output, trg_mask, label, *args):\n # Perplexity is calculated using cross entropy\n self.batch_size = dec_output.shape[0]\n loss = self.cross_entropy.loss.numpy()\n self.total_ce += loss[0] * self.batch_size\n self.word_count += np.sum(trg_mask)\n\n def reset(self):\n self.total_ce = 0\n self.word_count = 0\n\n def accumulate(self):\n return np.exp(self.total_ce / self.word_count)\n\n def name(self):\n return self._name\n\n\nclass NegativeLogLoss(paddle.metric.Metric):\n def __init__(self, name='nll', reset_freq=100, *args, **kwargs):\n self.cross_entropy = kwargs.pop('loss')\n super(NegativeLogLoss, self).__init__(*args, **kwargs)\n self._name = name\n self.total_ce = 0\n self.batch_count = 0\n self.reset_freq = reset_freq\n self.batch_size = 0\n self.sample_count = 0\n\n def update(self, kl_loss, dec_output, trg_mask, label, *args):\n self.batch_size = dec_output.shape[0]\n loss = self.cross_entropy.loss.numpy()\n self.total_ce += loss[0] * self.batch_size\n self.sample_count += self.batch_size\n\n def reset(self):\n self.total_ce = 0\n self.sample_count = 0\n\n def accumulate(self):\n return (self.total_ce / self.sample_count)\n\n def name(self):\n return self._name\n\n\nclass TrainCallback(paddle.callbacks.ProgBarLogger):\n def __init__(self, ppl, nll, log_freq=200, verbose=2):\n super(TrainCallback, self).__init__(log_freq, verbose)\n self.ppl = ppl\n self.nll = nll\n\n def on_train_begin(self, logs=None):\n super(TrainCallback, self).on_train_begin(logs)\n self.train_metrics = [\"loss\", \"ppl\", \"nll\", \"kl 
weight\", \"kl loss\", \"rec loss\"]\n\n def on_epoch_begin(self, epoch=None, logs=None):\n super(TrainCallback, self).on_epoch_begin(epoch, logs)\n self.ppl.reset()\n self.nll.reset()\n\n def on_train_batch_end(self, step, logs=None):\n # loss and kl weight are not accumulated\n logs[\"kl weight\"] = self.ppl.cross_entropy.kl_weight\n logs[\"kl loss\"] = self.ppl.cross_entropy.kl_loss.numpy()[0]\n logs[\"rec loss\"] = self.ppl.cross_entropy.rec_loss.numpy()[0]\n super(TrainCallback, self).on_train_batch_end(step, logs)\n\n def on_eval_begin(self, logs=None):\n super(TrainCallback, self).on_eval_begin(logs)\n self.eval_metrics = [\"loss\", \"ppl\", \"nll\"]\n\n def on_eval_batch_end(self, step, logs=None):\n super(TrainCallback, self).on_eval_batch_end(step, logs)\n\n\nclass LSTMEncoder(nn.Layer):\n def __init__(self,\n vocab_size,\n embed_dim,\n hidden_size,\n num_layers,\n init_scale=0.1,\n enc_dropout=0.):\n super(LSTMEncoder, self).__init__()\n self.src_embedder = nn.Embedding(\n vocab_size,\n embed_dim,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n self.lstm = nn.LSTM(\n input_size=embed_dim,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=enc_dropout)\n if enc_dropout > 0.0:\n self.dropout = nn.Dropout(enc_dropout)\n else:\n self.dropout = None\n\n def forward(self, src, src_length):\n src_emb = self.src_embedder(src)\n\n if self.dropout:\n src_emb = self.dropout(src_emb)\n enc_output, enc_final_state = self.lstm(\n src_emb, sequence_length=src_length)\n if self.dropout:\n enc_output = self.dropout(enc_output)\n\n enc_final_state = [\n [h, c] for h, c in zip(enc_final_state[0], enc_final_state[1])\n ]\n return enc_output, enc_final_state\n\n\nclass LSTMDecoderCell(nn.Layer):\n def __init__(self,\n num_layers,\n embed_dim,\n hidden_size,\n latent_size,\n dropout=None):\n super(LSTMDecoderCell, self).__init__()\n self.dropout = dropout\n self.lstm_cells = nn.LayerList([\n nn.LSTMCell(\n input_size=embed_dim + latent_size, hidden_size=hidden_size)\n for i in range(num_layers)\n ])\n\n def forward(self, step_input, lstm_states, latent_z):\n new_lstm_states = []\n step_input = paddle.concat([step_input, latent_z], 1)\n for i, lstm_cell in enumerate(self.lstm_cells):\n out, new_lstm_state = lstm_cell(step_input, lstm_states[i])\n if self.dropout:\n step_input = self.dropout(out)\n else:\n step_input = out\n new_lstm_states.append(new_lstm_state)\n if self.dropout:\n step_input = self.dropout(step_input)\n out = step_input\n return out, new_lstm_states\n\n\nclass LSTMDecoder(nn.Layer):\n def __init__(self,\n vocab_size,\n embed_dim,\n hidden_size,\n latent_size,\n num_layers,\n init_scale=0.1,\n dec_dropout=0.):\n super(LSTMDecoder, self).__init__()\n self.num_layers = num_layers\n self.embed_dim = embed_dim\n self.hidden_size = hidden_size\n self.latent_size = latent_size\n self.trg_embedder = nn.Embedding(\n vocab_size,\n embed_dim,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n\n self.output_fc = nn.Linear(\n hidden_size,\n vocab_size,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n\n if dec_dropout > 0.0:\n self.dropout = nn.Dropout(dec_dropout)\n else:\n self.dropout = None\n\n self.lstm = nn.RNN(\n LSTMDecoderCell(self.num_layers, self.embed_dim, self.hidden_size,\n self.latent_size, self.dropout))\n\n def forward(self, trg, dec_initial_states, latent_z):\n trg_emb = self.trg_embedder(trg)\n if self.dropout:\n trg_emb = 
self.dropout(trg_emb)\n lstm_output, _ = self.lstm(\n inputs=trg_emb,\n initial_states=dec_initial_states,\n latent_z=latent_z)\n dec_output = self.output_fc(lstm_output)\n return dec_output\n\n\nclass VAESeq2SeqModel(nn.Layer):\n def __init__(self,\n embed_dim,\n hidden_size,\n latent_size,\n vocab_size,\n num_layers=1,\n init_scale=0.1,\n PAD_ID=0,\n enc_dropout=0.,\n dec_dropout=0.):\n super(VAESeq2SeqModel, self).__init__()\n self.PAD_ID = PAD_ID\n self.latent_size = latent_size\n self.vocab_size = vocab_size\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.encoder = LSTMEncoder(vocab_size, embed_dim, hidden_size,\n num_layers, init_scale, enc_dropout)\n self.decoder = LSTMDecoder(vocab_size, embed_dim, hidden_size,\n latent_size, num_layers, init_scale,\n dec_dropout)\n self.distributed_fc = nn.Linear(\n hidden_size * 2,\n latent_size * 2,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n self.fc = nn.Linear(\n latent_size,\n 2 * hidden_size * num_layers,\n weight_attr=paddle.ParamAttr(initializer=I.Uniform(\n low=-init_scale, high=init_scale)))\n\n def sampling(self, z_mean, z_log_var):\n \"\"\"\n Reparameterization trick \n \"\"\"\n # By default, random_normal has mean=0 and std=1.0\n epsilon = paddle.normal(shape=(z_mean.shape[0], self.latent_size))\n epsilon.stop_gradient = True\n return z_mean + paddle.exp(0.5 * z_log_var) * epsilon\n\n def build_distribution(self, enc_final_state=None):\n enc_hidden = [\n paddle.concat(\n state, axis=-1) for state in enc_final_state\n ]\n\n enc_hidden = paddle.concat(enc_hidden, axis=-1)\n z_mean_log_var = self.distributed_fc(enc_hidden)\n z_mean, z_log_var = paddle.split(z_mean_log_var, 2, -1)\n return z_mean, z_log_var\n\n def calc_kl_dvg(self, means, logvars):\n \"\"\"\n Compute the KL divergence between Gaussian distribution\n \"\"\"\n kl_cost = -0.5 * (\n logvars - paddle.square(means) - paddle.exp(logvars) + 1.0)\n kl_cost = paddle.mean(kl_cost, 0)\n\n return paddle.sum(kl_cost)\n\n def forward(self, src, src_length, trg, trg_length):\n # Encoder\n _, enc_final_state = self.encoder(src, src_length)\n\n # Build distribution\n z_mean, z_log_var = self.build_distribution(enc_final_state)\n\n # Decoder\n latent_z = self.sampling(z_mean, z_log_var)\n\n dec_first_hidden_cell = self.fc(latent_z)\n dec_first_hidden, dec_first_cell = paddle.split(\n dec_first_hidden_cell, 2, axis=-1)\n if self.num_layers > 1:\n dec_first_hidden = paddle.split(dec_first_hidden, self.num_layers)\n dec_first_cell = paddle.split(dec_first_cell, self.num_layers)\n else:\n dec_first_hidden = [dec_first_hidden]\n dec_first_cell = [dec_first_cell]\n dec_initial_states = [[h, c]\n for h, c in zip(dec_first_hidden, dec_first_cell)]\n\n dec_output = self.decoder(trg, dec_initial_states, latent_z)\n\n kl_loss = self.calc_kl_dvg(z_mean, z_log_var)\n trg_mask = (self.PAD_ID != trg).astype(paddle.get_default_dtype())\n return kl_loss, dec_output, trg_mask\n\n\nclass VAESeq2SeqInferModel(VAESeq2SeqModel):\n def __init__(self,\n embed_dim,\n hidden_size,\n latent_size,\n vocab_size,\n start_token=1,\n end_token=2,\n beam_size=1,\n max_out_len=100):\n self.start_token = start_token\n self.end_token = end_token\n self.beam_size = beam_size\n self.max_out_len = max_out_len\n super(VAESeq2SeqInferModel, self).__init__(embed_dim, hidden_size,\n latent_size, vocab_size)\n\n def forward(self, trg):\n # Encoder\n latent_z = paddle.normal(shape=(trg.shape[0], self.latent_size))\n dec_first_hidden_cell = self.fc(latent_z)\n 
dec_first_hidden, dec_first_cell = paddle.split(\n dec_first_hidden_cell, 2, axis=-1)\n if self.num_layers > 1:\n dec_first_hidden = paddle.split(dec_first_hidden, self.num_layers)\n dec_first_cell = paddle.split(dec_first_cell, self.num_layers)\n else:\n dec_first_hidden = [dec_first_hidden]\n dec_first_cell = [dec_first_cell]\n dec_initial_states = [[h, c]\n for h, c in zip(dec_first_hidden, dec_first_cell)]\n\n output_fc = lambda x: F.one_hot(\n paddle.multinomial(\n F.softmax(paddle.squeeze(\n self.decoder.output_fc(x),[1]))),num_classes=self.vocab_size)\n\n latent_z = nn.BeamSearchDecoder.tile_beam_merge_with_batch(\n latent_z, self.beam_size)\n\n decoder = nn.BeamSearchDecoder(\n cell=self.decoder.lstm.cell,\n start_token=self.start_token,\n end_token=self.end_token,\n beam_size=self.beam_size,\n embedding_fn=self.decoder.trg_embedder,\n output_fn=output_fc)\n\n outputs, _ = nn.dynamic_decode(\n decoder,\n inits=dec_initial_states,\n max_step_num=self.max_out_len,\n latent_z=latent_z)\n return outputs\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport jieba\nimport numpy as np\n\n\ndef load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = {}\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\n tokens = reader.readlines()\n for index, token in enumerate(tokens):\n token = token.rstrip(\"\\n\").split(\"\\t\")[0]\n vocab[token] = index\n return vocab\n\n\ndef convert_ids_to_tokens(wids, inversed_vocab):\n \"\"\" Converts a token string (or a sequence of tokens) in a single integer id\n (or a sequence of ids), using the vocabulary.\n \"\"\"\n tokens = []\n for wid in wids:\n wstr = inversed_vocab.get(wid, None)\n if wstr:\n tokens.append(wstr)\n return tokens\n\n\ndef convert_tokens_to_ids(tokens, vocab):\n \"\"\" Converts a token id (or a sequence of id) in a token string\n (or a sequence of tokens), using the vocabulary.\n \"\"\"\n\n ids = []\n unk_id = vocab.get('[UNK]', None)\n for token in tokens:\n wid = vocab.get(token, unk_id)\n if wid:\n ids.append(wid)\n return ids\n\n\ndef pad_texts_to_max_seq_len(texts, max_seq_len, pad_token_id=0):\n \"\"\"\n Padded the texts to the max sequence length if the length of text is lower than it.\n Unless it truncates the text.\n\n Args:\n texts(obj:`list`): Texts which contrains a sequence of word ids.\n max_seq_len(obj:`int`): Max sequence length.\n pad_token_id(obj:`int`, optinal, defaults to 0) : The pad token index.\n \"\"\"\n for index, text in enumerate(texts):\n seq_len = len(text)\n if seq_len < max_seq_len:\n padded_tokens = [pad_token_id for _ in range(max_seq_len - seq_len)]\n new_text = text + padded_tokens\n texts[index] = new_text\n elif seq_len > max_seq_len:\n new_text = text[:max_seq_len]\n texts[index] = new_text\n\n\ndef generate_batch(batch, pad_token_id=0, return_label=True):\n \"\"\"\n Generates a batch whose text will be padded to the max sequence length in the batch.\n\n Args:\n 
batch(obj:`List[Example]`) : One batch, which contains texts, labels and the true sequence lengths.\n pad_token_id(obj:`int`, optinal, defaults to 0) : The pad token index.\n\n Returns:\n batch(:obj:`Tuple[list]`): The batch data which contains texts, seq_lens and labels.\n \"\"\"\n queries = [entry[0] for entry in batch]\n titles = [entry[1] for entry in batch]\n query_seq_lens = [entry[2] for entry in batch]\n title_seq_lens = [entry[3] for entry in batch]\n\n query_batch_max_seq_len = max(query_seq_lens)\n pad_texts_to_max_seq_len(queries, query_batch_max_seq_len, pad_token_id)\n title_batch_max_seq_len = max(title_seq_lens)\n pad_texts_to_max_seq_len(titles, title_batch_max_seq_len, pad_token_id)\n\n if return_label:\n labels = [entry[-1] for entry in batch]\n return queries, titles, query_seq_lens, title_seq_lens, labels\n else:\n return queries, titles, query_seq_lens, title_seq_lens\n\n\ndef convert_example(example, vocab, unk_token_id=1, is_test=False):\n \"\"\"\n Builds model inputs from a sequence for sequence classification tasks. \n It use `jieba.cut` to tokenize text.\n\n Args:\n example(obj:`list[str]`): List of input data, containing text and label if it have label.\n vocab(obj:`dict`): The vocabulary.\n unk_token_id(obj:`int`, defaults to 1): The unknown token id.\n is_test(obj:`False`, defaults to `False`): Whether the example contains label or not.\n\n Returns:\n query_ids(obj:`list[int]`): The list of query ids.\n title_ids(obj:`list[int]`): The list of title ids.\n query_seq_len(obj:`int`): The input sequence query length.\n title_seq_len(obj:`int`): The input sequence title length.\n label(obj:`numpy.array`, data type of int64, optional): The input label if not is_test.\n \"\"\"\n\n query, title = example[0], example[1]\n query_tokens = jieba.lcut(query)\n title_tokens = jieba.lcut(title)\n\n query_ids = convert_tokens_to_ids(query_tokens, vocab)\n query_seq_len = len(query_ids)\n title_ids = convert_tokens_to_ids(title_tokens, vocab)\n title_seq_len = len(title_ids)\n\n if not is_test:\n label = np.array(example[-1], dtype=\"int64\")\n return query_ids, title_ids, query_seq_len, title_seq_len, label\n else:\n return query_ids, title_ids, query_seq_len, title_seq_len\n\n\ndef preprocess_prediction_data(data, vocab):\n \"\"\"\n It process the prediction data as the format used as training.\n\n Args:\n data (obj:`List[List[str, str]]`): \n The prediction data whose each element is a text pair. \n Each text will be tokenized by jieba.lcut() function.\n\n Returns:\n examples (obj:`list`): The processed data whose each element \n is a `list` object, which contains \n\n - query_ids(obj:`list[int]`): The list of query ids.\n - title_ids(obj:`list[int]`): The list of title ids.\n - query_seq_len(obj:`int`): The input sequence query length.\n - title_seq_len(obj:`int`): The input sequence title length.\n\n \"\"\"\n examples = []\n for query, title in data:\n query_tokens = jieba.lcut(query)\n title_tokens = jieba.lcut(title)\n query_ids = convert_tokens_to_ids(query_tokens, vocab)\n title_ids = convert_tokens_to_ids(title_tokens, vocab)\n examples.append([query_ids, title_ids, len(query_ids), len(title_ids)])\n return examples\n", "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport os\nimport sys\nimport time\nimport shutil\nimport argparse\nimport logging\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.layers import control_flow\nimport paddle.fluid.layers.learning_rate_scheduler as lr_scheduler\n\nfrom models.point_rcnn import PointRCNN\nfrom data.kitti_rcnn_reader import KittiRCNNReader\nfrom utils.run_utils import *\nfrom utils.config import cfg, load_config, set_config_from_list\nfrom utils.optimizer import optimize\n\nlogging.root.handlers = []\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"PointRCNN semantic segmentation train script\")\n parser.add_argument(\n '--cfg',\n type=str,\n default='cfgs/default.yml',\n help='specify the config for training')\n parser.add_argument(\n '--train_mode',\n type=str,\n default='rpn',\n required=True,\n help='specify the training mode')\n parser.add_argument(\n '--batch_size',\n type=int,\n default=16,\n required=True,\n help='training batch size, default 16')\n parser.add_argument(\n '--epoch',\n type=int,\n default=200,\n required=True,\n help='epoch number. default 200.')\n parser.add_argument(\n '--save_dir',\n type=str,\n default='checkpoints',\n help='directory name to save train snapshoot')\n parser.add_argument(\n '--resume',\n type=str,\n default=None,\n help='path to resume training based on previous checkpoints. 
'\n 'None for not resuming any checkpoints.')\n parser.add_argument(\n '--resume_epoch',\n type=int,\n default=0,\n help='resume epoch id')\n parser.add_argument(\n '--data_dir',\n type=str,\n default='./data',\n help='KITTI dataset root directory')\n parser.add_argument(\n '--gt_database',\n type=str,\n default='data/gt_database/train_gt_database_3level_Car.pkl',\n help='generated gt database for augmentation')\n parser.add_argument(\n '--rcnn_training_roi_dir',\n type=str,\n default=None,\n\thelp='specify the saved rois for rcnn training when using rcnn_offline mode')\n parser.add_argument(\n '--rcnn_training_feature_dir',\n type=str,\n default=None,\n\thelp='specify the saved features for rcnn training when using rcnn_offline mode')\n parser.add_argument(\n '--worker_num',\n type=int,\n default=16,\n\thelp='multiprocess reader process num, default 16')\n parser.add_argument(\n '--log_interval',\n type=int,\n default=1,\n help='mini-batch interval to log.')\n parser.add_argument(\n '--set',\n dest='set_cfgs',\n default=None,\n nargs=argparse.REMAINDER,\n help='set extra config keys if needed.')\n args = parser.parse_args()\n return args\n\n\ndef train():\n args = parse_args()\n print_arguments(args)\n # check whether the installed paddle is compiled with GPU\n # PointRCNN model can only run on GPU\n check_gpu(True)\n\n load_config(args.cfg)\n if args.set_cfgs is not None:\n set_config_from_list(args.set_cfgs)\n\n if args.train_mode == 'rpn':\n cfg.RPN.ENABLED = True\n cfg.RCNN.ENABLED = False\n elif args.train_mode == 'rcnn':\n cfg.RCNN.ENABLED = True\n cfg.RPN.ENABLED = cfg.RPN.FIXED = True\n elif args.train_mode == 'rcnn_offline':\n cfg.RCNN.ENABLED = True\n cfg.RPN.ENABLED = False\n else:\n raise NotImplementedError(\"unknown train mode: {}\".format(args.train_mode))\n\n checkpoints_dir = os.path.join(args.save_dir, args.train_mode)\n if not os.path.isdir(checkpoints_dir):\n os.makedirs(checkpoints_dir)\n\n kitti_rcnn_reader = KittiRCNNReader(data_dir=args.data_dir,\n npoints=cfg.RPN.NUM_POINTS,\n split=cfg.TRAIN.SPLIT,\n mode='TRAIN',\n classes=cfg.CLASSES,\n rcnn_training_roi_dir=args.rcnn_training_roi_dir,\n rcnn_training_feature_dir=args.rcnn_training_feature_dir,\n gt_database_dir=args.gt_database)\n num_samples = len(kitti_rcnn_reader)\n steps_per_epoch = int(num_samples / args.batch_size)\n logger.info(\"Total {} samples, {} batch per epoch.\".format(num_samples, steps_per_epoch))\n boundaries = [i * steps_per_epoch for i in cfg.TRAIN.DECAY_STEP_LIST]\n values = [cfg.TRAIN.LR * (cfg.TRAIN.LR_DECAY ** i) for i in range(len(boundaries) + 1)]\n\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n # build model\n startup = fluid.Program()\n train_prog = fluid.Program()\n with fluid.program_guard(train_prog, startup):\n with fluid.unique_name.guard():\n train_model = PointRCNN(cfg, args.batch_size, True, 'TRAIN')\n train_model.build()\n train_loader = train_model.get_loader()\n train_feeds = train_model.get_feeds()\n train_outputs = train_model.get_outputs()\n train_loss = train_outputs['loss']\n lr = optimize(train_loss,\n learning_rate=cfg.TRAIN.LR,\n warmup_factor=1. 
/ cfg.TRAIN.DIV_FACTOR,\n decay_factor=1e-5,\n total_step=steps_per_epoch * args.epoch,\n warmup_pct=cfg.TRAIN.PCT_START,\n train_prog=train_prog,\n startup_prog=startup,\n weight_decay=cfg.TRAIN.WEIGHT_DECAY,\n clip_norm=cfg.TRAIN.GRAD_NORM_CLIP)\n train_keys, train_values = parse_outputs(train_outputs, 'loss')\n\n exe.run(startup)\n\n if args.resume:\n if not os.path.isdir(args.resume):\n assert os.path.exists(\"{}.pdparams\".format(args.resume)), \\\n \"Given resume weight {}.pdparams not exist.\".format(args.resume)\n assert os.path.exists(\"{}.pdopt\".format(args.resume)), \\\n \"Given resume optimizer state {}.pdopt not exist.\".format(args.resume)\n fluid.load(train_prog, args.resume, exe)\n\n build_strategy = fluid.BuildStrategy()\n build_strategy.memory_optimize = False\n build_strategy.enable_inplace = False\n build_strategy.fuse_all_optimizer_ops = False\n train_compile_prog = fluid.compiler.CompiledProgram(\n train_prog).with_data_parallel(loss_name=train_loss.name,\n build_strategy=build_strategy)\n\n def save_model(exe, prog, path):\n if os.path.isdir(path):\n shutil.rmtree(path)\n logger.info(\"Save model to {}\".format(path))\n fluid.save(prog, path)\n\n # get reader\n train_reader = kitti_rcnn_reader.get_multiprocess_reader(args.batch_size,\n train_feeds,\n proc_num=args.worker_num,\n drop_last=True)\n train_loader.set_sample_list_generator(train_reader, place)\n\n train_stat = Stat()\n for epoch_id in range(args.resume_epoch, args.epoch):\n try:\n train_loader.start()\n train_iter = 0\n train_periods = []\n while True:\n cur_time = time.time()\n train_outs = exe.run(train_compile_prog, fetch_list=train_values + [lr.name])\n period = time.time() - cur_time\n train_periods.append(period)\n train_stat.update(train_keys, train_outs[:-1])\n if train_iter % args.log_interval == 0:\n log_str = \"\"\n for name, values in zip(train_keys + ['learning_rate'], train_outs):\n log_str += \"{}: {:.6f}, \".format(name, np.mean(values))\n logger.info(\"[TRAIN] Epoch {}, batch {}: {}time: {:.2f}\".format(epoch_id, train_iter, log_str, period))\n train_iter += 1\n except fluid.core.EOFException:\n logger.info(\"[TRAIN] Epoch {} finished, {}average time: {:.2f}\".format(epoch_id, train_stat.get_mean_log(), np.mean(train_periods[2:])))\n save_model(exe, train_prog, os.path.join(checkpoints_dir, str(epoch_id)))\n train_stat.reset()\n train_periods = []\n finally:\n train_loader.reset()\n\n\nif __name__ == \"__main__\":\n import paddle\n paddle.enable_static()\n train()\n", "import paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.io import Dataset, DataLoader\n\nimport re\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom gensim.models.keyedvectors import KeyedVectors\n\n\ndef clean_str(string):\n \"\"\"\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\n\ndef load_data_and_labels(positive_data_file, negative_data_file):\n \"\"\"\n Loads MR polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n \"\"\"\n # Load data from files\n positive_examples = list(\n open(\n positive_data_file, 'r', encoding='latin-1').readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(\n open(\n negative_data_file, 'r', encoding='latin-1').readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n x_text = list(map(lambda x: x.split(), x_text))\n # Generate labels\n positive_labels = [1 for _ in positive_examples]\n negative_labels = [0 for _ in negative_examples]\n y = np.array(positive_labels + negative_labels)\n return [x_text, y]\n\n\nclass Word2VecBoWTextClassification(nn.Layer):\n def __init__(self, batch_size, word_embedding_dim, sent_embedding_dim,\n num_labels):\n super(Word2VecBoWTextClassification, self).__init__()\n\n self._fc1 = nn.Linear(word_embedding_dim, sent_embedding_dim)\n self._fc2 = nn.Linear(sent_embedding_dim, num_labels)\n self._dropout = nn.Dropout(p=0.5)\n\n def forward(self, inputs):\n word_emb, seq_lens = inputs\n\n # [batch_size, word_embedding_dim]\n sent_emb = self.average_word_embedding(word_emb, seq_lens)\n\n # [batch_size, sent_embedding_dim]\n dense = self._fc1(sent_emb)\n dense = self._dropout(dense)\n\n # [batch_size, num_labels]\n out = self._fc2(dense)\n return out\n\n def average_word_embedding(self, word_emb, seq_lens):\n \"\"\"\n Parameters:\n word_emb: It is a Tensor with shape `[batch_size, max_seq_len, word_embedding_dim]`.\n seq_lens: It is a Tensor with shape `[batch_size]`.\n \"\"\"\n seq_lens = paddle.unsqueeze(seq_lens, axis=-1)\n seq_lens = paddle.cast(seq_lens, dtype=word_emb.dtype)\n\n # [batch_size, word_embedding_dim]\n sent_emb = paddle.sum(word_emb, axis=1)\n # [batch_size, word_embedding_dim]\n sent_emb = sent_emb / seq_lens\n return sent_emb\n\n\nclass SentencePolarityDatasetV1(Dataset):\n def __init__(self, x, y, gensim_model, max_seq_len):\n super(SentencePolarityDatasetV1, self).__init__()\n\n self._text = list(zip(x, y))\n self._gensim_model = gensim_model\n self._vector_size = gensim_model.vector_size\n self._max_seq_len = max_seq_len\n self._data = self.convert_to_ids()\n\n def convert_to_ids(self):\n data = []\n for sentence, label in self._text:\n sentence = sentence[:self._max_seq_len]\n ids = np.zeros([len(sentence), self._vector_size], dtype=np.float32)\n for i, word in enumerate(sentence):\n if word in self._gensim_model:\n ids[i] = self._gensim_model[word]\n else:\n ids[i] = np.random.uniform(-0.25, 0.25, self._vector_size)\n data.append([ids, label])\n return data\n\n def __getitem__(self, idx):\n ids = np.copy(self._data[idx][0])\n label = self._data[idx][1]\n return (ids, label)\n\n def __len__(self):\n return len(self._data)\n\n\ndef generate_batch(batch):\n batch_ids, batch_label = zip(*batch)\n max_len = max([ids.shape[0] for ids in batch_ids])\n new_batch_ids = np.zeros(\n [len(batch_ids), max_len, batch_ids[0].shape[1]], dtype=np.float32)\n new_batch_label = []\n new_batch_seq_len = []\n for i, (ids, label) in enumerate(zip(batch_ids, batch_label)):\n seq_len = ids.shape[0]\n new_batch_ids[i, :seq_len, :] = ids\n new_batch_label.append(label)\n 
new_batch_seq_len.append(seq_len)\n return new_batch_ids, new_batch_label, new_batch_seq_len\n\n\ndef train_base():\n paddle.disable_static()\n\n batch_size = 64\n max_seq_len = 256\n epochs = 20\n lr = 0.001\n weight_decay = 0.0001\n sent_embedding_dim = 64\n num_labels = 2\n\n pos_file = './sentence-polarity-dataset-v1/rt-polarity.pos'\n neg_file = './sentence-polarity-dataset-v1/rt-polarity.neg'\n x_text, y = load_data_and_labels(pos_file, neg_file)\n x_train, x_test, y_train, y_test = train_test_split(\n x_text, y, test_size=0.1, random_state=1)\n\n pretrained_word2vec_file = './sentence-polarity-dataset-v1/GoogleNews-vectors-negative300.bin'\n #gensim_model = KeyedVectors.load_word2vec_format(pretrained_word2vec_file, binary=True, limit=300000)\n gensim_model = KeyedVectors.load_word2vec_format(\n pretrained_word2vec_file, binary=True)\n print('\\nLoaded word2vec from %s\\n' % pretrained_word2vec_file)\n\n train_dataset = SentencePolarityDatasetV1(x_train, y_train, gensim_model,\n max_seq_len)\n test_dataset = SentencePolarityDatasetV1(x_test, y_test, gensim_model,\n max_seq_len)\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n return_list=True,\n shuffle=True,\n collate_fn=lambda batch: generate_batch(batch))\n test_loader = DataLoader(\n test_dataset,\n batch_size=batch_size,\n return_list=True,\n shuffle=False,\n collate_fn=lambda batch: generate_batch(batch))\n\n model = Word2VecBoWTextClassification(batch_size, gensim_model.vector_size,\n sent_embedding_dim, num_labels)\n model.train()\n\n adam = paddle.optimizer.Adam(\n parameters=model.parameters(),\n learning_rate=lr,\n weight_decay=weight_decay)\n criterion = nn.CrossEntropyLoss()\n\n for epoch in range(epochs):\n print('Epoch %d/%d' % (epoch + 1, epochs))\n for step, batch_data in enumerate(train_loader, start=1):\n ids, label, seq_lens = batch_data\n\n output = model((ids, seq_lens))\n loss = criterion(output, label)\n loss.backward()\n adam.step()\n adam.clear_grad()\n\n if step % 10 == 0:\n print('step %d, loss %.4f' % (step, loss.numpy()[0]))\n\n acc = test(model, test_loader)\n print('\\ntest acc %.4f\\n' % acc)\n\n\ndef test(model, test_loader):\n correct = num = 0\n model.eval()\n with paddle.no_grad():\n for batch_data in test_loader:\n ids, label, seq_lens = batch_data\n\n # [batch_size, 2]\n output = model((ids, seq_lens))\n\n num += label.shape[0]\n predict = paddle.argmax(output, axis=1)\n label = paddle.cast(label, dtype=predict.dtype)\n correct += paddle.sum(\n paddle.cast(\n predict == label, dtype='int64')).numpy()[0]\n model.train()\n return correct * 1.0 / num\n\n\nif __name__ == '__main__':\n train_base()\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport multiprocessing\nimport sys\n# sys.path.append(\"../models/classification/\")\nfrom nets import textcnn_net_multi_label\nimport paddle\nimport paddle.fluid as fluid\nfrom utils import ArgumentGroup, print_arguments, DataProcesser, DataReader, ConfigReader\nfrom utils import init_checkpoint, check_version, logger\nimport random\nimport codecs\nimport logging\nimport math\nnp.random.seed(0)\nrandom.seed(0)\n\nparser = argparse.ArgumentParser(__doc__)\nDEV_COUNT = 1\nmodel_g = ArgumentGroup(parser, \"model\", \"model configuration and paths.\")\nmodel_g.add_arg(\"init_checkpoint\", str, None,\n \"Init checkpoint to resume training from.\")\nmodel_g.add_arg(\"checkpoints\", str, \"./checkpoints\",\n \"Path to save checkpoints.\")\nmodel_g.add_arg(\"config_path\", str, \"./data/input/model.conf\", \"Model conf.\")\nmodel_g.add_arg(\"build_dict\", bool, False, \"Build dict.\")\n\ntrain_g = ArgumentGroup(parser, \"training\", \"training options.\")\ntrain_g.add_arg(\"cpu_num\", int, 3, \"Number of Threads.\")\ntrain_g.add_arg(\"epoch\", int, 100, \"Number of epoches for training.\")\ntrain_g.add_arg(\"learning_rate\", float, 0.1,\n \"Learning rate used to train with warmup.\")\ntrain_g.add_arg(\"save_steps\", int, 1000,\n \"The steps interval to save checkpoints.\")\ntrain_g.add_arg(\"validation_steps\", int, 100,\n \"The steps interval to evaluate model performance.\")\ntrain_g.add_arg(\"random_seed\", int, 7, \"random seed\")\ntrain_g.add_arg(\n \"threshold\", float, 0.1,\n \"When the confidence exceeds the threshold, the corresponding label is given.\"\n)\n\nlog_g = ArgumentGroup(parser, \"logging\", \"logging related.\")\nlog_g.add_arg(\"skip_steps\", int, 10, \"The steps interval to print loss.\")\n\ndata_g = ArgumentGroup(parser, \"data\",\n \"Data paths, vocab paths and data processing options\")\ndata_g.add_arg(\"data_dir\", str, \"./data/input/\", \"Path to training data.\")\ndata_g.add_arg(\"save_dir\", str, \"./data/output/\", \"Path to save.\")\ndata_g.add_arg(\"max_seq_len\", int, 50,\n \"Tokens' number of the longest seqence allowed.\")\ndata_g.add_arg(\"batch_size\", int, 64,\n \"The total number of examples in one batch for training.\")\n\nrun_type_g = ArgumentGroup(parser, \"run_type\", \"running type options.\")\nrun_type_g.add_arg(\"use_cuda\", bool, False, \"If set, use GPU for training.\")\n# run_type_g.add_arg(\"use_fast_executor\", bool, False, \"If set, use fast parallel executor (in experiment).\")\nrun_type_g.add_arg(\"do_train\", bool, True,\n \"Whether to perform evaluation on test data set.\")\nrun_type_g.add_arg(\"do_eval\", bool, True,\n \"Whether to perform evaluation on test data set.\")\nrun_type_g.add_arg(\"do_test\", bool, True,\n \"Whether to perform evaluation on test data set.\")\nargs = 
parser.parse_args()\n\n\ndef get_score(pred_result, label, eval_phase):\n \"\"\"[get precision recall and f-score]\n \n Arguments:\n pred_result {[type]} -- [pred labels]\n label {[type]} -- [origin labels]\n \"\"\"\n tp = 0\n total = 0\n true_cnt = 0\n pred_pos_num = 0\n pos_num = 0\n for i in range(len(pred_result)):\n total += 1\n pred_labels = []\n actual_labels = []\n for j in range(1, len(pred_result[0])): # the 0 one is background\n if pred_result[i][j] == 1:\n pred_labels.append(j)\n if label[i][j] == 1:\n actual_labels.append(j)\n if len(pred_labels) > 0:\n pred_pos_num += 1\n if len(actual_labels) > 0:\n pos_num += 1\n if set(actual_labels).issubset(set(pred_labels)):\n tp += 1\n true_cnt += 1\n elif len(pred_labels) == 0 and len(actual_labels) == 0:\n true_cnt += 1\n try:\n precision = tp * 1.0 / pred_pos_num\n recall = tp * 1.0 / pos_num\n f1 = 2 * precision * recall / (recall + precision)\n except Exception as e:\n precision = 0\n recall = 0\n f1 = 0\n acc = true_cnt * 1.0 / total\n logger.info(\"tp, pred_pos_num, pos_num, total\")\n logger.info(\"%d, %d, %d, %d\" % (tp, pred_pos_num, pos_num, total))\n logger.info(\"%s result is : precision is %f, recall is %f, f1_score is %f, acc is %f\" % (eval_phase, precision, \\\n recall, f1, acc))\n\n\ndef train(args, train_exe, build_res, place):\n \"\"\"[train the net]\n \n Arguments:\n args {[type]} -- [description]\n train_exe {[type]} -- [description]\n compiled_prog{[type]} -- [description]\n build_res {[type]} -- [description]\n place {[type]} -- [description]\n \"\"\"\n global DEV_COUNT\n compiled_prog = build_res[\"compiled_prog\"]\n cost = build_res[\"cost\"]\n prediction = build_res[\"prediction\"]\n pred_label = build_res[\"pred_label\"]\n label = build_res[\"label\"]\n fetch_list = [cost.name, prediction.name, pred_label.name, label.name]\n train_data_loader = build_res[\"train_data_loader\"]\n train_prog = build_res[\"train_prog\"]\n steps = 0\n time_begin = time.time()\n test_exe = train_exe\n logger.info(\"Begin training\")\n for i in range(args.epoch):\n try:\n for data in train_data_loader():\n avg_cost_np, avg_pred_np, pred_label, label = train_exe.run(feed=data, program=compiled_prog, \\\n fetch_list=fetch_list)\n steps += 1\n if steps % int(args.skip_steps) == 0:\n time_end = time.time()\n used_time = time_end - time_begin\n get_score(pred_label, label, eval_phase=\"Train\")\n logger.info('loss is {}'.format(avg_cost_np))\n logger.info(\"epoch: %d, step: %d, speed: %f steps/s\" %\n (i, steps, args.skip_steps / used_time))\n time_begin = time.time()\n if steps % args.save_steps == 0:\n save_path = os.path.join(args.checkpoints,\n \"step_\" + str(steps))\n fluid.io.save(train_prog, save_path)\n logger.info(\"[save]step %d : save at %s\" %\n (steps, save_path))\n if steps % args.validation_steps == 0:\n if args.do_eval:\n evaluate(args, test_exe, build_res, \"eval\")\n if args.do_test:\n evaluate(args, test_exe, build_res, \"test\")\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Train error : %s\" % str(e))\n exit(1)\n save_path = os.path.join(args.checkpoints, \"step_\" + str(steps))\n fluid.io.save(train_prog, save_path)\n logger.info(\"[save]step %d : save at %s\" % (steps, save_path))\n\n\ndef evaluate(args,\n test_exe,\n build_res,\n eval_phase,\n save_result=False,\n id2intent=None):\n \"\"\"[evaluate on dev/test dataset]\n \n Arguments:\n args {[type]} -- [description]\n test_exe {[type]} -- [description]\n test_prog {[type]} -- [description]\n build_res {[type]} -- [description]\n place 
{[type]} -- [description]\n eval_phase {[type]} -- [description]\n \n Keyword Arguments:\n threshold {float} -- [description] (default: {0.5})\n save_result {bool} -- [description] (default: {False})\n id2intent {[type]} -- [description] (default: {None})\n \"\"\"\n place = build_res[\"test_place\"]\n threshold = args.threshold\n cost = build_res[\"cost\"]\n prediction = build_res[\"prediction\"]\n pred_label = build_res[\"pred_label\"]\n label = build_res[\"label\"]\n fetch_list = [cost.name, prediction.name, pred_label.name, label.name]\n total_cost, total_acc, pred_prob_list, pred_label_list, label_list = [], [], [], [], []\n if eval_phase == \"eval\":\n test_prog = build_res[\"eval_compiled_prog\"]\n test_data_loader = build_res[\"eval_data_loader\"]\n elif eval_phase == \"test\":\n test_prog = build_res[\"test_compiled_prog\"]\n test_data_loader = build_res[\"test_data_loader\"]\n else:\n exit(1)\n logger.info(\"-----------------------------------------------------------\")\n for data in test_data_loader():\n avg_cost_np, avg_pred_np, pred_label, label= test_exe.run(program=test_prog, fetch_list=fetch_list, feed=data, \\\n return_numpy=True)\n total_cost.append(avg_cost_np)\n pred_prob_list.extend(avg_pred_np)\n pred_label_list.extend(pred_label)\n label_list.extend(label)\n\n if save_result:\n logger.info(\"save result at : %s\" % args.save_dir + \"/\" + eval_phase +\n \".rst\")\n save_dir = args.save_dir\n if not os.path.exists(save_dir):\n logger.warning(\"save dir not exists, and create it\")\n os.makedirs(save_dir)\n fin = codecs.open(\n os.path.join(args.data_dir, eval_phase + \".txt\"),\n \"r\",\n encoding=\"utf8\")\n fout = codecs.open(\n args.save_dir + \"/\" + eval_phase + \".rst\", \"w\", encoding=\"utf8\")\n for line in pred_prob_list:\n query = fin.readline().rsplit(\"\\t\", 1)[0]\n res = []\n for i in range(1, len(line)):\n if line[i] > threshold:\n #res.append(id2intent[i]+\":\"+str(line[i]))\n res.append(id2intent[i])\n if len(res) == 0:\n res.append(id2intent[0])\n fout.write(\"%s\\t%s\\n\" % (query, \"\\2\".join(sorted(res))))\n fout.close()\n fin.close()\n\n logger.info(\"[%s] result: \" % eval_phase)\n get_score(pred_label_list, label_list, eval_phase)\n logger.info('loss is {}'.format(sum(total_cost) * 1.0 / len(total_cost)))\n logger.info(\"-----------------------------------------------------------\")\n\n\ndef create_net(args,\n flow_data,\n class_dim,\n dict_dim,\n place,\n model_name=\"textcnn_net\",\n is_infer=False):\n \"\"\"[create network and loader]\n \n Arguments:\n flow_data {[type]} -- [description]\n class_dim {[type]} -- [description]\n dict_dim {[type]} -- [description]\n place {[type]} -- [description]\n \n Keyword Arguments:\n model_name {str} -- [description] (default: {\"textcnn_net\"})\n is_infer {bool} -- [description] (default: {False})\n \n Returns:\n [type] -- [description]\n \"\"\"\n if model_name == \"textcnn_net\":\n model = textcnn_net_multi_label\n else:\n return\n char_list = fluid.data(\n name=\"char\",\n shape=[None, args.max_seq_len, 1],\n dtype=\"int64\",\n lod_level=0)\n label = fluid.data(\n name=\"label\", shape=[None, class_dim], dtype=\"float32\",\n lod_level=0) # label data\n data_loader = fluid.io.DataLoader.from_generator(\n feed_list=[char_list, label],\n capacity=args.batch_size * 10,\n iterable=True,\n return_list=False)\n output = model(\n char_list,\n label,\n dict_dim,\n emb_dim=flow_data[\"model\"][\"emb_dim\"],\n hid_dim=flow_data[\"model\"][\"hid_dim\"],\n hid_dim2=flow_data[\"model\"][\"hid_dim2\"],\n 
class_dim=class_dim,\n win_sizes=flow_data[\"model\"][\"win_sizes\"],\n is_infer=is_infer,\n threshold=args.threshold,\n max_seq_len=args.max_seq_len)\n if is_infer:\n prediction = output\n return [data_loader, prediction]\n else:\n avg_cost, prediction, pred_label, label = output[0], output[1], output[\n 2], output[3]\n return [data_loader, avg_cost, prediction, pred_label, label]\n\n\ndef build_data_loader(args, char_dict, intent_dict):\n \"\"\"[decorate samples for dataloader]\n \n Arguments:\n args {[type]} -- [description]\n char_dict {[type]} -- [description]\n intent_dict {[type]} -- [description]\n \n Returns:\n [type] -- [description]\n \"\"\"\n loader_res = {}\n if args.do_train:\n train_processor = DataReader(char_dict, intent_dict, args.max_seq_len)\n train_data_generator = train_processor.prepare_data(\n data_path=args.data_dir + \"train.txt\",\n batch_size=args.batch_size,\n mode='train')\n loader_res[\"train_data_generator\"] = train_data_generator\n num_train_examples = train_processor._get_num_examples()\n logger.info(\"Num train examples: %d\" % num_train_examples)\n logger.info(\"Num train steps: %d\" % (math.ceil(num_train_examples * 1.0 / args.batch_size) * \\\n args.epoch // DEV_COUNT))\n if math.ceil(num_train_examples * 1.0 /\n args.batch_size) // DEV_COUNT <= 0:\n logger.error(\n \"Num of train steps is less than 0 or equals to 0, exit\")\n exit(1)\n if args.do_eval:\n eval_processor = DataReader(char_dict, intent_dict, args.max_seq_len)\n eval_data_generator = eval_processor.prepare_data(\n data_path=args.data_dir + \"eval.txt\",\n batch_size=args.batch_size,\n mode='eval')\n loader_res[\"eval_data_generator\"] = eval_data_generator\n num_eval_examples = eval_processor._get_num_examples()\n logger.info(\"Num eval examples: %d\" % num_eval_examples)\n if args.do_test:\n test_processor = DataReader(char_dict, intent_dict, args.max_seq_len)\n test_data_generator = test_processor.prepare_data(\n data_path=args.data_dir + \"test.txt\",\n batch_size=args.batch_size,\n mode='test')\n loader_res[\"test_data_generator\"] = test_data_generator\n return loader_res\n\n\ndef build_graph(args, model_config, num_labels, dict_dim, place, test_place,\n loader_res):\n \"\"\"[build paddle graph]\n \n Arguments:\n args {[type]} -- [description]\n model_config {[type]} -- [description]\n num_labels {[type]} -- [description]\n dict_dim {[type]} -- [description]\n place {[type]} -- [description]\n loader_res {[type]} -- [description]\n \n Returns:\n [type] -- [description]\n \"\"\"\n res = {}\n cost, prediction, pred_label, label = None, None, None, None\n train_prog = fluid.default_main_program()\n\n startup_prog = fluid.default_startup_program()\n eval_prog = train_prog.clone(for_test=True)\n test_prog = train_prog.clone(for_test=True)\n train_prog.random_seed = args.random_seed\n startup_prog.random_seed = args.random_seed\n if args.do_train:\n with fluid.program_guard(train_prog, startup_prog):\n with fluid.unique_name.guard():\n train_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \\\n dict_dim, place, model_name=\"textcnn_net\")\n train_data_loader.set_sample_list_generator(\n loader_res['train_data_generator'], places=place)\n res[\"train_data_loader\"] = train_data_loader\n sgd_optimizer = fluid.optimizer.SGD(\n learning_rate=fluid.layers.exponential_decay(\n learning_rate=args.learning_rate,\n decay_steps=1000,\n decay_rate=0.5,\n staircase=True))\n sgd_optimizer.minimize(cost)\n if args.do_eval:\n with 
fluid.program_guard(eval_prog, startup_prog):\n with fluid.unique_name.guard():\n eval_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \\\n dict_dim, test_place, model_name=\"textcnn_net\")\n eval_data_loader.set_sample_list_generator(\n loader_res['eval_data_generator'], places=test_place)\n res[\"eval_data_loader\"] = eval_data_loader\n if args.do_test:\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n test_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \\\n dict_dim, test_place, model_name=\"textcnn_net\")\n test_data_loader.set_sample_list_generator(\n loader_res['test_data_generator'], places=test_place)\n res[\"test_data_loader\"] = test_data_loader\n res[\"cost\"] = cost\n res[\"prediction\"] = prediction\n res[\"label\"] = label\n res[\"pred_label\"] = pred_label\n res[\"train_prog\"] = train_prog\n res[\"eval_prog\"] = eval_prog\n res[\"test_prog\"] = test_prog\n\n return res\n\n\ndef main(args):\n \"\"\"\n Main Function\n \"\"\"\n global DEV_COUNT\n startup_prog = fluid.default_startup_program()\n random.seed(args.random_seed)\n model_config = ConfigReader.read_conf(args.config_path)\n if args.use_cuda:\n test_place = fluid.cuda_places(0)\n place = fluid.cuda_places()\n DEV_COUNT = len(place)\n else:\n test_place = fluid.cpu_places(1)\n os.environ['CPU_NUM'] = str(args.cpu_num)\n place = fluid.cpu_places()\n DEV_COUNT = args.cpu_num\n logger.info(\"Dev Num is %s\" % str(DEV_COUNT))\n exe = fluid.Executor(place[0])\n if args.do_train and args.build_dict:\n DataProcesser.build_dict(args.data_dir + \"train.txt\", args.data_dir)\n # read dict\n char_dict = DataProcesser.read_dict(args.data_dir + \"char.dict\")\n dict_dim = len(char_dict)\n intent_dict = DataProcesser.read_dict(args.data_dir + \"domain.dict\")\n id2intent = {}\n for key, value in intent_dict.items():\n id2intent[int(value)] = key\n num_labels = len(intent_dict)\n # build model\n loader_res = build_data_loader(args, char_dict, intent_dict)\n build_res = build_graph(args, model_config, num_labels, dict_dim, place,\n test_place, loader_res)\n build_res[\"place\"] = place\n build_res[\"test_place\"] = test_place\n if not (args.do_train or args.do_eval or args.do_test):\n raise ValueError(\"For args `do_train`, `do_eval` and `do_test`, at \"\n \"least one of them must be True.\")\n\n exe.run(startup_prog)\n if args.init_checkpoint and args.init_checkpoint != \"None\":\n try:\n init_checkpoint(\n exe, args.init_checkpoint, main_program=startup_prog)\n logger.info(\"Load model from %s\" % args.init_checkpoint)\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Faild load model from %s [%s]\" %\n (args.init_checkpoint, str(e)))\n build_strategy = fluid.compiler.BuildStrategy()\n build_strategy.fuse_all_reduce_ops = False\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.num_threads = 1\n # add compiled prog\n if args.do_train:\n compiled_prog = fluid.compiler.CompiledProgram(build_res[\"train_prog\"]).with_data_parallel( \\\n loss_name=build_res[\"cost\"].name, \\\n build_strategy=build_strategy, \\\n exec_strategy=exec_strategy)\n build_res[\"compiled_prog\"] = compiled_prog\n if args.do_test:\n test_compiled_prog = fluid.compiler.CompiledProgram(build_res[\n \"test_prog\"])\n build_res[\"test_compiled_prog\"] = test_compiled_prog\n if args.do_eval:\n eval_compiled_prog = fluid.compiler.CompiledProgram(build_res[\n \"eval_prog\"])\n build_res[\"eval_compiled_prog\"] = 
eval_compiled_prog\n\n if args.do_train:\n train(args, exe, build_res, place)\n if args.do_eval:\n evaluate(args, exe, build_res, \"eval\", \\\n save_result=True, id2intent=id2intent)\n if args.do_test:\n evaluate(args, exe, build_res, \"test\",\\\n save_result=True, id2intent=id2intent)\n\n\nif __name__ == \"__main__\":\n import paddle\n paddle.enable_static()\n logger.info(\"the paddle version is %s\" % paddle.__version__)\n check_version('1.6.0')\n print_arguments(args)\n main(args)\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nSimNet Task\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport argparse\nimport multiprocessing\nimport sys\n\ndefaultencoding = 'utf-8'\nif sys.getdefaultencoding() != defaultencoding:\n reload(sys)\n sys.setdefaultencoding(defaultencoding)\n\nsys.path.append(\"..\")\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport config\nimport utils\nimport reader\nimport nets.paddle_layers as layers\nimport io\nimport logging\n\nfrom utils import ArgConfig\nfrom utils import load_dygraph\nfrom model_check import check_version\nfrom model_check import check_cuda\n\n\ndef train(conf_dict, args):\n \"\"\"\n train process\n \"\"\"\n\n # Get device\n if args.use_cuda:\n place = fluid.CUDAPlace(0)\n else:\n place = fluid.CPUPlace()\n\n # run train\n logging.info(\"start train process ...\")\n\n def valid_and_test(pred_list, process, mode):\n \"\"\"\n return auc and acc\n \"\"\"\n pred_list = np.vstack(pred_list)\n if mode == \"test\":\n label_list = process.get_test_label()\n elif mode == \"valid\":\n label_list = process.get_valid_label()\n if args.task_mode == \"pairwise\":\n pred_list = (pred_list + 1) / 2\n pred_list = np.hstack(\n (np.ones_like(pred_list) - pred_list, pred_list))\n metric.reset()\n metric.update(pred_list, label_list)\n auc = metric.eval()\n if args.compute_accuracy:\n acc = utils.get_accuracy(pred_list, label_list, args.task_mode,\n args.lamda)\n return auc, acc\n else:\n return auc\n\n with fluid.dygraph.guard(place):\n # used for continuous evaluation \n if args.enable_ce:\n SEED = 102\n fluid.default_startup_program().random_seed = SEED\n fluid.default_main_program().random_seed = SEED\n\n # loading vocabulary\n vocab = utils.load_vocab(args.vocab_path)\n # get vocab size\n conf_dict['dict_size'] = len(vocab)\n conf_dict['seq_len'] = args.seq_len\n\n # Load network structure dynamically\n net = utils.import_class(\"./nets\", conf_dict[\"net\"][\"module_name\"],\n conf_dict[\"net\"][\"class_name\"])(conf_dict)\n if args.init_checkpoint is not \"\":\n model, _ = load_dygraph(args.init_checkpoint)\n net.set_dict(model)\n # Load loss function dynamically\n loss = utils.import_class(\"./nets/losses\",\n conf_dict[\"loss\"][\"module_name\"],\n conf_dict[\"loss\"][\"class_name\"])(conf_dict)\n # Load Optimization method\n learning_rate = 
conf_dict[\"optimizer\"][\"learning_rate\"]\n optimizer_name = conf_dict[\"optimizer\"][\"class_name\"]\n if optimizer_name == 'SGDOptimizer':\n optimizer = fluid.optimizer.SGDOptimizer(\n learning_rate, parameter_list=net.parameters())\n elif optimizer_name == 'AdamOptimizer':\n beta1 = conf_dict[\"optimizer\"][\"beta1\"]\n beta2 = conf_dict[\"optimizer\"][\"beta2\"]\n epsilon = conf_dict[\"optimizer\"][\"epsilon\"]\n optimizer = fluid.optimizer.AdamOptimizer(\n learning_rate,\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n parameter_list=net.parameters())\n\n # load auc method\n metric = fluid.metrics.Auc(name=\"auc\")\n simnet_process = reader.SimNetProcessor(args, vocab)\n\n # set global step\n global_step = 0\n ce_info = []\n losses = []\n start_time = time.time()\n\n train_loader = fluid.io.DataLoader.from_generator(\n capacity=16,\n return_list=True,\n iterable=True,\n use_double_buffer=True)\n get_train_examples = simnet_process.get_reader(\n \"train\", epoch=args.epoch)\n train_loader.set_sample_list_generator(\n paddle.batch(\n get_train_examples, batch_size=args.batch_size), place)\n if args.do_valid:\n valid_loader = fluid.io.DataLoader.from_generator(\n capacity=16,\n return_list=True,\n iterable=True,\n use_double_buffer=True)\n get_valid_examples = simnet_process.get_reader(\"valid\")\n valid_loader.set_sample_list_generator(\n paddle.batch(\n get_valid_examples, batch_size=args.batch_size),\n place)\n pred_list = []\n\n if args.task_mode == \"pairwise\":\n\n for left, pos_right, neg_right in train_loader():\n\n left = fluid.layers.reshape(left, shape=[-1, 1])\n pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])\n neg_right = fluid.layers.reshape(neg_right, shape=[-1, 1])\n net.train()\n global_step += 1\n left_feat, pos_score = net(left, pos_right)\n pred = pos_score\n _, neg_score = net(left, neg_right)\n avg_cost = loss.compute(pos_score, neg_score)\n losses.append(np.mean(avg_cost.numpy()))\n avg_cost.backward()\n optimizer.minimize(avg_cost)\n net.clear_gradients()\n\n if args.do_valid and global_step % args.validation_steps == 0:\n for left, pos_right in valid_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n pos_right = fluid.layers.reshape(\n pos_right, shape=[-1, 1])\n net.eval()\n left_feat, pos_score = net(left, pos_right)\n pred = pos_score\n\n pred_list += list(pred.numpy())\n valid_result = valid_and_test(pred_list, simnet_process,\n \"valid\")\n if args.compute_accuracy:\n valid_auc, valid_acc = valid_result\n logging.info(\n \"global_steps: %d, valid_auc: %f, valid_acc: %f, valid_loss: %f\"\n % (global_step, valid_auc, valid_acc,\n np.mean(losses)))\n else:\n valid_auc = valid_result\n logging.info(\n \"global_steps: %d, valid_auc: %f, valid_loss: %f\" %\n (global_step, valid_auc, np.mean(losses)))\n\n if global_step % args.save_steps == 0:\n model_save_dir = os.path.join(args.output_dir,\n conf_dict[\"model_path\"])\n model_path = os.path.join(model_save_dir, str(global_step))\n\n if not os.path.exists(model_save_dir):\n os.makedirs(model_save_dir)\n fluid.dygraph.save_dygraph(net.state_dict(), model_path)\n\n logging.info(\"saving infer model in %s\" % model_path)\n else:\n for left, right, label in train_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n right = fluid.layers.reshape(right, shape=[-1, 1])\n label = fluid.layers.reshape(label, shape=[-1, 1])\n net.train()\n global_step += 1\n left_feat, pred = net(left, right)\n avg_cost = loss.compute(pred, label)\n losses.append(np.mean(avg_cost.numpy()))\n 
avg_cost.backward()\n optimizer.minimize(avg_cost)\n net.clear_gradients()\n\n if args.do_valid and global_step % args.validation_steps == 0:\n for left, right in valid_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n right = fluid.layers.reshape(right, shape=[-1, 1])\n net.eval()\n left_feat, pred = net(left, right)\n pred_list += list(pred.numpy())\n valid_result = valid_and_test(pred_list, simnet_process,\n \"valid\")\n if args.compute_accuracy:\n valid_auc, valid_acc = valid_result\n logging.info(\n \"global_steps: %d, valid_auc: %f, valid_acc: %f, valid_loss: %f\"\n % (global_step, valid_auc, valid_acc,\n np.mean(losses)))\n else:\n valid_auc = valid_result\n logging.info(\n \"global_steps: %d, valid_auc: %f, valid_loss: %f\" %\n (global_step, valid_auc, np.mean(losses)))\n\n if global_step % args.save_steps == 0:\n model_save_dir = os.path.join(args.output_dir,\n conf_dict[\"model_path\"])\n model_path = os.path.join(model_save_dir, str(global_step))\n\n if not os.path.exists(model_save_dir):\n os.makedirs(model_save_dir)\n fluid.dygraph.save_dygraph(net.state_dict(), model_path)\n\n logging.info(\"saving infer model in %s\" % model_path)\n\n end_time = time.time()\n ce_info.append([np.mean(losses), end_time - start_time])\n # final save\n logging.info(\"the final step is %s\" % global_step)\n model_save_dir = os.path.join(args.output_dir, conf_dict[\"model_path\"])\n model_path = os.path.join(model_save_dir, str(global_step))\n\n if not os.path.exists(model_save_dir):\n os.makedirs(model_save_dir)\n fluid.dygraph.save_dygraph(net.state_dict(), model_path)\n logging.info(\"saving infer model in %s\" % model_path)\n # used for continuous evaluation\n if args.enable_ce:\n card_num = get_cards()\n ce_loss = 0\n ce_time = 0\n try:\n ce_loss = ce_info[-1][0]\n ce_time = ce_info[-1][1]\n except:\n logging.info(\"ce info err!\")\n print(\"kpis\\teach_step_duration_%s_card%s\\t%s\" %\n (args.task_name, card_num, ce_time))\n print(\"kpis\\ttrain_loss_%s_card%s\\t%f\" %\n (args.task_name, card_num, ce_loss))\n\n if args.do_test:\n # Get Feeder and Reader\n test_loader = fluid.io.DataLoader.from_generator(\n capacity=16,\n return_list=True,\n iterable=True,\n use_double_buffer=True)\n get_test_examples = simnet_process.get_reader(\"test\")\n test_loader.set_sample_list_generator(\n paddle.batch(\n get_test_examples, batch_size=args.batch_size),\n place)\n pred_list = []\n for left, pos_right in test_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])\n net.eval()\n left = fluid.layers.reshape(left, shape=[-1, 1])\n pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])\n left_feat, pos_score = net(left, pos_right)\n pred = pos_score\n pred_list += list(pred.numpy())\n test_result = valid_and_test(pred_list, simnet_process, \"test\")\n if args.compute_accuracy:\n test_auc, test_acc = test_result\n logging.info(\"AUC of test is %f, Accuracy of test is %f\" %\n (test_auc, test_acc))\n else:\n test_auc = test_result\n logging.info(\"AUC of test is %f\" % test_auc)\n\n\ndef test(conf_dict, args):\n \"\"\"\n Evaluation Function\n \"\"\"\n logging.info(\"start test process ...\")\n if args.use_cuda:\n place = fluid.CUDAPlace(0)\n else:\n place = fluid.CPUPlace()\n with fluid.dygraph.guard(place):\n\n vocab = utils.load_vocab(args.vocab_path)\n simnet_process = reader.SimNetProcessor(args, vocab)\n test_loader = fluid.io.DataLoader.from_generator(\n capacity=16,\n return_list=True,\n iterable=True,\n 
use_double_buffer=True)\n get_test_examples = simnet_process.get_reader(\"test\")\n test_loader.set_sample_list_generator(\n paddle.batch(\n get_test_examples, batch_size=args.batch_size), place)\n\n conf_dict['dict_size'] = len(vocab)\n conf_dict['seq_len'] = args.seq_len\n\n net = utils.import_class(\"./nets\", conf_dict[\"net\"][\"module_name\"],\n conf_dict[\"net\"][\"class_name\"])(conf_dict)\n\n model, _ = load_dygraph(args.init_checkpoint)\n net.set_dict(model)\n metric = fluid.metrics.Auc(name=\"auc\")\n pred_list = []\n with io.open(\n \"predictions.txt\", \"w\", encoding=\"utf8\") as predictions_file:\n if args.task_mode == \"pairwise\":\n for left, pos_right in test_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])\n\n left_feat, pos_score = net(left, pos_right)\n pred = pos_score\n\n pred_list += list(\n map(lambda item: float(item[0]), pred.numpy()))\n predictions_file.write(u\"\\n\".join(\n map(lambda item: str((item[0] + 1) / 2), pred.numpy()))\n + \"\\n\")\n\n else:\n for left, right in test_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n right = fluid.layers.reshape(right, shape=[-1, 1])\n left_feat, pred = net(left, right)\n\n pred_list += list(\n map(lambda item: float(item[0]), pred.numpy()))\n predictions_file.write(u\"\\n\".join(\n map(lambda item: str(np.argmax(item)), pred.numpy())) +\n \"\\n\")\n\n if args.task_mode == \"pairwise\":\n pred_list = np.array(pred_list).reshape((-1, 1))\n pred_list = (pred_list + 1) / 2\n pred_list = np.hstack(\n (np.ones_like(pred_list) - pred_list, pred_list))\n else:\n pred_list = np.array(pred_list)\n labels = simnet_process.get_test_label()\n\n metric.update(pred_list, labels)\n if args.compute_accuracy:\n acc = utils.get_accuracy(pred_list, labels, args.task_mode,\n args.lamda)\n logging.info(\"AUC of test is %f, Accuracy of test is %f\" %\n (metric.eval(), acc))\n else:\n logging.info(\"AUC of test is %f\" % metric.eval())\n\n if args.verbose_result:\n utils.get_result_file(args)\n logging.info(\"test result saved in %s\" %\n os.path.join(os.getcwd(), args.test_result_path))\n\n\ndef infer(conf_dict, args):\n \"\"\"\n run predict\n \"\"\"\n logging.info(\"start test process ...\")\n if args.use_cuda:\n place = fluid.CUDAPlace(0)\n else:\n place = fluid.CPUPlace()\n\n with fluid.dygraph.guard(place):\n vocab = utils.load_vocab(args.vocab_path)\n simnet_process = reader.SimNetProcessor(args, vocab)\n get_infer_examples = simnet_process.get_infer_reader\n infer_loader = fluid.io.DataLoader.from_generator(\n capacity=16,\n return_list=True,\n iterable=True,\n use_double_buffer=True)\n infer_loader.set_sample_list_generator(\n paddle.batch(\n get_infer_examples, batch_size=args.batch_size), place)\n\n conf_dict['dict_size'] = len(vocab)\n conf_dict['seq_len'] = args.seq_len\n\n net = utils.import_class(\"./nets\", conf_dict[\"net\"][\"module_name\"],\n conf_dict[\"net\"][\"class_name\"])(conf_dict)\n model, _ = load_dygraph(args.init_checkpoint)\n net.set_dict(model)\n\n pred_list = []\n if args.task_mode == \"pairwise\":\n for left, pos_right in infer_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n pos_right = fluid.layers.reshape(pos_right, shape=[-1, 1])\n\n left_feat, pos_score = net(left, pos_right)\n pred = pos_score\n pred_list += list(\n map(lambda item: str((item[0] + 1) / 2), pred.numpy()))\n\n else:\n for left, right in infer_loader():\n left = fluid.layers.reshape(left, shape=[-1, 1])\n pos_right = 
fluid.layers.reshape(right, shape=[-1, 1])\n left_feat, pred = net(left, right)\n pred_list += map(lambda item: str(np.argmax(item)),\n pred.numpy())\n\n with io.open(\n args.infer_result_path, \"w\", encoding=\"utf8\") as infer_file:\n for _data, _pred in zip(simnet_process.get_infer_data(), pred_list):\n infer_file.write(_data + \"\\t\" + _pred + \"\\n\")\n logging.info(\"infer result saved in %s\" %\n os.path.join(os.getcwd(), args.infer_result_path))\n\n\ndef get_cards():\n num = 0\n cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')\n if cards != '':\n num = len(cards.split(\",\"))\n return num\n\n\nif __name__ == \"__main__\":\n\n args = ArgConfig()\n args = args.build_conf()\n\n utils.print_arguments(args)\n check_cuda(args.use_cuda)\n check_version()\n utils.init_log(\"./log/TextSimilarityNet\")\n conf_dict = config.SimNetConfig(args)\n if args.do_train:\n train(conf_dict, args)\n elif args.do_test:\n test(conf_dict, args)\n elif args.do_infer:\n infer(conf_dict, args)\n else:\n raise ValueError(\n \"one of do_train and do_test and do_infer must be True\")\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport os\nimport sys\nimport argparse\nimport ast\nimport logging\nimport numpy as np\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph.base import to_variable\nfrom paddle.io import DataLoader, Dataset, DistributedBatchSampler\nfrom paddle.hapi.model import _all_gather\nfrom paddle.fluid.dygraph.parallel import ParallelEnv\n\nfrom model import *\nfrom config_utils import *\nfrom kinetics_dataset import KineticsDataset\n\nlogging.root.handlers = []\nFORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n \"SLOWFAST test for performance evaluation.\")\n parser.add_argument(\n '--config_file',\n type=str,\n default='slowfast.yaml',\n help='path to config file of model')\n parser.add_argument(\n '--batch_size',\n type=int,\n default=None,\n help='total eval batch size of all gpus.')\n parser.add_argument(\n '--use_gpu',\n type=ast.literal_eval,\n default=True,\n help='default use gpu.')\n parser.add_argument(\n '--use_data_parallel',\n type=ast.literal_eval,\n default=True,\n help='default use data parallel.')\n parser.add_argument(\n '--weights',\n type=str,\n default=None,\n help='Weight path, None to use config setting.')\n parser.add_argument(\n '--log_interval',\n type=int,\n default=1,\n help='mini-batch interval to log.')\n args = parser.parse_args()\n return args\n\n\n# Performance Evaluation\ndef test_slowfast(args):\n config = parse_config(args.config_file)\n test_config = merge_configs(config, 'test', vars(args))\n print_configs(test_config, \"Test\")\n\n if not args.use_gpu:\n place = fluid.CPUPlace()\n elif not args.use_data_parallel:\n place = fluid.CUDAPlace(0)\n else:\n place = 
fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id)\n\n _nranks = ParallelEnv().nranks # num gpu\n bs_single = int(test_config.TEST.batch_size /\n _nranks) # batch_size of each gpu\n\n with fluid.dygraph.guard(place):\n #build model\n slowfast = SlowFast(cfg=test_config, num_classes=400)\n if args.weights:\n assert os.path.exists(args.weights + '.pdparams'),\\\n \"Given weight dir {} not exist.\".format(args.weights)\n\n logger.info('load test weights from {}'.format(args.weights))\n model_dict, _ = fluid.load_dygraph(args.weights)\n slowfast.set_dict(model_dict)\n\n if args.use_data_parallel:\n strategy = fluid.dygraph.parallel.prepare_context()\n slowfast = fluid.dygraph.parallel.DataParallel(slowfast, strategy)\n\n #create reader\n test_data = KineticsDataset(mode=\"test\", cfg=test_config)\n test_sampler = DistributedBatchSampler(\n test_data, batch_size=bs_single, shuffle=False, drop_last=False)\n test_loader = DataLoader(\n test_data,\n batch_sampler=test_sampler,\n places=place,\n feed_list=None,\n num_workers=8,\n return_list=True)\n\n # start eval\n num_ensemble_views = test_config.TEST.num_ensemble_views\n num_spatial_crops = test_config.TEST.num_spatial_crops\n num_cls = test_config.MODEL.num_classes\n num_clips = num_ensemble_views * num_spatial_crops\n num_videos = len(test_data) // num_clips\n video_preds = np.zeros((num_videos, num_cls))\n video_labels = np.zeros((num_videos, 1), dtype=\"int64\")\n clip_count = {}\n\n print(\n \"[EVAL] eval start, number of videos {}, total number of clips {}\".\n format(num_videos, num_clips * num_videos))\n slowfast.eval()\n for batch_id, data in enumerate(test_loader):\n # call net\n model_inputs = [data[0], data[1]]\n preds = slowfast(model_inputs, training=False)\n labels = data[2]\n clip_ids = data[3]\n\n # gather mulit card, results of following process in each card is the same.\n if _nranks > 1:\n preds = _all_gather(preds, _nranks)\n labels = _all_gather(labels, _nranks)\n clip_ids = _all_gather(clip_ids, _nranks)\n\n # to numpy\n preds = preds.numpy()\n labels = labels.numpy().astype(\"int64\")\n clip_ids = clip_ids.numpy()\n\n # preds ensemble\n for ind in range(preds.shape[0]):\n vid_id = int(clip_ids[ind]) // num_clips\n ts_idx = int(clip_ids[ind]) % num_clips\n if vid_id not in clip_count:\n clip_count[vid_id] = []\n if ts_idx in clip_count[vid_id]:\n print(\n \"[EVAL] Passed!! read video {} clip index {} / {} repeatedly.\".\n format(vid_id, ts_idx, clip_ids[ind]))\n else:\n clip_count[vid_id].append(ts_idx)\n video_preds[vid_id] += preds[ind] # ensemble method: sum\n if video_labels[vid_id].sum() > 0:\n assert video_labels[vid_id] == labels[ind]\n video_labels[vid_id] = labels[ind]\n if batch_id % args.log_interval == 0:\n print(\"[EVAL] Processing batch {}/{} ...\".format(\n batch_id, len(test_data) // test_config.TEST.batch_size))\n\n # check clip index of each video\n for key in clip_count.keys():\n if len(clip_count[key]) != num_clips or sum(clip_count[\n key]) != num_clips * (num_clips - 1) / 2:\n print(\n \"[EVAL] Warning!! 
video [{}] clip count [{}] not match number clips {}\".\n format(key, clip_count[key], num_clips))\n\n video_preds = to_variable(video_preds)\n video_labels = to_variable(video_labels)\n acc_top1 = fluid.layers.accuracy(\n input=video_preds, label=video_labels, k=1)\n acc_top5 = fluid.layers.accuracy(\n input=video_preds, label=video_labels, k=5)\n print('[EVAL] eval finished, avg_acc1= {}, avg_acc5= {} '.format(\n acc_top1.numpy(), acc_top5.numpy()))\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n logger.info(args)\n test_slowfast(args)\n" ]
[ [ "numpy.exp", "numpy.sum" ], [ "numpy.array" ], [ "numpy.mean" ], [ "numpy.random.uniform", "numpy.copy", "numpy.array", "sklearn.model_selection.train_test_split" ], [ "numpy.random.seed" ], [ "numpy.ones_like", "numpy.argmax", "numpy.mean", "numpy.array", "numpy.vstack" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kathryn-Downey/DeepMergeDomainAdaptation
[ "334331ce8871cda80590cd9ec671941a82fa859c" ]
[ "galaxy_merge_edits/grad_cam.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n#\n# Author: Kazuto Nakashima\n# URL: http://kazuto1011.github.io\n# Created: 2017-05-26\n\nfrom collections import Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\n\n\nclass _BaseWrapper(object):\n def __init__(self, model):\n super(_BaseWrapper, self).__init__()\n self.device = next(model.parameters()).device\n self.model = model\n self.handlers = [] # a set of hook function handlers\n\n def _encode_one_hot(self, ids):\n one_hot = torch.zeros_like(self.logits).to(self.device)\n one_hot.scatter_(1, ids, 1.0)\n return one_hot\n\n def forward(self, image):\n self.image_shape = image.shape[2:]\n self.features, self.logits = self.model(image)\n self.probs = F.softmax(self.logits, dim=1)\n return self.probs.sort(dim=1, descending=True) # ordered results\n\n def backward(self, ids):\n \"\"\"\n Class-specific backpropagation\n \"\"\"\n one_hot = self._encode_one_hot(ids)\n self.model.zero_grad()\n self.logits.backward(gradient=one_hot, retain_graph=True)\n\n def generate(self):\n raise NotImplementedError\n\n def remove_hook(self):\n \"\"\"\n Remove all the forward/backward hook functions\n \"\"\"\n for handle in self.handlers:\n handle.remove()\n\n\nclass BackPropagation(_BaseWrapper):\n def forward(self, image):\n self.image = image.requires_grad_()\n return super(BackPropagation, self).forward(self.image)\n\n def generate(self):\n gradient = self.image.grad.clone()\n self.image.grad.zero_()\n return gradient\n\n\nclass GuidedBackPropagation(BackPropagation):\n \"\"\"\n \"Striving for Simplicity: the All Convolutional Net\"\n https://arxiv.org/pdf/1412.6806.pdf\n Look at Figure 1 on page 8.\n \"\"\"\n\n def __init__(self, model):\n super(GuidedBackPropagation, self).__init__(model)\n\n def backward_hook(module, grad_in, grad_out):\n # Cut off negative gradients\n if isinstance(module, nn.ReLU):\n return (F.relu(grad_in[0]),)\n\n for module in self.model.named_modules():\n self.handlers.append(module[1].register_backward_hook(backward_hook))\n\n\nclass Deconvnet(BackPropagation):\n \"\"\"\n \"Striving for Simplicity: the All Convolutional Net\"\n https://arxiv.org/pdf/1412.6806.pdf\n Look at Figure 1 on page 8.\n \"\"\"\n\n def __init__(self, model):\n super(Deconvnet, self).__init__(model)\n\n def backward_hook(module, grad_in, grad_out):\n # Cut off negative gradients and ignore ReLU\n if isinstance(module, nn.ReLU):\n return (F.relu(grad_out[0]),)\n\n for module in self.model.named_modules():\n self.handlers.append(module[1].register_backward_hook(backward_hook))\n\n\nclass GradCAM(_BaseWrapper):\n \"\"\"\n \"Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization\"\n https://arxiv.org/pdf/1610.02391.pdf\n Look at Figure 2 on page 4\n \"\"\"\n\n def __init__(self, model, candidate_layers=None):\n super(GradCAM, self).__init__(model)\n self.fmap_pool = {}\n self.grad_pool = {}\n self.candidate_layers = candidate_layers # list\n\n def save_fmaps(key):\n def forward_hook(module, input, output):\n\n #print(output) #try 0 or 1?\n self.fmap_pool[key] = output[0].detach() #since it outputs a tuple, what do we do\n\n return forward_hook\n\n def save_grads(key):\n def backward_hook(module, grad_in, grad_out):\n self.grad_pool[key] = grad_out[0].detach()\n\n return backward_hook\n\n # If any candidates are not specified, the hook is registered to all the layers.\n for name, module in self.model.named_modules():\n if self.candidate_layers is None or name 
in self.candidate_layers:\n self.handlers.append(module.register_forward_hook(save_fmaps(name)))\n self.handlers.append(module.register_backward_hook(save_grads(name)))\n\n def _find(self, pool, target_layer):\n if target_layer in pool.keys():\n return pool[target_layer]\n else:\n raise ValueError(\"Invalid layer name: {}\".format(target_layer))\n\n def generate(self, target_layer):\n fmaps = self._find(self.fmap_pool, target_layer)\n grads = self._find(self.grad_pool, target_layer)\n weights = F.adaptive_avg_pool2d(grads, 1)\n\n gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)\n gcam = F.relu(gcam)\n gcam = F.interpolate(\n gcam, self.image_shape, mode=\"bilinear\", align_corners=False\n )\n\n B, C, H, W = gcam.shape\n gcam = gcam.view(B, -1)\n gcam -= gcam.min(dim=1, keepdim=True)[0]\n gcam /= gcam.max(dim=1, keepdim=True)[0]\n gcam = gcam.view(B, C, H, W)\n\n return gcam\n\n\ndef occlusion_sensitivity(\n model, images, ids, mean=None, patch=35, stride=1, n_batches=128\n):\n \"\"\"\n \"Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization\"\n https://arxiv.org/pdf/1610.02391.pdf\n Look at Figure A5 on page 17\n\n Originally proposed in:\n \"Visualizing and Understanding Convolutional Networks\"\n https://arxiv.org/abs/1311.2901\n \"\"\"\n\n torch.set_grad_enabled(False)\n model.eval()\n mean = mean if mean else 0\n patch_H, patch_W = patch if isinstance(patch, Sequence) else (patch, patch)\n pad_H, pad_W = patch_H // 2, patch_W // 2\n\n # Padded image\n images = F.pad(images, (pad_W, pad_W, pad_H, pad_H), value=mean)\n B, _, H, W = images.shape\n new_H = (H - patch_H) // stride + 1\n new_W = (W - patch_W) // stride + 1\n\n # Prepare sampling grids\n anchors = []\n grid_h = 0\n while grid_h <= H - patch_H:\n grid_w = 0\n while grid_w <= W - patch_W:\n grid_w += stride\n anchors.append((grid_h, grid_w))\n grid_h += stride\n\n # Baseline score without occlusion\n baseline = model(images).detach().gather(1, ids)\n\n # Compute per-pixel logits\n scoremaps = []\n for i in tqdm(range(0, len(anchors), n_batches), leave=False):\n batch_images = []\n batch_ids = []\n for grid_h, grid_w in anchors[i : i + n_batches]:\n images_ = images.clone()\n images_[..., grid_h : grid_h + patch_H, grid_w : grid_w + patch_W] = mean\n batch_images.append(images_)\n batch_ids.append(ids)\n batch_images = torch.cat(batch_images, dim=0)\n batch_ids = torch.cat(batch_ids, dim=0)\n scores = model(batch_images).detach().gather(1, batch_ids)\n scoremaps += list(torch.split(scores, B))\n\n diffmaps = torch.cat(scoremaps, dim=1) - baseline\n diffmaps = diffmaps.view(B, new_H, new_W)\n\n return diffmaps\n" ]
[ [ "torch.nn.functional.softmax", "torch.cat", "torch.zeros_like", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.functional.relu", "torch.set_grad_enabled", "torch.mul", "torch.nn.functional.interpolate", "torch.split", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
enthought/numpy-refactor
[ "209866bc55eee56e92692307c4437af024bae87d", "f4eb215d2e5c5a37c0c2f279f506abc242dbb38b", "f4eb215d2e5c5a37c0c2f279f506abc242dbb38b" ]
[ "numpy/fft/tests/test_helper.py", "numpy/core/tests/test_getlimits.py", "numpy/distutils/ccompiler.py" ]
[ "#!/usr/bin/env python\n# Copied from fftpack.helper by Pearu Peterson, October 2005\n\"\"\" Test functions for fftpack.helper module\n\"\"\"\n\nfrom numpy.testing import *\nfrom numpy.fft import fftshift,ifftshift,fftfreq\n\nfrom numpy import pi\n\ndef random(size):\n return rand(*size)\n\nclass TestFFTShift(TestCase):\n def test_definition(self):\n x = [0,1,2,3,4,-4,-3,-2,-1]\n y = [-4,-3,-2,-1,0,1,2,3,4]\n assert_array_almost_equal(fftshift(x),y)\n assert_array_almost_equal(ifftshift(y),x)\n x = [0,1,2,3,4,-5,-4,-3,-2,-1]\n y = [-5,-4,-3,-2,-1,0,1,2,3,4]\n assert_array_almost_equal(fftshift(x),y)\n assert_array_almost_equal(ifftshift(y),x)\n\n def test_inverse(self):\n for n in [1,4,9,100,211]:\n x = random((n,))\n assert_array_almost_equal(ifftshift(fftshift(x)),x)\n\n\nclass TestFFTFreq(TestCase):\n def test_definition(self):\n x = [0,1,2,3,4,-4,-3,-2,-1]\n assert_array_almost_equal(9*fftfreq(9),x)\n assert_array_almost_equal(9*pi*fftfreq(9,pi),x)\n x = [0,1,2,3,4,-5,-4,-3,-2,-1]\n assert_array_almost_equal(10*fftfreq(10),x)\n assert_array_almost_equal(10*pi*fftfreq(10,pi),x)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "\"\"\" Test functions for limits module.\n\"\"\"\n\nfrom numpy.testing import *\n\nimport numpy.lib\ntry:\n reload(numpy.lib)\nexcept NameError:\n # Py3K\n import imp\n imp.reload(numpy.lib)\n\nfrom numpy.core import finfo, iinfo\nfrom numpy import single,double,longdouble\nimport numpy as np\n\n##################################################\n\nclass TestPythonFloat(TestCase):\n def test_singleton(self):\n ftype = finfo(float)\n ftype2 = finfo(float)\n assert_equal(id(ftype),id(ftype2))\n\nclass TestSingle(TestCase):\n def test_singleton(self):\n ftype = finfo(single)\n ftype2 = finfo(single)\n assert_equal(id(ftype),id(ftype2))\n\nclass TestDouble(TestCase):\n def test_singleton(self):\n ftype = finfo(double)\n ftype2 = finfo(double)\n assert_equal(id(ftype),id(ftype2))\n\nclass TestLongdouble(TestCase):\n def test_singleton(self,level=2):\n ftype = finfo(longdouble)\n ftype2 = finfo(longdouble)\n assert_equal(id(ftype),id(ftype2))\n\nclass TestIinfo(TestCase):\n def test_basic(self):\n dts = zip(['i1', 'i2', 'i4', 'i8',\n 'u1', 'u2', 'u4', 'u8'],\n [np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64])\n for dt1, dt2 in dts:\n assert_equal(iinfo(dt1).min, iinfo(dt2).min)\n assert_equal(iinfo(dt1).max, iinfo(dt2).max)\n self.assertRaises(ValueError, iinfo, 'f4')\n\n def test_unsigned_max(self):\n types = np.sctypes['uint']\n for T in types:\n assert_equal(iinfo(T).max, T(-1))\n\n\ndef test_instances():\n iinfo(10)\n finfo(3.0)\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "import re\nimport os\nimport sys\nimport types\nfrom copy import copy\n\nfrom distutils.ccompiler import *\nfrom distutils import ccompiler\nfrom distutils.errors import DistutilsExecError, DistutilsModuleError, \\\n DistutilsPlatformError\nfrom distutils.sysconfig import customize_compiler\nfrom distutils.version import LooseVersion\n\nfrom numpy.distutils import log\nfrom numpy.distutils.exec_command import exec_command\nfrom numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \\\n quote_args, msvc_on_amd64\nfrom numpy.distutils.compat import get_exception\n\n# hack to set compiler optimizing options. 
Needs to integrated with something.\nimport distutils.sysconfig\n_old_init_posix = distutils.sysconfig._init_posix\ndef _new_init_posix():\n _old_init_posix()\n distutils.sysconfig._config_vars['OPT'] = '-Wall -g -O0'\n#distutils.sysconfig._init_posix = _new_init_posix\n\ndef replace_method(klass, method_name, func):\n if sys.version_info[0] < 3:\n m = types.MethodType(func, None, klass)\n else:\n # Py3k does not have unbound method anymore, MethodType does not work\n m = lambda self, *args, **kw: func(self, *args, **kw)\n setattr(klass, method_name, m)\n\n# Using customized CCompiler.spawn.\ndef CCompiler_spawn(self, cmd, display=None):\n \"\"\"\n Execute a command in a sub-process.\n\n Parameters\n ----------\n cmd : str\n The command to execute.\n display : str or sequence of str, optional\n The text to add to the log file kept by `numpy.distutils`.\n If not given, `display` is equal to `cmd`.\n\n Returns\n -------\n None\n\n Raises\n ------\n DistutilsExecError\n If the command failed, i.e. the exit status was not 0.\n\n \"\"\"\n if display is None:\n display = cmd\n if is_sequence(display):\n display = ' '.join(list(display))\n log.info(display)\n s,o = exec_command(cmd)\n if s:\n if is_sequence(cmd):\n cmd = ' '.join(list(cmd))\n print(o)\n if re.search('Too many open files', o):\n msg = '\\nTry rerunning setup command until build succeeds.'\n else:\n msg = ''\n raise DistutilsExecError('Command \"%s\" failed with exit status %d%s' % (cmd, s, msg))\n\nreplace_method(CCompiler, 'spawn', CCompiler_spawn)\n\ndef CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):\n \"\"\"\n Return the name of the object files for the given source files.\n\n Parameters\n ----------\n source_filenames : list of str\n The list of paths to source files. Paths can be either relative or\n absolute, this is handled transparently.\n strip_dir : bool, optional\n Whether to strip the directory from the returned paths. If True,\n the file name prepended by `output_dir` is returned. 
Default is False.\n output_dir : str, optional\n If given, this path is prepended to the returned paths to the\n object files.\n\n Returns\n -------\n obj_names : list of str\n The list of paths to the object files corresponding to the source\n files in `source_filenames`.\n\n \"\"\"\n if output_dir is None:\n output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n base, ext = os.path.splitext(os.path.normpath(src_name))\n base = os.path.splitdrive(base)[1] # Chop off the drive\n base = base[os.path.isabs(base):] # If abs, chop off leading /\n if base.startswith('..'):\n # Resolve starting relative path components, middle ones\n # (if any) have been handled by os.path.normpath above.\n i = base.rfind('..')+2\n d = base[:i]\n d = os.path.basename(os.path.abspath(d))\n base = d + base[i:]\n if ext not in self.src_extensions:\n raise UnknownFileError(\"unknown file type '%s' (from '%s')\" % (ext, src_name))\n if strip_dir:\n base = os.path.basename(base)\n obj_name = os.path.join(output_dir,base + self.obj_extension)\n obj_names.append(obj_name)\n return obj_names\n\nreplace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)\n\ndef CCompiler_compile(self, sources, output_dir=None, macros=None,\n include_dirs=None, debug=0, extra_preargs=None,\n extra_postargs=None, depends=None):\n \"\"\"\n Compile one or more source files.\n\n Please refer to the Python distutils API reference for more details.\n\n Parameters\n ----------\n sources : list of str\n A list of filenames\n output_dir : str, optional\n Path to the output directory.\n macros : list of tuples\n A list of macro definitions.\n include_dirs : list of str, optional\n The directories to add to the default include file search path for\n this compilation only.\n debug : bool, optional\n Whether or not to output debug symbols in or alongside the object\n file(s).\n extra_preargs, extra_postargs : ?\n Extra pre- and post-arguments.\n depends : list of str, optional\n A list of file names that all targets depend on.\n\n Returns\n -------\n objects : list of str\n A list of object file names, one per source file `sources`.\n\n Raises\n ------\n CompileError\n If compilation fails.\n\n \"\"\"\n # This method is effective only with Python >=2.3 distutils.\n # Any changes here should be applied also to fcompiler.compile\n # method to support pre Python 2.3 distutils.\n if not sources:\n return []\n # FIXME:RELATIVE_IMPORT\n if sys.version_info[0] < 3:\n from fcompiler import FCompiler\n else:\n from numpy.distutils.fcompiler import FCompiler\n if isinstance(self, FCompiler):\n display = []\n for fc in ['f77','f90','fix']:\n fcomp = getattr(self,'compiler_'+fc)\n if fcomp is None:\n continue\n display.append(\"Fortran %s compiler: %s\" % (fc, ' '.join(fcomp)))\n display = '\\n'.join(display)\n else:\n ccomp = self.compiler_so\n display = \"C compiler: %s\\n\" % (' '.join(ccomp),)\n log.info(display)\n macros, objects, extra_postargs, pp_opts, build = \\\n self._setup_compile(output_dir, macros, include_dirs, sources,\n depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n display = \"compile options: '%s'\" % (' '.join(cc_args))\n if extra_postargs:\n display += \"\\nextra options: '%s'\" % (' '.join(extra_postargs))\n log.info(display)\n\n # build any sources in same order as they were originally specified\n # especially important for fortran .f90 files using modules\n if isinstance(self, FCompiler):\n objects_to_build = build.keys()\n for obj in objects:\n if obj in 
objects_to_build:\n src, ext = build[obj]\n if self.compiler_type=='absoft':\n obj = cyg2win32(obj)\n src = cyg2win32(src)\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n else:\n for obj, (src, ext) in build.items():\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # Return *all* object filenames, not just the ones we just built.\n return objects\n\nreplace_method(CCompiler, 'compile', CCompiler_compile)\n\ndef CCompiler_customize_cmd(self, cmd, ignore=()):\n \"\"\"\n Customize compiler using distutils command.\n\n Parameters\n ----------\n cmd : class instance\n An instance inheriting from `distutils.cmd.Command`.\n ignore : sequence of str, optional\n List of `CCompiler` commands (without ``'set_'``) that should not be\n altered. Strings that are checked for are:\n ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',\n 'rpath', 'link_objects')``.\n\n Returns\n -------\n None\n\n \"\"\"\n log.info('customize %s using %s' % (self.__class__.__name__,\n cmd.__class__.__name__))\n def allow(attr):\n return getattr(cmd, attr, None) is not None and attr not in ignore\n\n if allow('include_dirs'):\n self.set_include_dirs(cmd.include_dirs)\n if allow('define'):\n for (name,value) in cmd.define:\n self.define_macro(name, value)\n if allow('undef'):\n for macro in cmd.undef:\n self.undefine_macro(macro)\n if allow('libraries'):\n self.set_libraries(self.libraries + cmd.libraries)\n if allow('library_dirs'):\n self.set_library_dirs(self.library_dirs + cmd.library_dirs)\n if allow('rpath'):\n self.set_runtime_library_dirs(cmd.rpath)\n if allow('link_objects'):\n self.set_link_objects(cmd.link_objects)\n\nreplace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)\n\ndef _compiler_to_string(compiler):\n props = []\n mx = 0\n keys = compiler.executables.keys()\n for key in ['version','libraries','library_dirs',\n 'object_switch','compile_switch',\n 'include_dirs','define','undef','rpath','link_objects']:\n if key not in keys:\n keys.append(key)\n for key in keys:\n if hasattr(compiler,key):\n v = getattr(compiler, key)\n mx = max(mx,len(key))\n props.append((key,repr(v)))\n lines = []\n format = '%-' + repr(mx+1) + 's = %s'\n for prop in props:\n lines.append(format % prop)\n return '\\n'.join(lines)\n\ndef CCompiler_show_customization(self):\n \"\"\"\n Print the compiler customizations to stdout.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n Notes\n -----\n Printing is only done if the distutils log threshold is < 2.\n\n \"\"\"\n if 0:\n for attrname in ['include_dirs','define','undef',\n 'libraries','library_dirs',\n 'rpath','link_objects']:\n attr = getattr(self,attrname,None)\n if not attr:\n continue\n log.info(\"compiler '%s' is set to %s\" % (attrname,attr))\n try:\n self.get_version()\n except:\n pass\n if log._global_log.threshold<2:\n print('*'*80)\n print(self.__class__)\n print(_compiler_to_string(self))\n print('*'*80)\n\nreplace_method(CCompiler, 'show_customization', CCompiler_show_customization)\n\ndef CCompiler_customize(self, dist, need_cxx=0):\n \"\"\"\n Do any platform-specific customization of a compiler instance.\n\n This method calls `distutils.sysconfig.customize_compiler` for\n platform-specific customization, as well as optionally remove a flag\n to suppress spurious warnings in case C++ code is being compiled.\n\n Parameters\n ----------\n dist : object\n This parameter is not used for anything.\n need_cxx : bool, optional\n Whether or not C++ has to be compiled. 
If so (True), the\n ``\"-Wstrict-prototypes\"`` option is removed to prevent spurious\n warnings. Default is False.\n\n Returns\n -------\n None\n\n Notes\n -----\n All the default options used by distutils can be extracted with::\n\n from distutils import sysconfig\n sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',\n 'CCSHARED', 'LDSHARED', 'SO')\n\n \"\"\"\n # See FCompiler.customize for suggested usage.\n log.info('customize %s' % (self.__class__.__name__))\n customize_compiler(self)\n if need_cxx:\n # In general, distutils uses -Wstrict-prototypes, but this option is\n # not valid for C++ code, only for C. Remove it if it's there to\n # avoid a spurious warning on every compilation. All the default\n # options used by distutils can be extracted with:\n\n # from distutils import sysconfig\n # sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',\n # 'CCSHARED', 'LDSHARED', 'SO')\n try:\n self.compiler_so.remove('-Wstrict-prototypes')\n except (AttributeError, ValueError):\n pass\n\n if hasattr(self,'compiler') and 'cc' in self.compiler[0]:\n if not self.compiler_cxx:\n if self.compiler[0].startswith('gcc'):\n a, b = 'gcc', 'g++'\n else:\n a, b = 'cc', 'c++'\n self.compiler_cxx = [self.compiler[0].replace(a,b)]\\\n + self.compiler[1:]\n else:\n if hasattr(self,'compiler'):\n log.warn(\"#### %s #######\" % (self.compiler,))\n log.warn('Missing compiler_cxx fix for '+self.__class__.__name__)\n return\n\nreplace_method(CCompiler, 'customize', CCompiler_customize)\n\ndef simple_version_match(pat=r'[-.\\d]+', ignore='', start=''):\n \"\"\"\n Simple matching of version numbers, for use in CCompiler and FCompiler.\n\n Parameters\n ----------\n pat : str, optional\n A regular expression matching version numbers.\n Default is ``r'[-.\\\\d]+'``.\n ignore : str, optional\n A regular expression matching patterns to skip.\n Default is ``''``, in which case nothing is skipped.\n start : str, optional\n A regular expression matching the start of where to start looking\n for version numbers.\n Default is ``''``, in which case searching is started at the\n beginning of the version string given to `matcher`.\n\n Returns\n -------\n matcher : callable\n A function that is appropriate to use as the ``.version_match``\n attribute of a `CCompiler` class. `matcher` takes a single parameter,\n a version string.\n\n \"\"\"\n def matcher(self, version_string):\n # version string may appear in the second line, so getting rid\n # of new lines:\n version_string = version_string.replace('\\n',' ')\n pos = 0\n if start:\n m = re.match(start, version_string)\n if not m:\n return None\n pos = m.end()\n while 1:\n m = re.search(pat, version_string[pos:])\n if not m:\n return None\n if ignore and re.match(ignore, m.group(0)):\n pos = m.end()\n continue\n break\n return m.group(0)\n return matcher\n\ndef CCompiler_get_version(self, force=False, ok_status=[0]):\n \"\"\"\n Return compiler version, or None if compiler is not available.\n\n Parameters\n ----------\n force : bool, optional\n If True, force a new determination of the version, even if the\n compiler already has a version attribute. Default is False.\n ok_status : list of int, optional\n The list of status values returned by the version look-up process\n for which a version string is returned. If the status value is not\n in `ok_status`, None is returned. 
Default is ``[0]``.\n\n Returns\n -------\n version : str or None\n Version string, in the format of `distutils.version.LooseVersion`.\n\n \"\"\"\n if not force and hasattr(self,'version'):\n return self.version\n self.find_executables()\n try:\n version_cmd = self.version_cmd\n except AttributeError:\n return None\n if not version_cmd or not version_cmd[0]:\n return None\n try:\n matcher = self.version_match\n except AttributeError:\n try:\n pat = self.version_pattern\n except AttributeError:\n return None\n def matcher(version_string):\n m = re.match(pat, version_string)\n if not m:\n return None\n version = m.group('version')\n return version\n\n status, output = exec_command(version_cmd,use_tee=0)\n\n version = None\n if status in ok_status:\n version = matcher(output)\n if version:\n version = LooseVersion(version)\n self.version = version\n return version\n\nreplace_method(CCompiler, 'get_version', CCompiler_get_version)\n\ndef CCompiler_cxx_compiler(self):\n \"\"\"\n Return the C++ compiler.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n cxx : class instance\n The C++ compiler, as a `CCompiler` instance.\n\n \"\"\"\n if self.compiler_type=='msvc': return self\n cxx = copy(self)\n cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]\n if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:\n # AIX needs the ld_so_aix script included with Python\n cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \\\n + cxx.linker_so[2:]\n else:\n cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]\n return cxx\n\nreplace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)\n\ncompiler_class['intel'] = ('intelccompiler','IntelCCompiler',\n \"Intel C Compiler for 32-bit applications\")\ncompiler_class['intele'] = ('intelccompiler','IntelItaniumCCompiler',\n \"Intel C Itanium Compiler for Itanium-based applications\")\nccompiler._default_compilers += (('linux.*','intel'),('linux.*','intele'))\n\nif sys.platform == 'win32':\n compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',\n \"Mingw32 port of GNU C Compiler for Win32\"\\\n \"(for MSC built Python)\")\n if mingw32():\n # On windows platforms, we want to default to mingw32 (gcc)\n # because msvc can't build blitz stuff.\n log.info('Setting mingw32 as default compiler for nt.')\n ccompiler._default_compilers = (('nt', 'mingw32'),) \\\n + ccompiler._default_compilers\n\n\n_distutils_new_compiler = new_compiler\ndef new_compiler (plat=None,\n compiler=None,\n verbose=0,\n dry_run=0,\n force=0):\n # Try first C compilers from numpy.distutils.\n if plat is None:\n plat = os.name\n try:\n if compiler is None:\n compiler = get_default_compiler(plat)\n (module_name, class_name, long_description) = compiler_class[compiler]\n except KeyError:\n msg = \"don't know how to compile C/C++ code on platform '%s'\" % plat\n if compiler is not None:\n msg = msg + \" with '%s' compiler\" % compiler\n raise DistutilsPlatformError(msg)\n module_name = \"numpy.distutils.\" + module_name\n try:\n __import__ (module_name)\n except ImportError:\n msg = str(get_exception())\n log.info('%s in numpy.distutils; trying from distutils',\n str(msg))\n module_name = module_name[6:]\n try:\n __import__(module_name)\n except ImportError:\n msg = str(get_exception())\n raise DistutilsModuleError(\"can't compile C/C++ code: unable to load module '%s'\" % \\\n module_name)\n try:\n module = sys.modules[module_name]\n klass = vars(module)[class_name]\n except KeyError:\n raise DistutilsModuleError((\"can't compile C/C++ 
code: unable to find class '%s' \" +\n \"in module '%s'\") % (class_name, module_name))\n compiler = klass(None, dry_run, force)\n log.debug('new_compiler returns %s' % (klass))\n return compiler\n\nccompiler.new_compiler = new_compiler\n\n_distutils_gen_lib_options = gen_lib_options\ndef gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):\n library_dirs = quote_args(library_dirs)\n runtime_library_dirs = quote_args(runtime_library_dirs)\n r = _distutils_gen_lib_options(compiler, library_dirs,\n runtime_library_dirs, libraries)\n lib_opts = []\n for i in r:\n if is_sequence(i):\n lib_opts.extend(list(i))\n else:\n lib_opts.append(i)\n return lib_opts\nccompiler.gen_lib_options = gen_lib_options\n\n# Also fix up the various compiler modules, which do\n# from distutils.ccompiler import gen_lib_options\n# Don't bother with mwerks, as we don't support Classic Mac.\nfor _cc in ['msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:\n _m = sys.modules.get('distutils.'+_cc+'compiler')\n if _m is not None:\n setattr(_m, 'gen_lib_options', gen_lib_options)\n\n_distutils_gen_preprocess_options = gen_preprocess_options\ndef gen_preprocess_options (macros, include_dirs):\n include_dirs = quote_args(include_dirs)\n return _distutils_gen_preprocess_options(macros, include_dirs)\nccompiler.gen_preprocess_options = gen_preprocess_options\n\n##Fix distutils.util.split_quoted:\n# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears\n# that removing this fix causes f2py problems on Windows XP (see ticket #723).\n# Specifically, on WinXP when gfortran is installed in a directory path, which\n# contains spaces, then f2py is unable to find it.\nimport re\nimport string\n_wordchars_re = re.compile(r'[^\\\\\\'\\\"%s ]*' % string.whitespace)\n_squote_re = re.compile(r\"'(?:[^'\\\\]|\\\\.)*'\")\n_dquote_re = re.compile(r'\"(?:[^\"\\\\]|\\\\.)*\"')\n_has_white_re = re.compile(r'\\s')\ndef split_quoted(s):\n s = s.strip()\n words = []\n pos = 0\n\n while s:\n m = _wordchars_re.match(s, pos)\n end = m.end()\n if end == len(s):\n words.append(s[:end])\n break\n\n if s[end] in string.whitespace: # unescaped, unquoted whitespace: now\n words.append(s[:end]) # we definitely have a word delimiter\n s = s[end:].lstrip()\n pos = 0\n\n elif s[end] == '\\\\': # preserve whatever is being escaped;\n # will become part of the current word\n s = s[:end] + s[end+1:]\n pos = end+1\n\n else:\n if s[end] == \"'\": # slurp singly-quoted string\n m = _squote_re.match(s, end)\n elif s[end] == '\"': # slurp doubly-quoted string\n m = _dquote_re.match(s, end)\n else:\n raise RuntimeError(\"this can't happen (bad char '%c')\" % s[end])\n\n if m is None:\n raise ValueError(\"bad string (mismatched %s quotes?)\" % s[end])\n\n (beg, end) = m.span()\n if _has_white_re.search(s[beg+1:end-1]):\n s = s[:beg] + s[beg+1:end-1] + s[end:]\n pos = m.end() - 2\n else:\n # Keeping quotes when a quoted word does not contain\n # white-space. XXX: send a patch to distutils\n pos = m.end()\n\n if pos >= len(s):\n words.append(s)\n break\n\n return words\nccompiler.split_quoted = split_quoted\n##Fix distutils.util.split_quoted:\n\n# define DISTUTILS_USE_SDK when necessary to workaround distutils/msvccompiler.py bug\nmsvc_on_amd64()\n" ]
[ [ "numpy.fft.ifftshift", "numpy.fft.fftshift", "numpy.fft.fftfreq" ], [ "numpy.core.finfo", "numpy.core.iinfo" ], [ "numpy.distutils.log.debug", "numpy.distutils.exec_command.exec_command", "numpy.distutils.misc_util.msvc_on_amd64", "numpy.distutils.compat.get_exception", "numpy.distutils.misc_util.mingw32", "numpy.distutils.misc_util.is_sequence", "numpy.distutils.log.warn", "numpy.distutils.misc_util.quote_args", "numpy.distutils.misc_util.cyg2win32", "numpy.distutils.log.info" ] ]
[ { "matplotlib": [], "numpy": [ "1.6", "1.11", "1.10", "1.12", "1.19", "1.13", "1.16", "1.9", "1.18", "1.21", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.11", "1.10", "1.12", "1.19", "1.13", "1.16", "1.9", "1.18", "1.21", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.24", "1.22", "1.23" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
chienerh/EPN_PointCloud
[ "d1488cf1ff82a5bc7ac89c28df30fa2f3f2e0e30", "d1488cf1ff82a5bc7ac89c28df30fa2f3f2e0e30", "d1488cf1ff82a5bc7ac89c28df30fa2f3f2e0e30" ]
[ "SPConvNets/models/pointnet_epn_netvlad.py", "vgtk/vgtk/app/trainer.py", "SPConvNets/utils/loading_pointclouds.py" ]
[ "\"\"\"\nCode taken from https://github.com/cattaneod/PointNetVlad-Pytorch/blob/master/models/PointNetVlad.py\n\"\"\"\n\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn.functional as F\nimport math\nimport SPConvNets.models.pr_so3net_pn as pr_so3net_pn\nimport SPConvNets.utils as M\n\n\nclass STN3d(nn.Module):\n def __init__(self, num_points=2500, k=3, use_bn=True):\n super(STN3d, self).__init__()\n self.k = k\n self.kernel_size = 3 if k == 3 else 1\n self.channels = 1 if k == 3 else k\n self.num_points = num_points\n self.use_bn = use_bn\n self.conv1 = torch.nn.Conv2d(self.channels, 64, (1, self.kernel_size))\n self.conv2 = torch.nn.Conv2d(64, 128, (1,1))\n self.conv3 = torch.nn.Conv2d(128, 1024, (1,1))\n self.mp1 = torch.nn.MaxPool2d((num_points, 1), 1)\n self.fc1 = nn.Linear(1024, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, k*k)\n self.fc3.weight.data.zero_()\n self.fc3.bias.data.zero_()\n self.relu = nn.ReLU()\n\n if use_bn:\n self.bn1 = nn.BatchNorm2d(64)\n self.bn2 = nn.BatchNorm2d(128)\n self.bn3 = nn.BatchNorm2d(1024)\n self.bn4 = nn.BatchNorm1d(512)\n self.bn5 = nn.BatchNorm1d(256)\n\n def forward(self, x):\n batchsize = x.size()[0]\n if self.use_bn:\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n else:\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = self.mp1(x)\n x = x.view(-1, 1024)\n\n if self.use_bn:\n x = F.relu(self.bn4(self.fc1(x)))\n x = F.relu(self.bn5(self.fc2(x)))\n else:\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n iden = Variable(torch.from_numpy(np.eye(self.k).astype(np.float32))).view(\n 1, self.k*self.k).repeat(batchsize, 1)\n if x.is_cuda:\n iden = iden.cuda()\n x = x + iden\n x = x.view(-1, self.k, self.k)\n return x\n\n\nclass PointNetfeat(nn.Module):\n def __init__(self, num_points=2500, global_feat=True, feature_transform=False, max_pool=True):\n super(PointNetfeat, self).__init__()\n self.stn = STN3d(num_points=num_points, k=3, use_bn=False)\n self.feature_trans = STN3d(num_points=num_points, k=64, use_bn=False)\n self.apply_feature_trans = feature_transform\n self.conv1 = torch.nn.Conv2d(1, 64, (1, 3), stride=2)\n self.conv2 = torch.nn.Conv2d(64, 64, (1, 1), stride=2)\n self.conv3 = torch.nn.Conv2d(64, 64, (1, 1), stride=2)\n self.conv4 = torch.nn.Conv2d(64, 128, (1, 1), stride=2)\n self.conv5 = torch.nn.Conv2d(128, 1024, (1, 1), stride=1)\n self.bn1 = nn.BatchNorm2d(64)\n self.bn2 = nn.BatchNorm2d(64)\n self.bn3 = nn.BatchNorm2d(64)\n self.bn4 = nn.BatchNorm2d(128)\n self.bn5 = nn.BatchNorm2d(1024)\n self.mp1 = torch.nn.MaxPool2d((num_points, 1), 1)\n self.num_points = num_points\n self.global_feat = global_feat\n self.max_pool = max_pool\n\n def forward(self, x):\n '''\n INPUT: (22, 1, 4096, 3) [Bx(1+P+N+1), 1, N, D]\n OUTPUT: (22, 1024, 2096, 1) if not max pool\n '''\n batchsize = x.size()[0]\n trans = self.stn(x) # 22, 3, 3\n x = torch.matmul(torch.squeeze(x), trans) # 22, 4096, 3\n x = x.view(batchsize, 1, -1, 3) # 22, 1, 4096, 3\n x = F.relu(self.bn1(self.conv1(x))) # 22, 64, 4096, 1\n x = F.relu(self.bn2(self.conv2(x))) # 22, 64, 4096, 1\n pointfeat = x\n if self.apply_feature_trans:\n f_trans = self.feature_trans(x)\n x = torch.squeeze(x)\n if batchsize == 1:\n x = torch.unsqueeze(x, 0)\n x = torch.matmul(x.transpose(1, 2), f_trans)\n x = 
x.transpose(1, 2).contiguous()\n x = x.view(batchsize, 64, -1, 1)\n x = F.relu(self.bn3(self.conv3(x)))\n x = F.relu(self.bn4(self.conv4(x)))\n x = self.bn5(self.conv5(x))\n if not self.max_pool:\n return x\n else:\n x = self.mp1(x)\n x = x.view(-1, 1024)\n if self.global_feat:\n return x, trans\n else:\n x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)\n return torch.cat([x, pointfeat], 1), trans\n\n\nclass PointNetEPN_NetVLAD(nn.Module):\n def __init__(self, opt):\n super(PointNetEPN_NetVLAD, self).__init__()\n self.opt = opt\n self.point_net = PointNetfeat(num_points=4096, global_feat=True,\n feature_transform=False, max_pool=False)\n mlps=[[64,64], [128,128]]\n out_mlps=[128, 1024]\n self.epn = pr_so3net_pn.build_model(self.opt, mlps=mlps, out_mlps=out_mlps)\n self.net_vlad = M.NetVLADLoupe(feature_size=1024, max_samples=2*self.opt.num_selected_points, cluster_size=64,\n output_dim=self.opt.global_feature_dim, gating=True, add_batch_norm=True,\n is_training=True)\n\n def forward(self, x):\n # print('x', x.shape)\n x_unsqueeze = x.unsqueeze(1)\n x_pointnet = self.point_net(x_unsqueeze) # Bx(1+P+N+1), LOCAL_DIM, N, 1\n # print('x_pointnet', x_pointnet.shape)\n x_pointnet = x_pointnet.transpose(1, 3).contiguous()\n x_pointnet = x_pointnet.view((-1, self.opt.num_selected_points, 1024))\n # print('x_pointnet', x_pointnet.shape)\n x_epn, _ = self.epn(x)\n # print('x_epn', x_epn.shape)\n x_frontend = torch.cat((x_pointnet, x_epn), 1) # Where to concatenate?\n # print('x_frontend', x_frontend.shape)\n x = self.net_vlad(x_frontend)\n return x, x_frontend\n\n\nclass PointNetVLAD_EPNNetVLAD(nn.Module):\n def __init__(self, opt):\n super(PointNetVLAD_EPNNetVLAD, self).__init__()\n self.opt = opt\n self.point_net = PointNetfeat(num_points=4096, global_feat=True,\n feature_transform=False, max_pool=False)\n self.net_vlad1 = M.NetVLADLoupe(feature_size=1024, max_samples=4096, cluster_size=64,\n output_dim=self.opt.global_feature_dim//2, gating=True, add_batch_norm=True,\n is_training=True)\n mlps=[[64,64], [128,128]]\n out_mlps=[128, 1024]\n self.epn = pr_so3net_pn.build_model(self.opt, mlps=mlps, out_mlps=out_mlps)\n self.net_vlad2 = M.NetVLADLoupe(feature_size=self.opt.model.output_num, max_samples=self.opt.num_selected_points, cluster_size=64,\n output_dim=self.opt.global_feature_dim//2, gating=True, add_batch_norm=True,\n is_training=True)\n\n def forward(self, x):\n # print('x input', x.shape)\n # PointNetVLAD\n x_unsqueeze = x.unsqueeze(1)\n x_pointnet = self.point_net(x_unsqueeze) # Bx(1+P+N+1), LOCAL_DIM, N, 1\n # print('x_pointnet', x_pointnet.shape)\n x_pointnet = x_pointnet.transpose(1, 3).contiguous()\n x_pointnet = x_pointnet.view((-1, 4096, 1024))\n # print('x_pointnet reshaped', x_pointnet.shape)\n x_pointnetvlad = self.net_vlad1(x_pointnet)\n # print('x_pointnetvlad', x_pointnetvlad.shape)\n # EPNNetVLAD\n x_epn, _ = self.epn(x)\n # print('x_epn', x_epn.shape)\n x_epnnetvlad = self.net_vlad2(x_epn)\n # print('x_epnnetvlad', x_epnnetvlad.shape)\n\n x_output = torch.cat((x_pointnetvlad, x_epnnetvlad), 1)\n x_frontend = torch.cat((x_pointnet, x_epn), 1)\n # print('x_output', x_output.shape)\n return x_output, x_frontend", "\nimport os\nimport time\nimport json\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport vgtk\n\n\n# TODO add dataparallel\n# TODO add the_world = ipdb.set_trace\n\nclass Trainer():\n def __init__(self, opt):\n super(Trainer, self).__init__()\n\n opt_dict = vgtk.dump_args(opt)\n self.check_opt(opt)\n\n # 
set random seed\n random.seed(self.opt.seed)\n np.random.seed(self.opt.seed)\n torch.backends.cudnn.deterministic = True\n torch.manual_seed(self.opt.seed)\n torch.cuda.manual_seed_all(self.opt.seed)\n # np.set_printoptions(precision=3, suppress=True)\n\n # create model dir\n experiment_id = self.opt.experiment_id if self.opt.mode == 'train' else f\"{self.opt.experiment_id}_{self.opt.mode}\"\n model_id = f'model_{time.strftime(\"%Y%m%d_%H:%M:%S\")}'\n self.root_dir = os.path.join(self.opt.model_dir, experiment_id, model_id)\n os.makedirs(self.root_dir, exist_ok=True)\n\n # saving opt\n opt_path = os.path.join(self.root_dir, 'opt.txt')\n # TODO: hierarchical args are not compatible wit json dump\n with open(opt_path, 'w') as fout:\n json.dump(opt_dict, fout, indent=2)\n\n # create logger\n log_path = os.path.join(self.root_dir, 'log.txt')\n self.logger = vgtk.Logger(log_file=log_path)\n self.logger.log('Setup', f'Logger created! Hello World!')\n self.logger.log('Setup', f'Random seed has been set to {self.opt.seed}')\n self.logger.log('Setup', f'Experiment id: {experiment_id}')\n self.logger.log('Setup', f'Model id: {model_id}')\n\n # ckpt dir\n self.ckpt_dir = os.path.join(self.root_dir, 'ckpt')\n os.makedirs(self.ckpt_dir, exist_ok=True)\n self.logger.log('Setup', f'Checkpoint dir created!')\n\n # build dataset\n self._setup_datasets()\n\n # create network\n self._setup_model()\n self._setup_optim()\n self._setup_metric()\n\n # init\n self.start_epoch = 0\n self.start_iter = 0\n\n # check resuming\n self._resume_from_ckpt(opt.resume_path)\n self._setup_model_multi_gpu()\n\n # setup summary\n self.summary = vgtk.Summary()\n\n # setup timer\n self.timer = vgtk.Timer()\n self.summary.register(['Time'])\n\n # done\n self.logger.log('Setup', 'Setup finished!')\n\n def train(self):\n self.opt.mode = 'train'\n self.model.train()\n if self.opt.num_epochs is not None:\n self.train_epoch()\n else:\n self.train_iter()\n\n def test(self):\n self.opt.mode = 'test'\n self.model.eval()\n\n def train_iter(self):\n for i in range(self.opt.num_iterations):\n self.timer.set_point('train_iter')\n self.lr_schedule.step()\n self.step()\n # print({'Time': self.timer.reset_point('train_iter')})\n self.summary.update({'Time': self.timer.reset_point('train_iter')})\n\n if i % self.opt.log_freq == 0:\n if hasattr(self, 'epoch_counter'):\n step = f'Epoch {self.epoch_counter}, Iter {i}'\n else:\n step = f'Iter {i}'\n self._print_running_stats(step)\n\n if i > 0 and i % self.opt.save_freq == 0:\n self._save_network(f'Iter{i}')\n self.test()\n\n def train_epoch(self):\n for i in range(self.opt.num_epochs):\n self.lr_schedule.step()\n self.epoch_step()\n\n if i % self.opt.log_freq == 0:\n self._print_running_stats(f'Epoch {i}')\n\n if i > 0 and i % self.opt.save_freq == 0:\n self._save_network(f'Epoch{i}')\n\n\n # TODO: check that the options have the required key collection\n def check_opt(self, opt, print_opt=True):\n self.opt = opt\n self.opt.device = torch.device('cuda')\n\n def _print_running_stats(self, step):\n stats = self.summary.get()\n self.logger.log('Training', f'{step}: {stats}')\n\n def step(self):\n raise NotImplementedError('Not implemented')\n\n def epoch_step(self):\n raise NotImplementedError('Not implemented')\n\n def _setup_datasets(self):\n self.logger.log('Setup', 'Setup datasets!')\n self.dataset_train = None\n self.dataset_val = None\n self.dataset_test = None\n raise NotImplementedError('Not implemented')\n\n def _setup_model(self):\n self.logger.log('Setup', 'Setup model!')\n self.model = 
None\n raise NotImplementedError('Not implemented')\n\n def _setup_model_multi_gpu(self):\n if torch.cuda.device_count() > 1:\n self.logger.log('Setup', 'Using Multi-gpu and DataParallel!')\n self._use_multi_gpu = True\n self.model = nn.DataParallel(self.model)\n else:\n self.logger.log('Setup', 'Using Single-gpu!')\n self._use_multi_gpu = False\n\n def _setup_optim(self):\n self.logger.log('Setup', 'Setup optimizer!')\n # torch.autograd.set_detect_anomaly(True)\n self.optimizer = optim.Adam(self.model.parameters(),\n lr=self.opt.train_lr.init_lr)\n self.lr_schedule = vgtk.LearningRateScheduler(self.optimizer,\n **vars(self.opt.train_lr))\n self.logger.log('Setup', 'Optimizer all-set!')\n\n def _setup_metric(self):\n self.logger.log('Setup', 'Setup metric!')\n self.metric = None\n raise NotImplementedError('Not implemented')\n\n # def _resume_from_ckpt(self, resume_path):\n # if resume_path is None:\n # self.logger.log('Setup', f'Seems like we train from scratch!')\n # return\n # self.logger.log('Setup', f'Resume from checkpoint: {resume_path}')\n # state_dicts = torch.load(resume_path)\n # self.model.load_state_dict(state_dicts['model'])\n # self.optimizer.load_state_dict(state_dicts['optimizer'])\n # self.start_epoch = state_dicts['epoch']\n # self.start_iter = state_dicts['iter']\n # self.logger.log('Setup', f'Resume finished! Great!')\n\n def _resume_from_ckpt(self, resume_path):\n if resume_path is None:\n self.logger.log('Setup', f'Seems like we train from scratch!')\n return\n self.logger.log('Setup', f'Resume from checkpoint: {resume_path}')\n\n state_dicts = torch.load(resume_path)\n\n # self.model = nn.DataParallel(self.model)\n self.model.load_state_dict(state_dicts)\n # self.model = self.model.module\n # self.optimizer.load_state_dict(state_dicts['optimizer'])\n # self.start_epoch = state_dicts['epoch']\n # self.start_iter = state_dicts['iter']\n self.logger.log('Setup', f'Resume finished! 
Great!')\n\n\n\n # TODO\n def _save_network(self, step, label=None,path=None):\n label = self.opt.experiment_id if label is None else label\n if path is None:\n save_filename = '%s_net_%s.pth' % (label, step)\n save_path = os.path.join(self.root_dir, 'ckpt', save_filename)\n else:\n save_path = f'{path}.pth'\n \n if self._use_multi_gpu:\n params = self.model.module.cpu().state_dict()\n else:\n params = self.model.cpu().state_dict()\n torch.save(params, save_path)\n\n if torch.cuda.is_available():\n # torch.cuda.device(gpu_id)\n self.model.to(self.opt.device)\n self.logger.log('Training', f'Checkpoint saved to: {save_path}!')\n", "\"\"\"\nCode taken from PoinhtNetVLAD\n\"\"\"\n\n\nimport os\nimport pickle\nimport numpy as np\nimport random\nimport config as cfg\n\n\ndef get_queries_dict(filename):\n\t#key:{'query':file,'positives':[files],'negatives:[files], 'neighbors':[keys]}\n\twith open(filename, 'rb') as handle:\n\t\tqueries = pickle.load(handle)\n\t\tprint(\"Queries Loaded.\")\n\t\treturn queries\n\ndef get_sets_dict(filename):\n\t#[key_dataset:{key_pointcloud:{'query':file,'northing':value,'easting':value}},key_dataset:{key_pointcloud:{'query':file,'northing':value,'easting':value}}, ...}\n\twith open(filename, 'rb') as handle:\n\t\ttrajectories = pickle.load(handle)\n\t\tprint(\"Trajectories Loaded.\")\n\t\treturn trajectories\n\ndef load_pcd_file(filename):\n\t# load numpy files\n\t#returns Nx3 matrix\n\tpc=np.load(os.path.join(cfg.DATASET_FOLDER,filename))\n\n\t# if(pc.shape[0]!= cfg.NUM_POINTS):\n\t# \tprint(\"Error in pointcloud shape\")\n\t# \treturn np.array([])\n\n\treturn pc\n\ndef load_pc_file(filename):\n\t#returns Nx3 matrix\n\tpc=np.fromfile(os.path.join(cfg.DATASET_FOLDER,filename), dtype=np.float64)\n\n\tif(pc.shape[0]!= cfg.NUM_POINTS*3):\n\t\tprint(\"Error in pointcloud shape, \\ncfg.NUM_POINTS\", cfg.NUM_POINTS, \"\\npoint cloud shape\", pc.shape, \"\\nfilename\", filename)\n\t\treturn np.array([])\n\n\tpc=np.reshape(pc,(pc.shape[0]//3,3))\n\treturn pc\n\ndef load_pc_files(filenames):\n\tpcs=[]\n\tfor filename in filenames:\n\t\t#print(filename)\n\t\tpc=load_pc_file(filename)\n\t\tif(pc.shape[0]!=cfg.NUM_POINTS):\n\t\t\tcontinue\n\t\tpcs.append(pc)\n\tpcs=np.array(pcs)\n\treturn pcs\n\ndef rotate_point_cloud(batch_data):\n \"\"\" Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n \"\"\"\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n #rotation_angle = np.random.uniform() * 2 * np.pi\n #-90 to 90\n rotation_angle = (np.random.uniform()*np.pi)- np.pi/2.0\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, -sinval, 0],\n [sinval, cosval, 0],\n [0, 0, 1]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data\n\ndef jitter_point_cloud(batch_data, sigma=0.005, clip=0.05):\n \"\"\" Randomly jitter points. 
jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n \"\"\"\n B, N, C = batch_data.shape\n assert(clip > 0)\n jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip)\n jittered_data += batch_data\n return jittered_data\n\ndef get_query_tuple(dict_value, num_pos, num_neg, QUERY_DICT, hard_neg=[], other_neg=False):\n\t#get query tuple for dictionary entry\n\t#return list [query,positives,negatives]\n\n\tquery=load_pc_file(dict_value[\"query\"]) #Nx3\n\n\trandom.shuffle(dict_value[\"positives\"])\n\tpos_files=[]\n\n\tfor i in range(num_pos):\n\t\tpos_files.append(QUERY_DICT[dict_value[\"positives\"][i]][\"query\"])\n\t#positives= load_pc_files(dict_value[\"positives\"][0:num_pos])\n\tpositives=load_pc_files(pos_files)\n\n\tneg_files=[]\n\tneg_indices=[]\n\tif(len(hard_neg)==0):\n\t\trandom.shuffle(dict_value[\"negatives\"])\t\n\t\tfor i in range(num_neg):\n\t\t\tneg_files.append(QUERY_DICT[dict_value[\"negatives\"][i]][\"query\"])\n\t\t\tneg_indices.append(dict_value[\"negatives\"][i])\n\n\telse:\n\t\trandom.shuffle(dict_value[\"negatives\"])\n\t\tfor i in hard_neg:\n\t\t\tneg_files.append(QUERY_DICT[i][\"query\"])\n\t\t\tneg_indices.append(i)\n\t\tj=0\n\t\twhile(len(neg_files)<num_neg):\n\n\t\t\tif not dict_value[\"negatives\"][j] in hard_neg:\n\t\t\t\tneg_files.append(QUERY_DICT[dict_value[\"negatives\"][j]][\"query\"])\n\t\t\t\tneg_indices.append(dict_value[\"negatives\"][j])\n\t\t\tj+=1\n\n\tneg_files = neg_files[:num_neg]\n\tnegatives=load_pc_files(neg_files)\n\n\tif(other_neg==False):\n\t\treturn [query,positives,negatives]\n\t#For Quadruplet Loss\n\telse:\n\t\t#get neighbors of negatives and query\n\t\tneighbors=[]\n\t\tfor pos in dict_value[\"positives\"]:\n\t\t\tneighbors.append(pos)\n\t\tfor neg in neg_indices:\n\t\t\tfor pos in QUERY_DICT[neg][\"positives\"]:\n\t\t\t\tneighbors.append(pos)\n\t\tpossible_negs= list(set(QUERY_DICT.keys())-set(neighbors))\n\t\trandom.shuffle(possible_negs)\n\n\t\tif(len(possible_negs)==0):\n\t\t\treturn [query, positives, negatives, np.array([])]\t\t\n\n\t\tneg2= load_pc_file(QUERY_DICT[possible_negs[0]][\"query\"])\n\n\t\treturn [query,positives,negatives,neg2]\n\n\ndef get_rotated_tuple(dict_value, num_pos, num_neg, QUERY_DICT, hard_neg=[],other_neg=False):\n\tquery = load_pc_file(dict_value[\"query\"]) #Nx3\n\tq_rot = rotate_point_cloud(np.expand_dims(query, axis=0))\n\tq_rot = np.squeeze(q_rot)\n\n\trandom.shuffle(dict_value[\"positives\"])\n\tpos_files=[]\n\tfor i in range(num_pos):\n\t\tpos_files.append(QUERY_DICT[dict_value[\"positives\"][i]][\"query\"])\n\t#positives= load_pc_files(dict_value[\"positives\"][0:num_pos])\n\tpositives=load_pc_files(pos_files)\n\tp_rot= rotate_point_cloud(positives)\n\n\tneg_files=[]\n\tneg_indices=[]\n\tif(len(hard_neg)==0):\n\t\trandom.shuffle(dict_value[\"negatives\"])\n\t\tfor i in range(num_neg):\n\t\t\tneg_files.append(QUERY_DICT[dict_value[\"negatives\"][i]][\"query\"])\n\t\t\tneg_indices.append(dict_value[\"negatives\"][i])\n\telse:\n\t\trandom.shuffle(dict_value[\"negatives\"])\n\t\tfor i in hard_neg:\n\t\t\tneg_files.append(QUERY_DICT[i][\"query\"])\n\t\t\tneg_indices.append(i)\n\t\tj=0\n\t\twhile(len(neg_files)<num_neg):\n\t\t\tif not dict_value[\"negatives\"][j] in 
hard_neg:\n\t\t\t\tneg_files.append(QUERY_DICT[dict_value[\"negatives\"][j]][\"query\"])\n\t\t\t\tneg_indices.append(dict_value[\"negatives\"][j])\n\t\t\tj+=1\t\n\tnegatives=load_pc_files(neg_files)\n\tn_rot=rotate_point_cloud(negatives)\n\n\tif(other_neg==False):\n\t\treturn [q_rot,p_rot,n_rot]\n\n\t#For Quadruplet Loss\n\telse:\n\t\t#get neighbors of negatives and query\n\t\tneighbors=[]\n\t\tfor pos in dict_value[\"positives\"]:\n\t\t\tneighbors.append(pos)\n\t\tfor neg in neg_indices:\n\t\t\tfor pos in QUERY_DICT[neg][\"positives\"]:\n\t\t\t\tneighbors.append(pos)\n\t\tpossible_negs= list(set(QUERY_DICT.keys())-set(neighbors))\n\t\trandom.shuffle(possible_negs)\n\n\t\tif(len(possible_negs)==0):\n\t\t\treturn [q_jit, p_jit, n_jit, np.array([])]\n\n\t\tneg2= load_pc_file(QUERY_DICT[possible_negs[0]][\"query\"])\n\t\tn2_rot= rotate_point_cloud(np.expand_dims(neg2, axis=0))\n\t\tn2_rot= np.squeeze(n2_rot)\n\n\t\treturn [q_rot,p_rot,n_rot,n2_rot]\n\ndef get_jittered_tuple(dict_value, num_pos, num_neg, QUERY_DICT, hard_neg=[],other_neg=False):\n\tquery=load_pc_file(dict_value[\"query\"]) #Nx3\n\t#q_rot= rotate_point_cloud(np.expand_dims(query, axis=0))\n\tq_jit= jitter_point_cloud(np.expand_dims(query, axis=0))\n\tq_jit= np.squeeze(q_jit)\n\n\trandom.shuffle(dict_value[\"positives\"])\n\tpos_files=[]\n\tfor i in range(num_pos):\n\t\tpos_files.append(QUERY_DICT[dict_value[\"positives\"][i]][\"query\"])\n\t#positives= load_pc_files(dict_value[\"positives\"][0:num_pos])\n\tpositives=load_pc_files(pos_files)\n\tp_jit= jitter_point_cloud(positives)\n\n\tneg_files=[]\n\tneg_indices=[]\n\tif(len(hard_neg)==0):\n\t\trandom.shuffle(dict_value[\"negatives\"])\n\t\tfor i in range(num_neg):\n\t\t\tneg_files.append(QUERY_DICT[dict_value[\"negatives\"][i]][\"query\"])\n\t\t\tneg_indices.append(dict_value[\"negatives\"][i])\n\telse:\n\t\trandom.shuffle(dict_value[\"negatives\"])\n\t\tfor i in hard_neg:\n\t\t\tneg_files.append(QUERY_DICT[i][\"query\"])\n\t\t\tneg_indices.append(i)\n\t\tj=0\n\t\twhile(len(neg_files)<num_neg):\n\t\t\tif not dict_value[\"negatives\"][j] in hard_neg:\n\t\t\t\tneg_files.append(QUERY_DICT[dict_value[\"negatives\"][j]][\"query\"])\n\t\t\t\tneg_indices.append(dict_value[\"negatives\"][j])\n\t\t\tj+=1\t\n\tnegatives=load_pc_files(neg_files)\n\tn_jit=jitter_point_cloud(negatives)\n\n\tif(other_neg==False):\n\t\treturn [q_jit,p_jit,n_jit]\n\n\t#For Quadruplet Loss\n\telse:\n\t\t#get neighbors of negatives and query\n\t\tneighbors=[]\n\t\tfor pos in dict_value[\"positives\"]:\n\t\t\tneighbors.append(pos)\n\t\tfor neg in neg_indices:\n\t\t\tfor pos in QUERY_DICT[neg][\"positives\"]:\n\t\t\t\tneighbors.append(pos)\n\t\tpossible_negs= list(set(QUERY_DICT.keys())-set(neighbors))\n\t\trandom.shuffle(possible_negs)\n\n\t\tif(len(possible_negs)==0):\n\t\t\treturn [q_jit, p_jit, n_jit, np.array([])]\n\n\t\tneg2= load_pc_file(QUERY_DICT[possible_negs[0]][\"query\"])\n\t\tn2_jit= jitter_point_cloud(np.expand_dims(neg2, axis=0))\n\t\tn2_jit= np.squeeze(n2_jit)\n\n\t\treturn [q_jit,p_jit,n_jit,n2_jit]\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.cat", "numpy.eye", "torch.nn.Conv2d", "torch.unsqueeze", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.squeeze" ], [ "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.nn.DataParallel", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.cuda.device_count", "torch.save" ], [ "numpy.expand_dims", "numpy.reshape", "numpy.squeeze", "numpy.cos", "numpy.sin", "numpy.random.randn", "numpy.random.uniform", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XLPRUtils/pyUtils
[ "3a62c14b0658ad3c24d83f953ee0d88530b02b23", "3a62c14b0658ad3c24d83f953ee0d88530b02b23" ]
[ "pyxlpr/ppocr/postprocess/pse_postprocess/pse_postprocess.py", "pyxlpr/ppocr/utils/e2e_metric/Deteval.py" ]
[ "# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis code is refer from:\nhttps://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport paddle\nfrom paddle.nn import functional as F\n\nfrom pyxlpr.ppocr.postprocess.pse_postprocess.pse import pse\n\n\nclass PSEPostProcess(object):\n \"\"\"\n The post process for PSE.\n \"\"\"\n\n def __init__(self,\n thresh=0.5,\n box_thresh=0.85,\n min_area=16,\n box_type='box',\n scale=4,\n **kwargs):\n assert box_type in ['box', 'poly'], 'Only box and poly is supported'\n self.thresh = thresh\n self.box_thresh = box_thresh\n self.min_area = min_area\n self.box_type = box_type\n self.scale = scale\n\n def __call__(self, outs_dict, shape_list):\n pred = outs_dict['maps']\n if not isinstance(pred, paddle.Tensor):\n pred = paddle.to_tensor(pred)\n pred = F.interpolate(\n pred, scale_factor=4 // self.scale, mode='bilinear')\n\n score = F.sigmoid(pred[:, 0, :, :])\n\n kernels = (pred > self.thresh).astype('float32')\n text_mask = kernels[:, 0, :, :]\n kernels[:, 0:, :, :] = kernels[:, 0:, :, :] * text_mask\n\n score = score.numpy()\n kernels = kernels.numpy().astype(np.uint8)\n\n boxes_batch = []\n for batch_index in range(pred.shape[0]):\n boxes, scores = self.boxes_from_bitmap(score[batch_index],\n kernels[batch_index],\n shape_list[batch_index])\n\n boxes_batch.append({'points': boxes, 'scores': scores})\n return boxes_batch\n\n def boxes_from_bitmap(self, score, kernels, shape):\n label = pse(kernels, self.min_area)\n return self.generate_box(score, label, shape)\n\n def generate_box(self, score, label, shape):\n src_h, src_w, ratio_h, ratio_w = shape\n label_num = np.max(label) + 1\n\n boxes = []\n scores = []\n for i in range(1, label_num):\n ind = label == i\n points = np.array(np.where(ind)).transpose((1, 0))[:, ::-1]\n\n if points.shape[0] < self.min_area:\n label[ind] = 0\n continue\n\n score_i = np.mean(score[ind])\n if score_i < self.box_thresh:\n label[ind] = 0\n continue\n\n if self.box_type == 'box':\n rect = cv2.minAreaRect(points)\n bbox = cv2.boxPoints(rect)\n elif self.box_type == 'poly':\n box_height = np.max(points[:, 1]) + 10\n box_width = np.max(points[:, 0]) + 10\n\n mask = np.zeros((box_height, box_width), np.uint8)\n mask[points[:, 1], points[:, 0]] = 255\n\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n bbox = np.squeeze(contours[0], 1)\n else:\n raise NotImplementedError\n\n bbox[:, 0] = np.clip(np.round(bbox[:, 0] / ratio_w), 0, src_w)\n bbox[:, 1] = np.clip(np.round(bbox[:, 1] / ratio_h), 0, src_h)\n boxes.append(bbox)\n scores.append(score_i)\n return boxes, scores\n", "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport scipy.io as io\nfrom pyxlpr.ppocr.utils.e2e_metric.polygon_fast import iod, area_of_intersection, area\n\n\ndef get_socre_A(gt_dir, pred_dict):\n allInputs = 1\n\n def input_reading_mod(pred_dict):\n \"\"\"This helper reads input from txt files\"\"\"\n det = []\n n = len(pred_dict)\n for i in range(n):\n points = pred_dict[i]['points']\n text = pred_dict[i]['texts']\n point = \",\".join(map(str, points.reshape(-1, )))\n det.append([point, text])\n return det\n\n def gt_reading_mod(gt_dict):\n \"\"\"This helper reads groundtruths from mat files\"\"\"\n gt = []\n n = len(gt_dict)\n for i in range(n):\n points = gt_dict[i]['points'].tolist()\n h = len(points)\n text = gt_dict[i]['text']\n xx = [\n np.array(\n ['x:'], dtype='<U2'), 0, np.array(\n ['y:'], dtype='<U2'), 0, np.array(\n ['#'], dtype='<U1'), np.array(\n ['#'], dtype='<U1')\n ]\n t_x, t_y = [], []\n for j in range(h):\n t_x.append(points[j][0])\n t_y.append(points[j][1])\n xx[1] = np.array([t_x], dtype='int16')\n xx[3] = np.array([t_y], dtype='int16')\n if text != \"\":\n xx[4] = np.array([text], dtype='U{}'.format(len(text)))\n xx[5] = np.array(['c'], dtype='<U1')\n gt.append(xx)\n return gt\n\n def detection_filtering(detections, groundtruths, threshold=0.5):\n for gt_id, gt in enumerate(groundtruths):\n if (gt[5] == '#') and (gt[1].shape[1] > 1):\n gt_x = list(map(int, np.squeeze(gt[1])))\n gt_y = list(map(int, np.squeeze(gt[3])))\n for det_id, detection in enumerate(detections):\n detection_orig = detection\n detection = [float(x) for x in detection[0].split(',')]\n detection = list(map(int, detection))\n det_x = detection[0::2]\n det_y = detection[1::2]\n det_gt_iou = iod(det_x, det_y, gt_x, gt_y)\n if det_gt_iou > threshold:\n detections[det_id] = []\n\n detections[:] = [item for item in detections if item != []]\n return detections\n\n def sigma_calculation(det_x, det_y, gt_x, gt_y):\n \"\"\"\n sigma = inter_area / gt_area\n \"\"\"\n return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) /\n area(gt_x, gt_y)), 2)\n\n def tau_calculation(det_x, det_y, gt_x, gt_y):\n if area(det_x, det_y) == 0.0:\n return 0\n return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) /\n area(det_x, det_y)), 2)\n\n ##############################Initialization###################################\n # global_sigma = []\n # global_tau = []\n # global_pred_str = []\n # global_gt_str = []\n ###############################################################################\n\n for input_id in range(allInputs):\n if (input_id != '.DS_Store') and (input_id != 'Pascal_result.txt') and (\n input_id != 'Pascal_result_curved.txt') and (input_id != 'Pascal_result_non_curved.txt') and (\n input_id != 'Deteval_result.txt') and (input_id != 'Deteval_result_curved.txt') \\\n and (input_id != 'Deteval_result_non_curved.txt'):\n detections = input_reading_mod(pred_dict)\n groundtruths = gt_reading_mod(gt_dir)\n detections = detection_filtering(\n 
detections,\n groundtruths) # filters detections overlapping with DC area\n dc_id = []\n for i in range(len(groundtruths)):\n if groundtruths[i][5] == '#':\n dc_id.append(i)\n cnt = 0\n for a in dc_id:\n num = a - cnt\n del groundtruths[num]\n cnt += 1\n\n local_sigma_table = np.zeros((len(groundtruths), len(detections)))\n local_tau_table = np.zeros((len(groundtruths), len(detections)))\n local_pred_str = {}\n local_gt_str = {}\n\n for gt_id, gt in enumerate(groundtruths):\n if len(detections) > 0:\n for det_id, detection in enumerate(detections):\n detection_orig = detection\n detection = [float(x) for x in detection[0].split(',')]\n detection = list(map(int, detection))\n pred_seq_str = detection_orig[1].strip()\n det_x = detection[0::2]\n det_y = detection[1::2]\n gt_x = list(map(int, np.squeeze(gt[1])))\n gt_y = list(map(int, np.squeeze(gt[3])))\n gt_seq_str = str(gt[4].tolist()[0])\n\n local_sigma_table[gt_id, det_id] = sigma_calculation(\n det_x, det_y, gt_x, gt_y)\n local_tau_table[gt_id, det_id] = tau_calculation(\n det_x, det_y, gt_x, gt_y)\n local_pred_str[det_id] = pred_seq_str\n local_gt_str[gt_id] = gt_seq_str\n\n global_sigma = local_sigma_table\n global_tau = local_tau_table\n global_pred_str = local_pred_str\n global_gt_str = local_gt_str\n\n single_data = {}\n single_data['sigma'] = global_sigma\n single_data['global_tau'] = global_tau\n single_data['global_pred_str'] = global_pred_str\n single_data['global_gt_str'] = global_gt_str\n return single_data\n\n\ndef get_socre_B(gt_dir, img_id, pred_dict):\n allInputs = 1\n\n def input_reading_mod(pred_dict):\n \"\"\"This helper reads input from txt files\"\"\"\n det = []\n n = len(pred_dict)\n for i in range(n):\n points = pred_dict[i]['points']\n text = pred_dict[i]['texts']\n point = \",\".join(map(str, points.reshape(-1, )))\n det.append([point, text])\n return det\n\n def gt_reading_mod(gt_dir, gt_id):\n gt = io.loadmat('%s/poly_gt_img%s.mat' % (gt_dir, gt_id))\n gt = gt['polygt']\n return gt\n\n def detection_filtering(detections, groundtruths, threshold=0.5):\n for gt_id, gt in enumerate(groundtruths):\n if (gt[5] == '#') and (gt[1].shape[1] > 1):\n gt_x = list(map(int, np.squeeze(gt[1])))\n gt_y = list(map(int, np.squeeze(gt[3])))\n for det_id, detection in enumerate(detections):\n detection_orig = detection\n detection = [float(x) for x in detection[0].split(',')]\n detection = list(map(int, detection))\n det_x = detection[0::2]\n det_y = detection[1::2]\n det_gt_iou = iod(det_x, det_y, gt_x, gt_y)\n if det_gt_iou > threshold:\n detections[det_id] = []\n\n detections[:] = [item for item in detections if item != []]\n return detections\n\n def sigma_calculation(det_x, det_y, gt_x, gt_y):\n \"\"\"\n sigma = inter_area / gt_area\n \"\"\"\n return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) /\n area(gt_x, gt_y)), 2)\n\n def tau_calculation(det_x, det_y, gt_x, gt_y):\n if area(det_x, det_y) == 0.0:\n return 0\n return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) /\n area(det_x, det_y)), 2)\n\n ##############################Initialization###################################\n # global_sigma = []\n # global_tau = []\n # global_pred_str = []\n # global_gt_str = []\n ###############################################################################\n\n for input_id in range(allInputs):\n if (input_id != '.DS_Store') and (input_id != 'Pascal_result.txt') and (\n input_id != 'Pascal_result_curved.txt') and (input_id != 'Pascal_result_non_curved.txt') and (\n input_id != 'Deteval_result.txt') and (input_id != 
'Deteval_result_curved.txt') \\\n and (input_id != 'Deteval_result_non_curved.txt'):\n detections = input_reading_mod(pred_dict)\n groundtruths = gt_reading_mod(gt_dir, img_id).tolist()\n detections = detection_filtering(\n detections,\n groundtruths) # filters detections overlapping with DC area\n dc_id = []\n for i in range(len(groundtruths)):\n if groundtruths[i][5] == '#':\n dc_id.append(i)\n cnt = 0\n for a in dc_id:\n num = a - cnt\n del groundtruths[num]\n cnt += 1\n\n local_sigma_table = np.zeros((len(groundtruths), len(detections)))\n local_tau_table = np.zeros((len(groundtruths), len(detections)))\n local_pred_str = {}\n local_gt_str = {}\n\n for gt_id, gt in enumerate(groundtruths):\n if len(detections) > 0:\n for det_id, detection in enumerate(detections):\n detection_orig = detection\n detection = [float(x) for x in detection[0].split(',')]\n detection = list(map(int, detection))\n pred_seq_str = detection_orig[1].strip()\n det_x = detection[0::2]\n det_y = detection[1::2]\n gt_x = list(map(int, np.squeeze(gt[1])))\n gt_y = list(map(int, np.squeeze(gt[3])))\n gt_seq_str = str(gt[4].tolist()[0])\n\n local_sigma_table[gt_id, det_id] = sigma_calculation(\n det_x, det_y, gt_x, gt_y)\n local_tau_table[gt_id, det_id] = tau_calculation(\n det_x, det_y, gt_x, gt_y)\n local_pred_str[det_id] = pred_seq_str\n local_gt_str[gt_id] = gt_seq_str\n\n global_sigma = local_sigma_table\n global_tau = local_tau_table\n global_pred_str = local_pred_str\n global_gt_str = local_gt_str\n\n single_data = {}\n single_data['sigma'] = global_sigma\n single_data['global_tau'] = global_tau\n single_data['global_pred_str'] = global_pred_str\n single_data['global_gt_str'] = global_gt_str\n return single_data\n\n\ndef combine_results(all_data):\n tr = 0.7\n tp = 0.6\n fsc_k = 0.8\n k = 2\n global_sigma = []\n global_tau = []\n global_pred_str = []\n global_gt_str = []\n for data in all_data:\n global_sigma.append(data['sigma'])\n global_tau.append(data['global_tau'])\n global_pred_str.append(data['global_pred_str'])\n global_gt_str.append(data['global_gt_str'])\n\n global_accumulative_recall = 0\n global_accumulative_precision = 0\n total_num_gt = 0\n total_num_det = 0\n hit_str_count = 0\n hit_count = 0\n\n def one_to_one(local_sigma_table, local_tau_table,\n local_accumulative_recall, local_accumulative_precision,\n global_accumulative_recall, global_accumulative_precision,\n gt_flag, det_flag, idy):\n hit_str_num = 0\n for gt_id in range(num_gt):\n gt_matching_qualified_sigma_candidates = np.where(\n local_sigma_table[gt_id, :] > tr)\n gt_matching_num_qualified_sigma_candidates = gt_matching_qualified_sigma_candidates[\n 0].shape[0]\n gt_matching_qualified_tau_candidates = np.where(\n local_tau_table[gt_id, :] > tp)\n gt_matching_num_qualified_tau_candidates = gt_matching_qualified_tau_candidates[\n 0].shape[0]\n\n det_matching_qualified_sigma_candidates = np.where(\n local_sigma_table[:, gt_matching_qualified_sigma_candidates[0]]\n > tr)\n det_matching_num_qualified_sigma_candidates = det_matching_qualified_sigma_candidates[\n 0].shape[0]\n det_matching_qualified_tau_candidates = np.where(\n local_tau_table[:, gt_matching_qualified_tau_candidates[0]] >\n tp)\n det_matching_num_qualified_tau_candidates = det_matching_qualified_tau_candidates[\n 0].shape[0]\n\n if (gt_matching_num_qualified_sigma_candidates == 1) and (gt_matching_num_qualified_tau_candidates == 1) and \\\n (det_matching_num_qualified_sigma_candidates == 1) and (\n det_matching_num_qualified_tau_candidates == 1):\n global_accumulative_recall 
= global_accumulative_recall + 1.0\n global_accumulative_precision = global_accumulative_precision + 1.0\n local_accumulative_recall = local_accumulative_recall + 1.0\n local_accumulative_precision = local_accumulative_precision + 1.0\n\n gt_flag[0, gt_id] = 1\n matched_det_id = np.where(local_sigma_table[gt_id, :] > tr)\n # recg start\n gt_str_cur = global_gt_str[idy][gt_id]\n pred_str_cur = global_pred_str[idy][matched_det_id[0].tolist()[\n 0]]\n if pred_str_cur == gt_str_cur:\n hit_str_num += 1\n else:\n if pred_str_cur.lower() == gt_str_cur.lower():\n hit_str_num += 1\n # recg end\n det_flag[0, matched_det_id] = 1\n return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag, hit_str_num\n\n def one_to_many(local_sigma_table, local_tau_table,\n local_accumulative_recall, local_accumulative_precision,\n global_accumulative_recall, global_accumulative_precision,\n gt_flag, det_flag, idy):\n hit_str_num = 0\n for gt_id in range(num_gt):\n # skip the following if the groundtruth was matched\n if gt_flag[0, gt_id] > 0:\n continue\n\n non_zero_in_sigma = np.where(local_sigma_table[gt_id, :] > 0)\n num_non_zero_in_sigma = non_zero_in_sigma[0].shape[0]\n\n if num_non_zero_in_sigma >= k:\n ####search for all detections that overlaps with this groundtruth\n qualified_tau_candidates = np.where((local_tau_table[\n gt_id, :] >= tp) & (det_flag[0, :] == 0))\n num_qualified_tau_candidates = qualified_tau_candidates[\n 0].shape[0]\n\n if num_qualified_tau_candidates == 1:\n if ((local_tau_table[gt_id, qualified_tau_candidates] >= tp)\n and\n (local_sigma_table[gt_id, qualified_tau_candidates] >=\n tr)):\n # became an one-to-one case\n global_accumulative_recall = global_accumulative_recall + 1.0\n global_accumulative_precision = global_accumulative_precision + 1.0\n local_accumulative_recall = local_accumulative_recall + 1.0\n local_accumulative_precision = local_accumulative_precision + 1.0\n\n gt_flag[0, gt_id] = 1\n det_flag[0, qualified_tau_candidates] = 1\n # recg start\n gt_str_cur = global_gt_str[idy][gt_id]\n pred_str_cur = global_pred_str[idy][\n qualified_tau_candidates[0].tolist()[0]]\n if pred_str_cur == gt_str_cur:\n hit_str_num += 1\n else:\n if pred_str_cur.lower() == gt_str_cur.lower():\n hit_str_num += 1\n # recg end\n elif (np.sum(local_sigma_table[gt_id, qualified_tau_candidates])\n >= tr):\n gt_flag[0, gt_id] = 1\n det_flag[0, qualified_tau_candidates] = 1\n # recg start\n gt_str_cur = global_gt_str[idy][gt_id]\n pred_str_cur = global_pred_str[idy][\n qualified_tau_candidates[0].tolist()[0]]\n if pred_str_cur == gt_str_cur:\n hit_str_num += 1\n else:\n if pred_str_cur.lower() == gt_str_cur.lower():\n hit_str_num += 1\n # recg end\n\n global_accumulative_recall = global_accumulative_recall + fsc_k\n global_accumulative_precision = global_accumulative_precision + num_qualified_tau_candidates * fsc_k\n\n local_accumulative_recall = local_accumulative_recall + fsc_k\n local_accumulative_precision = local_accumulative_precision + num_qualified_tau_candidates * fsc_k\n\n return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag, hit_str_num\n\n def many_to_one(local_sigma_table, local_tau_table,\n local_accumulative_recall, local_accumulative_precision,\n global_accumulative_recall, global_accumulative_precision,\n gt_flag, det_flag, idy):\n hit_str_num = 0\n for det_id in range(num_det):\n # skip the following if the 
detection was matched\n if det_flag[0, det_id] > 0:\n continue\n\n non_zero_in_tau = np.where(local_tau_table[:, det_id] > 0)\n num_non_zero_in_tau = non_zero_in_tau[0].shape[0]\n\n if num_non_zero_in_tau >= k:\n ####search for all detections that overlaps with this groundtruth\n qualified_sigma_candidates = np.where((\n local_sigma_table[:, det_id] >= tp) & (gt_flag[0, :] == 0))\n num_qualified_sigma_candidates = qualified_sigma_candidates[\n 0].shape[0]\n\n if num_qualified_sigma_candidates == 1:\n if ((local_tau_table[qualified_sigma_candidates, det_id] >=\n tp) and\n (local_sigma_table[qualified_sigma_candidates, det_id]\n >= tr)):\n # became an one-to-one case\n global_accumulative_recall = global_accumulative_recall + 1.0\n global_accumulative_precision = global_accumulative_precision + 1.0\n local_accumulative_recall = local_accumulative_recall + 1.0\n local_accumulative_precision = local_accumulative_precision + 1.0\n\n gt_flag[0, qualified_sigma_candidates] = 1\n det_flag[0, det_id] = 1\n # recg start\n pred_str_cur = global_pred_str[idy][det_id]\n gt_len = len(qualified_sigma_candidates[0])\n for idx in range(gt_len):\n ele_gt_id = qualified_sigma_candidates[0].tolist()[\n idx]\n if ele_gt_id not in global_gt_str[idy]:\n continue\n gt_str_cur = global_gt_str[idy][ele_gt_id]\n if pred_str_cur == gt_str_cur:\n hit_str_num += 1\n break\n else:\n if pred_str_cur.lower() == gt_str_cur.lower():\n hit_str_num += 1\n break\n # recg end\n elif (np.sum(local_tau_table[qualified_sigma_candidates,\n det_id]) >= tp):\n det_flag[0, det_id] = 1\n gt_flag[0, qualified_sigma_candidates] = 1\n # recg start\n pred_str_cur = global_pred_str[idy][det_id]\n gt_len = len(qualified_sigma_candidates[0])\n for idx in range(gt_len):\n ele_gt_id = qualified_sigma_candidates[0].tolist()[idx]\n if ele_gt_id not in global_gt_str[idy]:\n continue\n gt_str_cur = global_gt_str[idy][ele_gt_id]\n if pred_str_cur == gt_str_cur:\n hit_str_num += 1\n break\n else:\n if pred_str_cur.lower() == gt_str_cur.lower():\n hit_str_num += 1\n break\n # recg end\n\n global_accumulative_recall = global_accumulative_recall + num_qualified_sigma_candidates * fsc_k\n global_accumulative_precision = global_accumulative_precision + fsc_k\n\n local_accumulative_recall = local_accumulative_recall + num_qualified_sigma_candidates * fsc_k\n local_accumulative_precision = local_accumulative_precision + fsc_k\n return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag, hit_str_num\n\n for idx in range(len(global_sigma)):\n local_sigma_table = np.array(global_sigma[idx])\n local_tau_table = global_tau[idx]\n\n num_gt = local_sigma_table.shape[0]\n num_det = local_sigma_table.shape[1]\n\n total_num_gt = total_num_gt + num_gt\n total_num_det = total_num_det + num_det\n\n local_accumulative_recall = 0\n local_accumulative_precision = 0\n gt_flag = np.zeros((1, num_gt))\n det_flag = np.zeros((1, num_det))\n\n #######first check for one-to-one case##########\n local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \\\n gt_flag, det_flag, hit_str_num = one_to_one(local_sigma_table, local_tau_table,\n local_accumulative_recall, local_accumulative_precision,\n global_accumulative_recall, global_accumulative_precision,\n gt_flag, det_flag, idx)\n\n hit_str_count += hit_str_num\n #######then check for one-to-many case##########\n local_accumulative_recall, local_accumulative_precision, 
global_accumulative_recall, global_accumulative_precision, \\\n gt_flag, det_flag, hit_str_num = one_to_many(local_sigma_table, local_tau_table,\n local_accumulative_recall, local_accumulative_precision,\n global_accumulative_recall, global_accumulative_precision,\n gt_flag, det_flag, idx)\n hit_str_count += hit_str_num\n #######then check for many-to-one case##########\n local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \\\n gt_flag, det_flag, hit_str_num = many_to_one(local_sigma_table, local_tau_table,\n local_accumulative_recall, local_accumulative_precision,\n global_accumulative_recall, global_accumulative_precision,\n gt_flag, det_flag, idx)\n hit_str_count += hit_str_num\n\n try:\n recall = global_accumulative_recall / total_num_gt\n except ZeroDivisionError:\n recall = 0\n\n try:\n precision = global_accumulative_precision / total_num_det\n except ZeroDivisionError:\n precision = 0\n\n try:\n f_score = 2 * precision * recall / (precision + recall)\n except ZeroDivisionError:\n f_score = 0\n\n try:\n seqerr = 1 - float(hit_str_count) / global_accumulative_recall\n except ZeroDivisionError:\n seqerr = 1\n\n try:\n recall_e2e = float(hit_str_count) / total_num_gt\n except ZeroDivisionError:\n recall_e2e = 0\n\n try:\n precision_e2e = float(hit_str_count) / total_num_det\n except ZeroDivisionError:\n precision_e2e = 0\n\n try:\n f_score_e2e = 2 * precision_e2e * recall_e2e / (\n precision_e2e + recall_e2e)\n except ZeroDivisionError:\n f_score_e2e = 0\n\n final = {\n 'total_num_gt': total_num_gt,\n 'total_num_det': total_num_det,\n 'global_accumulative_recall': global_accumulative_recall,\n 'hit_str_count': hit_str_count,\n 'recall': recall,\n 'precision': precision,\n 'f_score': f_score,\n 'seqerr': seqerr,\n 'recall_e2e': recall_e2e,\n 'precision_e2e': precision_e2e,\n 'f_score_e2e': f_score_e2e\n }\n return final\n" ]
[ [ "numpy.squeeze", "numpy.round", "numpy.max", "numpy.mean", "numpy.zeros", "numpy.where" ], [ "numpy.squeeze", "scipy.io.loadmat", "numpy.array", "numpy.where", "numpy.sum", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
code-review-doctor/keras
[ "96130040540e1405ffe746ddf2b2cceb9b8b8f65", "96130040540e1405ffe746ddf2b2cceb9b8b8f65", "96130040540e1405ffe746ddf2b2cceb9b8b8f65" ]
[ "keras/api/tests/api_compatibility_test.py", "keras/integration_test/tpu_strategy_test.py", "keras/engine/functional_test.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ==============================================================================\n\"\"\"Keras API compatibility tests.\n\nThis test ensures all changes to the public API of Keras are intended.\n\nIf this test fails, it means a change has been made to the public API. Backwards\nincompatible changes are not allowed. You can run the test with\n\"--update_goldens\" flag set to \"True\" to update goldens when making changes to\nthe public Keras python API.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport argparse\nimport os\nimport re\nimport sys\n\nimport six\n\nfrom google.protobuf import message\nfrom google.protobuf import text_format\n\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.tools.api.lib import api_objects_pb2\nfrom tensorflow.tools.api.lib import python_object_to_proto_visitor\nfrom tensorflow.tools.common import public_api\nfrom tensorflow.tools.common import traverse\n\n\n# FLAGS defined at the bottom:\nFLAGS = None\n# DEFINE_boolean, update_goldens, default False:\n_UPDATE_GOLDENS_HELP = \"\"\"\n Update stored golden files if API is updated. WARNING: All API changes\n have to be authorized by TensorFlow leads.\n\"\"\"\n\n# DEFINE_boolean, verbose_diffs, default True:\n_VERBOSE_DIFFS_HELP = \"\"\"\n If set to true, print line by line diffs on all libraries. If set to\n false, only print which libraries have differences.\n\"\"\"\n\n# Initialized with _InitPathConstants function below.\n_API_GOLDEN_FOLDER_V1 = None\n_API_GOLDEN_FOLDER_V2 = None\n\n\ndef _InitPathConstants():\n global _API_GOLDEN_FOLDER_V1\n global _API_GOLDEN_FOLDER_V2\n root_golden_path_v2 = os.path.join(\n tf.compat.v1.resource_loader.get_data_files_path(),\n '..', 'golden', 'v2', 'tensorflow.keras.pbtxt')\n\n if FLAGS.update_goldens:\n root_golden_path_v2 = os.path.realpath(root_golden_path_v2)\n # Get API directories based on the root golden file. This way\n # we make sure to resolve symbolic links before creating new files.\n _API_GOLDEN_FOLDER_V2 = os.path.dirname(root_golden_path_v2)\n _API_GOLDEN_FOLDER_V1 = os.path.normpath(\n os.path.join(_API_GOLDEN_FOLDER_V2, '..', 'v1'))\n\n\n_TEST_README_FILE = os.path.join(\n tf.compat.v1.resource_loader.get_data_files_path(), 'README.txt')\n_UPDATE_WARNING_FILE = os.path.join(\n tf.compat.v1.resource_loader.get_data_files_path(),\n 'API_UPDATE_WARNING.txt')\n\n\ndef _KeyToFilePath(key, api_version):\n \"\"\"From a given key, construct a filepath.\n\n Filepath will be inside golden folder for api_version.\n\n Args:\n key: a string used to determine the file path\n api_version: a number indicating the tensorflow API version, e.g. 
1 or 2.\n\n Returns:\n A string of file path to the pbtxt file which describes the public API\n \"\"\"\n\n def _ReplaceCapsWithDash(matchobj):\n match = matchobj.group(0)\n return '-%s' % (match.lower())\n\n case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash,\n six.ensure_str(key))\n api_folder = (\n _API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1)\n return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)\n\n\ndef _FileNameToKey(filename):\n \"\"\"From a given filename, construct a key we use for api objects.\"\"\"\n\n def _ReplaceDashWithCaps(matchobj):\n match = matchobj.group(0)\n return match[1].upper()\n\n base_filename = os.path.basename(filename)\n base_filename_without_ext = os.path.splitext(base_filename)[0]\n api_object_key = re.sub('((-[a-z]){1})', _ReplaceDashWithCaps,\n six.ensure_str(base_filename_without_ext))\n return api_object_key\n\n\ndef _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):\n \"\"\"A Visitor that crashes on subclasses of generated proto classes.\"\"\"\n # If the traversed object is a proto Message class\n if not (isinstance(parent, type) and issubclass(parent, message.Message)):\n return\n if parent is message.Message:\n return\n # Check that it is a direct subclass of Message.\n if message.Message not in parent.__bases__:\n raise NotImplementedError(\n 'Object tf.%s is a subclass of a generated proto Message. '\n 'They are not yet supported by the API tools.' % path)\n\n\ndef _FilterGoldenProtoDict(golden_proto_dict, omit_golden_symbols_map):\n \"\"\"Filter out golden proto dict symbols that should be omitted.\"\"\"\n if not omit_golden_symbols_map:\n return golden_proto_dict\n filtered_proto_dict = dict(golden_proto_dict)\n for key, symbol_list in six.iteritems(omit_golden_symbols_map):\n api_object = api_objects_pb2.TFAPIObject()\n api_object.CopyFrom(filtered_proto_dict[key])\n filtered_proto_dict[key] = api_object\n module_or_class = None\n if api_object.HasField('tf_module'):\n module_or_class = api_object.tf_module\n elif api_object.HasField('tf_class'):\n module_or_class = api_object.tf_class\n if module_or_class is not None:\n for members in (module_or_class.member, module_or_class.member_method):\n filtered_members = [m for m in members if m.name not in symbol_list]\n # Two steps because protobuf repeated fields disallow slice assignment.\n del members[:]\n members.extend(filtered_members)\n return filtered_proto_dict\n\n\nclass ApiCompatibilityTest(tf.test.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(ApiCompatibilityTest, self).__init__(*args, **kwargs)\n\n self._update_golden_warning = file_io.read_file_to_string(\n _UPDATE_WARNING_FILE)\n\n self._test_readme_message = file_io.read_file_to_string(_TEST_README_FILE)\n\n def _AssertProtoDictEquals(self,\n expected_dict,\n actual_dict,\n verbose=False,\n update_goldens=False,\n additional_missing_object_message='',\n api_version=2):\n \"\"\"Diff given dicts of protobufs and report differences a readable way.\n\n Args:\n expected_dict: a dict of TFAPIObject protos constructed from golden files.\n actual_dict: a ict of TFAPIObject protos constructed by reading from the\n TF package linked to the test.\n verbose: Whether to log the full diffs, or simply report which files were\n different.\n update_goldens: Whether to update goldens when there are diffs found.\n additional_missing_object_message: Message to print when a symbol is\n missing.\n api_version: TensorFlow API version to test.\n \"\"\"\n diffs = []\n verbose_diffs = 
[]\n\n expected_keys = set(expected_dict.keys())\n actual_keys = set(actual_dict.keys())\n only_in_expected = expected_keys - actual_keys\n only_in_actual = actual_keys - expected_keys\n all_keys = expected_keys | actual_keys\n\n # This will be populated below.\n updated_keys = []\n\n for key in all_keys:\n diff_message = ''\n verbose_diff_message = ''\n # First check if the key is not found in one or the other.\n if key in only_in_expected:\n diff_message = 'Object %s expected but not found (removed). %s' % (\n key, additional_missing_object_message)\n verbose_diff_message = diff_message\n elif key in only_in_actual:\n diff_message = 'New object %s found (added).' % key\n verbose_diff_message = diff_message\n else:\n # Do not truncate diff\n self.maxDiff = None # pylint: disable=invalid-name\n # Now we can run an actual proto diff.\n try:\n self.assertProtoEquals(expected_dict[key], actual_dict[key])\n except AssertionError as e:\n updated_keys.append(key)\n diff_message = 'Change detected in python object: %s.' % key\n verbose_diff_message = str(e)\n\n # All difference cases covered above. If any difference found, add to the\n # list.\n if diff_message:\n diffs.append(diff_message)\n verbose_diffs.append(verbose_diff_message)\n\n # If diffs are found, handle them based on flags.\n if diffs:\n diff_count = len(diffs)\n logging.error(self._test_readme_message)\n logging.error('%d differences found between API and golden.', diff_count)\n\n if update_goldens:\n # Write files if requested.\n logging.warning(self._update_golden_warning)\n\n # If the keys are only in expected, some objects are deleted.\n # Remove files.\n for key in only_in_expected:\n filepath = _KeyToFilePath(key, api_version)\n tf.io.gfile.remove(filepath)\n\n # If the files are only in actual (current library), these are new\n # modules. Write them to files. Also record all updates in files.\n for key in only_in_actual | set(updated_keys):\n filepath = _KeyToFilePath(key, api_version)\n file_io.write_string_to_file(\n filepath, text_format.MessageToString(actual_dict[key]))\n else:\n # Include the actual differences to help debugging.\n for d, verbose_d in zip(diffs, verbose_diffs):\n logging.error(' %s', d)\n logging.error(' %s', verbose_d)\n # Fail if we cannot fix the test by updating goldens.\n self.fail('%d differences found between API and golden.' 
% diff_count)\n\n else:\n logging.info('No differences found between API and golden.')\n\n def _checkBackwardsCompatibility(self,\n root,\n golden_file_patterns,\n api_version,\n additional_private_map=None,\n omit_golden_symbols_map=None):\n # Extract all API stuff.\n visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor(\n default_path='tensorflow.keras')\n\n public_api_visitor = public_api.PublicAPIVisitor(visitor)\n if additional_private_map:\n public_api_visitor.private_map.update(additional_private_map)\n public_api_visitor.set_root_name('tf.keras')\n\n traverse.traverse(root, public_api_visitor)\n proto_dict = visitor.GetProtos()\n\n # Read all golden files.\n golden_file_list = tf.compat.v1.gfile.Glob(golden_file_patterns)\n\n def _ReadFileToProto(filename):\n \"\"\"Read a filename, create a protobuf from its contents.\"\"\"\n ret_val = api_objects_pb2.TFAPIObject()\n text_format.Merge(file_io.read_file_to_string(filename), ret_val)\n return ret_val\n\n golden_proto_dict = {\n _FileNameToKey(filename): _ReadFileToProto(filename)\n for filename in golden_file_list\n }\n golden_proto_dict = _FilterGoldenProtoDict(golden_proto_dict,\n omit_golden_symbols_map)\n\n # Diff them. Do not fail if called with update.\n # If the test is run to update goldens, only report diffs but do not fail.\n self._AssertProtoDictEquals(\n golden_proto_dict,\n proto_dict,\n verbose=FLAGS.verbose_diffs,\n update_goldens=FLAGS.update_goldens,\n api_version=api_version)\n\n def testAPIBackwardsCompatibility(self):\n api_version = 1\n if hasattr(tf, '_major_api_version') and tf._major_api_version == 2:\n api_version = 2\n golden_file_patterns = [\n os.path.join(\n tf.compat.v1.resource_loader.get_root_dir_with_all_resources(),\n _KeyToFilePath('*', api_version))]\n\n self._checkBackwardsCompatibility(\n tf.keras,\n golden_file_patterns,\n api_version,\n # Skip compat.v1 and compat.v2 since they are validated\n # in separate tests.\n additional_private_map={'tf.compat': ['v1', 'v2']},\n omit_golden_symbols_map={})\n\n def testAPIBackwardsCompatibilityV1(self):\n api_version = 1\n golden_file_patterns = os.path.join(\n tf.compat.v1.resource_loader.get_root_dir_with_all_resources(),\n _KeyToFilePath('*', api_version))\n self._checkBackwardsCompatibility(\n tf.compat.v1.keras,\n golden_file_patterns,\n api_version,\n additional_private_map={\n 'tf': ['pywrap_tensorflow'],\n 'tf.compat': ['v1', 'v2'],\n },\n omit_golden_symbols_map={})\n\n def testAPIBackwardsCompatibilityV2(self):\n api_version = 2\n golden_file_patterns = [os.path.join(\n tf.compat.v1.resource_loader.get_root_dir_with_all_resources(),\n _KeyToFilePath('*', api_version))]\n self._checkBackwardsCompatibility(\n tf.compat.v2.keras,\n golden_file_patterns,\n api_version,\n additional_private_map={'tf.compat': ['v1', 'v2']},\n omit_golden_symbols_map={})\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)\n parser.add_argument(\n '--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)\n FLAGS, unparsed = parser.parse_known_args()\n _InitPathConstants()\n\n # Now update argv, so that unittest library does not get confused.\n sys.argv = [sys.argv[0]] + unparsed\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TPUStrategy.\"\"\"\n\nimport random\nimport tempfile\n\nfrom absl import flags\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow.python.framework import test_util as tf_test_utils\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string(\"tpu\", \"\", \"Name of TPU to connect to.\")\nflags.DEFINE_string(\"project\", None, \"Name of GCP project with TPU.\")\nflags.DEFINE_string(\"zone\", None, \"Name of GCP zone with TPU.\")\n\n# These vocabularies usually come from TFT or a Beam pipeline.\nFEATURE_VOCAB = [\n \"avenger\", \"ironman\", \"batman\", \"hulk\", \"spiderman\", \"kingkong\",\n \"wonder_woman\"\n]\nLABEL_VOCAB = [\"yes\", \"no\"]\n\n\ndef get_tpu_cluster_resolver():\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n tpu=FLAGS.tpu,\n zone=FLAGS.zone,\n project=FLAGS.project,\n )\n return resolver\n\n\ndef get_tpu_strategy():\n resolver = get_tpu_cluster_resolver()\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n return tf.distribute.experimental.TPUStrategy(resolver)\n\n\nclass TpuStrategyTest(tf.test.TestCase):\n\n def define_kpls_for_training(self, use_adapt):\n if use_adapt:\n feature_lookup_layer = (\n tf.keras.layers.StringLookup(\n num_oov_indices=1))\n feature_lookup_layer.adapt(FEATURE_VOCAB)\n label_lookup_layer = (\n tf.keras.layers.StringLookup(\n num_oov_indices=0, mask_token=None))\n label_lookup_layer.adapt(LABEL_VOCAB)\n else:\n feature_lookup_layer = (\n tf.keras.layers.StringLookup(\n vocabulary=FEATURE_VOCAB, num_oov_indices=1))\n label_lookup_layer = (\n tf.keras.layers.StringLookup(\n vocabulary=LABEL_VOCAB, num_oov_indices=0, mask_token=None))\n\n raw_feature_input = tf.keras.layers.Input(\n shape=(3,), dtype=tf.dtypes.string, name=\"feature\", ragged=True)\n feature_id_input = feature_lookup_layer(raw_feature_input)\n feature_mapper = tf.keras.Model({\"features\": raw_feature_input},\n feature_id_input)\n\n raw_label_input = tf.keras.layers.Input(\n shape=(1,), dtype=tf.dtypes.string, name=\"label\")\n label_id_input = label_lookup_layer(raw_label_input)\n label_mapper = tf.keras.Model({\"label\": raw_label_input}, label_id_input)\n\n return feature_mapper, label_mapper\n\n def define_inverse_lookup_layer(self):\n # Only needed for serving.\n label_inverse_lookup_layer = (\n tf.keras.layers.StringLookup(\n num_oov_indices=0,\n mask_token=None,\n vocabulary=LABEL_VOCAB,\n invert=True))\n return label_inverse_lookup_layer\n\n def test_keras_metric_outside_strategy_scope_per_replica(self):\n if not tf.compat.v1.executing_eagerly():\n self.skipTest(\"connect_to_cluster() can only be called in eager mode\")\n strategy = get_tpu_strategy()\n metric = tf.keras.metrics.Mean(\"test_metric\", dtype=tf.float32)\n\n dataset = tf.data.Dataset.range(strategy.num_replicas_in_sync * 2).batch(2)\n dataset = 
strategy.experimental_distribute_dataset(dataset)\n\n @tf.function\n def step_fn(i):\n metric.update_state(i)\n\n with self.assertRaisesRegex(\n ValueError, \"Trying to run metric.update_state \"\n \"in replica context\"):\n with strategy.scope():\n for i in dataset:\n strategy.run(step_fn, args=(i,))\n\n @tf_test_utils.disable_mlir_bridge(\n \"TODO(b/168036682): Support dynamic padder\")\n def test_train_and_serve(self):\n if not tf.compat.v1.executing_eagerly():\n self.skipTest(\"connect_to_cluster() can only be called in eager mode\")\n strategy = get_tpu_strategy()\n use_adapt = False\n\n with strategy.scope():\n feature_mapper, label_mapper = self.define_kpls_for_training(use_adapt)\n\n def dataset_fn(_):\n\n def feature_and_label_gen():\n # Generator of dataset.\n while True:\n features = random.sample(FEATURE_VOCAB, 3)\n label = [\"yes\"] if \"avenger\" in features else [\"no\"]\n yield {\"features\": features, \"label\": label}\n\n raw_dataset = tf.data.Dataset.from_generator(\n feature_and_label_gen,\n output_signature={\n \"features\": tf.TensorSpec([3], tf.dtypes.string),\n \"label\": tf.TensorSpec([1], tf.dtypes.string)\n }).shuffle(100).batch(32)\n\n train_dataset = raw_dataset.map(lambda x: ( # pylint: disable=g-long-lambda\n {\n \"features\": feature_mapper(x[\"features\"])\n }, label_mapper(x[\"label\"])))\n return train_dataset\n\n # Create the model. The input needs to be compatible with KPLs.\n model_input = tf.keras.layers.Input(\n shape=(3,), dtype=tf.dtypes.int64, name=\"model_input\")\n\n # input_dim includes a mask token and an oov token.\n emb_output = tf.keras.layers.Embedding(\n input_dim=len(FEATURE_VOCAB) + 2, output_dim=20)(\n model_input)\n emb_output = tf.math.reduce_mean(emb_output, axis=1)\n dense_output = tf.keras.layers.Dense(\n units=1, activation=\"sigmoid\")(\n emb_output)\n model = tf.keras.Model({\"features\": model_input}, dense_output)\n\n optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1)\n accuracy = tf.keras.metrics.Accuracy()\n\n @tf.function\n def train_step(iterator):\n \"\"\"The step function for one training step.\"\"\"\n\n def step_fn(inputs):\n \"\"\"The computation to run on each TPU device.\"\"\"\n features, labels = inputs\n with tf.GradientTape() as tape:\n pred = model(features, training=True)\n loss = tf.keras.losses.binary_crossentropy(labels, pred)\n loss = tf.nn.compute_average_loss(loss)\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))\n\n actual_pred = tf.cast(tf.math.greater(pred, 0.5), tf.dtypes.int64)\n accuracy.update_state(labels, actual_pred)\n\n strategy.run(step_fn, args=(next(iterator),))\n\n distributed_dataset = strategy.distribute_datasets_from_function(\n dataset_fn)\n distributed_iterator = iter(distributed_dataset)\n num_epochs = 4\n num_steps = 7\n for _ in range(num_epochs):\n accuracy.reset_state()\n for _ in range(num_steps):\n train_step(distributed_iterator)\n\n self.assertGreater(accuracy.result().numpy(), 0.5)\n self.assertEqual(optimizer.iterations.numpy(), num_epochs * num_steps)\n\n # Create a saved model.\n model.feature_mapper = feature_mapper\n model.label_mapper = label_mapper\n model.label_inverse_lookup_layer = self.define_inverse_lookup_layer()\n\n def create_serving_signature(model):\n\n @tf.function\n def serve_fn(raw_features):\n raw_features = tf.expand_dims(raw_features, axis=0)\n transformed_features = model.feature_mapper(raw_features)\n outputs = model(transformed_features)\n outputs = 
tf.squeeze(outputs, axis=0)\n outputs = tf.cast(tf.math.greater(outputs, 0.5), tf.dtypes.int64)\n decoded_outputs = model.label_inverse_lookup_layer(outputs)\n return tf.squeeze(decoded_outputs, axis=0)\n\n # Serving does NOT have batch dimension\n return serve_fn.get_concrete_function(\n tf.TensorSpec(shape=(3), dtype=tf.dtypes.string, name=\"example\"))\n\n serving_fn = create_serving_signature(model)\n\n saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n model.save(saved_model_dir, save_format=\"tf\",\n signatures={\"serving_default\": serving_fn})\n\n # Test the saved_model.\n loaded_serving_fn = tf.keras.models.load_model(\n saved_model_dir).signatures[\"serving_default\"]\n\n # Check model calling with serving signature.\n prediction1 = loaded_serving_fn(\n tf.constant([\"avenger\", \"ironman\", \"avenger\"]))[\"output_0\"]\n self.assertIn(prediction1, (\"yes\", \"no\"))\n\n prediction2 = loaded_serving_fn(\n tf.constant([\"ironman\", \"ironman\", \"unknown\"]))[\"output_0\"]\n self.assertIn(prediction2, (\"yes\", \"no\"))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#,============================================================================\n\"\"\"Tests for layer graphs construction & handling.\"\"\"\n\nimport warnings\n\nfrom keras import backend\nfrom keras import layers\nfrom keras import losses\nfrom keras import models\nfrom keras.engine import base_layer\nfrom keras.engine import functional\nfrom keras.engine import input_layer as input_layer_lib\nfrom keras.engine import sequential\nfrom keras.engine import training as training_lib\nfrom keras.testing_infra import test_combinations\nfrom keras.testing_infra import test_utils\nfrom keras.utils import layer_utils\nfrom keras.utils import tf_utils\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\nfrom tensorflow.python.framework import extension_type\nfrom tensorflow.python.training.tracking.util import Checkpoint\n\n\n\nclass NetworkConstructionTest(test_combinations.TestCase):\n\n def test_default_model_name(self):\n inputs = input_layer_lib.Input(shape=(1,))\n outputs = layers.Dense(1, activation='relu')(inputs)\n model = training_lib.Model(inputs=inputs, outputs=outputs)\n self.assertEqual(model.name, 'model')\n\n model_2 = training_lib.Model(inputs=inputs, outputs=outputs)\n self.assertEqual(model_2.name, 'model_1')\n\n model_3 = training_lib.Model(inputs=inputs, outputs=outputs)\n self.assertEqual(model_3.name, 'model_2')\n\n def test_get_updates(self):\n\n class MyLayer(layers.Layer):\n\n def build(self, input_shape):\n self.a = self.add_weight('a',\n (1, 1),\n 'float32',\n trainable=False)\n self.b = self.add_weight('b',\n (1, 1),\n 'float32',\n trainable=False)\n self.add_update(tf.compat.v1.assign_add(\n self.a, [[1.]], name='unconditional_update'))\n self.built = True\n\n def call(self, inputs):\n self.add_update(\n tf.compat.v1.assign_add(self.b, inputs, 
name='conditional_update'))\n return inputs + 1\n\n with tf.Graph().as_default():\n x1 = input_layer_lib.Input(shape=(1,))\n layer = MyLayer()\n _ = layer(x1)\n\n self.assertEqual(len(layer.updates), 2)\n\n x2 = input_layer_lib.Input(shape=(1,))\n y2 = layer(x2)\n\n self.assertEqual(len(layer.updates), 3)\n\n network = functional.Functional(x2, y2)\n self.assertEqual(len(network.updates), 3)\n\n x3 = input_layer_lib.Input(shape=(1,))\n _ = layer(x3)\n self.assertEqual(len(network.updates), 4)\n\n x4 = input_layer_lib.Input(shape=(1,))\n _ = network(x4)\n self.assertEqual(len(network.updates), 5)\n\n network.add_update(tf.compat.v1.assign_add(layer.a, [[1]]))\n self.assertEqual(len(network.updates), 6)\n\n network.add_update(tf.compat.v1.assign_add(layer.b, x4))\n self.assertEqual(len(network.updates), 7)\n\n @test_combinations.generate(test_combinations.combine(mode=['graph']))\n def test_get_updates_bn(self):\n x1 = input_layer_lib.Input(shape=(1,))\n layer = layers.BatchNormalization()\n _ = layer(x1)\n\n self.assertEqual(len(layer.updates), 2)\n\n def test_get_layer(self):\n # create a simple network\n x = input_layer_lib.Input(shape=(32,))\n dense_a = layers.Dense(4, name='dense_a')\n dense_b = layers.Dense(2, name='dense_b')\n y = dense_b(dense_a(x))\n network = functional.Functional(x, y, name='dense_network')\n\n # test various get_layer by index\n self.assertEqual(network.get_layer(index=1), dense_a)\n\n # test invalid get_layer by index\n with self.assertRaisesRegex(\n ValueError, 'Was asked to retrieve layer at index ' + str(3) +\n ' but model only has ' + str(len(network.layers)) + ' layers.'):\n network.get_layer(index=3)\n\n # test that only one between name and index is requested\n with self.assertRaisesRegex(ValueError,\n 'Provide only a layer name or a layer index'):\n network.get_layer(index=1, name='dense_b')\n\n # test that a name or an index must be provided\n with self.assertRaisesRegex(ValueError,\n 'Provide either a layer name or layer index.'):\n network.get_layer()\n\n # test various get_layer by name\n self.assertEqual(network.get_layer(name='dense_a'), dense_a)\n\n # test invalid get_layer by name\n with self.assertRaisesRegex(ValueError, 'No such layer: dense_c.'):\n network.get_layer(name='dense_c')\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testTopologicalAttributes(self):\n # test layer attributes / methods related to cross-layer connectivity.\n a = input_layer_lib.Input(shape=(32,), name='input_a')\n b = input_layer_lib.Input(shape=(32,), name='input_b')\n\n # test input, output, input_shape, output_shape\n test_layer = layers.Dense(16, name='test_layer')\n a_test = test_layer(a)\n self.assertIs(test_layer.input, a)\n self.assertIs(test_layer.output, a_test)\n self.assertEqual(test_layer.input_shape, (None, 32))\n self.assertEqual(test_layer.output_shape, (None, 16))\n\n # test `get_*_at` methods\n dense = layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n\n self.assertIs(dense.get_input_at(0), a)\n self.assertIs(dense.get_input_at(1), b)\n self.assertIs(dense.get_output_at(0), a_2)\n self.assertIs(dense.get_output_at(1), b_2)\n self.assertEqual(dense.get_input_shape_at(0), (None, 32))\n self.assertEqual(dense.get_input_shape_at(1), (None, 32))\n self.assertEqual(dense.get_output_shape_at(0), (None, 16))\n self.assertEqual(dense.get_output_shape_at(1), (None, 16))\n\n # Test invalid value for attribute retrieval.\n with self.assertRaises(ValueError):\n dense.get_input_at(2)\n with 
self.assertRaises(AttributeError):\n new_dense = layers.Dense(16)\n _ = new_dense.input\n with self.assertRaises(AttributeError):\n new_dense = layers.Dense(16)\n _ = new_dense.output\n with self.assertRaises(AttributeError):\n new_dense = layers.Dense(16)\n _ = new_dense.output_shape\n with self.assertRaises(AttributeError):\n new_dense = layers.Dense(16)\n _ = new_dense.input_shape\n with self.assertRaises(AttributeError):\n new_dense = layers.Dense(16)\n a = input_layer_lib.Input(shape=(3, 32))\n a = input_layer_lib.Input(shape=(5, 32))\n a_2 = dense(a)\n b_2 = dense(b)\n _ = new_dense.input_shape\n with self.assertRaises(AttributeError):\n new_dense = layers.Dense(16)\n a = input_layer_lib.Input(shape=(3, 32))\n a = input_layer_lib.Input(shape=(5, 32))\n a_2 = dense(a)\n b_2 = dense(b)\n _ = new_dense.output_shape\n\n def _assertAllIs(self, a, b):\n self.assertTrue(all(x is y for x, y in zip(a, b)))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testTopologicalAttributesMultiOutputLayer(self):\n\n class PowersLayer(layers.Layer):\n\n def call(self, inputs):\n return [inputs**2, inputs**3]\n\n x = input_layer_lib.Input(shape=(32,))\n test_layer = PowersLayer()\n p1, p2 = test_layer(x) # pylint: disable=not-callable\n\n self.assertIs(test_layer.input, x)\n self._assertAllIs(test_layer.output, [p1, p2])\n self.assertEqual(test_layer.input_shape, (None, 32))\n self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testTopologicalAttributesMultiInputLayer(self):\n\n class AddLayer(layers.Layer):\n\n def call(self, inputs):\n assert len(inputs) == 2\n return inputs[0] + inputs[1]\n\n a = input_layer_lib.Input(shape=(32,))\n b = input_layer_lib.Input(shape=(32,))\n test_layer = AddLayer()\n y = test_layer([a, b]) # pylint: disable=not-callable\n\n self._assertAllIs(test_layer.input, [a, b])\n self.assertIs(test_layer.output, y)\n self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])\n self.assertEqual(test_layer.output_shape, (None, 32))\n\n def testBasicNetwork(self):\n with tf.Graph().as_default():\n # minimum viable network\n x = input_layer_lib.Input(shape=(32,))\n dense = layers.Dense(2)\n y = dense(x)\n network = functional.Functional(x, y, name='dense_network')\n\n # test basic attributes\n self.assertEqual(network.name, 'dense_network')\n self.assertEqual(len(network.layers), 2) # InputLayer + Dense\n self.assertEqual(network.layers[1], dense)\n self._assertAllIs(network.weights, dense.weights)\n self._assertAllIs(network.trainable_weights, dense.trainable_weights)\n self._assertAllIs(network.non_trainable_weights,\n dense.non_trainable_weights)\n\n # test callability on Input\n x_2 = input_layer_lib.Input(shape=(32,))\n y_2 = network(x_2)\n self.assertEqual(y_2.shape.as_list(), [None, 2])\n\n # test callability on regular tensor\n x_2 = tf.compat.v1.placeholder(dtype='float32', shape=(None, 32))\n y_2 = network(x_2)\n self.assertEqual(y_2.shape.as_list(), [None, 2])\n\n # test network `trainable` attribute\n network.trainable = False\n self._assertAllIs(network.weights, dense.weights)\n self.assertEqual(network.trainable_weights, [])\n self._assertAllIs(network.non_trainable_weights,\n dense.trainable_weights + dense.non_trainable_weights)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_trainable_weights(self):\n a = layers.Input(shape=(2,))\n b = layers.Dense(1)(a)\n 
model = training_lib.Model(a, b)\n\n weights = model.weights\n self._assertAllIs(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.trainable = False\n self.assertListEqual(model.trainable_weights, [])\n self._assertAllIs(model.non_trainable_weights, weights)\n\n model.trainable = True\n self._assertAllIs(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.layers[1].trainable = False\n self.assertListEqual(model.trainable_weights, [])\n self._assertAllIs(model.non_trainable_weights, weights)\n\n # sequential model\n model = sequential.Sequential()\n model.add(layers.Dense(1, input_dim=2))\n weights = model.weights\n\n self._assertAllIs(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.trainable = False\n self.assertListEqual(model.trainable_weights, [])\n self._assertAllIs(model.non_trainable_weights, weights)\n\n model.trainable = True\n self._assertAllIs(model.trainable_weights, weights)\n self.assertListEqual(model.non_trainable_weights, [])\n\n model.layers[0].trainable = False\n self.assertListEqual(model.trainable_weights, [])\n self._assertAllIs(model.non_trainable_weights, weights)\n\n def test_layer_call_arguments(self):\n with tf.Graph().as_default():\n # Test the ability to pass and serialize arguments to `call`.\n inp = layers.Input(shape=(2,))\n x = layers.Dense(3)(inp)\n x = layers.Dropout(0.5)(x, training=True)\n model = training_lib.Model(inp, x)\n # Would be `dropout/cond/Merge` by default\n self.assertIn('dropout', model.output.op.name)\n\n # Test that argument is kept when applying the model\n inp2 = layers.Input(shape=(2,))\n out2 = model(inp2)\n self.assertIn('dropout', out2.op.name)\n\n # Test that argument is kept after loading a model\n config = model.get_config()\n model = training_lib.Model.from_config(config)\n self.assertIn('dropout', model.output.op.name)\n\n def test_node_construction(self):\n # test basics\n a = layers.Input(shape=(32,), name='input_a')\n b = layers.Input(shape=(32,), name='input_b')\n\n with self.assertRaises(ValueError):\n _ = layers.Input(shape=(32,), batch_shape=(10, 32))\n with self.assertRaises(ValueError):\n _ = layers.Input(shape=(32,), unknown_kwarg=None)\n\n self.assertListEqual(a.shape.as_list(), [None, 32])\n a_layer, a_node_index, a_tensor_index = a._keras_history\n b_layer, _, _ = b._keras_history\n self.assertEqual(len(a_layer._inbound_nodes), 1)\n self.assertEqual(a_tensor_index, 0)\n node = a_layer._inbound_nodes[a_node_index]\n self.assertEqual(node.outbound_layer, a_layer)\n\n self.assertListEqual(node.inbound_layers, [])\n self.assertListEqual(node.input_tensors, [a])\n self.assertListEqual(node.input_shapes, [(None, 32)])\n self.assertListEqual(node.output_tensors, [a])\n self.assertListEqual(node.output_shapes, [(None, 32)])\n\n dense = layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n\n self.assertEqual(len(dense._inbound_nodes), 2)\n self.assertEqual(len(dense._outbound_nodes), 0)\n self.assertEqual(dense._inbound_nodes[0].inbound_layers, a_layer)\n self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)\n self.assertEqual(dense._inbound_nodes[1].inbound_layers, b_layer)\n self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)\n self.assertIs(dense._inbound_nodes[0].input_tensors, a)\n self.assertIs(dense._inbound_nodes[1].input_tensors, b)\n\n # test layer properties\n test_layer = layers.Dense(16, name='test_layer')\n a_test = 
test_layer(a)\n self.assertListEqual(test_layer.kernel.shape.as_list(), [32, 16])\n self.assertIs(test_layer.input, a)\n self.assertIs(test_layer.output, a_test)\n self.assertEqual(test_layer.input_shape, (None, 32))\n self.assertEqual(test_layer.output_shape, (None, 16))\n\n self.assertIs(dense.get_input_at(0), a)\n self.assertIs(dense.get_input_at(1), b)\n self.assertIs(dense.get_output_at(0), a_2)\n self.assertIs(dense.get_output_at(1), b_2)\n self.assertEqual(dense.get_input_shape_at(0), (None, 32))\n self.assertEqual(dense.get_input_shape_at(1), (None, 32))\n self.assertEqual(dense.get_output_shape_at(0), (None, 16))\n self.assertEqual(dense.get_output_shape_at(1), (None, 16))\n self.assertEqual(dense.get_input_mask_at(0), None)\n self.assertEqual(dense.get_input_mask_at(1), None)\n self.assertEqual(dense.get_output_mask_at(0), None)\n self.assertEqual(dense.get_output_mask_at(1), None)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_multi_input_layer(self):\n with self.cached_session():\n # test multi-input layer\n a = layers.Input(shape=(32,), name='input_a')\n b = layers.Input(shape=(32,), name='input_b')\n\n dense = layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n\n merged = layers.concatenate([a_2, b_2], name='merge')\n self.assertListEqual(merged.shape.as_list(), [None, 16 * 2])\n merge_layer, merge_node_index, merge_tensor_index = merged._keras_history\n\n self.assertEqual(merge_node_index, 0)\n self.assertEqual(merge_tensor_index, 0)\n\n self.assertEqual(len(merge_layer._inbound_nodes), 1)\n self.assertEqual(len(merge_layer._outbound_nodes), 0)\n\n self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)\n self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)\n\n c = layers.Dense(64, name='dense_2')(merged)\n d = layers.Dense(5, name='dense_3')(c)\n\n model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')\n self.assertEqual(len(model.layers), 6)\n output_shapes = model.compute_output_shape([(None, 32), (None, 32)])\n self.assertListEqual(output_shapes[0].as_list(), [None, 64])\n self.assertListEqual(output_shapes[1].as_list(), [None, 5])\n self.assertListEqual(\n model.compute_mask([a, b], [None, None]), [None, None])\n\n # we don't check names of first 2 layers (inputs) because\n # ordering of same-level layers is not fixed\n self.assertListEqual([l.name for l in model.layers][2:],\n ['dense_1', 'merge', 'dense_2', 'dense_3'])\n self.assertListEqual([l.name for l in model._input_layers],\n ['input_a', 'input_b'])\n self.assertListEqual([l.name for l in model._output_layers],\n ['dense_2', 'dense_3'])\n\n # actually run model\n fn = backend.function(model.inputs, model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])\n\n # test get_source_inputs\n self._assertAllIs(layer_utils.get_source_inputs(c), [a, b])\n\n # serialization / deserialization\n json_config = model.to_json()\n recreated_model = models.model_from_json(json_config)\n recreated_model.compile('rmsprop', 'mse')\n\n self.assertListEqual([l.name for l in recreated_model.layers][2:],\n ['dense_1', 'merge', 'dense_2', 'dense_3'])\n self.assertListEqual([l.name for l in recreated_model._input_layers],\n ['input_a', 'input_b'])\n self.assertListEqual([l.name for l in recreated_model._output_layers],\n ['dense_2', 'dense_3'])\n\n fn = 
backend.function(recreated_model.inputs, recreated_model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])\n\n def test_multi_output_layer_output_names(self):\n inp = layers.Input(name='inp', shape=(None,), dtype=tf.float32)\n\n class _MultiOutput(layers.Layer):\n\n def call(self, x):\n return x + 1., x + 2.\n\n out = _MultiOutput(name='out')(inp)\n model = training_lib.Model(inp, out)\n self.assertEqual(['out', 'out_1'], model.output_names)\n self.assertAllClose([2., 3.], model(1.))\n\n def test_recursion(self):\n with tf.Graph().as_default(), self.cached_session():\n a = layers.Input(shape=(32,), name='input_a')\n b = layers.Input(shape=(32,), name='input_b')\n\n dense = layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = layers.concatenate([a_2, b_2], name='merge')\n c = layers.Dense(64, name='dense_2')(merged)\n d = layers.Dense(5, name='dense_3')(c)\n\n model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n e = layers.Input(shape=(32,), name='input_e')\n f = layers.Input(shape=(32,), name='input_f')\n self.assertEqual(len(model.inputs), 2)\n g, h = model([e, f])\n self.assertEqual(len(model.inputs), 2)\n self.assertEqual(g.name, 'model/dense_2/BiasAdd:0')\n\n self.assertListEqual(g.shape.as_list(), c.shape.as_list())\n self.assertListEqual(h.shape.as_list(), d.shape.as_list())\n\n # test separate manipulation of different layer outputs\n i = layers.Dense(7, name='dense_4')(h)\n\n final_model = training_lib.Model(\n inputs=[e, f], outputs=[i, g], name='final')\n self.assertEqual(len(final_model.inputs), 2)\n self.assertEqual(len(final_model.outputs), 2)\n self.assertEqual(len(final_model.layers), 4)\n\n # we don't check names of first 2 layers (inputs) because\n # ordering of same-level layers is not fixed\n self.assertListEqual([layer.name for layer in final_model.layers][2:],\n ['model', 'dense_4'])\n self.assertListEqual(\n model.compute_mask([e, f], [None, None]), [None, None])\n self.assertListEqual(\n final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7),\n (10, 64)])\n\n # run recursive model\n fn = backend.function(final_model.inputs, final_model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])\n\n # test serialization\n model_config = final_model.get_config()\n recreated_model = models.Model.from_config(model_config)\n\n fn = backend.function(recreated_model.inputs, recreated_model.outputs)\n input_a_np = np.random.random((10, 32))\n input_b_np = np.random.random((10, 32))\n fn_outputs = fn([input_a_np, input_b_np])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_multi_input_multi_output_recursion(self):\n with self.cached_session():\n # test multi-input multi-output\n a = layers.Input(shape=(32,), name='input_a')\n b = layers.Input(shape=(32,), name='input_b')\n\n dense = layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = layers.concatenate([a_2, b_2], name='merge')\n c = layers.Dense(64, name='dense_2')(merged)\n d = layers.Dense(5, name='dense_3')(c)\n\n model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n j = 
layers.Input(shape=(32,), name='input_j')\n k = layers.Input(shape=(32,), name='input_k')\n _, n = model([j, k])\n\n o = layers.Input(shape=(32,), name='input_o')\n p = layers.Input(shape=(32,), name='input_p')\n q, _ = model([o, p])\n\n self.assertListEqual(n.shape.as_list(), [None, 5])\n self.assertListEqual(q.shape.as_list(), [None, 64])\n s = layers.concatenate([n, q], name='merge_nq')\n self.assertListEqual(s.shape.as_list(), [None, 64 + 5])\n\n # test with single output as 1-elem list\n multi_io_model = training_lib.Model([j, k, o, p], [s])\n\n fn = backend.function(multi_io_model.inputs, multi_io_model.outputs)\n fn_outputs = fn([\n np.random.random((10, 32)), np.random.random((10, 32)),\n np.random.random((10, 32)), np.random.random((10, 32))\n ])\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])\n\n # test with single output as tensor\n multi_io_model = training_lib.Model([j, k, o, p], s)\n\n fn = backend.function(multi_io_model.inputs, multi_io_model.outputs)\n fn_outputs = fn([\n np.random.random((10, 32)), np.random.random((10, 32)),\n np.random.random((10, 32)), np.random.random((10, 32))\n ])\n # note that the output of the function will still be a 1-elem list\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])\n\n # test serialization\n model_config = multi_io_model.get_config()\n recreated_model = models.Model.from_config(model_config)\n\n fn = backend.function(recreated_model.inputs, recreated_model.outputs)\n fn_outputs = fn([\n np.random.random((10, 32)), np.random.random((10, 32)),\n np.random.random((10, 32)), np.random.random((10, 32))\n ])\n # note that the output of the function will still be a 1-elem list\n self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])\n\n config = model.get_config()\n models.Model.from_config(config)\n\n model.summary()\n json_str = model.to_json()\n models.model_from_json(json_str)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_invalid_graphs(self):\n a = layers.Input(shape=(32,), name='input_a')\n b = layers.Input(shape=(32,), name='input_b')\n\n dense = layers.Dense(16, name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = layers.concatenate([a_2, b_2], name='merge')\n c = layers.Dense(64, name='dense_2')(merged)\n d = layers.Dense(5, name='dense_3')(c)\n\n model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n # disconnected graph\n j = layers.Input(shape=(32,), name='input_j')\n k = layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n with self.assertRaises(Exception):\n training_lib.Model([j], [m, n])\n\n # redundant outputs\n j = layers.Input(shape=(32,), name='input_j')\n k = layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n\n training_lib.Model([j, k], [m, n, n])\n\n # redundant inputs\n j = layers.Input(shape=(32,), name='input_j')\n k = layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n with self.assertRaises(Exception):\n training_lib.Model([j, k, j], [m, n])\n\n # i have not idea what I'm doing: garbage as inputs/outputs\n j = layers.Input(shape=(32,), name='input_j')\n k = layers.Input(shape=(32,), name='input_k')\n m, n = model([j, k])\n with self.assertRaises(Exception):\n training_lib.Model([j, k], [m, n, 0])\n\n def test_raw_tf_compatibility(self):\n with tf.Graph().as_default():\n # test calling layers/models on TF tensors\n a = layers.Input(shape=(32,), name='input_a')\n b = layers.Input(shape=(32,), name='input_b')\n\n dense = layers.Dense(16, 
name='dense_1')\n a_2 = dense(a)\n b_2 = dense(b)\n merged = layers.concatenate([a_2, b_2], name='merge')\n c = layers.Dense(64, name='dense_2')(merged)\n d = layers.Dense(5, name='dense_3')(c)\n\n model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')\n\n j = layers.Input(shape=(32,), name='input_j')\n k = layers.Input(shape=(32,), name='input_k')\n self.assertEqual(len(model.inputs), 2)\n m, n = model([j, k])\n self.assertEqual(len(model.inputs), 2)\n tf_model = training_lib.Model([j, k], [m, n])\n\n j_tf = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 32))\n k_tf = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 32))\n m_tf, n_tf = tf_model([j_tf, k_tf])\n self.assertListEqual(m_tf.shape.as_list(), [None, 64])\n self.assertListEqual(n_tf.shape.as_list(), [None, 5])\n\n # test merge\n layers.concatenate([j_tf, k_tf], axis=1)\n layers.add([j_tf, k_tf])\n\n # test tensor input\n x = tf.compat.v1.placeholder(shape=(None, 2), dtype=tf.float32)\n layers.InputLayer(input_tensor=x)\n\n x = layers.Input(tensor=x)\n layers.Dense(2)(x)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_basic_masking(self):\n a = layers.Input(shape=(10, 32), name='input_a')\n b = layers.Masking()(a)\n model = training_lib.Model(a, b)\n self.assertEqual(model.output_mask.shape.as_list(), [None, 10])\n\n def testMaskingSingleInput(self):\n\n class MaskedLayer(layers.Layer):\n\n def call(self, inputs, mask=None):\n if mask is not None:\n return inputs * mask\n return inputs\n\n def compute_mask(self, inputs, mask=None):\n return tf.ones_like(inputs)\n\n if tf.executing_eagerly():\n a = tf.constant([2] * 32)\n mask = tf.constant([0, 1] * 16)\n a._keras_mask = mask\n b = MaskedLayer()(a)\n self.assertTrue(hasattr(b, '_keras_mask'))\n self.assertAllEqual(\n self.evaluate(tf.ones_like(mask)),\n self.evaluate(getattr(b, '_keras_mask')))\n self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))\n else:\n x = input_layer_lib.Input(shape=(32,))\n y = MaskedLayer()(x) # pylint: disable=not-callable\n network = functional.Functional(x, y)\n\n # test callability on Input\n x_2 = input_layer_lib.Input(shape=(32,))\n y_2 = network(x_2)\n self.assertEqual(y_2.shape.as_list(), [None, 32])\n\n # test callability on regular tensor\n x_2 = tf.compat.v1.placeholder(dtype='float32', shape=(None, 32))\n y_2 = network(x_2)\n self.assertEqual(y_2.shape.as_list(), [None, 32])\n\n def test_activity_regularization_with_model_composition(self):\n\n def reg(x):\n return tf.reduce_sum(x)\n\n net_a_input = input_layer_lib.Input((2,))\n net_a = net_a_input\n net_a = layers.Dense(\n 2, kernel_initializer='ones', use_bias=False, activity_regularizer=reg)(\n net_a)\n model_a = training_lib.Model([net_a_input], [net_a])\n\n net_b_input = input_layer_lib.Input((2,))\n net_b = model_a(net_b_input)\n model_b = training_lib.Model([net_b_input], [net_b])\n\n model_b.compile(optimizer='sgd', loss=None)\n x = np.ones((1, 2))\n loss = model_b.evaluate(x)\n self.assertEqual(loss, 4.)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_layer_sharing_at_heterogenous_depth(self):\n x_val = np.random.random((10, 5))\n\n x = input_layer_lib.Input(shape=(5,))\n a = layers.Dense(5, name='A')\n b = layers.Dense(5, name='B')\n output = a(b(a(b(x))))\n m = training_lib.Model(x, output)\n m.run_eagerly = test_utils.should_run_eagerly()\n\n output_val = m.predict(x_val)\n\n config = m.get_config()\n weights = m.get_weights()\n\n m2 = 
models.Model.from_config(config)\n m2.set_weights(weights)\n\n output_val_2 = m2.predict(x_val)\n self.assertAllClose(output_val, output_val_2, atol=1e-6)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_layer_sharing_at_heterogenous_depth_with_concat(self):\n input_shape = (16, 9, 3)\n input_layer = input_layer_lib.Input(shape=input_shape)\n\n a = layers.Dense(3, name='dense_A')\n b = layers.Dense(3, name='dense_B')\n c = layers.Dense(3, name='dense_C')\n\n x1 = b(a(input_layer))\n x2 = a(c(input_layer))\n output = layers.concatenate([x1, x2])\n\n m = training_lib.Model(inputs=input_layer, outputs=output)\n m.run_eagerly = test_utils.should_run_eagerly()\n\n x_val = np.random.random((10, 16, 9, 3))\n output_val = m.predict(x_val)\n\n config = m.get_config()\n weights = m.get_weights()\n\n m2 = models.Model.from_config(config)\n m2.set_weights(weights)\n\n output_val_2 = m2.predict(x_val)\n self.assertAllClose(output_val, output_val_2, atol=1e-6)\n\n def test_layer_sharing_maintains_node_order(self):\n # See https://github.com/keras-team/keras/issues/14838.\n inp = input_layer_lib.Input(shape=[5], name='main_input')\n\n zeros = layers.Lambda(tf.zeros_like, name='generate_zeros')(inp)\n ones = layers.Lambda(tf.ones_like, name='generate_ones')(inp)\n\n shared_layer = layers.Layer(name='shared')\n\n ones_result = shared_layer(ones)\n zeros_result = shared_layer(zeros)\n zeros_result = layers.Layer(name='blank')(zeros_result)\n\n m = training_lib.Model(\n inputs=[inp], outputs=[zeros_result, ones_result])\n m2 = models.Model.from_config(m.get_config())\n self.assertAllClose(\n m2.predict_on_batch(tf.zeros([1, 5])),\n m.predict_on_batch(tf.zeros([1, 5])))\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_explicit_training_argument(self):\n a = layers.Input(shape=(2,))\n b = layers.Dropout(0.5)(a)\n base_model = training_lib.Model(a, b)\n\n a = layers.Input(shape=(2,))\n b = base_model(a, training=False)\n model = training_lib.Model(a, b)\n\n x = np.ones((100, 2))\n y = np.ones((100, 2))\n model.compile(\n optimizer='sgd',\n loss='mse',\n run_eagerly=test_utils.should_run_eagerly())\n loss = model.train_on_batch(x, y)\n self.assertEqual(loss, 0) # In inference mode, output is equal to input.\n\n a = layers.Input(shape=(2,))\n b = base_model(a, training=True)\n model = training_lib.Model(a, b)\n preds = model.predict(x)\n self.assertEqual(np.min(preds), 0.) 
# At least one unit was dropped.\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_mask_derived_from_keras_layer(self):\n inputs = input_layer_lib.Input((5, 10))\n mask = input_layer_lib.Input((5,))\n outputs = layers.RNN(layers.LSTMCell(100))(inputs, mask=mask)\n model = training_lib.Model([inputs, mask], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[np.ones((10, 5, 10)), np.zeros((10, 5))],\n y=np.zeros((10, 100)),\n batch_size=2)\n # All data is masked, returned values are 0's.\n self.assertEqual(history.history['loss'][0], 0.0)\n history = model.fit(\n x=[np.ones((10, 5, 10)), np.ones((10, 5))],\n y=np.zeros((10, 100)),\n batch_size=2)\n # Data is not masked, returned values are random.\n self.assertGreater(history.history['loss'][0], 0.0)\n\n model = training_lib.Model.from_config(model.get_config())\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[np.ones((10, 5, 10)), np.zeros((10, 5))],\n y=np.zeros((10, 100)),\n batch_size=2)\n # All data is masked, returned values are 0's.\n self.assertEqual(history.history['loss'][0], 0.0)\n history = model.fit(\n x=[np.ones((10, 5, 10)), np.ones((10, 5))],\n y=np.zeros((10, 100)),\n batch_size=2)\n # Data is not masked, returned values are random.\n self.assertGreater(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_call_arg_derived_from_keras_layer(self):\n\n class MyAdd(layers.Layer):\n\n def call(self, x1, x2):\n return x1 + x2\n\n input1 = input_layer_lib.Input(10)\n input2 = input_layer_lib.Input(10)\n outputs = MyAdd()(input1, input2)\n model = training_lib.Model([input1, input2], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n # Check serialization.\n model = training_lib.Model.from_config(\n model.get_config(), custom_objects={'MyAdd': MyAdd})\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(\n test_combinations.keras_mode_combinations(mode='eager'),)\n def test_only_some_in_first_arg_derived_from_keras_layer_keras_tensors(self):\n # This functionality is unsupported in v1 graphs\n\n class MyAddAll(layers.Layer):\n\n def call(self, inputs):\n x = inputs[0]\n for inp in inputs[1:]:\n if inp is not None:\n x = x + inp\n return x\n\n input1 = input_layer_lib.Input(10)\n input2 = input_layer_lib.Input(10)\n layer = MyAddAll()\n outputs = layer([0.0, input1, None, input2, None])\n model = training_lib.Model([input1, input2], outputs)\n self.assertIn(layer, model.layers)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n # Check serialization.\n model = 
training_lib.Model.from_config(\n model.get_config(), custom_objects={'MyAddAll': MyAddAll})\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(\n test_combinations.times(\n test_combinations.keras_mode_combinations(),\n test_combinations.combine(share_already_used_layer=[True, False])))\n def test_call_kwarg_derived_from_keras_layer(self, share_already_used_layer):\n\n class MaybeAdd(layers.Layer):\n\n def call(self, x1, x2=None):\n if x2 is not None:\n return x1 + x2\n return x1\n\n class IdentityLayer(layers.Layer):\n\n def call(self, x):\n return x\n\n input1 = input_layer_lib.Input(10)\n input2 = input_layer_lib.Input(10)\n identity_layer = IdentityLayer()\n\n if share_already_used_layer:\n # We have had model serialization/deserialization break in the past:\n # when a layer was previously used to construct other functional models\n # and had a non-empty list of inbound nodes before being used to define\n # the model being serialized/deserialized.\n # (The serialization/deserialization was not correctly adjusting\n # the node_index serialization/deserialization).\n # So, we explicitly test this case.\n training_lib.Model([input1], identity_layer(input1))\n\n outputs = MaybeAdd()(input1, x2=identity_layer(input2))\n model = training_lib.Model([input1, input2], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n model = training_lib.Model.from_config(\n model.get_config(),\n custom_objects={\n 'MaybeAdd': MaybeAdd,\n 'IdentityLayer': IdentityLayer\n })\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_call_kwarg_dtype_serialization(self):\n\n class Double(layers.Layer):\n\n def call(self, x1, dtype=None):\n return tf.cast(x1 + x1, dtype=dtype)\n\n input1 = input_layer_lib.Input(10)\n outputs = Double()(input1, dtype=tf.float16)\n model = training_lib.Model([input1], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10))],\n y=6 * np.ones((10, 10)),\n batch_size=2)\n # Check that input was correctly doubled.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n # Check the output dtype\n self.assertEqual(model(tf.ones((3, 10))).dtype, tf.float16)\n\n model = training_lib.Model.from_config(\n model.get_config(), custom_objects={'Double': Double})\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10))],\n y=6 * np.ones((10, 10)),\n batch_size=2)\n # Check that input was correctly doubled.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n # Check the output dtype\n self.assertEqual(model(tf.ones((3, 10))).dtype, tf.float16)\n\n 
@test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_call_kwarg_nonserializable(self):\n\n class Double(layers.Layer):\n\n def call(self, x1, kwarg=None):\n return x1 + x1\n\n class NonSerializable:\n\n def __init__(self, foo=None):\n self.foo = foo\n\n input1 = input_layer_lib.Input(10)\n outputs = Double()(input1, kwarg=NonSerializable())\n model = training_lib.Model([input1], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[3 * np.ones((10, 10))],\n y=6 * np.ones((10, 10)),\n batch_size=2)\n # Check that input was correctly doubled.\n self.assertEqual(history.history['loss'][0], 0.0)\n with self.assertRaisesRegex(\n TypeError, 'Layer double was passed non-JSON-serializable arguments.'):\n model.get_config()\n\n @test_combinations.generate(\n test_combinations.times(\n test_combinations.keras_mode_combinations(),\n test_combinations.combine(share_already_used_layer=[True, False])))\n def test_call_kwarg_derived_from_keras_layer_and_first_arg_is_constant(\n self, share_already_used_layer):\n\n class IdentityLayer(layers.Layer):\n\n def call(self, x):\n return x\n\n class MaybeAdd(layers.Layer):\n\n def call(self, x1, x2=None):\n if x2 is not None:\n return x1 + x2\n return x1\n\n input2 = input_layer_lib.Input(10)\n identity_layer = IdentityLayer()\n if share_already_used_layer:\n # We have had model serialization/deserialization break in the past:\n # when a layer was previously used to construct other functional models\n # and had a non-empty list of inbound nodes before being used to define\n # the model being serialized/deserialized.\n # (The serialization/deserialization was not correctly adjusting\n # the node_index serialization/deserialization).\n # So, we explicitly test this case.\n training_lib.Model([input2], identity_layer(input2))\n\n outputs = MaybeAdd()(3., x2=identity_layer(input2))\n model = training_lib.Model([input2], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=7 * np.ones((10, 10)),\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n model = training_lib.Model.from_config(\n model.get_config(),\n custom_objects={\n 'MaybeAdd': MaybeAdd,\n 'IdentityLayer': IdentityLayer\n })\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=7 * np.ones((10, 10)),\n y=10 * np.ones((10, 10)),\n batch_size=2)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_dont_cast_composite_unless_necessary(self):\n if not tf.executing_eagerly():\n return # Creating Keras inputs from a type_spec only supported in eager.\n\n # TODO(edloper): Change this to tf.experimental.ExtensionTyep once\n # it's been released.\n class MyType(extension_type.ExtensionType):\n # TODO(edloper) Remove _shape and _dtype once Keras has been switched\n # to use .shape and .dtype instead.\n value: tf.Tensor\n _shape = property(lambda self: self.value.shape)\n shape = property(lambda self: self.value.shape)\n _dtype = property(lambda self: self.value.dtype)\n dtype = property(lambda self: self.value.dtype)\n\n class Spec:\n _shape = property(lambda self: self.value.shape)\n shape = property(lambda self: self.value.shape)\n _dtype = 
property(lambda self: self.value.dtype)\n dtype = property(lambda self: self.value.dtype)\n\n my_spec = MyType.Spec(tf.TensorSpec([5], tf.float32))\n input1 = input_layer_lib.Input(type_spec=my_spec)\n model = training_lib.Model([input1], input1)\n model.compile(run_eagerly=test_utils.should_run_eagerly())\n model(MyType([1., 2., 3., 4., 5.])) # Does not require cast.\n with self.assertRaises((ValueError, TypeError)):\n model(MyType([1, 2, 3, 4, 5]))\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_composite_call_kwarg_derived_from_keras_layer(self):\n\n # Create a test layer that accepts composite tensor inputs.\n class MaybeAdd(layers.Layer):\n\n def call(self, x1, x2=None):\n # We need to convert this to a tensor for loss calculations -\n # losses don't play nicely with ragged tensors yet.\n if x2 is not None:\n return (x1 + x2).to_tensor(default_value=0)\n return x1.to_tensor(default_value=0)\n\n input1 = input_layer_lib.Input((None,), ragged=True)\n input2 = input_layer_lib.Input((None,), ragged=True)\n outputs = MaybeAdd()(input1, x2=input2)\n model = training_lib.Model([input1, input2], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n input_data = [\n tf.ragged.constant([[3.0, 3.0], [3.0, 3.0], [3.0]]),\n tf.ragged.constant([[7.0, 7.0], [7.0, 7.0], [7.0]])\n ]\n expected_data = np.array([[10.0, 10.0], [10.0, 10.0], [10.0, 0.0]])\n\n history = model.fit(x=input_data, y=expected_data)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n model = training_lib.Model.from_config(\n model.get_config(), custom_objects={'MaybeAdd': MaybeAdd})\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(x=input_data, y=expected_data)\n # Check that second input was correctly added to first.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(\n test_combinations.keras_mode_combinations(mode='eager'))\n def test_call_some_not_all_nested_in_first_arg_derived_from_keras_layer(self):\n # This functionality is unsupported in v1 graphs\n\n class AddAll(layers.Layer):\n\n def call(self, x1_x2, x3):\n x1, x2 = x1_x2\n out = x1 + x2\n if x3 is not None:\n for t in x3.values():\n out += t\n return out\n\n input1 = input_layer_lib.Input(10)\n input2 = input_layer_lib.Input(10)\n input3 = input_layer_lib.Input(10)\n\n layer = AddAll()\n outputs = layer(\n [input1, 4 * tf.ones((1, 10))],\n x3={\n 'a': input2,\n 'b': input3,\n 'c': 5 * tf.ones((1, 10))\n })\n model = training_lib.Model([input1, input2, input3], outputs)\n self.assertIn(layer, model.layers)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],\n y=15 * np.ones((10, 10)),\n batch_size=2)\n # Check that all inputs were correctly added.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n model = training_lib.Model.from_config(\n model.get_config(), custom_objects={'AddAll': AddAll})\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],\n y=15 * np.ones((10, 10)),\n batch_size=2)\n # Check that all inputs were correctly added.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def 
test_call_nested_arg_derived_from_keras_layer(self):\n\n class AddAll(layers.Layer):\n\n def call(self, x1, x2, x3=None):\n out = x1 + x2\n if x3 is not None:\n for t in x3.values():\n out += t\n return out\n\n input1 = input_layer_lib.Input(10)\n input2 = input_layer_lib.Input(10)\n input3 = input_layer_lib.Input(10)\n outputs = AddAll()(\n input1,\n 4 * tf.ones((1, 10)),\n x3={\n 'a': input2,\n 'b': input3,\n 'c': 5 * tf.ones((1, 10))\n })\n model = training_lib.Model([input1, input2, input3], outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],\n y=15 * np.ones((10, 10)),\n batch_size=2)\n # Check that all inputs were correctly added.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n model = training_lib.Model.from_config(\n model.get_config(), custom_objects={'AddAll': AddAll})\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(\n x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],\n y=15 * np.ones((10, 10)),\n batch_size=2)\n # Check that all inputs were correctly added.\n self.assertEqual(history.history['loss'][0], 0.0)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_multi_output_model_with_none_masking(self):\n def func(x):\n return [x * 0.2, x * 0.3]\n\n def output_shape(input_shape):\n return [input_shape, input_shape]\n\n i = layers.Input(shape=(3, 2, 1))\n o = layers.Lambda(function=func, output_shape=output_shape)(i)\n\n self.assertEqual(backend.int_shape(o[0]), (None, 3, 2, 1))\n self.assertEqual(backend.int_shape(o[1]), (None, 3, 2, 1))\n\n o = layers.add(o)\n model = training_lib.Model(i, o)\n model.run_eagerly = test_utils.should_run_eagerly()\n\n i2 = layers.Input(shape=(3, 2, 1))\n o2 = model(i2)\n model2 = training_lib.Model(i2, o2)\n model2.run_eagerly = test_utils.should_run_eagerly()\n\n x = np.random.random((4, 3, 2, 1))\n out = model2.predict(x)\n assert out.shape == (4, 3, 2, 1)\n self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_constant_initializer_with_numpy(self):\n initializer = tf.compat.v1.constant_initializer(np.ones((3, 2)))\n model = sequential.Sequential()\n model.add(layers.Dense(2, input_shape=(3,), kernel_initializer=initializer))\n model.add(layers.Dense(3))\n model.compile(\n loss='mse',\n optimizer='sgd',\n metrics=['acc'],\n run_eagerly=test_utils.should_run_eagerly())\n\n json_str = model.to_json()\n models.model_from_json(json_str)\n\n def test_subclassed_error_if_init_not_called(self):\n\n class MyNetwork(training_lib.Model):\n\n def __init__(self):\n self._foo = [layers.Dense(10), layers.Dense(10)]\n\n with self.assertRaisesRegex(RuntimeError, 'forgot to call'):\n MyNetwork()\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_int_input_shape(self):\n inputs = input_layer_lib.Input(10)\n self.assertEqual([None, 10], inputs.shape.as_list())\n\n inputs_with_batch = input_layer_lib.Input(batch_size=20, shape=5)\n self.assertEqual([20, 5], inputs_with_batch.shape.as_list())\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_model_initialization(self):\n # Functional model\n inputs = input_layer_lib.Input(shape=(32,))\n outputs = layers.Dense(4)(inputs)\n\n with self.assertRaisesRegex(TypeError,\n 'Keyword argument not 
understood'):\n model = training_lib.Model(\n inputs, outputs, name='m', trainable=False, dtype='int64')\n with self.assertRaisesRegex(TypeError,\n 'Keyword argument not understood'):\n model = training_lib.Model(\n inputs, outputs, name='m', trainable=False, dynamic=False)\n\n model = training_lib.Model(inputs, outputs, name='m', trainable=False)\n self.assertEqual('m', model.name)\n self.assertFalse(model.trainable)\n self.assertFalse(model.dynamic)\n\n class SubclassModel(training_lib.Model):\n pass\n # Subclassed model\n model = SubclassModel(\n name='subclassed', trainable=True, dtype='int64', dynamic=True)\n self.assertEqual('subclassed', model.name)\n self.assertTrue(model.dynamic)\n self.assertTrue(model.trainable)\n w = model.add_weight(\n 'w', [], initializer=tf.compat.v1.constant_initializer(1))\n self.assertEqual(tf.int64, w.dtype)\n\n def test_disconnected_inputs(self):\n input_tensor1 = input_layer_lib.Input(shape=[200], name='a')\n input_tensor2 = input_layer_lib.Input(shape=[10], name='b')\n output_tensor1 = layers.Dense(units=10)(input_tensor1)\n\n net = functional.Functional(\n inputs=[input_tensor1, input_tensor2], outputs=[output_tensor1])\n net2 = functional.Functional.from_config(net.get_config())\n self.assertLen(net2.inputs, 2)\n self.assertEqual('a', net2.layers[0].name)\n self.assertEqual('b', net2.layers[1].name)\n\n @test_combinations.generate(test_combinations.keras_model_type_combinations())\n def test_dependency_tracking(self):\n model = test_utils.get_small_mlp(1, 4, input_dim=3)\n model.trackable = Checkpoint()\n self.assertIn('trackable', model._unconditional_dependency_names)\n self.assertEqual(model.trackable, model._lookup_dependency('trackable'))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_model_construction_in_tf_function(self):\n\n d = {'model': None}\n\n @tf.function\n def fn(x):\n if d['model'] is None:\n # Check that Functional can be built in a `tf.function`.\n inputs = input_layer_lib.Input(10)\n outputs = layers.Dense(1)(inputs)\n model = functional.Functional(inputs, outputs)\n d['model'] = model\n else:\n model = d['model']\n\n return model(x)\n\n x = tf.ones((10, 10))\n y = fn(x)\n self.assertEqual(y.shape.as_list(), [10, 1])\n\n def test_save_spec(self):\n \"\"\"Tests that functional model generates the correct save spec.\"\"\"\n\n class MultiInputModel(training_lib.Model):\n\n def call(self, x, y):\n return x\n\n inp = input_layer_lib.Input(shape=(1,))\n inp2 = input_layer_lib.Input(shape=(1,), batch_size=5, dtype=tf.int32)\n out = MultiInputModel()(inp, inp2)\n m = training_lib.Model(inputs={'x': inp, 'y': inp2}, outputs=out)\n input_spec = m.save_spec(dynamic_batch=False)[0][0]\n self.assertIn('x', input_spec)\n self.assertIn('y', input_spec)\n self.assertAllEqual([None, 1], input_spec['x'].shape.as_list())\n self.assertAllEqual(tf.float32, input_spec['x'].dtype)\n self.assertAllEqual([5, 1], input_spec['y'].shape.as_list())\n self.assertAllEqual(tf.int32, input_spec['y'].dtype)\n\n\nclass DeferredModeTest(test_combinations.TestCase):\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testSimpleNetworkBuilding(self):\n inputs = input_layer_lib.Input(shape=(32,))\n if tf.executing_eagerly():\n self.assertEqual(inputs.dtype.name, 'float32')\n self.assertEqual(inputs.shape.as_list(), [None, 32])\n\n x = layers.Dense(2)(inputs)\n if tf.executing_eagerly():\n self.assertEqual(x.dtype.name, 'float32')\n self.assertEqual(x.shape.as_list(), [None, 
2])\n\n outputs = layers.Dense(4)(x)\n network = functional.Functional(inputs, outputs)\n self.assertIsInstance(network, functional.Functional)\n\n if tf.executing_eagerly():\n # It should be possible to call such a network on EagerTensors.\n inputs = tf.constant(\n np.random.random((10, 32)).astype('float32'))\n outputs = network(inputs)\n self.assertEqual(outputs.shape.as_list(), [10, 4])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testMultiIONetworkBuilding(self):\n input_a = input_layer_lib.Input(shape=(32,))\n input_b = input_layer_lib.Input(shape=(16,))\n a = layers.Dense(16)(input_a)\n\n class AddLayer(layers.Layer):\n\n def call(self, inputs):\n return inputs[0] + inputs[1]\n\n c = AddLayer()([a, input_b]) # pylint: disable=not-callable\n c = layers.Dense(2)(c)\n\n network = functional.Functional([input_a, input_b], [a, c])\n if tf.executing_eagerly():\n a_val = tf.constant(\n np.random.random((10, 32)).astype('float32'))\n b_val = tf.constant(\n np.random.random((10, 16)).astype('float32'))\n outputs = network([a_val, b_val])\n self.assertEqual(len(outputs), 2)\n self.assertEqual(outputs[0].shape.as_list(), [10, 16])\n self.assertEqual(outputs[1].shape.as_list(), [10, 2])\n\n\nclass DefaultShapeInferenceBehaviorTest(test_combinations.TestCase):\n\n def _testShapeInference(self, model, input_shape, expected_output_shape):\n input_value = np.random.random(input_shape)\n output_value = model.predict(input_value)\n self.assertEqual(output_value.shape, expected_output_shape)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testSingleInputCase(self):\n\n class LayerWithOneInput(layers.Layer):\n\n def build(self, input_shape):\n self.w = tf.ones(shape=(3, 4))\n\n def call(self, inputs):\n return backend.dot(inputs, self.w)\n\n inputs = input_layer_lib.Input(shape=(3,))\n layer = LayerWithOneInput()\n\n if tf.executing_eagerly():\n self.assertEqual(\n layer.compute_output_shape((None, 3)).as_list(), [None, 4])\n # As a side-effect, compute_output_shape builds the layer.\n self.assertTrue(layer.built)\n # We can still query the layer's compute_output_shape with compatible\n # input shapes.\n self.assertEqual(\n layer.compute_output_shape((6, 3)).as_list(), [6, 4])\n\n outputs = layer(inputs)\n model = training_lib.Model(inputs, outputs)\n self._testShapeInference(model, (2, 3), (2, 4))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testMultiInputOutputCase(self):\n\n class MultiInputOutputLayer(layers.Layer):\n\n def build(self, input_shape):\n self.w = tf.ones(shape=(3, 4))\n\n def call(self, inputs):\n a = backend.dot(inputs[0], self.w)\n b = a + inputs[1]\n return [a, b]\n\n input_a = input_layer_lib.Input(shape=(3,))\n input_b = input_layer_lib.Input(shape=(4,))\n output_a, output_b = MultiInputOutputLayer()([input_a, input_b])\n model = training_lib.Model([input_a, input_b], [output_a, output_b])\n output_a_val, output_b_val = model.predict(\n [np.random.random((2, 3)), np.random.random((2, 4))])\n self.assertEqual(output_a_val.shape, (2, 4))\n self.assertEqual(output_b_val.shape, (2, 4))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testTrainingArgument(self):\n\n class LayerWithTrainingArg(layers.Layer):\n\n def build(self, input_shape):\n self.w = tf.ones(shape=(3, 4))\n\n def call(self, inputs, training):\n return backend.dot(inputs, self.w)\n\n inputs = input_layer_lib.Input(shape=(3,))\n outputs 
= LayerWithTrainingArg()(inputs, training=False)\n model = training_lib.Model(inputs, outputs)\n self._testShapeInference(model, (2, 3), (2, 4))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testNoneInShape(self):\n\n class Model(training_lib.Model):\n\n def __init__(self):\n super(Model, self).__init__()\n self.conv1 = layers.Conv2D(8, 3)\n self.pool = layers.GlobalAveragePooling2D()\n self.fc = layers.Dense(3)\n\n def call(self, x):\n x = self.conv1(x)\n x = self.pool(x)\n x = self.fc(x)\n return x\n\n model = Model()\n model.build(tf.TensorShape((None, None, None, 1)))\n self.assertTrue(model.built, 'Model should be built')\n self.assertTrue(model.weights,\n 'Model should have its weights created as it '\n 'has been built')\n sample_input = tf.ones((1, 10, 10, 1))\n output = model(sample_input)\n self.assertEqual(output.shape, (1, 3))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testNoneInShapeWithCompoundModel(self):\n\n class BasicBlock(training_lib.Model):\n\n def __init__(self):\n super(BasicBlock, self).__init__()\n self.conv1 = layers.Conv2D(8, 3)\n self.pool = layers.GlobalAveragePooling2D()\n self.dense = layers.Dense(3)\n\n def call(self, x):\n x = self.conv1(x)\n x = self.pool(x)\n x = self.dense(x)\n return x\n\n class CompoundModel(training_lib.Model):\n\n def __init__(self):\n super(CompoundModel, self).__init__()\n self.block = BasicBlock()\n\n def call(self, x):\n x = self.block(x) # pylint: disable=not-callable\n return x\n\n model = CompoundModel()\n model.build(tf.TensorShape((None, None, None, 1)))\n self.assertTrue(model.built, 'Model should be built')\n self.assertTrue(model.weights,\n 'Model should have its weights created as it '\n 'has been built')\n sample_input = tf.ones((1, 10, 10, 1))\n output = model(sample_input) # pylint: disable=not-callable\n self.assertEqual(output.shape, (1, 3))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def testNoneInShapeWithFunctionalAPI(self):\n\n class BasicBlock(training_lib.Model):\n # Inheriting from layers.Layer since we are calling this layer\n # inside a model created using functional API.\n\n def __init__(self):\n super(BasicBlock, self).__init__()\n self.conv1 = layers.Conv2D(8, 3)\n\n def call(self, x):\n x = self.conv1(x)\n return x\n\n input_layer = layers.Input(shape=(None, None, 1))\n x = BasicBlock()(input_layer)\n x = layers.GlobalAveragePooling2D()(x)\n output_layer = layers.Dense(3)(x)\n\n model = training_lib.Model(inputs=input_layer, outputs=output_layer)\n\n model.build(tf.TensorShape((None, None, None, 1)))\n self.assertTrue(model.built, 'Model should be built')\n self.assertTrue(model.weights,\n 'Model should have its weights created as it '\n 'has been built')\n sample_input = tf.ones((1, 10, 10, 1))\n output = model(sample_input)\n self.assertEqual(output.shape, (1, 3))\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_sequential_as_downstream_of_masking_layer(self):\n inputs = layers.Input(shape=(3, 4))\n x = layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)\n\n s = sequential.Sequential()\n s.add(layers.Dense(5, input_shape=(4,)))\n\n x = layers.TimeDistributed(s)(x)\n model = training_lib.Model(inputs=inputs, outputs=x)\n model.compile(\n optimizer='rmsprop',\n loss='mse',\n run_eagerly=test_utils.should_run_eagerly())\n\n model_input = np.random.randint(\n low=1, high=5, size=(10, 3, 4)).astype('float32')\n for i in 
range(4):\n model_input[i, i:, :] = 0.\n model.fit(model_input,\n np.random.random((10, 3, 5)), epochs=1, batch_size=6)\n\n if not tf.executing_eagerly():\n # Note: this doesn't work in eager due to DeferredTensor/ops compatibility\n # issue.\n mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)]\n mask_outputs += [model.layers[2].compute_mask(\n model.layers[2].input, mask_outputs[-1])]\n func = backend.function([model.input], mask_outputs)\n mask_outputs_val = func([model_input])\n self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1))\n self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_external_keras_serialization_compat_input_layers(self):\n inputs = input_layer_lib.Input(shape=(10,))\n outputs = layers.Dense(1)(inputs)\n model = training_lib.Model(inputs, outputs)\n config = model.get_config()\n # Checks that single inputs and outputs are still saved as 1-element lists.\n # Saving as 1-element lists or not is equivalent in TF Keras, but only the\n # 1-element list format is supported in TF.js and keras-team/Keras.\n self.assertLen(config['input_layers'], 1)\n self.assertLen(config['output_layers'], 1)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_external_keras_serialization_compat_inbound_nodes(self):\n # Check single Tensor input.\n inputs = input_layer_lib.Input(shape=(10,), name='in')\n outputs = layers.Dense(1)(inputs)\n model = training_lib.Model(inputs, outputs)\n config = model.get_config()\n self.assertEqual(config['layers'][1]['inbound_nodes'], [[['in', 0, 0, {}]]])\n\n # Check multiple Tensor input.\n inputs1 = input_layer_lib.Input(shape=(10,), name='in1')\n inputs2 = input_layer_lib.Input(shape=(10,), name='in2')\n outputs = layers.Add()([inputs1, inputs2])\n model = training_lib.Model([inputs1, inputs2], outputs)\n config = model.get_config()\n self.assertEqual(config['layers'][2]['inbound_nodes'],\n [[['in1', 0, 0, {}], ['in2', 0, 0, {}]]])\n\n @test_combinations.generate(test_combinations.combine(mode=['eager']))\n def test_dict_inputs_tensors(self):\n # Note that this test is running with v2 eager only, since the v1\n # will behave differently wrt to dict input for training.\n inputs = {\n 'sentence2': input_layer_lib.Input(\n shape=(), name='a', dtype=tf.string),\n 'sentence1': input_layer_lib.Input(\n shape=(), name='b', dtype=tf.string),\n }\n strlen = layers.Lambda(tf.strings.length)\n diff = layers.Subtract()(\n [strlen(inputs['sentence1']), strlen(inputs['sentence2'])])\n diff = tf.cast(diff, tf.float32)\n model = training_lib.Model(inputs, diff)\n\n extra_keys = {\n 'sentence1': tf.constant(['brown fox', 'lazy dog']),\n 'sentence2': tf.constant(['owl', 'cheeky cat']),\n 'label': tf.constant([0, 1]),\n }\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n model(extra_keys)\n self.assertIn('ignored by the model', str(w[-1].message))\n\n model.compile('sgd', 'mse')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n model.fit(extra_keys, y=tf.constant([0, 1]), steps_per_epoch=1)\n self.assertIn('ignored by the model', str(w[-1].message))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n model.evaluate(extra_keys, tf.constant([0, 1]))\n self.assertIn('ignored by the model', str(w[-1].message))\n\n # Make sure the model inputs are sorted with the dict keys.\n 
self.assertEqual(model.inputs[0]._keras_history.layer.name, 'b')\n self.assertEqual(model.inputs[1]._keras_history.layer.name, 'a')\n\n\nclass GraphUtilsTest(tf.test.TestCase):\n\n def testGetReachableFromInputs(self):\n\n with tf.Graph().as_default(), self.cached_session():\n pl_1 = tf.compat.v1.placeholder(shape=None, dtype='float32')\n pl_2 = tf.compat.v1.placeholder(shape=None, dtype='float32')\n pl_3 = tf.compat.v1.placeholder(shape=None, dtype='float32')\n x_1 = pl_1 + pl_2\n x_2 = pl_2 * 2\n x_3 = pl_3 + 1\n x_4 = x_1 + x_2\n x_5 = x_3 * pl_1\n\n self.assertEqual(\n tf_utils.get_reachable_from_inputs([pl_1]),\n {pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op})\n self.assertEqual(\n tf_utils.get_reachable_from_inputs([pl_1, pl_2]),\n {pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op})\n self.assertEqual(\n tf_utils.get_reachable_from_inputs([pl_3]),\n {pl_3, x_3, x_5, x_3.op, x_5.op})\n self.assertEqual(\n tf_utils.get_reachable_from_inputs([x_3]), {x_3, x_5, x_5.op})\n\n\nclass NestedNetworkTest(test_combinations.TestCase):\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_nested_inputs_network(self):\n inputs = {\n 'x1': input_layer_lib.Input(shape=(1,)),\n 'x2': input_layer_lib.Input(shape=(1,))\n }\n outputs = layers.Add()([inputs['x1'], inputs['x2']])\n network = functional.Functional(inputs, outputs)\n\n network = functional.Functional.from_config(network.get_config())\n\n result_tensor = network({\n 'x1': tf.ones((1, 1), 'float32'),\n 'x2': tf.ones((1, 1), 'float32')\n })\n result = self.evaluate(result_tensor)\n self.assertAllEqual(result, [[2.]])\n\n # TODO(b/122726584): Investigate why concrete batch is flaky in some builds.\n output_shape = network.compute_output_shape({\n 'x1': (None, 1),\n 'x2': (None, 1)\n })\n self.assertListEqual(output_shape.as_list(), [None, 1])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_nested_outputs_network(self):\n inputs = input_layer_lib.Input(shape=(1,))\n outputs = {\n 'x+x': layers.Add()([inputs, inputs]),\n 'x*x': layers.Multiply()([inputs, inputs])\n }\n\n network = functional.Functional(inputs, outputs)\n\n network = functional.Functional.from_config(network.get_config())\n\n result_tensor = network(tf.ones((1, 1), 'float32'))\n result = self.evaluate(result_tensor)\n self.assertAllEqual(result['x+x'], [[2.]])\n self.assertAllEqual(result['x*x'], [[1.]])\n\n output_shape = network.compute_output_shape((None, 1))\n self.assertListEqual(output_shape['x+x'].as_list(), [None, 1])\n self.assertListEqual(output_shape['x*x'].as_list(), [None, 1])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_nested_network_inside_network(self):\n inner_inputs = {\n 'x1': input_layer_lib.Input(shape=(1,)),\n 'x2': input_layer_lib.Input(shape=(1,))\n }\n inner_outputs = {\n 'x1+x2': layers.Add()([inner_inputs['x1'], inner_inputs['x2']]),\n 'x1*x2': layers.Multiply()([inner_inputs['x1'], inner_inputs['x2']])\n }\n inner_network = functional.Functional(\n inner_inputs, inner_outputs)\n\n inputs = [\n input_layer_lib.Input(shape=(1,)),\n input_layer_lib.Input(shape=(1,))\n ]\n middle = inner_network({'x1': inputs[0], 'x2': inputs[1]})\n outputs = layers.Add()([middle['x1+x2'], middle['x1*x2']])\n network = functional.Functional(inputs, outputs)\n\n network = functional.Functional.from_config(network.get_config())\n\n # Computes: `(x1+x2) + (x1*x2)`\n result_tensor = network(\n [tf.ones((1, 1), 
'float32'),\n tf.ones((1, 1), 'float32')])\n result = self.evaluate(result_tensor)\n self.assertAllEqual(result, [[3.]])\n\n output_shape = network.compute_output_shape([(None, 1), (None, 1)])\n self.assertListEqual(output_shape.as_list(), [None, 1])\n\n @test_combinations.generate(test_combinations.combine(mode=['graph']))\n def test_updates_with_direct_call(self):\n inputs = input_layer_lib.Input(shape=(10,))\n x = layers.BatchNormalization()(inputs)\n x = layers.Dense(10)(x)\n model = training_lib.Model(inputs, x)\n\n ph = backend.placeholder(shape=(10, 10))\n model(ph)\n\n self.assertLen(model.updates, 4)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_dict_mapping_input(self):\n\n class ReturnFirst(layers.Layer):\n\n def call(self, inputs):\n b, _ = inputs\n return b\n\n # Checks that inputs are put in same order as the\n # Model was constructed with.\n b = input_layer_lib.Input(shape=(10,), name='b')\n a = input_layer_lib.Input(shape=(10,), name='a')\n outputs = ReturnFirst()([b, a])\n\n b_val = tf.ones((10, 10))\n a_val = tf.zeros((10, 10))\n\n model = training_lib.Model([b, a], outputs)\n res = model({'a': a_val, 'b': b_val})\n self.assertAllClose(self.evaluate(res), self.evaluate(b_val))\n\n reversed_model = training_lib.Model([a, b], outputs)\n res = reversed_model({'a': a_val, 'b': b_val})\n self.assertAllClose(self.evaluate(res), self.evaluate(b_val))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_dict_mapping_single_input(self):\n b = input_layer_lib.Input(shape=(1,), name='b')\n outputs = b * 2\n model = training_lib.Model(b, outputs)\n\n b_val = tf.ones((1, 1))\n extra_val = tf.ones((1, 10))\n\n inputs = {'a': extra_val, 'b': b_val}\n res = model(inputs)\n\n # Check that 'b' was used and 'a' was ignored.\n self.assertEqual(res.shape.as_list(), [1, 1])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_nested_dict_mapping(self):\n a = input_layer_lib.Input(shape=(1,), dtype='int32', name='a')\n b = input_layer_lib.Input(shape=(1,), dtype='int32', name='b')\n c = input_layer_lib.Input(shape=(1,), dtype='int32', name='c')\n d = input_layer_lib.Input(shape=(1,), dtype='int32', name='d')\n inputs = {'a': (a, b), 'c': (c, d)}\n outputs = 1000 * a + 100 * b + 10 * c + d\n model = training_lib.Model(inputs, outputs)\n\n a_val = tf.ones((1, 1), dtype='int32')\n b_val = 2 * tf.ones((1, 1), dtype='int32')\n c_val = 3 * tf.ones((1, 1), dtype='int32')\n d_val = 4 * tf.ones((1, 1), dtype='int32')\n\n inputs_val = {'a': (a_val, b_val), 'c': (c_val, d_val)}\n res = model(inputs_val)\n\n # Check that inputs were flattened in the correct order.\n self.assertFalse(model._enable_dict_to_input_mapping)\n self.assertEqual(self.evaluate(res), [1234])\n\n\n@test_combinations.generate(test_combinations.keras_mode_combinations())\nclass AddLossTest(test_combinations.TestCase):\n\n def test_add_loss_outside_call_only_loss(self):\n inputs = input_layer_lib.Input((10,))\n mid = layers.Dense(10)(inputs)\n outputs = layers.Dense(1)(mid)\n model = training_lib.Model(inputs, outputs)\n model.add_loss(tf.reduce_mean(outputs))\n self.assertLen(model.losses, 1)\n\n initial_weights = model.get_weights()\n\n x = np.ones((10, 10))\n model.compile(\n 'sgd',\n run_eagerly=test_utils.should_run_eagerly())\n model.fit(x, batch_size=2, epochs=1)\n\n model2 = model.from_config(model.get_config())\n model2.compile(\n 'sgd',\n run_eagerly=test_utils.should_run_eagerly())\n 
model2.set_weights(initial_weights)\n model2.fit(x, batch_size=2, epochs=1)\n\n # The TFOpLayer and the AddLoss layer are serialized.\n self.assertLen(model2.layers, 5)\n self.assertAllClose(model.get_weights(), model2.get_weights())\n\n def test_add_loss_outside_call_multiple_losses(self):\n inputs = input_layer_lib.Input((10,))\n x1 = layers.Dense(10)(inputs)\n x2 = layers.Dense(10)(x1)\n outputs = layers.Dense(1)(x2)\n model = training_lib.Model(inputs, outputs)\n model.add_loss(tf.reduce_sum(x1 * x2))\n model.add_loss(tf.reduce_mean(outputs))\n self.assertLen(model.losses, 2)\n\n initial_weights = model.get_weights()\n\n x, y = np.ones((10, 10)), np.ones((10, 1))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n model.fit(x, y, batch_size=2, epochs=1)\n\n model2 = model.from_config(model.get_config())\n model2.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n model2.set_weights(initial_weights)\n model2.fit(x, y, batch_size=2, epochs=1)\n\n self.assertAllClose(model.get_weights(), model2.get_weights())\n\n def test_add_loss_crossentropy_backtracking(self):\n inputs = input_layer_lib.Input((2,))\n labels = input_layer_lib.Input((1,))\n outputs = layers.Dense(1, activation='sigmoid')(inputs)\n model = functional.Functional([inputs, labels], outputs)\n model.add_loss(losses.binary_crossentropy(labels, outputs))\n model.compile('adam')\n x = np.random.random((2, 2))\n y = np.random.random((2, 1))\n model.fit([x, y])\n\n inputs = input_layer_lib.Input((2,))\n labels = input_layer_lib.Input((2,))\n outputs = layers.Dense(2, activation='softmax')(inputs)\n model = functional.Functional([inputs, labels], outputs)\n model.add_loss(losses.categorical_crossentropy(labels, outputs))\n model.compile('adam')\n x = np.random.random((2, 2))\n y = np.random.random((2, 2))\n model.fit([x, y])\n\n inputs = input_layer_lib.Input((2,))\n labels = input_layer_lib.Input((1,), dtype='int32')\n outputs = layers.Dense(2, activation='softmax')(inputs)\n model = functional.Functional([inputs, labels], outputs)\n model.add_loss(losses.sparse_categorical_crossentropy(labels, outputs))\n model.compile('adam')\n x = np.random.random((2, 2))\n y = np.random.randint(0, 2, size=(2, 1))\n model.fit([x, y])\n\n\n@test_combinations.generate(test_combinations.keras_mode_combinations())\nclass WeightAccessTest(test_combinations.TestCase):\n\n def test_functional_model(self):\n inputs = input_layer_lib.Input((10,))\n x1 = layers.Dense(10)(inputs)\n x2 = layers.Dense(10)(x1)\n outputs = layers.Dense(1)(x2)\n model = training_lib.Model(inputs, outputs)\n\n self.assertEqual(len(model.weights), 6)\n\n def test_sequential_model_with_input_shape(self):\n x1 = layers.Dense(10, input_shape=(10,))\n x2 = layers.Dense(10)\n x3 = layers.Dense(1)\n model = sequential.Sequential([x1, x2, x3])\n\n self.assertEqual(len(model.weights), 6)\n\n def test_sequential_model_without_input_shape(self):\n x1 = layers.Dense(10)\n x2 = layers.Dense(10)\n x3 = layers.Dense(1)\n model = sequential.Sequential([x1, x2, x3])\n\n with self.assertRaisesRegex(\n ValueError, 'Weights for model .* have not yet been created'):\n _ = model.weights\n\n def test_subclass_model_with_build_method(self):\n\n class SubclassModel(models.Model):\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=input_shape[-1], initializer='ones')\n\n def call(self, inputs):\n return inputs * self.w\n\n model = SubclassModel()\n\n with self.assertRaisesRegex(\n ValueError, 'Weights for model .* have not yet been 
created'):\n _ = model.weights\n\n model(input_layer_lib.Input((10,)))\n self.assertEqual(len(model.weights), 1)\n\n def test_subclass_model_without_build_method(self):\n\n class SubclassModel(models.Model):\n\n def __init__(self):\n super(SubclassModel, self).__init__()\n self.w = self.add_weight(shape=(), initializer='ones')\n\n def call(self, inputs):\n return inputs * self.w\n\n model = SubclassModel()\n self.assertEqual(len(model.weights), 1)\n\n\n@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))\nclass DTypeTest(test_combinations.TestCase):\n\n @test_utils.enable_v2_dtype_behavior\n def test_graph_network_dtype(self):\n inputs = input_layer_lib.Input((10,))\n outputs = layers.Dense(10)(inputs)\n network = functional.Functional(inputs, outputs)\n self.assertEqual(network.dtype, 'float32')\n\n @test_utils.enable_v2_dtype_behavior\n def test_subclassed_network_dtype(self):\n\n class IdentityNetwork(training_lib.Model):\n\n def call(self, inputs):\n return inputs\n\n network = IdentityNetwork()\n self.assertEqual(network.dtype, 'float32')\n self.assertEqual(network(tf.constant(1, 'float64')).dtype, 'float32')\n\n network = IdentityNetwork(dtype='float16')\n self.assertEqual(network.dtype, 'float16')\n self.assertEqual(network(tf.constant(1, 'float64')).dtype, 'float16')\n\n network = IdentityNetwork(autocast=False)\n self.assertEqual(network.dtype, 'float32')\n self.assertEqual(network(tf.constant(1, 'float64')).dtype, 'float64')\n\n\nclass AttrTrackingLayer(base_layer.Layer):\n \"\"\"Count how many times `dynamic` and `stateful` are called.\n\n These counts are used to test that the attribute cache behaves as expected.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.stateful_count = 0\n self.dynamic_count = 0\n super(AttrTrackingLayer, self).__init__(*args, **kwargs)\n\n @base_layer.Layer.stateful.getter\n def stateful(self):\n self.stateful_count += 1\n return super(AttrTrackingLayer, self).stateful\n\n @property\n def dynamic(self):\n self.dynamic_count += 1\n return super(AttrTrackingLayer, self).dynamic\n\n\n@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))\nclass CacheCorrectnessTest(test_combinations.TestCase):\n\n def layer_and_network_test(self):\n # Top level layer\n network = functional.Functional()\n\n layer_0 = AttrTrackingLayer()\n\n sub_network = functional.Functional()\n layer_1 = AttrTrackingLayer(dynamic=True)\n layer_2 = AttrTrackingLayer()\n sub_network.sub_layers = [layer_1, layer_2]\n\n network.sub_layer = layer_0\n\n for _ in range(2):\n self.assertEqual(network.dynamic, False)\n self.assertEqual(network.stateful, False)\n\n # The second pass should be a cache hit.\n self.assertEqual(layer_0.dynamic_count, 1)\n self.assertEqual(layer_0.stateful_count, 1)\n\n # Mutations of the sub-layer should force recalculation of the network's\n # stateful attribute. 
(mutations bubble up.)\n layer_0.stateful = True\n self.assertEqual(network.stateful, True)\n self.assertEqual(layer_0.stateful_count, 2)\n\n layer_0.stateful = False\n self.assertEqual(network.stateful, False)\n self.assertEqual(layer_0.stateful_count, 3)\n\n # But changing stateful should not affect dynamic.\n self.assertEqual(network.dynamic, False)\n self.assertEqual(layer_0.dynamic_count, 1)\n\n network.sub_network = sub_network\n\n # Adding to the topology should invalidate the cache and reflect in the top\n # level network.\n self.assertEqual(network.dynamic, True)\n self.assertEqual(layer_0.dynamic_count, 2)\n self.assertEqual(layer_1.dynamic_count, 1)\n\n # Still dynamic, but we need to recompute.\n sub_network.sub_layers.pop()\n self.assertEqual(network.dynamic, True)\n self.assertEqual(layer_0.dynamic_count, 3)\n self.assertEqual(layer_1.dynamic_count, 2)\n\n # Now that we've removed the dynamic layer deep in the layer hierarchy, we\n # need to make sure that that bubbles up through all the levels.\n sub_network.sub_layers.pop()\n self.assertEqual(network.dynamic, False)\n self.assertEqual(layer_0.dynamic_count, 4)\n self.assertEqual(layer_1.dynamic_count, 2)\n\n # Now check with a tracked dict.\n sub_network.sub_layers = {\n \"layer_1\": layer_1,\n \"layer_2\": layer_2,\n }\n\n self.assertEqual(network.dynamic, True)\n self.assertEqual(layer_0.dynamic_count, 5)\n self.assertEqual(layer_1.dynamic_count, 3)\n\n # In-place assignment should still invalidate the cache.\n sub_network.sub_layers[\"layer_1\"] = layer_1\n self.assertEqual(network.dynamic, True)\n self.assertEqual(layer_0.dynamic_count, 6)\n self.assertEqual(layer_1.dynamic_count, 4)\n\n sub_network.sub_layers[\"layer_1\"] = None\n for _ in range(2):\n self.assertEqual(network.dynamic, False)\n self.assertEqual(layer_0.dynamic_count, 7)\n self.assertEqual(layer_1.dynamic_count, 4)\n\n layer_3 = AttrTrackingLayer()\n layer_3.stateful = True\n\n sub_network.sub_layers = None\n self.assertEqual(network.dynamic, False)\n self.assertEqual(network.stateful, False)\n\n # Test duplicate layers.\n sub_network.sub_layers = [layer_1, layer_1, layer_1, layer_3]\n self.assertEqual(network.dynamic, True)\n self.assertEqual(network.stateful, True)\n\n for _ in range(3):\n sub_network.sub_layers.pop()\n self.assertEqual(network.dynamic, True)\n self.assertEqual(network.stateful, False)\n\n sub_network.sub_layers.pop()\n self.assertEqual(network.dynamic, False)\n self.assertEqual(network.stateful, False)\n\n def test_compute_output_shape_cache(self):\n # See https://github.com/tensorflow/tensorflow/issues/32029.\n x = input_layer_lib.Input(shape=(None, 32))\n dense = layers.Dense(2)\n y = dense(x)\n network = functional.Functional(x, y, name='dense_network')\n\n for i in range(999, 1024):\n self.assertEqual(network.compute_output_shape((1, i, 32)), (1, i, 2))\n\n def test_2d_inputs_squeezed_to_1d(self):\n input_1d = input_layer_lib.Input(shape=())\n outputs = input_1d * 2.\n net = functional.Functional(input_1d, outputs)\n\n x = np.ones((10, 1))\n y = net(x)\n self.assertEqual(y.shape.rank, 1)\n\n def test_1d_inputs_expanded_to_2d(self):\n input_1d = input_layer_lib.Input(shape=(1,))\n outputs = input_1d * 2.\n net = functional.Functional(input_1d, outputs)\n\n x = np.ones((10,))\n y = net(x)\n self.assertEqual(y.shape.rank, 2)\n\n def test_training_passed_during_construction(self):\n\n def _call(inputs, training):\n if training is None:\n return inputs * -1.0\n elif training:\n return inputs\n else:\n return inputs * 0.0\n\n class 
MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=True):\n return _call(inputs, training)\n\n my_layer = MyLayer()\n x = np.ones((1, 10))\n\n # Hard-coded `true` value passed during construction is respected.\n inputs = input_layer_lib.Input(10)\n outputs = my_layer(inputs, training=True)\n network = functional.Functional(inputs, outputs)\n self.assertAllEqual(network(x, training=True), _call(x, True))\n self.assertAllEqual(network(x, training=False), _call(x, True))\n self.assertAllEqual(network(x), _call(x, True))\n\n # Hard-coded `false` value passed during construction is respected.\n inputs = input_layer_lib.Input(10)\n outputs = my_layer(inputs, training=False)\n network = functional.Functional(inputs, outputs)\n self.assertAllEqual(network(x, training=True), _call(x, False))\n self.assertAllEqual(network(x, training=False), _call(x, False))\n self.assertAllEqual(network(x), _call(x, False))\n\n if tf.executing_eagerly():\n # In v2, construction still works when no `training` is specified\n # When no value passed during construction, it uses the local default.\n inputs = input_layer_lib.Input(10)\n outputs = my_layer(inputs)\n network = functional.Functional(inputs, outputs)\n self.assertAllEqual(network(x, training=True), _call(x, True))\n self.assertAllEqual(network(x, training=False), _call(x, False))\n self.assertAllEqual(network(x), _call(x, True)) # Use local default\n\n # `None` value passed positionally during construction is ignored at runtime\n inputs = input_layer_lib.Input(10)\n outputs = my_layer(inputs, None)\n network = functional.Functional(inputs, outputs)\n self.assertAllEqual(network(x, training=True), _call(x, True))\n self.assertAllEqual(network(x, training=False), _call(x, False))\n if tf.executing_eagerly():\n self.assertAllEqual(network(x), _call(x, True)) # Use local default\n else:\n # in v1 training would have defaulted to using the `None` inside the layer\n # if training is not passed at runtime\n self.assertAllEqual(network(x), _call(x, None))\n\n # `None` value passed as kwarg during construction is ignored at runtime.\n inputs = input_layer_lib.Input(10)\n outputs = my_layer(inputs, training=None)\n network = functional.Functional(inputs, outputs)\n self.assertAllEqual(network(x, training=True), _call(x, True))\n self.assertAllEqual(network(x, training=False), _call(x, False))\n if tf.executing_eagerly():\n self.assertAllEqual(network(x), _call(x, True)) # Use local default\n else:\n # in v1 training would have defaulted to using the `None` inside the layer\n # if training is not passed at runtime\n self.assertAllEqual(network(x), _call(x, None))\n\n\nclass InputsOutputsErrorTest(test_combinations.TestCase):\n\n @test_utils.enable_v2_dtype_behavior\n def test_input_error(self):\n inputs = input_layer_lib.Input((10,))\n outputs = layers.Dense(10)(inputs)\n with self.assertRaisesRegex(\n TypeError, \"('Keyword argument not understood:', 'input')\"):\n models.Model(input=inputs, outputs=outputs)\n\n @test_utils.enable_v2_dtype_behavior\n def test_output_error(self):\n inputs = input_layer_lib.Input((10,))\n outputs = layers.Dense(10)(inputs)\n with self.assertRaisesRegex(\n TypeError, \"('Keyword argument not understood:', 'output')\"):\n models.Model(inputs=inputs, output=outputs)\n\n def test_input_spec(self):\n if not tf.executing_eagerly():\n return\n inputs = input_layer_lib.Input((10,))\n outputs = layers.Dense(10)(inputs)\n model = models.Model(inputs, outputs)\n with self.assertRaisesRegex(\n ValueError, r'.*expected shape=.*'):\n 
model(np.zeros((3, 11)))\n\n def test_input_spec_list_of_inputs(self):\n if not tf.executing_eagerly():\n return\n input_1 = input_layer_lib.Input((10,), name='1')\n input_2 = input_layer_lib.Input((5,), name='2')\n x = layers.Concatenate()([input_1, input_2])\n outputs = layers.Dense(10)(x)\n model = models.Model([input_1, input_2], outputs)\n with self.assertRaisesRegex(\n ValueError, r'.*expects 2 input.*'):\n model(np.zeros((3, 10)))\n with self.assertRaisesRegex(\n ValueError, r'.*expects 2 input.*'):\n model([np.zeros((3, 10)), np.zeros((3, 5)), np.zeros((3, 10))])\n with self.assertRaisesRegex(\n ValueError, r'.*expected shape=.*'):\n model([np.zeros((3, 10)), np.zeros((3, 6))])\n\n # Test passing data via dict keyed by input name\n with self.assertRaisesRegex(\n ValueError, r'Missing data for input.*'):\n model({'1': np.zeros((3, 10))})\n with self.assertRaisesRegex(\n ValueError, r'.*expected shape=.*'):\n model({'1': np.zeros((3, 10)), '2': np.zeros((3, 6))})\n\n def test_input_spec_dict(self):\n if not tf.executing_eagerly():\n return\n input_1 = input_layer_lib.Input((10,))\n input_2 = input_layer_lib.Input((5,))\n x = layers.Concatenate()([input_1, input_2])\n outputs = layers.Dense(10)(x)\n model = models.Model({'1': input_1, '2': input_2}, outputs)\n with self.assertRaisesRegex(\n ValueError, r'Missing data for input.*'):\n model({'1': np.zeros((3, 10))})\n with self.assertRaisesRegex(\n ValueError, r'.*expected shape=.*'):\n model({'1': np.zeros((3, 10)), '2': np.zeros((3, 6))})\n\n\nclass FunctionalSubclassModel(training_lib.Model):\n\n def __init__(self, *args, **kwargs):\n self.foo = {'foo': 'bar'} # Make sure users can assign dict attributes\n my_input = input_layer_lib.Input(shape=(16,))\n dense = layers.Dense(32, activation='relu')\n output = dense(my_input)\n outputs = {'output': output}\n super().__init__(inputs=[my_input], outputs=outputs, *args, **kwargs)\n\n\nclass MixinClass:\n\n def __init__(self, foo, **kwargs):\n self._foo = foo\n super().__init__(**kwargs)\n\n def get_foo(self):\n return self._foo\n\n\nclass SubclassedModel(training_lib.Model):\n\n def __init__(self, bar, **kwargs):\n self._bar = bar\n super().__init__(**kwargs)\n\n def get_bar(self):\n return self._bar\n\n\nclass MultipleInheritanceModelTest(test_combinations.TestCase):\n\n def testFunctionalSubclass(self):\n m = FunctionalSubclassModel()\n # Some smoke test for the weights and output shape of the model\n self.assertLen(m.weights, 2)\n self.assertEqual(m.outputs[0].shape.as_list(), [None, 32])\n\n def testFunctionalSubclassPreMixin(self):\n class MixedFunctionalSubclassModel(MixinClass, FunctionalSubclassModel):\n pass\n\n m = MixedFunctionalSubclassModel(foo='123')\n self.assertTrue(m._is_graph_network)\n self.assertLen(m.weights, 2)\n self.assertEqual(m.outputs[0].shape.as_list(), [None, 32])\n self.assertEqual(m.get_foo(), '123')\n\n def testFunctionalSubclassPostMixin(self):\n # Make sure the the mixin class is also init correct when the order changed.\n\n class MixedFunctionalSubclassModel(FunctionalSubclassModel, MixinClass):\n pass\n\n m = MixedFunctionalSubclassModel(foo='123')\n self.assertTrue(m._is_graph_network)\n self.assertLen(m.weights, 2)\n self.assertEqual(m.outputs[0].shape.as_list(), [None, 32])\n self.assertEqual(m.get_foo(), '123')\n\n def testSubclassModelPreMixin(self):\n class MixedSubclassModel(MixinClass, SubclassedModel):\n pass\n\n m = MixedSubclassModel(foo='123', bar='456')\n self.assertFalse(m._is_graph_network)\n self.assertEqual(m.get_foo(), '123')\n 
self.assertEqual(m.get_bar(), '456')\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.platform.tf_logging.error", "tensorflow.test.main", "tensorflow.python.lib.io.file_io.read_file_to_string", "tensorflow.tools.common.public_api.PublicAPIVisitor", "tensorflow.python.platform.tf_logging.info", "tensorflow.tools.common.traverse.traverse", "tensorflow.compat.v1.gfile.Glob", "tensorflow.tools.api.lib.python_object_to_proto_visitor.PythonObjectToProtoVisitor", "tensorflow.compat.v1.resource_loader.get_data_files_path", "tensorflow.io.gfile.remove", "tensorflow.compat.v1.resource_loader.get_root_dir_with_all_resources", "tensorflow.tools.api.lib.api_objects_pb2.TFAPIObject" ], [ "tensorflow.compat.v2.keras.Model", "tensorflow.compat.v2.keras.layers.StringLookup", "tensorflow.compat.v2.config.experimental_connect_to_cluster", "tensorflow.compat.v2.squeeze", "tensorflow.compat.v2.math.reduce_mean", "tensorflow.compat.v2.keras.metrics.Mean", "tensorflow.compat.v2.distribute.cluster_resolver.TPUClusterResolver", "tensorflow.compat.v2.tpu.experimental.initialize_tpu_system", "tensorflow.compat.v2.expand_dims", "tensorflow.compat.v2.data.Dataset.range", "tensorflow.compat.v2.keras.optimizers.RMSprop", "tensorflow.compat.v2.TensorSpec", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.math.greater", "tensorflow.compat.v2.keras.models.load_model", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.compat.v1.executing_eagerly", "tensorflow.compat.v2.distribute.experimental.TPUStrategy", "tensorflow.compat.v2.keras.metrics.Accuracy", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.keras.layers.Dense", "tensorflow.python.framework.test_util.disable_mlir_bridge", "tensorflow.compat.v2.keras.losses.binary_crossentropy", "tensorflow.compat.v2.keras.layers.Input", "tensorflow.compat.v2.nn.compute_average_loss" ], [ "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.compat.v1.placeholder", "numpy.any", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.compat.v1.constant_initializer", "numpy.random.randint", "tensorflow.compat.v2.Graph", "tensorflow.compat.v2.zeros", "numpy.zeros", "tensorflow.compat.v2.TensorSpec", "tensorflow.compat.v2.test.main", "numpy.min", "tensorflow.compat.v2.reduce_mean", "tensorflow.compat.v2.ragged.constant", "tensorflow.compat.v2.constant", "numpy.array", "tensorflow.compat.v2.ones_like", "numpy.random.random", "tensorflow.compat.v2.cast", "numpy.ones", "tensorflow.compat.v2.compat.v1.assign_add", "tensorflow.python.training.tracking.util.Checkpoint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8" ] } ]
belldandyxtq/chainer
[ "abffa9a7def07c2e6bcd79d8ddcebeed1e762161", "abffa9a7def07c2e6bcd79d8ddcebeed1e762161", "abffa9a7def07c2e6bcd79d8ddcebeed1e762161" ]
[ "examples/vae/train_vae.py", "chainer/optimizer.py", "chainer/distributions/independent.py" ]
[ "#!/usr/bin/env python\n\"\"\"Chainer example: train a VAE on MNIST\n\"\"\"\nimport argparse\nimport os\nimport warnings\n\nimport numpy as np\n\nimport chainer\nfrom chainer import training\nfrom chainer.training import extensions\nimport chainerx\n\nimport net\n\nimport matplotlib\nmatplotlib.use('Agg')\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Chainer example: VAE')\n parser.add_argument('--initmodel', '-m', type=str,\n help='Initialize the model from given file')\n parser.add_argument('--resume', '-r', type=str,\n help='Resume the optimization from snapshot')\n parser.add_argument('--device', '-d', type=str, default='-1',\n help='Device specifier. Either ChainerX device '\n 'specifier or an integer. If non-negative integer, '\n 'CuPy arrays with specified device id are used. If '\n 'negative integer, NumPy arrays are used')\n parser.add_argument('--out', '-o', default='results',\n help='Directory to output the result')\n parser.add_argument('--epoch', '-e', default=100, type=int,\n help='number of epochs to learn')\n parser.add_argument('--dim-z', '-z', default=20, type=int,\n help='dimention of encoded vector')\n parser.add_argument('--dim-h', default=500, type=int,\n help='dimention of hidden layer')\n parser.add_argument('--beta', default=1.0, type=float,\n help='Regularization coefficient for '\n 'the second term of ELBO bound')\n parser.add_argument('--k', '-k', default=1, type=int,\n help='Number of Monte Carlo samples used in '\n 'encoded vector')\n parser.add_argument('--binary', action='store_true',\n help='Use binarized MNIST')\n parser.add_argument('--batch-size', '-b', type=int, default=100,\n help='learning minibatch size')\n parser.add_argument('--test', action='store_true',\n help='Use tiny datasets for quick tests')\n group = parser.add_argument_group('deprecated arguments')\n group.add_argument('--gpu', '-g', dest='device',\n type=int, nargs='?', const=0,\n help='GPU ID (negative value indicates CPU)')\n args = parser.parse_args()\n\n if chainer.get_dtype() == np.float16:\n warnings.warn(\n 'This example may cause NaN in FP16 mode.', RuntimeWarning)\n\n device = chainer.get_device(args.device)\n device.use()\n\n print('Device: {}'.format(device))\n print('# dim z: {}'.format(args.dim_z))\n print('# Minibatch-size: {}'.format(args.batch_size))\n print('# epoch: {}'.format(args.epoch))\n print('')\n\n # Prepare VAE model, defined in net.py\n encoder = net.make_encoder(784, args.dim_z, args.dim_h)\n decoder = net.make_decoder(784, args.dim_z, args.dim_h,\n binary_check=args.binary)\n prior = net.make_prior(args.dim_z)\n avg_elbo_loss = net.AvgELBOLoss(encoder, decoder, prior,\n beta=args.beta, k=args.k)\n avg_elbo_loss.to_device(device)\n\n # Setup an optimizer\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(avg_elbo_loss)\n\n # Initialize\n if args.initmodel is not None:\n chainer.serializers.load_npz(args.initmodel, avg_elbo_loss)\n\n # Load the MNIST dataset\n train, test = chainer.datasets.get_mnist(withlabel=False)\n\n if args.binary:\n # Binarize dataset\n train = (train >= 0.5).astype(np.float32)\n test = (test >= 0.5).astype(np.float32)\n\n if args.test:\n train, _ = chainer.datasets.split_dataset(train, 100)\n test, _ = chainer.datasets.split_dataset(test, 100)\n\n train_iter = chainer.iterators.SerialIterator(train, args.batch_size)\n test_iter = chainer.iterators.SerialIterator(test, args.batch_size,\n repeat=False, shuffle=False)\n\n # Set up an updater. 
StandardUpdater can explicitly specify a loss function\n # used in the training with 'loss_func' option\n updater = training.updaters.StandardUpdater(\n train_iter, optimizer, device=device, loss_func=avg_elbo_loss)\n\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\n trainer.extend(extensions.Evaluator(\n test_iter, avg_elbo_loss, device=device))\n # TODO(niboshi): Temporarily disabled for chainerx. Fix it.\n if device.xp is not chainerx:\n trainer.extend(extensions.DumpGraph('main/loss'))\n trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.PrintReport(\n ['epoch', 'main/loss', 'validation/main/loss',\n 'main/reconstr', 'main/kl_penalty', 'elapsed_time']))\n trainer.extend(extensions.ProgressBar())\n\n if args.resume is not None:\n chainer.serializers.load_npz(args.resume, trainer)\n\n # Run the training\n trainer.run()\n\n # Visualize the results\n def save_images(x, filename):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(3, 3, figsize=(9, 9), dpi=100)\n for ai, xi in zip(ax.flatten(), x):\n ai.imshow(xi.reshape(28, 28))\n fig.savefig(filename)\n\n avg_elbo_loss.to_cpu()\n train_ind = [1, 3, 5, 10, 2, 0, 13, 15, 17]\n x = chainer.Variable(np.asarray(train[train_ind]))\n with chainer.using_config('train', False), chainer.no_backprop_mode():\n x1 = decoder(encoder(x).mean, inference=True).mean\n save_images(x.array, os.path.join(args.out, 'train'))\n save_images(x1.array, os.path.join(args.out, 'train_reconstructed'))\n\n test_ind = [3, 2, 1, 18, 4, 8, 11, 17, 61]\n x = chainer.Variable(np.asarray(test[test_ind]))\n with chainer.using_config('train', False), chainer.no_backprop_mode():\n x1 = decoder(encoder(x).mean, inference=True).mean\n save_images(x.array, os.path.join(args.out, 'test'))\n save_images(x1.array, os.path.join(args.out, 'test_reconstructed'))\n\n # draw images from randomly sampled z\n z = prior().sample(9)\n x = decoder(z, inference=True).mean\n save_images(x.array, os.path.join(args.out, 'sampled'))\n\n\nif __name__ == '__main__':\n main()\n", "from __future__ import absolute_import\nimport collections\nimport copy\nimport math\nimport warnings\n\nimport numpy\nimport six\n\nimport chainer\nfrom chainer import backend\nfrom chainer import link as link_module\nfrom chainer import optimizer_hooks\nfrom chainer import serializer as serializer_module\nfrom chainer import variable\nimport chainerx\n\n\nclass Hyperparameter(object):\n\n \"\"\"Set of hyperparameter entries of an optimizer.\n\n This is a utility class to provide a set of hyperparameter entries for\n update rules and an optimizer. Each entry can be set as an attribute of a\n hyperparameter object.\n\n A hyperparameter object can hold a reference to its parent hyperparameter\n object. When an attribute does not exist in the child hyperparameter, it\n automatically refers to the parent. We typically set the hyperparameter of\n the gradient method as the parent of the hyperparameter of each update\n rule. 
It enables us to centralize the management of hyperparameters (e.g.\n we can change the learning rate of all update rules just by modifying the\n hyperparameter of the central optimizer object), while users can freely\n customize the hyperparameter of each update rule if needed.\n\n Args:\n parent (Hyperparameter): Parent hyperparameter.\n\n \"\"\"\n\n def __init__(self, parent=None):\n self._parent = parent\n\n def __getattr__(self, name):\n if '_parent' not in self.__dict__:\n raise AttributeError('_parent is not set up yet')\n return getattr(self._parent, name)\n\n def __repr__(self):\n d = self.get_dict()\n keys = sorted(d.keys())\n values_repr = ', '.join('%s=%s' % (k, d[k]) for k in keys)\n return 'Hyperparameter(%s)' % values_repr\n\n @property\n def parent(self):\n \"\"\"Parent hyperparameter object.\"\"\"\n return self._parent\n\n def get_dict(self):\n \"\"\"Converts the hyperparameter into a dictionary.\n\n Returns:\n Dictionary containing all entries that can be referred by this\n hyperparameter object.\n\n \"\"\"\n d = {} if self._parent is None else self._parent.get_dict()\n for k, v in six.iteritems(self.__dict__):\n if k != '_parent':\n d[k] = v\n return d\n\n\nclass UpdateRule(object):\n\n \"\"\"Base class of all update rules.\n\n Update rule is an object that implements how to update one parameter\n variable using the gradient of a loss function. This class provides the\n interface and the common features of any update rules.\n\n An update rule can be set to a :class:`~chainer.Variable` object that\n represents a parameter array of a model. An :class:`~chainer.Optimizer`\n instance defines which parameters to update, and the update rule instance\n of each parameter defines how to update it.\n\n Hook functions can be set to any update rule instance. The hook function is\n called just before or after any updates (configurable) in the order of\n registrations.\n\n An implementation of update rule should override :meth:`update_core` or\n its device-dependent variants (i.e., :meth:`update_core_cpu` and\n :meth:`update_core_gpu`).\n\n The state (e.g. a moving average of the gradient) of the update rule is\n stored into the state dictionary. An implementation of update rule using\n state should also override :meth:`init_state` to initialize the state at\n the first update. The values of the state dictionary are automatically\n copied to the appropriate device before the update based on the data and\n grad arrays.\n\n Args:\n parent_hyperparam (Hyperparameter): Hyperparameter that provides the\n default values.\n\n Attributes:\n enabled (bool): Flag to configure if this update rule is active. 
If the\n update rule is not active (i.e., ``enabled = False``), the\n :meth:`update` method does not update the parameter.\n hyperparam (Hyperparameter): Hyperparameter of the update rule.\n ~UpdateRule.t (int): Number of updates made by this update rule.\n\n \"\"\"\n\n def __init__(self, parent_hyperparam=None):\n self._pre_update_hooks = collections.OrderedDict()\n self._post_update_hooks = collections.OrderedDict()\n self._state = None\n self.enabled = True\n self.hyperparam = Hyperparameter(parent_hyperparam)\n self.t = 0\n self._use_fp32_update = False\n self._fp32_param = None\n\n @property\n def state(self):\n \"\"\"State dictionary.\"\"\"\n return self._state\n\n def add_hook(self, hook, name=None, timing='auto'):\n \"\"\"Adds a hook function.\n\n The hook function is called before or after any updates (see the timing\n attribute).\n\n Args:\n hook (callable): Hook function to be added. It takes two\n arguments: the update rule object and the parameter variable.\n name (str): Name of the hook function. The name attribute of the\n hook function is used by default.\n timing (str): Specifies when the hook is called. If 'auto', the\n timimg property of the hook will decide the timing.\n If 'pre', the hook will be called before any updates.\n If 'post', the hook will be called after any updates.\n If 'auto' and the timing property of the hook is not\n available, timing will default to 'pre'.\n\n \"\"\"\n if not callable(hook):\n raise TypeError('hook function must be callable')\n if timing not in ('pre', 'post', 'auto'):\n raise ValueError(\n 'timing must be one of (\\'pre\\', \\'post\\', \\'auto\\')')\n if timing == 'auto':\n timing = getattr(hook, 'timing', 'pre')\n\n if name is None:\n name = getattr(hook, 'name', getattr(hook, '__name__', None))\n if name is None:\n raise ValueError(\n 'the name of the hook function is not specified')\n if name in self._pre_update_hooks or name in self._post_update_hooks:\n raise ValueError('hook \"{}\" already exists'.format(name))\n\n if timing == 'pre':\n self._pre_update_hooks[name] = hook\n else:\n self._post_update_hooks[name] = hook\n\n def remove_hook(self, name):\n \"\"\"Removes the specified hook function.\n\n Args:\n name (str): Name of the hook function to be removed. 
The hook\n function registered with this name will be removed.\n\n \"\"\"\n try:\n del self._pre_update_hooks[name]\n except KeyError:\n del self._post_update_hooks[name]\n\n def update(self, param):\n \"\"\"Invokes hook functions and updates the parameter.\n\n Args:\n param (~chainer.Variable): Variable to be updated.\n\n \"\"\"\n if not self.enabled:\n return\n\n self.t += 1\n\n if self._use_fp32_update and param.dtype == numpy.float16:\n if self._fp32_param is None:\n self._fp32_param = variable.Variable(\n param.array.astype(numpy.float32),\n name=param.name)\n fp32_param = self._fp32_param\n fp32_param.grad = param.grad.astype(numpy.float32)\n\n if fp32_param.data is not None:\n self._prepare(fp32_param)\n if param._loss_scale is not None:\n fp32_param.grad /= param._loss_scale\n for hook in six.itervalues(self._pre_update_hooks):\n hook(self, fp32_param)\n self.update_core(fp32_param)\n for hook in six.itervalues(self._post_update_hooks):\n hook(self, fp32_param)\n\n param.data = fp32_param.data.astype(param.dtype)\n fp32_param.grad = None\n else:\n if param.data is not None:\n self._prepare(param)\n if param._loss_scale is not None:\n param.grad /= param._loss_scale\n for hook in six.itervalues(self._pre_update_hooks):\n hook(self, param)\n self.update_core(param)\n for hook in six.itervalues(self._post_update_hooks):\n hook(self, param)\n\n def update_core(self, param):\n \"\"\"Updates the parameter.\n\n Implementation of UpdateRule should override this method or both of\n :meth:`update_core_cpu` and :meth:`update_core_gpu`.\n\n Args:\n param (~chainer.Variable): Variable to be updated.\n\n \"\"\"\n device = param.device\n with chainer.using_device(device):\n if device.xp is chainerx:\n self.update_core_chainerx(param)\n elif device.xp is numpy:\n self.update_core_cpu(param)\n else:\n self.update_core_gpu(param)\n\n def update_core_cpu(self, param):\n \"\"\"Updates the parameter on CPU.\n\n See :meth:`update_core` for details.\n\n Args:\n param (~chainer.Variable): Variable to be updated.\n\n \"\"\"\n raise NotImplementedError\n\n def update_core_gpu(self, param):\n \"\"\"Updates the parameter on GPU.\n\n See :meth:`update_core` for details.\n\n Args:\n param (~chainer.Variable): Variable to be updated.\n\n \"\"\"\n raise NotImplementedError\n\n def update_core_chainerx(self, param):\n \"\"\"Updates the ChainerX parameter.\n\n This method can be overridden to implement custom update logic.\n The default implementation is to convert the parameter to a\n memory-shared NumPy/CuPy parameter and call the corresponding update\n method.\n\n See :meth:`update_core` for details.\n\n Args:\n param (~chainer.Variable): Variable to be updated.\n\n \"\"\"\n grad_array = param.grad\n backend_name = param.array.device.backend.name\n if backend_name not in ('native', 'cuda'):\n raise RuntimeError(\n 'Default implementation of Optimizer.update_core_chainerx is '\n 'only provided for native or cuda backends (actual: {}). 
'\n 'Override Optimizer.update_core_chainerx() to implement '\n 'custom update logic.'.format(backend_name))\n\n # Convert state arrays to NumPy/CuPy\n chainerx_state_arrays = {}\n for state_name, st in self.state.items():\n st = self.state[state_name]\n if isinstance(st, chainerx.ndarray):\n fallback_arr = backend.from_chx(st)\n self.state[state_name] = fallback_arr\n chainerx_state_arrays[state_name] = (st, fallback_arr)\n\n # Create a temporary parameter with memory-shared NumPy/CuPy array\n # If the ChainerX parameter has a cached NumPy/CuPy copy, use the\n # cache and avoid redundant conversion. Else, create the cache here\n # and use it.\n if param._chainerx_fallback_array is None:\n param._chainerx_fallback_array = backend.from_chx(\n param.array)\n\n temp_param = variable.Variable._init_unchecked(\n param._chainerx_fallback_array, is_chainerx_array=False)\n\n if grad_array is not None:\n temp_param._set_grad_without_check(\n backend.from_chx(grad_array))\n\n # Update\n self.update_core(temp_param)\n\n # Restore state arrays\n for state_name, (arr, fallback_arr) in chainerx_state_arrays.items():\n cur_arr = self.state[state_name]\n if cur_arr is not fallback_arr:\n # The optimizer altered the reference of the state, instead of\n # updating it in-place. We need to convert the new state back\n # to ChainerX.\n arr = backend.to_chx(cur_arr)\n self.state[state_name] = arr\n\n def init_state(self, param):\n \"\"\"Initializes the state.\n\n Any implementations that use the state should override this mehtod.\n This method is called at the first update.\n\n Args:\n param (~chainer.Variable): Parameter variable. It can be used to\n extract the shape and the data type of the parameter.\n\n \"\"\"\n pass\n\n def serialize(self, serializer):\n \"\"\"Serializes the update rule state.\n\n Be careful that this method only saves/loads the state of the update\n rule. 
The parameters of the target link is not saved/loaded by this\n method, and so you need to serialize the target link separately if you\n want to fully recover the training state including parameters.\n\n Args:\n serializer (~chainer.AbstractSerializer): Serializer object.\n\n \"\"\"\n self.t = serializer('t', self.t)\n if self.state is None:\n if isinstance(serializer, serializer_module.Deserializer):\n # try to initialize the state to retrieve state entries\n self._state = {}\n self_copy = copy.copy(self)\n arr = numpy.empty(1, dtype=numpy.float32)\n self_copy.init_state(variable.Variable(arr, grad=arr))\n\n for key in self._state:\n try:\n value = serializer(key, None)\n except KeyError:\n if self.enabled:\n raise\n value = None\n # leave the update rule state as `None` if the keys are not\n # contained in the snapshot, so that these states can be\n # automatically initialized with the `_prepare` method\n if value is None:\n self._state = None\n break\n else:\n self._state[key] = value\n else:\n for key in self._state:\n self._state[key] = serializer(key, self._state[key])\n\n def _prepare(self, param):\n device = param.device\n with chainer.using_device(device):\n state = self.state\n if state is None:\n state = self._state = {}\n self.init_state(param)\n\n for name, value in six.iteritems(state):\n if not isinstance(value, chainer.get_array_types()):\n continue\n state[name] = device.send(value)\n\n def use_fp32_update(self, flag=True):\n \"\"\"Enables use of parameter update in fp32.\n\n This method enables use of parameter update in fp32.\n When it is enabled and data type of original parameter variable is\n fp16, fp32 copy of parameter variable is automatically created and\n retained at self.fp32_param. And the parameter is update in fp32 in\n the following way.\n\n 1. copys the grad of original parameter variable to the grad of fp32\n parameter variable, converting its data type from fp16 to fp32.\n 2. updates the parameter in fp32.\n 3. copys the data of fp32 parameter variable to the data of original\n parameter variable, converting its data type from fp32 to fp16.\n\n See :meth:`update` for details.\n \"\"\"\n self._use_fp32_update = flag\n\n\nclass Optimizer(object):\n \"\"\"Base class of all numerical optimizers.\n\n This class provides basic features for all optimization methods. It\n optimizes parameters of a *target link*. The target link is registered via\n the :meth:`setup` method, and then the :meth:`update` method updates its\n parameters based on a given loss function.\n\n Each optimizer implementation must be defined as a child class of\n Optimizer. It must override :meth:`update` method.\n\n If the optimizer is based on single gradient computation (like\n most first-order methods), then it should inherit :class:`GradientMethod`,\n which adds some features dedicated for the first order methods, including\n the support of :class:`~chainer.UpdateRule`.\n\n Optimizer instance also supports *hook functions*. Hook function is\n registered by the :meth:`add_hook` method. Each hook function is called\n in registration order before of after the actual parameter update\n (configurable). 
If the hook function has an attribute\n ``call_for_each_param`` and its value is ``True``, the hook function is\n used as a hook function of all update rules (i.e., it is invoked for every\n parameter by passing the corresponding update rule and the parameter).\n\n Attributes:\n ~Optimizer.target: Target link object.\n It is set by the :meth:`setup` method.\n ~Optimizer.t: Number of update steps. It must be incremented by the\n :meth:`update` method.\n ~Optimizer.epoch: Current epoch. It is incremented by the\n :meth:`new_epoch` method.\n ~Optimizer.use_auto_new_epoch: Boolean flag to indicate if\n :meth:`new_epoch` will be called by the updater. Updater should\n set this flag to ``True`` if it automatically calls\n :meth:`new_epoch`.\n\n \"\"\"\n\n target = None\n t = 0\n epoch = 0\n _pre_update_hooks = None\n _post_update_hooks = None\n _loss_scale = None\n _loss_scale_max = 65504 # max representable value with fp16\n _loss_scaling_is_dynamic = False\n use_auto_new_epoch = False\n\n def setup(self, link):\n \"\"\"Sets a target link and initializes the optimizer states.\n\n Given link is set to the :attr:`target` attribute. It also prepares the\n optimizer state dictionaries corresponding to all parameters in the\n link hierarchy. The existing states are discarded.\n\n Args:\n link (~chainer.Link): Target link object.\n\n Returns:\n The optimizer instance.\n\n .. note::\n As of v4.0.0, this function returns the optimizer instance itself\n so that you can instantiate and setup the optimizer in one line,\n e.g., ``optimizer = SomeOptimizer().setup(link)``.\n\n \"\"\"\n if not isinstance(link, link_module.Link):\n raise TypeError('optimization target must be a link')\n self.target = link\n self.t = 0\n self.epoch = 0\n self._pre_update_hooks = collections.OrderedDict()\n self._post_update_hooks = collections.OrderedDict()\n return self\n\n def update(self, lossfun=None, *args, **kwds):\n \"\"\"Updates the parameters.\n\n This method updates the parameters of the target link. The behavior of\n this method is different for the cases either ``lossfun`` is given or\n not.\n\n If ``lossfun`` is given, this method typically clears the gradients,\n calls the loss function with given extra arguments, and calls the\n :meth:`~chainer.Variable.backward` method of its output to compute the\n gradients. The actual implementation might call ``lossfun`` more than\n once.\n\n If ``lossfun`` is not given, then this method assumes that the\n gradients of all parameters are already computed. 
An implementation\n that requires multiple gradient computations might raise an error on\n this case.\n\n In both cases, this method invokes the update procedure for all\n parameters.\n\n Args:\n lossfun (callable):\n Loss function.\n You can specify one of loss functions from\n :doc:`built-in loss functions </reference/functions>`, or\n your own loss function.\n It should not be an\n :doc:`loss functions with parameters </reference/links>`\n (i.e., :class:`~chainer.Link` instance).\n The function must accept arbitrary arguments\n and return one :class:`~chainer.Variable` object that\n represents the loss (or objective) value.\n Returned value must be a Variable derived from the input\n Variable object.\n ``lossfun`` can be omitted for single gradient-based methods.\n In this case, this method assumes gradient arrays computed.\n args, kwds: Arguments for the loss function.\n\n \"\"\"\n raise NotImplementedError\n\n def new_epoch(self, auto=False):\n \"\"\"Starts a new epoch.\n\n This method increments the :attr:`epoch` count. Note that if the\n optimizer depends on the epoch count, then user should call this method\n appropriately at the beginning of each epoch.\n\n Args:\n auto (bool): Should be ``True`` if this method is called by an\n updater. In this case, :attr:`use_auto_new_epoch` should be set\n to ``True`` by the updater.\n\n \"\"\"\n if auto:\n if not self.use_auto_new_epoch:\n raise RuntimeError(\n 'invalid new_epoch call with auto=True.\\n'\n 'Fix the updater to set '\n 'optimizer.use_auto_new_epoch = True.')\n else:\n if self.use_auto_new_epoch:\n raise RuntimeError(\n 'duplicated new_epoch with the updater.\\n'\n 'Pass auto_new_epoch=False to the updater or stop calling '\n 'new_epoch outside the updater.')\n self.epoch += 1\n\n def add_hook(self, hook, name=None, timing='auto'):\n \"\"\"Registers a hook function.\n\n Hook function is typically called right after the gradient computation,\n though the timing depends on the optimization method, and the timing\n attribute.\n\n Args:\n hook (callable): Hook function. If ``hook.call_for_each_param`` is\n true, this hook function is called for each parameter by\n passing the update rule and the parameter. Otherwise, this hook\n function is called only once each iteration by passing the\n optimizer.\n name (str): Name of the registration. If omitted, ``hook.name`` is\n used by default.\n timing (str): Specifies when the hook is called. 
If 'auto', the\n timimg property of the hook will decide the timing.\n If 'pre', the hook will be called before any updates.\n If 'post', the hook will be called after any updates.\n\n \"\"\"\n if not callable(hook):\n raise TypeError('hook function is not callable')\n if self._pre_update_hooks is None or self._post_update_hooks is None:\n raise RuntimeError('call `setup` method before `add_hook` method')\n if timing not in ('pre', 'post', 'auto'):\n raise ValueError(\n 'timing must be one of (\\'pre\\', \\'post\\', \\'auto\\')')\n if timing == 'auto':\n timing = getattr(hook, 'timing', None)\n if timing not in ('pre', 'post'):\n warnings.warn(\n 'Hook timing attribute not in (\\'pre\\', \\'post\\'), '\n 'defaulting timing to \\'pre\\'.')\n timing = 'pre'\n\n if name is None:\n name = hook.name\n if name in self._pre_update_hooks or name in self._post_update_hooks:\n raise KeyError('hook \"{}\" already exists'.format(name))\n\n if timing == 'pre':\n self._pre_update_hooks[name] = hook\n else:\n self._post_update_hooks[name] = hook\n\n def remove_hook(self, name):\n \"\"\"Removes a hook function.\n\n Args:\n name (str): Registered name of the hook function to remove.\n\n \"\"\"\n try:\n del self._pre_update_hooks[name]\n except KeyError:\n del self._post_update_hooks[name]\n\n def call_hooks(self, timing='pre'):\n \"\"\"Invokes hook functions in registration order.\"\"\"\n if timing not in ('pre', 'post'):\n raise ValueError('timing must be either \\'pre\\' or \\'post\\'')\n if timing == 'pre':\n hooks = self._pre_update_hooks\n else:\n hooks = self._post_update_hooks\n for hook in six.itervalues(hooks):\n self._call_hook(hook)\n\n def _call_hook(self, hook):\n if getattr(hook, 'call_for_each_param', False):\n for param in self.target.params():\n hook(param.update_rule, param)\n else:\n hook(self)\n\n def serialize(self, serializer):\n \"\"\"Serializes or deserializes the optimizer.\n\n It only saves or loads the following things:\n\n - Optimizer states\n - Global states (:attr:`t` and :attr:`epoch`)\n\n **It does not saves nor loads the parameters of the target link.** They\n should be separately saved or loaded.\n\n Args:\n serializer (~chainer.AbstractSerializer): Serializer or\n deserializer object.\n\n \"\"\"\n self.t = serializer('t', self.t)\n self.epoch = serializer('epoch', self.epoch)\n for name, param in self.target.namedparams():\n rule = getattr(param, 'update_rule', None)\n if rule is not None:\n rule.serialize(serializer[name])\n\n def loss_scaling(self, interval=1000, scale=None):\n \"\"\"Configures the loss scaling algorithm.\n\n Args:\n interval (int): Number of iterations until scaling factor gets\n doubled. This is effective when \"dynamic\" loss scaling is used.\n scale (float): Loss scaling factor. If ``None``, \"dynamic\" loss\n scaling is used, otherwise \"static\" loss scaling is used.\n \"\"\"\n if scale is None:\n self._loss_scaling_is_dynamic = True\n if interval < 1:\n raise ValueError('interval must be greater than or equal to 1.'\n ' Actual: {}'.format(interval))\n self._loss_scale = 1.0\n self._loss_scaling_multiplier = math.pow(2.0, 1.0 / interval)\n self._loss_scaling_isnan_ever = False\n else:\n if scale <= 0:\n raise ValueError('loss_scale must be a positive number. 
'\n 'Actual: {}'.format(scale))\n self._loss_scale = scale\n\n def set_loss_scale(self, loss_scale):\n \"\"\"Sets loss scaling factor.\"\"\"\n self.loss_scaling(scale=loss_scale)\n\n def check_nan_in_grads(self):\n \"\"\"Checks if there is NaN in grads when dynamic loss scaling used.\"\"\"\n self._loss_scaling_isnan = False\n if not self._loss_scaling_is_dynamic:\n return\n for name, param in self.target.namedparams():\n xp = param.device.xp\n if not xp.all(xp.isfinite(param.grad)):\n self._loss_scaling_isnan = True\n self._loss_scaling_isnan_ever = True\n warnings.warn(\n 'Non finite number found in param.grad of {}'\n ' (iteration: {}, loss_scale: {})'\n ''.format(name, self.t, self._loss_scale))\n\n def is_safe_to_update(self):\n return not self._loss_scaling_isnan\n\n def update_loss_scale(self):\n if not self._loss_scaling_is_dynamic:\n return\n if self._loss_scaling_isnan:\n multiplier = 0.5\n elif self._loss_scaling_isnan_ever:\n multiplier = self._loss_scaling_multiplier\n else:\n multiplier = 2.0\n self._loss_scale = max(1, min(self._loss_scale_max,\n self._loss_scale * multiplier))\n\n\nclass GradientMethod(Optimizer):\n \"\"\"Base class of all single gradient-based optimizers.\n\n This is an extension of the :class:`Optimizer` class. Typical gradient\n methods that just require the gradient at the current parameter vector on\n an update can be implemented as its child class.\n\n This class uses :class:`~chainer.UpdateRule` to manage the update rule of\n each parameter. A child class of GradientMethod should override\n :meth:`create_update_rule` to create the default update rule of each\n parameter.\n\n This class also provides :attr:`hyperparam`, which is the hyperparameter\n used as the default configuration of each update rule. All built-in\n gradient method implementations also provide proxy properties that act\n as aliases to the attributes of :attr:`hyperparam`. It is recommended that\n you provide such an alias to each attribute. It can be done by only adding\n one line for each attribute using :class:`HyperparameterProxy`.\n\n Attributes:\n hyperparam (Hyperparameter): The hyperparameter of the gradient\n method. 
It is used as the default configuration of each update\n rule (i.e., the hyperparameter of each update rule refers this\n hyperparameter as its parent).\n\n \"\"\"\n\n def __init__(self):\n super(GradientMethod, self).__init__()\n self.hyperparam = Hyperparameter()\n self._use_fp32_update = False\n\n def setup(self, link):\n super(GradientMethod, self).setup(link)\n for param in link.params():\n param.update_rule = self.create_update_rule()\n if self._use_fp32_update:\n param.update_rule.use_fp32_update()\n return self\n\n def reallocate_cleared_grads(self):\n \"\"\"Reallocate gradients cleared by :meth:`~chainer.Variable.cleargrad`.\n\n This method allocates arrays for all gradients which have :obj:`None`.\n This method is called before and after every optimizer hook.\n If an inheriting optimizer does not require this allocation,\n the optimizer can override this method with a blank function.\n\n \"\"\"\n for name, param in self.target.namedparams(False):\n if param.grad is None:\n device = param.device\n with chainer.using_device(device):\n param.grad = device.xp.zeros_like(param.data)\n\n def call_hooks(self, timing='pre'):\n \"\"\"Invokes hook functions in registration order.\"\"\"\n if timing not in ('pre', 'post'):\n raise ValueError('timing must be either \\'pre\\' or \\'post\\'')\n if timing == 'pre':\n hooks = self._pre_update_hooks\n else:\n hooks = self._post_update_hooks\n for hook in six.itervalues(hooks):\n self._call_hook(hook)\n self.reallocate_cleared_grads()\n\n def update(self, lossfun=None, *args, **kwds):\n \"\"\"Updates parameters based on a loss function or computed gradients.\n\n This method runs in two ways.\n\n - If ``lossfun`` is given, then it is used as a loss function to\n compute gradients.\n - Otherwise, this method assumes that the gradients are already\n computed.\n\n In both cases, the computed gradients are used to update parameters.\n The actual update routines are defined by the update rule of each\n parameter.\n\n \"\"\"\n if lossfun is not None:\n use_cleargrads = getattr(self, '_use_cleargrads', True)\n loss = lossfun(*args, **kwds)\n if use_cleargrads:\n self.target.cleargrads()\n else:\n self.target.zerograds()\n loss.backward(loss_scale=self._loss_scale)\n del loss\n\n self.reallocate_cleared_grads()\n self.check_nan_in_grads()\n self.call_hooks('pre')\n\n self.t += 1\n if self.is_safe_to_update():\n for param in self.target.params():\n param.update()\n\n self.reallocate_cleared_grads()\n\n self.call_hooks('post')\n self.update_loss_scale()\n\n def use_cleargrads(self, use=True):\n \"\"\"Enables or disables use of :func:`~chainer.Link.cleargrads` in `update`.\n\n Args:\n use (bool): If ``True``, this function enables use of\n `cleargrads`. If ``False``, disables use of `cleargrads`\n (`zerograds` is used).\n\n .. deprecated:: v2.0\n Note that :meth:`update` calls :meth:`~Link.cleargrads` by default.\n :meth:`~Link.cleargrads` is more efficient than\n :meth:`~Link.zerograds`, so one does not have to call\n :meth:`use_cleargrads`. This method remains for backward\n compatibility.\n\n \"\"\"\n warnings.warn(\n 'GradientMethod.use_cleargrads is deprecated.',\n DeprecationWarning)\n\n self._use_cleargrads = use\n\n def create_update_rule(self):\n \"\"\"Creates a new update rule object.\n\n This method creates an update rule object. 
It is called by\n :meth:`setup` to set up an update rule of each parameter.\n Each implementation of the gradient method should override this method\n to provide the default update rule implementation.\n\n Return:\n UpdateRule: Update rule object.\n\n \"\"\"\n raise NotImplementedError\n\n def use_fp32_update(self, flag=True):\n \"\"\"Enables use of parameter update in fp32.\"\"\"\n self._use_fp32_update = flag\n link = getattr(self, 'target', None)\n if link is not None:\n for param in link.params():\n param.update_rule.use_fp32_update()\n\n\nclass HyperparameterProxy(object):\n\n \"\"\"Property that acts as an alias to an attribute of the hyperparameter.\n\n This class is used to define a property of an implementation of\n :class:`GradientMethod` that acts as an alias to an attribute of the\n hyperparameter.\n\n Args:\n attr_name (str): Name of the attribute of the hyperparameter.\n\n \"\"\"\n\n def __init__(self, attr_name):\n self._attr_name = attr_name\n self.__doc__ = 'Alias to ``self.hyperparam.{}``'.format(attr_name)\n\n def __get__(self, obj, type=None):\n if obj is None:\n return self\n return getattr(obj.hyperparam, self._attr_name)\n\n def __set__(self, obj, value):\n setattr(obj.hyperparam, self._attr_name, value)\n\n\ndef make_deprecation_message(module_name):\n return ('chainer.optimizer.{0} is deprecated from v4. '\n 'Use chainer.optimizer_hooks.{0} instead.'\n ''.format(module_name))\n\n\nclass WeightDecay(optimizer_hooks.WeightDecay):\n\n def __init__(self, *args, **kwargs):\n warnings.warn(make_deprecation_message('WeightDecay'),\n DeprecationWarning)\n return super(WeightDecay, self).__init__(*args, **kwargs)\n\n\nclass Lasso(optimizer_hooks.Lasso):\n\n def __init__(self, *args, **kwargs):\n warnings.warn(make_deprecation_message('Lasso'),\n DeprecationWarning)\n return super(Lasso, self).__init__(*args, **kwargs)\n\n\nclass GradientClipping(optimizer_hooks.GradientClipping):\n\n def __init__(self, *args, **kwargs):\n warnings.warn(make_deprecation_message('GradientClipping'),\n DeprecationWarning)\n return super(GradientClipping, self).__init__(*args, **kwargs)\n\n\nclass GradientNoise(optimizer_hooks.GradientNoise):\n\n def __init__(self, *args, **kwargs):\n warnings.warn(make_deprecation_message('GradientNoise'),\n DeprecationWarning)\n return super(GradientNoise, self).__init__(*args, **kwargs)\n\n\nclass GradientHardClipping(optimizer_hooks.GradientHardClipping):\n\n def __init__(self, *args, **kwargs):\n warnings.warn(make_deprecation_message('GradientHardClipping'),\n DeprecationWarning)\n return super(GradientHardClipping, self).__init__(*args, **kwargs)\n", "import numpy\n\nfrom chainer.backend import cuda\nfrom chainer import distribution\nfrom chainer.functions.array import repeat\nfrom chainer.functions.array import reshape\nfrom chainer.functions.array import transpose\nfrom chainer.functions.math import prod\nfrom chainer.functions.math import sum as sum_mod\nfrom chainer.utils import array\nfrom chainer.utils import cache\n\n\nclass Independent(distribution.Distribution):\n\n \"\"\"Independent distribution.\n\n Args:\n distribution (:class:`~chainer.Distribution`): The base distribution\n instance to transform.\n reinterpreted_batch_ndims (:class:`int`): Integer number of rightmost\n batch dims which will be regarded as event dims. 
When ``None`` all\n but the first batch axis (batch axis 0) will be transferred to\n event dimensions.\n \"\"\"\n\n def __init__(self, distribution, reinterpreted_batch_ndims=None):\n super(Independent, self).__init__()\n self.__distribution = distribution\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = \\\n self._get_default_reinterpreted_batch_ndims(distribution)\n elif reinterpreted_batch_ndims > len(distribution.batch_shape):\n raise ValueError(\n 'reinterpreted_batch_ndims must be less than or equal to the '\n 'number of dimensions of `distribution.batch_shape`.')\n self.__reinterpreted_batch_ndims = reinterpreted_batch_ndims\n\n batch_ndim = \\\n len(self.distribution.batch_shape) - self.reinterpreted_batch_ndims\n self.__batch_shape = distribution.batch_shape[:batch_ndim]\n self.__event_shape = \\\n distribution.batch_shape[batch_ndim:] + distribution.event_shape\n\n @property\n def distribution(self):\n return self.__distribution\n\n @property\n def reinterpreted_batch_ndims(self):\n return self.__reinterpreted_batch_ndims\n\n @property\n def batch_shape(self):\n return self.__batch_shape\n\n @property\n def event_shape(self):\n return self.__event_shape\n\n @cache.cached_property\n def covariance(self):\n \"\"\" The covariance of the independent distribution.\n\n By definition, the covariance of the new\n distribution becomes block diagonal matrix. Let\n :math:`\\\\Sigma_{\\\\mathbf{x}}` be the covariance matrix of the original\n random variable :math:`\\\\mathbf{x} \\\\in \\\\mathbb{R}^d`, and\n :math:`\\\\mathbf{x}^{(1)}, \\\\mathbf{x}^{(2)}, \\\\cdots \\\\mathbf{x}^{(m)}`\n be the :math:`m` i.i.d. random variables, new covariance matrix\n :math:`\\\\Sigma_{\\\\mathbf{y}}` of :math:`\\\\mathbf{y} =\n [\\\\mathbf{x}^{(1)}, \\\\mathbf{x}^{(2)}, \\\\cdots, \\\\mathbf{x}^{(m)}] \\\\in\n \\\\mathbb{R}^{md}` can be written as\n\n .. math::\n \\\\left[\\\\begin{array}{ccc}\n \\\\Sigma_{\\\\mathbf{x}^{1}} & & 0 \\\\\\\\\n & \\\\ddots & \\\\\\\\\n 0 & & \\\\Sigma_{\\\\mathbf{x}^{m}}\n \\\\end{array} \\\\right].\n\n Note that this relationship holds only if the covariance matrix of the\n original distribution is given analytically.\n\n Returns:\n ~chainer.Variable: The covariance of the distribution.\n \"\"\"\n num_repeat = array.size_of_shape(\n self.distribution.batch_shape[-self.reinterpreted_batch_ndims:])\n dim = array.size_of_shape(self.distribution.event_shape)\n cov = repeat.repeat(\n reshape.reshape(\n self.distribution.covariance,\n ((self.batch_shape) + (1, num_repeat, dim, dim))),\n num_repeat, axis=-4)\n cov = reshape.reshape(\n transpose.transpose(\n cov, axes=(\n tuple(range(len(self.batch_shape))) + (-4, -2, -3, -1))),\n self.batch_shape + (num_repeat * dim, num_repeat * dim))\n block_indicator = self.xp.reshape(\n self._block_indicator,\n tuple([1] * len(self.batch_shape)) + self._block_indicator.shape)\n return cov * block_indicator\n\n @property\n def entropy(self):\n return self._reduce(sum_mod.sum, self.distribution.entropy)\n\n def cdf(self, x):\n return self._reduce(prod.prod, self.distribution.cdf(x))\n\n def icdf(self, x):\n \"\"\"The inverse cumulative distribution function for multivariate variable.\n\n Cumulative distribution function for multivariate variable is not\n invertible. 
This function always raises :class:`RuntimeError`.\n\n Args:\n x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in\n the codomain of the distribution\n\n Raises:\n :class:`RuntimeError`\n \"\"\"\n\n raise RuntimeError(\n 'Cumulative distribution function for multivariate variable '\n 'is not invertible.')\n\n def log_cdf(self, x):\n return self._reduce(sum_mod.sum, self.distribution.log_cdf(x))\n\n def log_prob(self, x):\n return self._reduce(sum_mod.sum, self.distribution.log_prob(x))\n\n def log_survival_function(self, x):\n return self._reduce(\n sum_mod.sum, self.distribution.log_survival_function(x))\n\n @property\n def mean(self):\n return self.distribution.mean\n\n @property\n def mode(self):\n return self.distribution.mode\n\n @property\n def params(self):\n return self.distribution.params\n\n def perplexity(self, x):\n return self._reduce(prod.prod, self.distribution.perplexity(x))\n\n def prob(self, x):\n return self._reduce(prod.prod, self.distribution.prob(x))\n\n def sample_n(self, n):\n return self.distribution.sample_n(n)\n\n @property\n def stddev(self):\n return self.distribution.stddev\n\n @property\n def support(self):\n return self.distribution.support\n\n def survival_function(self, x):\n return self._reduce(prod.prod, self.distribution.survival_function(x))\n\n @property\n def variance(self):\n return self.distribution.variance\n\n @property\n def xp(self):\n return self.distribution.xp\n\n def _reduce(self, op, stat):\n range_ = tuple(range(-self.reinterpreted_batch_ndims, 0))\n return op(stat, axis=range_)\n\n def _get_default_reinterpreted_batch_ndims(self, distribution):\n ndims = len(distribution.batch_shape)\n return max(0, ndims - 1)\n\n @cache.cached_property\n def _block_indicator(self):\n num_repeat = array.size_of_shape(\n self.distribution.batch_shape[-self.reinterpreted_batch_ndims:])\n dim = array.size_of_shape(self.distribution.event_shape)\n block_indicator = numpy.fromfunction(\n lambda i, j: i // dim == j // dim,\n (num_repeat * dim, num_repeat * dim)).astype(int)\n if self.xp is cuda.cupy:\n block_indicator = cuda.to_gpu(block_indicator)\n return block_indicator\n\n\[email protected]_kl(Independent, Independent)\ndef _kl_independent_independent(dist1, dist2):\n \"\"\"Computes Kullback-Leibler divergence for independent distributions.\n\n We can leverage the fact that\n .. 
math::\n \\\\mathrm{KL}(\n \\\\mathrm{Independent}(\\\\mathrm{dist1}) ||\n \\\\mathrm{Independent}(\\\\mathrm{dist2}))\n = \\\\mathrm{sum}(\\\\mathrm{KL}(\\\\mathrm{dist1} || \\\\mathrm{dist2}))\n where the sum is over the ``reinterpreted_batch_ndims``.\n\n Args:\n dist1 (:class:`~chainer.distribution.Independent`): Instance of\n `Independent`.\n dist2 (:class:`~chainer.distribution.Independent`): Instance of\n `Independent`.\n\n Returns:\n Batchwise ``KL(dist1 || dist2)``.\n\n Raises:\n :class:`ValueError`: If the event space for ``dist1`` and ``dist2``,\n or their underlying distributions don't match.\n \"\"\"\n\n p = dist1.distribution\n q = dist2.distribution\n\n # The KL between any two (non)-batched distributions is a scalar.\n # Given that the KL between two factored distributions is the sum, i.e.\n # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(q1 || q2), we compute\n # KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.\n if dist1.event_shape == dist2.event_shape:\n if p.event_shape == q.event_shape:\n num_reduce_dims = len(dist1.event_shape) - len(p.event_shape)\n reduce_dims = tuple([-i - 1 for i in range(0, num_reduce_dims)])\n\n return sum_mod.sum(\n distribution.kl_divergence(p, q), axis=reduce_dims)\n else:\n raise NotImplementedError(\n 'KL between Independents with different '\n 'event shapes not supported.')\n else:\n raise ValueError('Event shapes do not match.')\n" ]
[ [ "numpy.asarray", "matplotlib.use", "matplotlib.pyplot.subplots" ], [ "numpy.empty" ], [ "numpy.fromfunction" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]