repo_name
string (lengths 8 to 130)
hexsha
sequence
file_path
sequence
code
sequence
apis
sequence
maccam912/Oddyssey
[ "a9d1eca9ea1dfabd9873eb842eae03f2ed83d405" ]
[ "src/GameManager/gui/subscreen.py" ]
[ "import numpy as np\n\nclass SubScreen():\n def __init__(self, x, y, width, height, curses):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.curses = curses\n \n def put_char(self, x, y, char=' ', foreground='white', background='transparent'):\n if x < self.width and x >= self.x and y < self.height and y >= self.y:\n self.curses.put_char(self.x + x, self.y + y, char, foreground, background)\n else:\n raise ValueError('Error: Out of SubScreen boundary.')\n \n def put_message(self, x, y , message, foreground='white', background='transparent', auto=True, align='left'):\n self.curses.put_message(self.x + x, self.y + y , message, foreground, background, auto, align, box_x=self.x, box_y=self.y, box_width=self.width, box_height=self.height)\n \n def fill_char(self, char=' ', foreground='white', background='transparent'):\n for i in range(self.x, self.x + self.width):\n for j in range(self.y, self.y + self.height):\n self.curses.put_char(i, j, char, foreground, background) \n\nclass MessageScreen(SubScreen):\n def __init__(self, x, y, width, height, curses):\n super(MessageScreen, self).__init__(x, y, width, height, curses)\n self.initialization()\n \n def initialization(self):\n self.message_id = 1\n self.message_size = self.height\n self.message_storage = ['']*self.message_size\n self.color_storage = ['transparent']*self.message_size\n self.idx_storage = ['']*self.message_size\n \n def add_message(self, message, color='white'):\n idx = '[%d] '%(self.message_id)\n message = message\n self.message_id += 1\n self.message_storage.append(message)\n self.color_storage.append(color)\n self.idx_storage.append(idx)\n \n self.message_storage.pop(0)\n self.color_storage.pop(0)\n self.idx_storage.pop(0)\n \n def draw(self):\n self.fill_char()\n for i in range(len(self.message_storage)):\n self.put_message(0, i, self.idx_storage[i], foreground='white', background='transparent', auto=True, align='left')\n self.put_message(len(self.idx_storage[i]), i , self.message_storage[i], foreground=self.color_storage[i], background='transparent', auto=True, align='left')\n\nclass PlayerInfoScreen(SubScreen):\n def __init__(self, x, y, width, height, curses, player):\n super(PlayerInfoScreen, self).__init__(x, y, width, height, curses)\n self.player = player\n self.initialization()\n \n def initialization(self):\n self.full_health_bar_length = 15\n self.draw()\n \n def draw(self):\n # Draw background\n self.fill_char(char='█', foreground='peru', background='transparent')\n # Draw HP bar\n health = self.player.current_health\n interval = self.player.health / self.full_health_bar_length / 3\n level = int(np.ceil(health / interval))\n \n health_title = 'HP '\n \n if level % 3 == 0:\n remainder = ''\n elif level % 3 == 1:\n remainder = '░'\n elif level % 3 == 2: \n remainder = '▒'\n \n health_message = '█' * int((level - level%3)/3) + remainder\n self.put_message(0, 0, health_title, foreground='red', background='peru', auto=True, align='left')\n self.put_message(len(health_title), 0, ' '*self.full_health_bar_length, foreground='red', background='transparent', auto=True, align='left')\n self.put_message(len(health_title), 0, health_message, foreground='red', background='transparent', auto=True, align='left')\n " ]
[ [ "numpy.ceil" ] ]
davidliyutong/Flint
[ "4e2552dac8d781c21e8998ad68bbf1b986b09258" ]
[ "test/test43_tf_official.py" ]
[ "from models import Linear3\nfrom core.Optimizers import sgd, bgd\nfrom core.Functions import one_hot_f\nimport numpy as np\nfrom tensorflow import keras\nfrom core.Dataloader import batch_iterator\n\n\ndef test(model, test_inputs, test_labels):\n num_of_sample = test_inputs.shape[0]\n cnt_correct, cnt_tot = 0, 0\n for i in range(num_of_sample):\n test_input = test_inputs[i:i + 1]\n test_label = test_labels[i]\n res = model.forward_prop(test_input)\n if np.argmax(res) == np.argmax(test_label):\n cnt_correct += 1\n cnt_tot += 1\n\n return cnt_correct / cnt_tot\n\n\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\ntrain_images = np.expand_dims(train_images / 255, axis=-1)\ntest_images = np.expand_dims(test_images / 255, axis=-1)\ntrain_labels = one_hot_f(train_labels, num_classes=10)\ntest_labels = one_hot_f(test_labels, num_classes=10)\n\nLinear3.compile()\nLinear3.cuda()\ntrain_iterator = batch_iterator(batch_sz=256)\noptimizer = bgd(0.01)\noptimizer.fit(Linear3, train_images, train_labels, train_iterator, epoch=50)\nLinear3.save('Linear3_cuda')\n" ]
[ [ "numpy.expand_dims", "numpy.argmax" ] ]
vishalbelsare/neupy
[ "684313cdaddcad326f2169384fb15ec3aa29d991" ]
[ "tests/layers/test_reshape_layer.py" ]
[ "import numpy as np\n\nfrom neupy import layers\n\nfrom base import BaseTestCase\n\n\nclass ReshapeLayerTestCase(BaseTestCase):\n def test_reshape_layer_1d_shape(self):\n x = np.random.random((5, 4, 3, 2, 1))\n\n input_layer = layers.Input((4, 3, 2, 1))\n reshape_layer = layers.Reshape()\n input_layer > reshape_layer\n\n y = reshape_layer.output(x).eval()\n self.assertEqual(y.shape, (5, 4 * 3 * 2 * 1))\n\n def test_reshape_layer_2d_shape(self):\n x = np.random.random((5, 20))\n\n input_layer = layers.Input(20)\n reshape_layer = layers.Reshape((4, 5))\n input_layer > reshape_layer\n\n y = reshape_layer.output(x).eval()\n self.assertEqual(y.shape, (5, 4, 5))\n" ]
[ [ "numpy.random.random" ] ]
baender/gimli
[ "eb9a2204669cf11209b9577472f61ac70217a191" ]
[ "pygimli/physics/traveltime/raplot.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\"WRITEME\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pygimli as pg\n\nfrom pygimli.viewer.mpl import createColorBar # , updateColorBar\n\nfrom .ratools import shotReceiverDistances\n\n\ndef drawTravelTimeData(ax, data, t=None):\n \"\"\"\n Draw first arrival traveltime data into mpl ax a.\n data of type \\ref DataContainer must contain sensorIdx 's' and 'g'\n and thus being numbered internally [0..n)\n \"\"\"\n x = pg.x(data.sensorPositions())\n # z = pg.z(data.sensorPositions())\n\n shots = pg.unique(pg.sort(data('s')))\n geoph = pg.unique(pg.sort(data('g')))\n\n startOffsetIDX = 0\n\n if min(min(shots), min(geoph)) == 1:\n startOffsetIDX = 1\n\n tShow = data('t')\n if t is not None:\n tShow = t\n\n ax.set_xlim([min(x), max(x)])\n ax.set_ylim([max(tShow), -0.002])\n ax.figure.show()\n\n for shot in shots:\n gIdx = pg.find(data('s') == shot)\n sensorIdx = [int(i__ - startOffsetIDX) for i__ in data('g')[gIdx]]\n ax.plot(x[sensorIdx], tShow[gIdx], 'x-')\n\n yPixel = ax.transData.inverted().transform_point((1, 1))[1] - \\\n ax.transData.inverted().transform_point((0, 0))[1]\n xPixel = ax.transData.inverted().transform_point((1, 1))[0] - \\\n ax.transData.inverted().transform_point((0, 0))[0]\n\n # draw shot points\n ax.plot(x[[int(i__ - startOffsetIDX) for i__ in shots]],\n np.zeros(len(shots)) + 8. * yPixel, 'gv', markersize=8)\n\n # draw geophone points\n ax.plot(x[[int(i__ - startOffsetIDX) for i__ in geoph]],\n np.zeros(len(geoph)) + 3. * yPixel, 'r^', markersize=8)\n\n ax.grid()\n ax.set_ylim([max(tShow), +16. * yPixel])\n ax.set_xlim([min(x) - 5. * xPixel, max(x) + 5. * xPixel])\n\n ax.set_xlabel('x-Coordinate [m]')\n ax.set_ylabel('Traveltime [ms]')\n\n\ndef plotFirstPicks(ax, data, tt=None, plotva=False, marker='x-'):\n \"\"\"Naming convention. drawFOO(ax, ... 
)\"\"\"\n pg.deprecated(\"use drawFirstPicks\")\n return drawFirstPicks(ax=ax, data=data, tt=tt, plotva=plotva,\n marker=marker)\n\n\ndef drawFirstPicks(ax, data, tt=None, plotva=False, marker='x-'):\n \"\"\"plot first arrivals as lines\"\"\"\n px = pg.x(data)\n gx = np.array([px[int(g)] for g in data(\"g\")])\n sx = np.array([px[int(s)] for s in data(\"s\")])\n if tt is None:\n tt = np.array(data(\"t\"))\n if plotva:\n tt = np.absolute(gx - sx) / tt\n\n uns = np.unique(sx)\n\n cols = plt.cm.tab10(np.arange(10))\n\n for i, si in enumerate(uns):\n ti = tt[sx == si]\n gi = gx[sx == si]\n ii = gi.argsort()\n ax.plot(gi[ii], ti[ii], marker, color=cols[i % 10])\n ax.plot(si, 0., 's', color=cols[i % 10], markersize=8)\n\n ax.grid(True)\n if plotva:\n ax.set_ylabel(\"Apparent velocity (m/s)\")\n else:\n ax.set_ylabel(\"Traveltime (s)\")\n ax.set_xlabel(\"x (m)\")\n ax.invert_yaxis()\n\n\ndef _getOffset(data, full=False):\n \"\"\"Return vector of offsets (in m) between shot and receiver.\"\"\"\n pg.deprecated('use shotReceiverDistances') # 190429 ??\n return shotReceiverDistances(data, full)\n\n\ndef showVA(data, usePos=True, ax=None, **kwargs):\n \"\"\"Show apparent velocity as image plot\n\n Parameters\n ----------\n data : pg.DataContainer()\n Datacontainer with 's' and 'g' Sensorindieces and 't' traveltimes.\n \"\"\"\n ax, _ = pg.show(ax=ax)\n gci = drawVA(ax, data=data, usePos=usePos, **kwargs)\n\n cBar = createColorBar(gci, **kwargs)\n\n return gci, cBar\n\n\ndef drawVA(ax, data, vals=None, usePos=True, pseudosection=False, **kwargs):\n \"\"\"Draw apparent velocities as matrix into ax\n\n Parameters\n ----------\n ax : mpl.Axes\n\n data : pg.DataContainer()\n Datacontainer with 's' and 'g' Sensorindieces and 't' traveltimes.\n\n usePos: bool [True]\n Use sensor positions for axes tick labels\n\n pseudosection : bool [False]\n Show in pseudosection style.\n\n vals : iterable\n Traveltimes, if None data need to contain 't' values.\n \"\"\"\n if isinstance(vals, str):\n vals = data(vals)\n \n if vals is None:\n vals = data('t')\n\n px = pg.x(data)\n gx = np.asarray([px[g] for g in data.id(\"g\")])\n sx = np.asarray([px[s] for s in data.id(\"s\")])\n\n offset = shotReceiverDistances(data, full=True)\n\n if min(vals) < 1e-10:\n print(vals)\n pg.error('zero traveltimes found.')\n va = offset / vals\n\n if pseudosection:\n midpoint = (gx + sx) / 2\n gci = pg.viewer.mpl.dataview.drawVecMatrix(ax, midpoint, offset, va,\n queeze=True,\n label=pg.unit('as'))\n else:\n gci = pg.viewer.mpl.dataview.drawVecMatrix(ax, gx, sx, va,\n squeeze=True,\n label=pg.unit('as'))\n\n # A = np.ones((data.sensorCount(), data.sensorCount())) * np.nan\n # for i in range(data.size()):\n # A[int(data('s')[i]), int(data('g')[i])] = va[i]\n # gci = ax.imshow(A, interpolation='nearest')\n # ax.grid(True)\n\n if usePos:\n xt = np.arange(0, data.sensorCount(), 50)\n ax.set_xticks(xt)\n ax.set_xticklabels([str(int(px[xti])) for xti in xt])\n ax.set_yticks(xt)\n ax.set_yticklabels([str(int(px[xti])) for xti in xt])\n\n return gci\n\n\ndef plotLines(ax, line_filename, step=1):\n xz = np.loadtxt(line_filename)\n n_points = xz.shape[0]\n if step == 2:\n for i in range(0, n_points, step):\n x = xz[i:i + step, 0]\n z = xz[i:i + step, 1]\n ax.plot(x, z, 'k-')\n if step == 1:\n ax.plot(xz[:, 0], xz[:, 1], 'k-')\n" ]
[ [ "numpy.arange", "numpy.unique", "numpy.loadtxt", "numpy.absolute" ] ]
saraswat/TensorLog
[ "c56cebfa33b5123d5340a7b429e333da09d223d8" ]
[ "tensorlog/testxcomp.py" ]
[ "# (C) William W. Cohen and Carnegie Mellon University, 2017\n\nimport logging\nimport numpy as np\nimport os\nimport unittest\nimport sys\nimport collections\nimport tempfile\n\nfrom tensorlog import xctargets\n\nif xctargets.tf:\n import tensorflow as tf\n from tensorlog import tensorflowxcomp\nelse: \n tensorflowxcomp=None\nif xctargets.theano:\n import theano\n from tensorlog import theanoxcomp\nelse:\n theanoxcomp=None\n\nfrom tensorlog import bpcompiler\nfrom tensorlog import comline\nfrom tensorlog import dataset\nfrom tensorlog import declare\nfrom tensorlog import matrixdb\nfrom tensorlog import learn\nfrom tensorlog import mutil\nfrom tensorlog import parser\nfrom tensorlog import program\nfrom tensorlog import simple\nfrom tensorlog import testtensorlog\nfrom tensorlog import funs\nfrom tensorlog import ops\nfrom tensorlog import learnxcomp as learnxc\nfrom tensorlog.expt import Expt\n\nif xctargets.tf:\n tf.logging.set_verbosity(tf.logging.WARN)\n \nTESTED_COMPILERS = []\nTESTED_LEARNERS = {}\nif xctargets.theano:\n for c in [\n theanoxcomp.DenseMatDenseMsgCrossCompiler,\n theanoxcomp.SparseMatDenseMsgCrossCompiler\n ]:\n TESTED_COMPILERS.append(c)\n TESTED_LEARNERS[c]=theanoxcomp.FixedRateGDLearner\nif xctargets.tf:\n for c in [\n tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler,\n ]:\n TESTED_COMPILERS.append(c)\n TESTED_LEARNERS[c]=tensorflowxcomp.FixedRateGDLearner\n \nRUN_OLD_INFERENCE_TESTS = False\nSAVE_SUMMARIES = False\n\ndef close_cross_compiler(xc):\n xc.close()\n if xctargets.tf and isinstance(xc,tensorflowxcomp.TensorFlowCrossCompiler):\n tf.reset_default_graph()\n\n\nclass TestXCSmallProofs(testtensorlog.TestSmallProofs):\n\n def test_if(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y).'], 'p(i,o)', 'william', {'susan':1.0})\n\n def test_failure(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y).'], 'p(i,o)', 'lottie', {matrixdb.NULL_ENTITY_NAME:1.0})\n\n def test_reverse_if(self):\n self.xcomp_check(['p(X,Y):-sister(Y,X).'], 'p(i,o)', 'rachel', {'william':1.0})\n\n def test_or(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y).', 'p(X,Y):-sister(X,Y).'], 'p(i,o)', 'william',\n {'susan':1.0, 'rachel':1.0, 'lottie':1.0, 'sarah':1.0})\n\n def test_chain(self):\n self.xcomp_check(['p(X,Z):-spouse(X,Y),sister(Y,Z).'], 'p(i,o)', 'susan',\n {'rachel':1.0, 'lottie':1.0, 'sarah':1.0})\n self.xcomp_check(['p(X,Z):-sister(X,Y),child(Y,Z).'], 'p(i,o)', 'william',\n {'charlotte':1.0, 'lucas':1.0, 'poppy':1.0, 'caroline':1.0, 'elizabeth':1.0})\n\n def test_mid(self):\n self.xcomp_check(['p(X,Y):-sister(X,Y),child(Y,Z).'], 'p(i,o)', 'william',\n {'sarah': 1.0, 'rachel': 2.0, 'lottie': 2.0})\n\n def test_nest(self):\n self.xcomp_check(['s(X,Y):-spouse(X,Y).','t(X,Z):-spouse(X,Y),s(Y,Z).'], 't(i,o)', 'susan', {'susan': 1.0})\n\n def test_back1(self):\n # fails for tensorflowxcomp\n self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z).'], 'p(i,o)', 'william', {'susan': 3.0})\n\n def test_back2(self):\n self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z1),sister(X,Z2).'],'p(i,o)','william',{'susan': 9.0})\n\n def test_rec1(self):\n program.DEFAULT_MAXDEPTH=4\n self.xcomp_check(['p(X,Y):-spouse(X,Y).','p(X,Y):-p(Y,X).'], 'p(i,o)','william',{'susan': 5.0})\n program.DEFAULT_MAXDEPTH=10\n self.xcomp_check(['p(X,Y):-spouse(X,Y).','p(X,Y):-p(Y,X).'], 'p(i,o)','william',{'susan': 11.0})\n\n def test_const_output(self):\n self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'], 'sis(i,o)', 'sarah', {'william': 1.0})\n 
self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'], 'sis(i,o)', 'lottie', {'william': 2.0})\n\n def test_const_chain1(self):\n self.xcomp_check(['p(X,S) :- assign(S,susan),sister(X,Y),child(Y,Z).'],'p(i,o)','william',{'susan': 5.0})\n\n def test_const_chain2(self):\n self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'],'p(i,o)','sarah',{'pos':1.0})\n self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'],'p(i,o)','lottie',{'pos':2.0})\n\n def test_alt_chain(self):\n self.xcomp_check(['p(X,W) :- spouse(X,W),sister(X,Y),child(Y,Z).'],'p(i,o)','william',{'susan': 5.0})\n pass\n\n def test_proppr1(self):\n w = 7*self.db.onehot('r1')+3*self.db.onehot('r2')\n self.proppr_xcomp_check(w,['p(X,Y):-sister(X,Y) {r1}.','p(X,Y):-spouse(X,Y) {r2}.'],'p(i,o)',\n 'william', {'sarah': 7.0, 'rachel': 7.0, 'lottie': 7.0, 'susan': 3.0})\n\n def test_proppr2(self):\n w = 3*self.db.onehot('r2')\n self.proppr_xcomp_check(w,['p(X,Y):-spouse(Y,X) {r2}.'],'p(i,o)',\n 'susan', {'william': 3.0})\n\n def test_reuse1(self):\n self.xcomp_check(['p(X,Y) :- r(X,Z),r(Z,Y).', 'r(X,Y):-spouse(X,Y).'], 'p(i,o)', 'william',\n {'william':1.0})\n\n def _removeZeros(self, sdict):\n if True: return sdict\n e = sdict[None]\n ret = dict([ (k,v-e) for (k,v) in list(sdict.items()) if v != e])\n z = sum(ret.values())\n for k in ret: ret[k] = ret[k]/z\n return ret\n\n def xcomp_check(self,ruleStrings,mode_string,input_symbol,expected_result_dict,compare=False):\n self._xcomp_check('vanilla',None,ruleStrings,mode_string,input_symbol,expected_result_dict,compare)\n\n def proppr_xcomp_check(self,weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict):\n self._xcomp_check('proppr',weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict)\n\n def _xcomp_check(self,progType,weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict,compare=False):\n # run the base class check to see that the inference is correct\n if RUN_OLD_INFERENCE_TESTS:\n if progType=='proppr':\n self.proppr_inference_check(weightVec,ruleStrings,mode_string,input_symbol,expected_result_dict)\n else:\n self.inference_check(ruleStrings,mode_string,input_symbol,expected_result_dict)\n # setup the next round of tests by compiling a tensorlog\n # Program - this code is lifted from the testtensorlog\n # inference routines\n print('xcomp inference for mode',mode_string,'on input',input_symbol)\n testtensorlog.softmax_normalize(expected_result_dict)\n rules = parser.RuleCollection()\n for r in ruleStrings:\n rules.add(parser.Parser().parseRule(r))\n if progType=='proppr':\n prog = program.ProPPRProgram(db=self.db,rules=rules,weights=weightVec)\n else:\n prog = program.Program(db=self.db,rules=rules)\n for compilerClass in TESTED_COMPILERS:\n #cross-compile the function\n xc = compilerClass(prog)\n # evaluate the function and get the output y\n #xc.show()\n print('== performing eval with',compilerClass,'==')\n inferenceFun = xc.inferenceFunction(mode_string)\n y = inferenceFun(prog.db.onehot(input_symbol))\n # print 'input',xc.getInputName(mode_string),'args,fun\n # =',xc.inference(mode_string) theano output will a be (probably\n # dense) message, so just compare and check that the maximal\n # elements from these two dicts are the same\n actual_result_dict = self.db.rowAsSymbolDict(y)\n self.check_maxes_in_dicts(actual_result_dict, expected_result_dict)\n # check it's normalized\n l1_error = abs(sum(actual_result_dict.values()) - 1.0)\n #print 
'l1_error',l1_error,'actual_result_dict',actual_result_dict,'expected_result_dict',expected_result_dict\n self.assertTrue( l1_error < 0.0001)\n # also test proofCountFun\n proofCountFun = xc.proofCountFunction(mode_string)\n pc = proofCountFun(prog.db.onehot(input_symbol))\n # theano output will a be (probably dense) message, so\n # just compare that maximal elements from these two dicts\n # are the same\n pc_result_dict = self.db.rowAsSymbolDict(pc)\n if len(pc_result_dict)>0:\n self.check_maxes_in_dicts(pc_result_dict, expected_result_dict)\n print('== eval checks passed ==')\n close_cross_compiler(xc)\n\n def check_maxes_in_dicts(self,actual,expected):\n def maximalElements(d):\n m = max(d.values())\n return set(k for k in d if d[k]==m)\n actualMaxes = maximalElements(actual)\n expectedMaxes = maximalElements(expected)\n print('actual',actualMaxes,'expected',expectedMaxes)\n for a in actualMaxes:\n self.assertTrue(a in expectedMaxes)\n for a in expectedMaxes:\n self.assertTrue(a in actualMaxes)\n\n\nclass TestXCGrad(testtensorlog.TestGrad):\n\n def setUp(self):\n self.db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,'fam.cfacts'))\n\n def test_if(self):\n rules = ['p(X,Y):-sister(X,Y).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n def test_if2(self):\n rules = ['p(X,Y):-sister(X,Y).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah']), ('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie']), ('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n def test_reverse_if(self):\n rules = ['p(X,Y):-parent(Y,X).']\n mode = 'p(i,o)'\n params = [('parent',2)]\n self.xgrad_check(rules, mode, params,\n [('lottie',['charlotte'])],\n {'parent(charlotte,lottie)': +1,'parent(lucas,lottie)': -1})\n\n def test_chain1(self):\n rules = ['p(X,Z):-sister(X,Y),child(Y,Z).']\n mode = 'p(i,o)'\n self.xgrad_check(rules,mode,\n [('sister',2)],\n [('william',['caroline','elizabeth'])],\n {'sister(william,rachel)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules,mode,\n [('child',2)],\n [('william',['caroline','elizabeth'])],\n {'child(rachel,elizabeth)': +1,'child(lottie,lucas)': -1})\n self.xgrad_check(rules,mode,\n [('child',2),('sister',2)],\n [('william',['caroline','elizabeth'])],\n {'child(rachel,elizabeth)': +1,'child(lottie,lucas)': -1, 'sister(william,rachel)': +1,'sister(william,lottie)': -1})\n\n def test_chain2(self):\n rules = ['p(X,Z):-spouse(X,Y),sister(Y,Z).']\n mode = 'p(i,o)'\n self.xgrad_check(rules,mode,\n [('sister',2)],\n [('susan',['rachel'])],\n {'sister(william,rachel)': +1,'sister(william,lottie)': -1})\n\n\n def test_call1(self):\n rules = ['q(X,Y):-sister(X,Y).','p(Z,W):-q(Z,W).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n 
def test_call2(self):\n rules = ['q(X,Y):-sister(X,Y).','p(Z,W):-r(Z,W).','r(Z,W):-q(Z,W).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': +1,'sister(william,lottie)': -1})\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,rachel)': -1,'sister(william,lottie)': +1})\n\n def test_split(self):\n rules = ['p(X,Y):-sister(X,Y),child(Y,Z),young(Z).']\n mode = 'p(i,o)'\n params = [('child',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'child(lottie,lucas)': +1,'child(lottie,charlotte)': +1,'child(sarah,poppy)': -1})\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['lottie'])],\n {'sister(william,lottie)': +1,'sister(william,sarah)': -1})\n\n def test_or(self):\n rules = ['p(X,Y):-child(X,Y).', 'p(X,Y):-sister(X,Y).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['charlie','rachel'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': -1,'sister(william,lottie)': -1})\n params = [('child',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['charlie','rachel'])],\n {'child(william,charlie)': +1,'child(william,josh)': -1})\n params = [('child',2),('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['charlie','rachel'])],\n {'child(william,charlie)': +1,'child(william,josh)': -1,'sister(william,rachel)': +1,'sister(william,sarah)': -1})\n\n\n def test_weighted_vec(self):\n rules = ['p(X,Y):-sister(X,Y),assign(R,r1),feat(R).','p(X,Y):-child(X,Y),assign(R,r2),feat(R).']\n mode = 'p(i,o)'\n params = [('sister',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','charlie'])],\n {'sister(william,rachel)': +1,'sister(william,sarah)': -1})\n params = [('child',2)]\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','charlie'])],\n {'child(william,charlie)': +1,'child(william,josh)': -1})\n params = [('feat',1)]\n self.xgrad_check(rules, mode, params,\n [('william',['josh','charlie'])],\n {'feat(r1)': -1,'feat(r2)': +1})\n self.xgrad_check(rules, mode, params,\n [('william',['rachel','sarah','lottie'])],\n {'feat(r1)': +1,'feat(r2)': -1})\n\n def learnxc_check(self,rule_strings,mode_string,params,xyPairs,expected):\n print(\"XLearner loss/grad eval\")\n rules = testtensorlog.rules_from_strings(rule_strings)\n prog = program.Program(db=self.db,rules=rules)\n mode = declare.ModeDeclaration(mode_string)\n prog.db.clearParameterMarkings()\n for (functor,arity) in params:\n prog.db.markAsParameter(functor,arity)\n # TODO: not working yet for mini-batches so check each example\n # individually\n for x,ys in xyPairs:\n data = testtensorlog.DataBuffer(self.db)\n data.add_data_symbols(x,ys)\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(prog)\n print('learner check for compiler',xc.__class__)\n learner = learnxc.XLearner(prog,xc)\n paramsWithUpdates = learner.crossEntropyGrad(mode,data.get_x(),data.get_y())\n updates_with_string_keys = {}\n for (functor,arity),up in paramsWithUpdates:\n print('testxcomp update for',functor,arity,'is',up)\n upDict = prog.db.matrixAsPredicateFacts(functor,arity,up)\n print('upDict',upDict)\n for fact,grad_of_fact in list(upDict.items()):\n # need to flip for cross-compilers\n updates_with_string_keys[str(fact)] = -grad_of_fact\n self.check_directions(updates_with_string_keys,expected)\n \n\n def 
xgrad_check(self,rule_strings,mode_string,params,xyPairs,expected):\n print(\"direct loss/grad eval\")\n rules = testtensorlog.rules_from_strings(rule_strings)\n prog = program.Program(db=self.db,rules=rules)\n prog.db.clearParameterMarkings()\n for (functor,arity) in params:\n prog.db.markAsParameter(functor,arity)\n for x,ys in xyPairs:\n data = testtensorlog.DataBuffer(self.db)\n data.add_data_symbols(x,ys)\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(prog)\n print('grad check for compiler',xc.__class__)\n gradFun = xc.dataLossGradFunction(mode_string)\n updates_with_string_keys = {}\n paramsWithUpdates = gradFun(data.get_x(),data.get_y())\n for (functor,arity),up in paramsWithUpdates:\n upDict = prog.db.matrixAsPredicateFacts(functor,arity,up)\n for fact,grad_of_fact in list(upDict.items()):\n # need to flip for cross-compilers\n updates_with_string_keys[str(fact)] = -grad_of_fact\n self.check_directions(updates_with_string_keys,expected)\n self.learnxc_check(rule_strings,mode_string,params,xyPairs,expected)\n close_cross_compiler(xc)\n\nclass TestXCProPPR(testtensorlog.TestProPPR):\n\n def setUp(self):\n super(TestXCProPPR,self).setUp()\n \n def debug(self):\n return self\n\n def evalxc(self,xc,input):\n inferenceFun = xc.inferenceFunction('predict/io')\n print(inferenceFun)\n rawPred = inferenceFun(input)\n # trim small numbers to zero\n pred = mutil.mapData(lambda d:np.clip((d - 1e-5),0.00,9999.99), rawPred)\n pred.eliminate_zeros()\n return pred\n\n def testNativeRow(self):\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n for i in range(self.numExamples):\n pred = self.evalxc(xc, self.X.getrow(i))\n d = self.prog.db.rowAsSymbolDict(pred)\n uniform = {'pos':0.5,'neg':0.5}\n self.check_dicts(d,uniform)\n close_cross_compiler(xc)\n\n def testNativeMatrix(self):\n\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n xc.ensureCompiled(self.mode,inputs=None)\n pred = self.prog.eval(self.mode,[self.X])\n d0 = self.prog.db.matrixAsSymbolDict(pred)\n for i,d in list(d0.items()):\n uniform = {'pos':0.5,'neg':0.5,}\n self.check_dicts(d,uniform)\n close_cross_compiler(xc)\n\n def testGradVector(self):\n data = testtensorlog.DataBuffer(self.prog.db)\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n learner = learn.OnePredFixedRateGDLearner(self.prog)\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n #xc.compile(self.mode)\n gradFun = xc.dataLossGradFunction('predict/io')\n for i in range(X.shape[0]):\n print(\"example\",i)\n \n updates = learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)'),X[i],Y[i])\n w0 = updates[('weighted',1)].sum(axis=0)\n print(w0)\n \n updates = gradFun(X[i],Y[i])\n paramKey,w = updates[0]\n print(w)\n # w is different from the w in the corresponding testtensorlog test,\n # which is a crossEntropy gradient for each example, but it should have\n # opposite directions\n nrow,ncol = w.shape\n for i in range(nrow):\n for j in range(ncol):\n self.assertTrue((w[i,j]==0) == (w0[i,j]==0))\n self.assertTrue(w[i,j] * w0[i,j] <= 0)\n\n def testGradMatrix(self):\n data = testtensorlog.DataBuffer(self.prog.db)\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n learner = learn.OnePredFixedRateGDLearner(self.prog)\n updates = learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)'),X,Y)\n w0 = updates[('weighted',1)].sum(axis=0)\n for compilerClass in TESTED_COMPILERS:\n xc = 
compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n #xc.compile(self.mode)\n gradFun = xc.dataLossGradFunction('predict/io')\n updates = gradFun(X,Y)\n paramKey,w = updates[0]\n # w is different from the w in the corresponding testtensorlog test,\n # which is a crossEntropy gradient for each example, but it should have\n # opposite directions\n nrow,ncol = w.shape\n for i in range(nrow):\n for j in range(ncol):\n self.assertTrue((w[i,j]==0) == (w0[i,j]==0),\"i=%d,j=%d,w=%g,w0=%g\"%(i,j,w[i,j],w0[i,j]))\n self.assertTrue(w[i,j] * w0[i,j] <= 0.0,\"i=%d,j=%d,w=%g,w0=%g\"%(i,j,w[i,j],w0[i,j]))\n close_cross_compiler(xc)\n\n def testMultiLearn1(self):\n pass\n\n def testLearn(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n modestr = 'predict/io'\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n for compilerClass in TESTED_COMPILERS:\n self.prog.setRuleWeights()\n self.prog.setFeatureWeights()\n if SAVE_SUMMARIES:\n xc = compilerClass(self.prog,compilerClass.__name__+\".summary\")\n else:\n xc = compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n \n v = self.prog.db.getParameter('weighted',1)\n d = self.prog.db.rowAsSymbolDict(v)\n # sanity check a couple of values\n self.assertTrue(d['little_pos'] == d['little_neg'])\n self.assertTrue(d['big_pos'] == d['big_neg'])\n \n# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)\n\n lossFun = xc.dataLossFunction('predict/io')\n loss0 = lossFun(X,Y)\n print('initial train data loss',loss0)\n TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)\n loss1 = lossFun(TX,TY)\n print('initial test data loss',loss1)\n P = learner.predict('predict/io',X)\n #acc0 = xc.accuracy('predict/io',X,Y)\n acc0 = learner.accuracy(Y,P)\n print('initial train accuracy',acc0)\n TP = learner.predict('predict/io',TX)\n #acc1 = xc.accuracy('predict/io',TX,TY)\n acc1 = learner.accuracy(TY,TP)\n print('initial test accuracy',acc1)\n\n print('params to optimize',xc.prog.getParamList())\n print('vars to optimize',xc.getParamVariables('predict/io'))\n \n# xc.optimizeDataLoss('predict/io', optimizer, X, Y, epochs=20)\n learner.trainMode('predict/io',X,Y)\n\n loss2 = lossFun(X,Y)\n print('final train data loss',loss2)\n loss3 = lossFun(TX,TY)\n print('final test data loss',loss3)\n P2 = learner.predict('predict/io',X)\n #acc2 = xc.accuracy('predict/io',X,Y)\n acc2 = learner.accuracy(Y,P2)\n print('final train accuracy',acc2)\n TP2 = learner.predict('predict/io',TX)\n #acc3 = xc.accuracy('predict/io',TX,TY)\n acc3 = learner.accuracy(TY,TP2)\n print('final test accuracy',acc3)\n\n\n xc.exportAllLearnedParams()\n v = self.prog.db.getParameter('weighted',1)\n d = self.prog.db.rowAsSymbolDict(v)\n # sanity check a couple of values\n self.assertTrue(d['little_pos'] > d['little_neg'])\n self.assertTrue(d['big_pos'] < d['big_neg'])\n close_cross_compiler(xc)\n\n self.assertTrue(acc2>=acc0)\n self.assertTrue(acc3>=acc1)\n\n self.assertTrue(loss2<loss0)\n self.assertTrue(loss2<loss1)\n \n self.assertTrue(acc2>=0.9)\n self.assertTrue(acc2==1.0)\n \n def testDatasetPredict(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n modestr = 'predict/io'\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n for compilerClass in TESTED_COMPILERS:\n self.prog.setRuleWeights()\n self.prog.setFeatureWeights()\n if SAVE_SUMMARIES:\n xc = compilerClass(self.prog,compilerClass.__name__+\".summary\")\n else:\n 
xc = compilerClass(self.prog)\n self.prog.db.markAsParameter('weighted',1)\n \n learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)\n P = learner.predict(mode,X)\n print(\"X\",X.shape)\n print(\"P\",P.shape)\n self.assertTrue(X.shape==P.shape)\n P = learner.datasetPredict(dataset.Dataset({mode:X},{mode:Y}))\n print(\"X\",X.shape)\n print(\"P\",P.getX(mode).shape)\n self.assertTrue(X.shape==P.getX(mode).shape)\n \n return xc,learner,X,Y,P\n\n def testExptScaffold(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)\n self.prog.setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(self.prog)\n learner = TESTED_LEARNERS[compilerClass](self.prog,xc=xc,rate=0.1,epochs=20)\n Expt({'prog':self.prog,\n 'trainData':dataset.Dataset({mode:X},{mode:Y}),\n 'testData':dataset.Dataset({mode:TX},{mode:TY}),\n 'targetMode':mode,\n 'learner':learner\n }).run()\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testExpt(self):\n mode = declare.ModeDeclaration('predict(i,o)')\n X,Y = testtensorlog.matrixAsTrainingData(self.labeledData,'train',2)\n TX,TY = testtensorlog.matrixAsTrainingData(self.labeledData,'test',2)\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(self.prog)\n xc.runExpt(\n prog=self.prog,\n trainData=dataset.Dataset({mode:X},{mode:Y}),\n testData=dataset.Dataset({mode:TX},{mode:TY}),\n targetMode=mode)\n close_cross_compiler(xc)\n\nclass TestXCOpGen(unittest.TestCase):\n\n # TODO tests for other xcompilers?\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyTypes(self):\n matrixdb.conf.ignore_types = False\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n trainData = tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n mode = list(trainData.keys())[0]\n docs,labels = trainData[mode]\n xc = tlog.get_cross_compiler()\n ops = xc.possibleOps(docs,'doc')\n print('doc ops',ops)\n self.assertTrue(len(ops)==1)\n (words,wordType) = ops[0]\n self.assertTrue(wordType=='word')\n ops = xc.possibleOps(words,'word')\n self.assertTrue(len(ops)==3)\n pairs = None\n for (expr,exprType) in ops:\n if exprType=='labelWordPair':\n pairs = expr\n break\n self.assertTrue(pairs is not None)\n ops = xc.possibleOps(pairs,'labelWordPair')\n self.assertTrue(len(ops)==2)\n for (expr,exprType) in ops:\n self.assertTrue(exprType=='word')\n close_cross_compiler(xc)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyIgnoringTypes(self):\n matrixdb.conf.ignore_types = True\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n trainData = tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n mode = list(trainData.keys())[0]\n docs,labels = trainData[mode]\n xc = tlog.get_cross_compiler()\n ops = xc.possibleOps(docs)\n binary_predicates = [functor for (functor,arity) in tlog.db.matEncoding if arity==2]\n self.assertTrue(len(ops) == len(binary_predicates)*2)\n for x in ops:\n # ops should just be tensors\n self.assertFalse(isinstance(x,tuple))\n 
close_cross_compiler(xc)\n\nclass TestXCExpt(unittest.TestCase):\n\n\n def testTCToyTypes_wscaffold(self):\n matrixdb.conf.ignore_types = False\n optdict,args = comline.parseCommandLine(\n [\"--db\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n \n optdict['prog'].setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(optdict['prog'])\n learner = TESTED_LEARNERS[compilerClass](optdict['prog'],xc)\n Expt({\n 'prog':optdict['prog'],\n 'trainData':optdict['trainData'],\n 'testData':optdict['testData'],\n 'learner':learner,\n 'targetMode':declare.asMode(\"predict/io\")\n }).run()\n pbDoc = xc.db.onehot('pb','doc')\n self.checkXC(xc,'predict/io',pbDoc,{'negPair':115,'posPair':115,'hasWord':59,'weighted':115,'label':5})\n # some checks on the output of pprint\n lines = xc.pprint('predict/io')\n self.assertTrue(lines[0].find(\"SoftMaxFunction\") >= 0)\n self.assertTrue(lines[1].find(\"SumFunction\") >= 0)\n self.assertEqual(len(lines), 16)\n # some checks on misc xcomp API\n self.assertEqual(xc.inferenceOutputType('predict/io'),'label')\n pbId = xc.asSymbolId('pb',typeName='doc')\n pbSym = xc.asSymbol(pbId,typeName='doc')\n self.assertEqual(pbSym,'pb')\n self.assertEqual(xc.asSymbolId('this does not appear in the data',typeName='doc'), -1)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyTypes(self):\n matrixdb.conf.ignore_types = False\n optdict,args = comline.parseCommandLine(\n [\"--db\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(optdict['prog'])\n xc.runExpt(\n prog=optdict['prog'],\n trainData=optdict['trainData'],\n testData=optdict['testData'],\n targetMode=declare.asMode(\"predict/io\"))\n\n # check trainability\n for (functor,arity) in xc.db.matEncoding:\n v = xc.parameterFromDBToVariable(functor,arity)\n if v is not None:\n vIsTrainable = (v in tf.trainable_variables())\n vIsParameter = ((functor,arity) in xc.db.paramSet)\n self.assertEqual(vIsTrainable,vIsParameter)\n\n pbDoc = xc.db.onehot('pb','doc')\n self.checkXC(xc,'predict/io',pbDoc,{'negPair':115,'posPair':115,'hasWord':59,'weighted':115,'label':5})\n # some checks on the output of pprint\n lines = xc.pprint('predict/io')\n self.assertTrue(lines[0].find(\"SoftMaxFunction\") >= 0)\n self.assertTrue(lines[1].find(\"SumFunction\") >= 0)\n self.assertEqual(len(lines), 16)\n # some checks on misc xcomp API\n self.assertEqual(xc.inferenceOutputType('predict/io'),'label')\n pbId = xc.asSymbolId('pb',typeName='doc')\n pbSym = xc.asSymbol(pbId,typeName='doc')\n self.assertEqual(pbSym,'pb')\n self.assertEqual(xc.asSymbolId('this does not appear in the data',typeName='doc'), -1)\n close_cross_compiler(xc)\n\n\n def testTCToyIgnoringTypes_wscaffold(self):\n matrixdb.conf.ignore_types = True\n optdict,args = comline.parseCommandLine(\n [\"--db\", 
os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n optdict['prog'].setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n xc = compilerClass(optdict['prog'])\n learner = TESTED_LEARNERS[compilerClass](optdict['prog'],xc)\n Expt({\n 'prog':optdict['prog'],\n 'trainData':optdict['trainData'],\n 'testData':optdict['testData'],\n 'learner':learner,\n 'targetMode':declare.asMode(\"predict/io\")\n }).run()\n pbDoc = xc.db.onehot('pb')\n self.checkXC(xc,'predict/io',pbDoc,collections.defaultdict(lambda:191))\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testTCToyIgnoringTypes(self):\n matrixdb.conf.ignore_types = True\n optdict,args = comline.parseCommandLine(\n [\"--db\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n \"--prog\", os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n \"--trainData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"),\n \"--testData\", os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"),\n \"--proppr\"])\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(optdict['prog'])\n xc.runExpt(\n prog=optdict['prog'],\n trainData=optdict['trainData'],\n testData=optdict['testData'],\n targetMode=declare.asMode(\"predict/io\"))\n pbDoc = xc.db.onehot('pb')\n self.checkXC(xc,'predict/io',pbDoc,collections.defaultdict(lambda:191))\n close_cross_compiler(xc)\n\n def checkXC(self,xc,mode,rawInput,expectedCols):\n print('matrixdb.conf.ignore_types',matrixdb.conf.ignore_types)\n db = xc.db\n for (functor,arity),mat in list(db.matEncoding.items()):\n print(functor,arity,'shape',mat.shape)\n r,c = mat.shape\n self.assertEqual(c,expectedCols[functor])\n inferenceFun = xc.inferenceFunction(mode)\n y = inferenceFun(rawInput)\n r,c = y.shape\n self.assertEqual(c,expectedCols['label'])\n\nclass TestMultiModeXC(unittest.TestCase):\n\n def setUp(self):\n self.db = matrixdb.MatrixDB.loadFile(\n os.path.join(testtensorlog.TEST_DATA_DIR,'matchtoy.cfacts'))\n self.prog = program.ProPPRProgram.loadRules(\n os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy.ppr\"),db=self.db)\n self.dset = dataset.Dataset.loadExamples(\n self.db, os.path.join(testtensorlog.TEST_DATA_DIR,'matchtoy-train.exam'),proppr=False)\n self.prog.setAllWeights()\n\n def testInScaffold(self):\n print(TESTED_COMPILERS)\n self.assertTrue(self.dset.modesToLearn() > 1)\n self.prog.setAllWeights()\n for compilerClass in TESTED_COMPILERS:\n print(compilerClass)\n xc = compilerClass(self.prog)\n # compile everything\n for mode in self.dset.modesToLearn():\n xc.ensureCompiled(mode)\n learner = TESTED_LEARNERS[compilerClass](self.prog,xc)\n testAcc,testXent = Expt({\n 'prog':self.prog,\n 'trainData':self.dset,\n 'testData':self.dset,\n 'learner':learner,\n 'savedTestPredictions':'TestMultiModeXC.testInScaffold.%s.solutions.txt'%compilerClass.__name__\n }).run()\n print(testAcc)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def testIt(self):\n self.assertTrue(self.dset.modesToLearn() > 1)\n for compilerClass in [tensorflowxcomp.DenseMatDenseMsgCrossCompiler,\n tensorflowxcomp.SparseMatDenseMsgCrossCompiler]:\n xc = compilerClass(self.prog)\n # compile everything\n for mode in 
self.dset.modesToLearn():\n xc.ensureCompiled(mode,inputs=None)\n # check the variables\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n # set up for training\n trainStep = {}\n for mode in self.dset.modesToLearn():\n (dataLossArgs,dataLossExpr) = xc.dataLoss(mode)\n trainStep[mode] = optimizer.minimize(dataLossExpr, var_list=xc.getParamVariables(mode))\n # train\n for i in range(2): #epochs\n for mode in self.dset.modesToLearn():\n X = self.dset.getX(mode)\n Y = self.dset.getY(mode)\n fd = xc.getFeedDict(mode,X,Y,wrapped=False)\n session.run(trainStep[mode],feed_dict=fd)\n # test\n for mode in self.dset.modesToLearn():\n X = self.dset.getX(mode)\n Y = self.dset.getY(mode)\n Y_ = xc.inferenceFunction(mode)(X)\n acc = xc.accuracy(mode,X,Y)\n print('mode',mode,'acc',acc)\n session.close()\n close_cross_compiler(xc)\n\nclass TestMatParams(unittest.TestCase):\n\n def setUp(self):\n self.cacheDir = tempfile.mkdtemp()\n\n def cacheFile(self,fileName):\n return os.path.join(self.cacheDir,fileName)\n\n def testMToyMatParam(self):\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy.ppr\"))\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"matchtoy-train.exam\"))\n tlog.db.markAsParameter('dabbrev',2)\n factDict = tlog.db.matrixAsPredicateFacts('dabbrev',2,tlog.db.matEncoding[('dabbrev',2)])\n print('before learning',len(factDict),'dabbrevs')\n self.assertTrue(len(factDict)==5)\n for f in sorted(factDict.keys()):\n print('>',str(f),factDict[f])\n\n # expt pipeline\n mode = list(trainData.keys())[0]\n TX,TY = trainData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=TY.shape, name='tensorlog/trueY')\n loss = tlog.loss(mode)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n for i in range(5):\n print('epoch',i+1)\n session.run(train_step, feed_dict=train_batch_fd)\n tlog.set_all_db_params_to_learned_values(session)\n# params = {'prog':prog,'trainData':trainData, 'testData':testData}\n# result = expt.Expt(params).run()\n# factDict = db.matrixAsPredicateFacts('dabbrev',2,db.matEncoding[('dabbrev',2)])\n# print 'after learning',len(factDict),'dabbrevs'\n# for f in sorted(factDict.keys()):\n# print '>',str(f),factDict[f]\n# self.assertTrue(len(factDict)>5)\n\[email protected](xctargets.tf,\"Tensorflow not available\")\nclass TestSimple(unittest.TestCase):\n\n def testEmptyRules(self):\n # should not throw an error\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"))\n\n def testIncrementalDBLoad(self):\n b = simple.Builder()\n predict,label,hasWord,posPair,negPair = b.predicates(\"predict,label,hasWord,posPair,negPair\")\n doc_t,label_t,word_t,labelWordPair_t = b.types(\"doc_t,label_t,word_t,labelWordPair_t\")\n b.schema += predict(doc_t,label_t) & label(label_t)\n b.schema += hasWord(doc_t,word_t) & posPair(word_t,labelWordPair_t) & negPair(word_t,labelWordPair_t)\n for basename in \"textcattoy_corpus.cfacts textcattoy_labels.cfacts textcattoy_pairs.cfacts\".split(\" \"):\n b.db += os.path.join(testtensorlog.TEST_DATA_DIR, basename)\n tlog = simple.Compiler(db=b.db)\n for 
(functor,arity,nnz) in [('hasWord',2,99),('label',1,2),('negPair',2,56)]:\n m = tlog.db.matEncoding[(functor,arity)]\n self.assertTrue(m.nnz == nnz)\n\n def testBatch(self):\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(trainData.keys())[0]\n TX,TY = trainData[mode]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n loss = tlog.loss(mode)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0<0.6)\n for i in range(10):\n print('epoch',i+1)\n session.run(train_step, feed_dict=train_batch_fd)\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n self.assertTrue(acc1>=0.9)\n # test a round-trip serialization\n # saves the db\n cacheDir = tempfile.mkdtemp()\n db_file = os.path.join(cacheDir,'simple.db')\n tlog.set_all_db_params_to_learned_values(session)\n tlog.serialize_db(db_file)\n # load everything into a new graph and don't reset the learned params\n new_graph = tf.Graph()\n with new_graph.as_default():\n tlog2 = simple.Compiler(\n db=db_file,\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"),\n autoset_db_params=False)\n # reconstruct the accuracy measure\n inference2 = tlog2.inference(mode)\n trueY2 = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY2')\n correct2 = tf.equal(tf.argmax(trueY2,1), tf.argmax(inference2,1))\n accuracy2 = tf.reduce_mean(tf.cast(correct2, tf.float32))\n # eval accuracy in a new session\n session2 = tf.Session()\n session2.run(tf.global_variables_initializer())\n test_batch_fd2 = {tlog2.input_placeholder_name(mode):UX, trueY2.name:UY}\n acc3 = session2.run(accuracy2, feed_dict=test_batch_fd2)\n print('accuracy after round-trip serialization',acc3)\n self.assertTrue(acc3>=0.9)\n session.close()\n\n def testMinibatch(self):\n tlog = simple.Compiler(\n db=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"),\n prog=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcat3.ppr\"))\n self.runTextCatLearner(tlog)\n\n def runTextCatLearner(self,tlog):\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(trainData.keys())[0]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n loss = tlog.loss(mode)\n optimizer = 
tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0<0.6)\n for i in range(10):\n print('epoch',i+1, end=' ')\n for mode,(TX,TY) in tlog.minibatches(trainData,batch_size=2):\n print('.', end=' ')\n train_minibatch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session.run(train_step, feed_dict=train_minibatch_fd)\n print('epoch',i+1,'finished')\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n self.assertTrue(acc1>=0.9)\n session.close()\n\n def testBuilder1(self):\n b = simple.Builder()\n X,Y,Z = b.variables(\"X Y Z\")\n aunt,parent,sister,wife = b.predicates(\"aunt parent sister wife\")\n uncle = b.predicate(\"uncle\")\n b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y)\n b += aunt(X,Y) <= parent(X,Z) & sister(Z,Y)\n r1 = b.rule_id(\"ruleid_t\",\"r1\")\n r2 = b.rule_id(\"ruleid_t\",\"r2\")\n b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y) // r1\n b += aunt(X,Y) <= parent(X,Z) & sister(Z,Y) // r2\n feature,description = b.predicates(\"feature description\")\n weight = b.predicate(\"weight\")\n F = b.variable(\"F\")\n D = b.variable(\"D\")\n b += aunt(X,Y) <= uncle(X,Z) & wife(Z,Y) // (weight(F) | description(X,D) & feature(X,F))\n b.rules.listing()\n rs = b.rules.rulesFor(parser.Goal('aunt',[X,Y]))\n self.assertEqual(str(rs[0]), \"aunt(X,Y) :- uncle(X,Z), wife(Z,Y).\")\n self.assertEqual(str(rs[1]), \"aunt(X,Y) :- parent(X,Z), sister(Z,Y).\")\n self.assertEqual(str(rs[2]), \"aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(R1) : assign(R1,r1,ruleid_t)}.\")\n self.assertEqual(str(rs[3]), \"aunt(X,Y) :- parent(X,Z), sister(Z,Y) {weight(R2) : assign(R2,r2,ruleid_t)}.\")\n self.assertEqual(str(rs[4]), \"aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(F) : description(X,D),feature(X,F)}.\")\n\n def testBuilder2(self):\n b = simple.Builder()\n predict,assign,weighted,hasWord,posPair,negPair = b.predicates(\"predict assign weighted hasWord posPair negPair\")\n X,Pos,Neg,F,W = b.variables(\"X Pos Neg F W\")\n b += predict(X,Pos) <= assign(Pos,'pos','label') // (weighted(F) | hasWord(X,W) & posPair(W,F))\n b += predict(X,Neg) <= assign(Neg,'neg','label') // (weighted(F) | hasWord(X,W) & negPair(W,F))\n dbSpec = os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\")\n self.runTextCatLearner(simple.Compiler(db=dbSpec,prog=b.rules))\n\n def testBuilder3(self):\n b = simple.Builder()\n predict,assign,weighted,hasWord,posPair,negPair,label = b.predicates(\"predict assign weighted hasWord posPair negPair label\")\n doc_t,label_t,word_t,labelWordPair_t = b.types(\"doc_t label_t word_t labelWordPair_t\")\n\n b.schema += predict(doc_t,label_t)\n b.schema += hasWord(doc_t,word_t)\n b.schema += posPair(word_t,labelWordPair_t)\n b.schema += negPair(word_t,labelWordPair_t)\n b.schema += label(label_t)\n\n X,Pos,Neg,F,W = b.variables(\"X Pos Neg F W\")\n b.rules += predict(X,Pos) <= assign(Pos,'pos','label_t') // (weighted(F) | hasWord(X,W) & posPair(W,F))\n b.rules += predict(X,Neg) <= assign(Neg,'neg','label_t') // (weighted(F) | hasWord(X,W) & negPair(W,F))\n\n # use the untyped version of the facts to make sure the schema works\n b.db = os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy.cfacts\")\n\n self.runTextCatLearner(simple.Compiler(db=b.db, prog=b.rules))\n\nclass 
TestReparameterizationAndTypedLoading(unittest.TestCase):\n\n def testBugWasFixed(self):\n # use the untyped version of the facts to make sure the schema works\n db = matrixdb.MatrixDB()\n db.addLines([\"# :- r(lo_or_hi_t)\\n\",\n \"\\t\".join(\"r low 0.1\".split()) + \"\\n\",\n \"\\t\".join(\"r hi 0.9\".split()) + \"\\n\"])\n db.markAsParameter('r',1)\n prog = program.Program(db=db)\n typeName = db.schema.getArgType(\"r\",1,0)\n idLow = db.schema.getId(typeName,\"low\")\n idHi = db.schema.getId(typeName,\"hi\")\n db_r = db.matEncoding[('r',1)]\n self.approxEqual(db_r[0,idLow], 0.1)\n self.approxEqual(db_r[0,idHi], 0.9)\n\n xc = tensorflowxcomp.SparseMatDenseMsgCrossCompiler(prog)\n v_r = xc._vector(declare.asMode(\"r(i)\"))\n\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n xc.exportAllLearnedParams()\n print('exported to xc',db.matEncoding[('r',1)])\n db_r = db.matEncoding[('r',1)]\n self.approxEqual(db_r[0,idLow], 0.1)\n self.approxEqual(db_r[0,idHi], 0.9)\n\n def approxEqual(self,a,b):\n self.assertTrue(abs(float(a)-b) < 0.0001)\n\nclass TestPlugins(unittest.TestCase):\n\n def test_identity_io(self):\n ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp1(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.',\n 'predict(X,Y) :- assign(Neg,neg,label),udp1(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']\n plugins = program.Plugins()\n plugins.define('udp1/io', lambda x:x, lambda inputType:'label')\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_identity_oi(self):\n ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp2(Y,Pos) {weighted(F): hasWord(X,W),posPair(W,F)}.',\n 'predict(X,Y) :- assign(Neg,neg,label),udp2(Y,Neg) {weighted(F): hasWord(X,W),negPair(W,F)}.']\n plugins = program.Plugins()\n plugins.define('udp2/oi', lambda x:x, lambda inputType:'label')\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_double_io1(self):\n ruleStrings = ['predict(X,Y) :- assign(Pos,pos,label),udp3(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.',\n 'predict(X,Y) :- assign(Neg,neg,label),udp3(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']\n plugins = program.Plugins()\n plugins.define('udp3/io', lambda x:2*x, lambda inputType:'label')\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_double_io2(self):\n ruleStrings = ['predict(X,Pos) :- assign(Pos,pos,label) {weighted(F): hasWord(X,W),double(W,W2),posPair(W2,F)}.',\n 'predict(X,Neg) :- assign(Neg,neg,label) {weighted(F2): hasWord(X,W),negPair(W,F),double(F,F2)}.']\n plugins = program.Plugins()\n plugins.define('double/io', lambda x:2*x, lambda inputType:inputType)\n self.check_learning_with_udp(ruleStrings,plugins)\n\n def test_kw_i(self):\n ruleStrings = ['predict(X,Pos) :- assign(Pos,pos,label),hasWord(X,W),poskw(W).',\n 'predict(X,Neg) :- assign(Neg,neg,label),hasWord(X,W),negkw(W).']\n plugins = program.Plugins()\n db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"))\n poskw_v = (db.onehot('little','word') + db.onehot('red','word')).todense()\n negkw_v = (db.onehot('big','word') + db.onehot('job','word') + db.onehot('huge','word')).todense()\n plugins.define('poskw/i', lambda:poskw_v, lambda:'word')\n plugins.define('negkw/i', lambda:negkw_v, lambda:'word')\n self.check_udp(ruleStrings,plugins)\n \n def check_udp(self,ruleStrings,plugins):\n db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\"))\n rules = testtensorlog.rules_from_strings(ruleStrings)\n prog = 
program.ProPPRProgram(rules=rules,db=db,plugins=plugins)\n mode = declare.asMode(\"predict/io\")\n prog.compile(mode)\n fun = prog.function[(mode,0)]\n print(\"\\n\".join(fun.pprint()))\n tlog = simple.Compiler(db=db, prog=prog)\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(testData.keys())[0]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n session.close()\n\n\n # TOFIX needs some work to pass\n # - you can't do polytree BP with multiple inputs\n # - so there's not a simple fix\n # - probably do this: (1) treat inputs to leftmost userDef as outputs (2) run message-passing for those outputs\n # (3) add the user def operator (4) repeat .... (5) when there are no more plugins\n def notest_isect_iio(self):\n bpcompiler.conf.trace = True\n ruleStrings = ['predict(X,Y) :- hasWord(X,W),posPair(W,P1),negPair(W,P2),isect(P1,P2,Y).']\n plugins = program.Plugins()\n plugins.define('isect/iio', lambda x1,x2:x1*x2, lambda t1,t2:t1)\n self.assertTrue(plugins.isDefined(declare.asMode('isect/iio')))\n self.check_learning_with_udp(ruleStrings,plugins)\n \n def argmax(self):\n bpcompiler.conf.trace = True\n ruleStrings = ['predict(X,Y):-olympics(X,Z),nations(Z),argmax(Z,Y).']\n plugins = program.Plugins()\n plugins.define('argmax/io',lambda x1:tf.nn.softmax(x1), lambda t1:t1)\n db = matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR,'argmax.cfacts'))\n rules = testtensorlog.rules_from_strings(ruleStrings)\n prog = program.ProPPRProgram(rules=rules,db=db,plugins=plugins)\n prog.setAllWeights()\n mode = declare.asMode(\"predict/io\")\n prog.compile(mode)\n fun = prog.function[(mode,0)]\n print(\"\\n\".join(fun.pprint()))\n tlog = simple.Compiler(db=db, prog=prog)\n \n data = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"argmax.exam\"))\n mode = list(data.keys())[0]\n UX,UY = data[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0>0.9)\n session.close()\n \n# acc0 = session.run(inference, feed_dict=test_batch_fd)\n# print \"inference results:\"\n# print acc0\n# print np.argmax(acc0,1)\n# print \"trueY:\"\n# print UY\n# print np.argmax(UY,1)\n\n @unittest.skipUnless(xctargets.tf,\"Tensorflow not available\")\n def check_learning_with_udp(self,ruleStrings,plugins,dbfile=os.path.join(testtensorlog.TEST_DATA_DIR,\"textcattoy3.cfacts\")):\n db = matrixdb.MatrixDB.loadFile(dbfile)\n rules = testtensorlog.rules_from_strings(ruleStrings)\n prog = program.ProPPRProgram(rules=rules,db=db,plugins=plugins)\n prog.setAllWeights()\n mode = declare.asMode(\"predict/io\")\n prog.compile(mode)\n fun = prog.function[(mode,0)]\n 
print(\"\\n\".join(fun.pprint()))\n tlog = simple.Compiler(db=db, prog=prog)\n\n trainData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytrain.exam\"))\n testData = tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR,\"toytest.exam\"))\n mode = list(trainData.keys())[0]\n TX,TY = trainData[mode]\n UX,UY = testData[mode]\n inference = tlog.inference(mode)\n trueY = tf.placeholder(tf.float32, shape=UY.shape, name='tensorlog/trueY')\n correct = tf.equal(tf.argmax(trueY,1), tf.argmax(inference,1))\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n test_batch_fd = {tlog.input_placeholder_name(mode):UX, trueY.name:UY}\n loss = tlog.loss(mode)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\n train_step = optimizer.minimize(loss)\n train_batch_fd = {tlog.input_placeholder_name(mode):TX, tlog.target_output_placeholder_name(mode):TY}\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n acc0 = session.run(accuracy, feed_dict=test_batch_fd)\n print('initial accuracy',acc0)\n self.assertTrue(acc0<0.6)\n for i in range(10):\n print('epoch',i+1)\n session.run(train_step, feed_dict=train_batch_fd)\n acc1 = session.run(accuracy, feed_dict=test_batch_fd)\n print('final accuracy',acc1)\n self.assertTrue(acc1>=0.9)\n session.close()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n\n # default is to test on everything adding command line arguments\n # 'tensorflow' 'theano' 'sparse' 'dense' filters the list (so\n # 'testxcomp.py tensorflow sparse' will run just\n # tensorflowxcomp.SparseMatDenseMsgCrossCompiler)\n\n if 'theano' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__module__.endswith(\"theanoxcomp\")]\n if 'tensorflow' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__module__.endswith(\"tensorflowxcomp\")]\n if 'dense' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__name__.startswith(\"Dense\")]\n if 'sparse' in sys.argv[1:]:\n TESTED_COMPILERS = [c for c in TESTED_COMPILERS if c.__name__.startswith(\"Sparse\")]\n sys.argv = [a for a in sys.argv if a not in \"theano tensorflow dense sparse\".split()]\n print('TESTED_COMPILERS',TESTED_COMPILERS)\n \n unittest.main()\n" ]
[ [ "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.logging.set_verbosity", "tensorflow.nn.softmax", "tensorflow.trainable_variables", "tensorflow.Graph", "tensorflow.cast", "numpy.clip", "tensorflow.train.GradientDescentOptimizer", "tensorflow.Session", "tensorflow.argmax", "tensorflow.reset_default_graph" ] ]
Mu-L/kedro
[ "a925fd59187a642e124527f0f1097e92ea8d1819" ]
[ "tests/extras/datasets/pandas/test_json_dataset.py" ]
[ "from pathlib import Path, PurePosixPath\n\nimport pandas as pd\nimport pytest\nfrom adlfs import AzureBlobFileSystem\nfrom fsspec.implementations.http import HTTPFileSystem\nfrom fsspec.implementations.local import LocalFileSystem\nfrom gcsfs import GCSFileSystem\nfrom pandas.testing import assert_frame_equal\nfrom s3fs.core import S3FileSystem\n\nfrom kedro.extras.datasets.pandas import JSONDataSet\nfrom kedro.io import DataSetError\nfrom kedro.io.core import PROTOCOL_DELIMITER, Version\n\n\[email protected]\ndef filepath_json(tmp_path):\n return (tmp_path / \"test.json\").as_posix()\n\n\[email protected]\ndef json_data_set(filepath_json, load_args, save_args, fs_args):\n return JSONDataSet(\n filepath=filepath_json,\n load_args=load_args,\n save_args=save_args,\n fs_args=fs_args,\n )\n\n\[email protected]\ndef versioned_json_data_set(filepath_json, load_version, save_version):\n return JSONDataSet(\n filepath=filepath_json, version=Version(load_version, save_version)\n )\n\n\[email protected]\ndef dummy_dataframe():\n return pd.DataFrame({\"col1\": [1, 2], \"col2\": [4, 5], \"col3\": [5, 6]})\n\n\nclass TestJSONDataSet:\n def test_save_and_load(self, json_data_set, dummy_dataframe):\n \"\"\"Test saving and reloading the data set.\"\"\"\n json_data_set.save(dummy_dataframe)\n reloaded = json_data_set.load()\n assert_frame_equal(dummy_dataframe, reloaded)\n\n def test_exists(self, json_data_set, dummy_dataframe):\n \"\"\"Test `exists` method invocation for both existing and\n nonexistent data set.\"\"\"\n assert not json_data_set.exists()\n json_data_set.save(dummy_dataframe)\n assert json_data_set.exists()\n\n @pytest.mark.parametrize(\n \"load_args\", [{\"k1\": \"v1\", \"index\": \"value\"}], indirect=True\n )\n def test_load_extra_params(self, json_data_set, load_args):\n \"\"\"Test overriding the default load arguments.\"\"\"\n for key, value in load_args.items():\n assert json_data_set._load_args[key] == value\n\n @pytest.mark.parametrize(\n \"save_args\", [{\"k1\": \"v1\", \"index\": \"value\"}], indirect=True\n )\n def test_save_extra_params(self, json_data_set, save_args):\n \"\"\"Test overriding the default save arguments.\"\"\"\n for key, value in save_args.items():\n assert json_data_set._save_args[key] == value\n\n @pytest.mark.parametrize(\n \"load_args,save_args\",\n [\n ({\"storage_options\": {\"a\": \"b\"}}, {}),\n ({}, {\"storage_options\": {\"a\": \"b\"}}),\n ({\"storage_options\": {\"a\": \"b\"}}, {\"storage_options\": {\"x\": \"y\"}}),\n ],\n )\n def test_storage_options_dropped(self, load_args, save_args, caplog, tmp_path):\n filepath = str(tmp_path / \"test.csv\")\n\n ds = JSONDataSet(filepath=filepath, load_args=load_args, save_args=save_args)\n\n records = [r for r in caplog.records if r.levelname == \"WARNING\"]\n expected_log_message = (\n f\"Dropping 'storage_options' for {filepath}, \"\n f\"please specify them under 'fs_args' or 'credentials'.\"\n )\n assert records[0].getMessage() == expected_log_message\n assert \"storage_options\" not in ds._save_args\n assert \"storage_options\" not in ds._load_args\n\n def test_load_missing_file(self, json_data_set):\n \"\"\"Check the error when trying to load missing file.\"\"\"\n pattern = r\"Failed while loading data from data set JSONDataSet\\(.*\\)\"\n with pytest.raises(DataSetError, match=pattern):\n json_data_set.load()\n\n @pytest.mark.parametrize(\n \"filepath,instance_type,credentials,load_path\",\n [\n (\"s3://bucket/file.json\", S3FileSystem, {}, \"s3://bucket/file.json\"),\n (\"file:///tmp/test.json\", 
LocalFileSystem, {}, \"/tmp/test.json\"),\n (\"/tmp/test.json\", LocalFileSystem, {}, \"/tmp/test.json\"),\n (\"gcs://bucket/file.json\", GCSFileSystem, {}, \"gcs://bucket/file.json\"),\n (\n \"https://example.com/file.json\",\n HTTPFileSystem,\n {},\n \"https://example.com/file.json\",\n ),\n (\n \"abfs://bucket/file.csv\",\n AzureBlobFileSystem,\n {\"account_name\": \"test\", \"account_key\": \"test\"},\n \"abfs://bucket/file.csv\",\n ),\n ],\n )\n def test_protocol_usage(\n self, filepath, instance_type, credentials, load_path, mocker\n ):\n data_set = JSONDataSet(filepath=filepath, credentials=credentials)\n assert isinstance(data_set._fs, instance_type)\n\n path = filepath.split(PROTOCOL_DELIMITER, 1)[-1]\n\n assert str(data_set._filepath) == path\n assert isinstance(data_set._filepath, PurePosixPath)\n\n mock_pandas_call = mocker.patch(\"pandas.read_json\")\n data_set.load()\n assert mock_pandas_call.call_count == 1\n assert mock_pandas_call.call_args_list[0][0][0] == load_path\n\n def test_catalog_release(self, mocker):\n fs_mock = mocker.patch(\"fsspec.filesystem\").return_value\n filepath = \"test.json\"\n data_set = JSONDataSet(filepath=filepath)\n data_set.release()\n fs_mock.invalidate_cache.assert_called_once_with(filepath)\n\n\nclass TestJSONDataSetVersioned:\n def test_version_str_repr(self, load_version, save_version):\n \"\"\"Test that version is in string representation of the class instance\n when applicable.\"\"\"\n filepath = \"test.json\"\n ds = JSONDataSet(filepath=filepath)\n ds_versioned = JSONDataSet(\n filepath=filepath, version=Version(load_version, save_version)\n )\n assert filepath in str(ds)\n assert \"version\" not in str(ds)\n\n assert filepath in str(ds_versioned)\n ver_str = f\"version=Version(load={load_version}, save='{save_version}')\"\n assert ver_str in str(ds_versioned)\n assert \"JSONDataSet\" in str(ds_versioned)\n assert \"JSONDataSet\" in str(ds)\n assert \"protocol\" in str(ds_versioned)\n assert \"protocol\" in str(ds)\n\n def test_save_and_load(self, versioned_json_data_set, dummy_dataframe):\n \"\"\"Test that saved and reloaded data matches the original one for\n the versioned data set.\"\"\"\n versioned_json_data_set.save(dummy_dataframe)\n reloaded_df = versioned_json_data_set.load()\n assert_frame_equal(dummy_dataframe, reloaded_df)\n\n def test_no_versions(self, versioned_json_data_set):\n \"\"\"Check the error if no versions are available for load.\"\"\"\n pattern = r\"Did not find any versions for JSONDataSet\\(.+\\)\"\n with pytest.raises(DataSetError, match=pattern):\n versioned_json_data_set.load()\n\n def test_exists(self, versioned_json_data_set, dummy_dataframe):\n \"\"\"Test `exists` method invocation for versioned data set.\"\"\"\n assert not versioned_json_data_set.exists()\n versioned_json_data_set.save(dummy_dataframe)\n assert versioned_json_data_set.exists()\n\n def test_prevent_overwrite(self, versioned_json_data_set, dummy_dataframe):\n \"\"\"Check the error when attempting to override the data set if the\n corresponding hdf file for a given save version already exists.\"\"\"\n versioned_json_data_set.save(dummy_dataframe)\n pattern = (\n r\"Save path \\'.+\\' for JSONDataSet\\(.+\\) must \"\n r\"not exist if versioning is enabled\\.\"\n )\n with pytest.raises(DataSetError, match=pattern):\n versioned_json_data_set.save(dummy_dataframe)\n\n @pytest.mark.parametrize(\n \"load_version\", [\"2019-01-01T23.59.59.999Z\"], indirect=True\n )\n @pytest.mark.parametrize(\n \"save_version\", [\"2019-01-02T00.00.00.000Z\"], 
indirect=True\n )\n def test_save_version_warning(\n self, versioned_json_data_set, load_version, save_version, dummy_dataframe\n ):\n \"\"\"Check the warning when saving to the path that differs from\n the subsequent load path.\"\"\"\n pattern = (\n rf\"Save version '{save_version}' did not match load version \"\n rf\"'{load_version}' for JSONDataSet\\(.+\\)\"\n )\n with pytest.warns(UserWarning, match=pattern):\n versioned_json_data_set.save(dummy_dataframe)\n\n def test_http_filesystem_no_versioning(self):\n pattern = r\"HTTP\\(s\\) DataSet doesn't support versioning\\.\"\n\n with pytest.raises(DataSetError, match=pattern):\n JSONDataSet(\n filepath=\"https://example.com/file.json\", version=Version(None, None)\n )\n\n def test_versioning_existing_dataset(\n self, json_data_set, versioned_json_data_set, dummy_dataframe\n ):\n \"\"\"Check the error when attempting to save a versioned dataset on top of an\n already existing (non-versioned) dataset.\"\"\"\n json_data_set.save(dummy_dataframe)\n assert json_data_set.exists()\n assert json_data_set._filepath == versioned_json_data_set._filepath\n pattern = (\n f\"(?=.*file with the same name already exists in the directory)\"\n f\"(?=.*{versioned_json_data_set._filepath.parent.as_posix()})\"\n )\n with pytest.raises(DataSetError, match=pattern):\n versioned_json_data_set.save(dummy_dataframe)\n\n # Remove non-versioned dataset and try again\n Path(json_data_set._filepath.as_posix()).unlink()\n versioned_json_data_set.save(dummy_dataframe)\n assert versioned_json_data_set.exists()\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal" ] ]
acezen/graph-learn
[ "77bd92f960e4d178a3606444684f7f04c7f5b738" ]
[ "examples/data/cora.py" ]
[ "# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Preprocess cora dataset and generate node, edge, train, val, test table.\nUsed by GCN, GAT, GraphSage supervised training.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom utils import download, extract\n\ndef preprocess(dataset):\n # process node table\n node_table = \"{}/node_table\".format(dataset)\n edge_table = \"{}/edge_table\".format(dataset)\n edge_table_with_self_loop = '{}/edge_table_with_self_loop'.format(dataset)\n train_table = \"{}/train_table\".format(dataset)\n val_table = \"{}/val_table\".format(dataset)\n test_table = \"{}/test_table\".format(dataset)\n\n idx_features_labels = np.genfromtxt(dataset + \"/cora.content\",\n dtype=np.dtype(str))\n if not os.path.exists(edge_table_with_self_loop):\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n\n features = sp.csr_matrix(idx_features_labels[:, 1:-1],\n dtype=np.float32)\n features = feature_normalize(features)\n features = np.array(features.todense())\n labels = encode_label(idx_features_labels[:, -1])\n node_idxs = []\n\n with open(node_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"label:int64\" + \"\\t\" + \"feature:string\" + \"\\n\")\n for i in range(idx.shape[0]):\n f.write(str(idx[i]) + \"\\t\" + str(labels[i]) +\n \"\\t\" + str(\":\".join(map(str, features[i]))) + \"\\n\")\n node_idxs.append(str(idx[i]))\n\n with open(train_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"weight:float\" + \"\\n\")\n for i in range(140):\n f.write(str(idx[i]) + \"\\t\" + str(1.0) + \"\\n\")\n with open(val_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"weight:float\" + \"\\n\")\n for i in range(200, 500):\n f.write(str(idx[i]) + \"\\t\" + str(1.0) + \"\\n\")\n with open(test_table, 'w') as f:\n f.write(\"id:int64\" + \"\\t\" + \"weight:float\" + \"\\n\")\n for i in range(500, 1500):\n f.write(str(idx[i]) + \"\\t\" + str(1.0) + \"\\n\")\n\n # process edge table\n edges = np.genfromtxt(dataset + \"/cora.cites\", dtype=np.int32)\n with open(edge_table, 'w') as f:\n f.write(\"src_id: int64\" + \"\\t\"\n + \"dst_id: int64\" + \"\\t\"\n + \"weight: double\" + \"\\n\")\n for i in range(edges.shape[0]):\n f.write(str(edges[i][0]) + \"\\t\" + str(edges[i][1]) + \"\\t\" + \"0.0\" + \"\\n\")\n\n with open(edge_table_with_self_loop, 'w') as f:\n f.write(\"src_id: int64\" + \"\\t\"\n + \"dst_id: int64\" + \"\\t\"\n + \"weight: double\" + \"\\n\")\n for i in range(edges.shape[0]):\n if edges[i][0] != edges[i][1]:\n f.write(str(edges[i][0]) + \"\\t\" + str(edges[i][1]) + \"\\t\" + \"0.0\" + \"\\n\")\n for idx in node_idxs:\n f.write(idx + '\\t' + idx + '\\t' + '0.0' + '\\n')\n\n print(\"Data Process Done.\")\n return\n print(\"Data {} has exist.\".format(dataset))\n\ndef 
encode_label(labels):\n classes = list(sorted(set(labels)))\n classes_dict = {c: i for i, c in\n enumerate(classes)}\n labels_int64 = np.array(list(map(classes_dict.get, labels)),\n dtype=np.int64)\n return labels_int64\n\ndef feature_normalize(sparse_matrix):\n \"\"\"Normalize sparse matrix feature by row.\n Reference:\n DGL(https://github.com/dmlc/dgl).\n \"\"\"\n row_sum = np.array(sparse_matrix.sum(1))\n row_norm = np.power(row_sum, -1).flatten()\n row_norm[np.isinf(row_norm)] = 0.\n row_matrix_norm = sp.diags(row_norm)\n sparse_matrix = row_matrix_norm.dot(sparse_matrix)\n return sparse_matrix\n\nif __name__ == \"__main__\":\n download('http://graph-learn-dataset.oss-cn-zhangjiakou.aliyuncs.com/cora.zip', 'cora.zip')\n extract('cora.zip', 'cora')\n preprocess('cora')\n" ]
[ [ "numpy.dtype", "numpy.isinf", "scipy.sparse.csr_matrix", "scipy.sparse.diags", "numpy.power", "numpy.array", "numpy.genfromtxt" ] ]
zjzh/chainer
[ "e9da1423255c58c37be9733f51b158aa9b39dc93" ]
[ "tests/chainer_tests/functions_tests/pooling_tests/test_unpooling_2d.py" ]
[ "import unittest\n\nimport numpy\nimport six\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import functions\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer_tests.functions_tests.pooling_tests import pooling_nd_helper\n\n\[email protected](*testing.product_dict(\n [\n # we assume insize as (2, 1)\n # standard output size which is estimated with get_deconv_outsize\n # function\n {'cover_all': False, 'outsize': (4, 2)},\n {'cover_all': True, 'outsize': (3, 1)},\n {'cover_all': False, 'outsize': None, 'expected_outsize': (4, 2)},\n {'cover_all': True, 'outsize': None, 'expected_outsize': (3, 1)},\n # another sizes which can be outsize of insize (2, 1)\n {'cover_all': False, 'outsize': (5, 2)},\n {'cover_all': True, 'outsize': (4, 2)},\n ],\n [\n {'dtype': numpy.float16},\n {'dtype': numpy.float32},\n {'dtype': numpy.float64},\n ],\n))\nclass TestUnpooling2D(unittest.TestCase):\n\n def setUp(self):\n self.N = 2\n self.n_channels = 3\n inh, inw = 2, 1\n self.x = pooling_nd_helper.shuffled_linspace(\n (self.N, self.n_channels, inh, inw), self.dtype)\n\n self.ksize = 2\n outh, outw = self.outsize or self.expected_outsize\n self.gy = numpy.random.uniform(\n -1, 1, (self.N, self.n_channels, outh, outw)).astype(self.dtype)\n self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}\n self.check_double_backward_options = {}\n if self.dtype == numpy.float16:\n self.check_backward_options = {'atol': 2e-3, 'rtol': 2e-2}\n self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}\n self.ggx = numpy.random.uniform(\n -1, 1, self.x.shape).astype(self.dtype)\n\n def check_forward(self, x_data):\n x = chainer.Variable(x_data)\n y = functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n cover_all=self.cover_all)\n self.assertEqual(y.data.dtype, self.dtype)\n y_data = cuda.to_cpu(y.data)\n\n self.assertEqual(self.gy.shape, y_data.shape)\n for i in six.moves.range(self.N):\n for c in six.moves.range(self.n_channels):\n outsize = self.outsize or self.expected_outsize\n assert y_data.shape[2:] == outsize\n if outsize == (5, 2):\n expect = numpy.zeros(outsize, dtype=self.dtype)\n expect[:2, :] = self.x[i, c, 0, 0]\n expect[2:4, :] = self.x[i, c, 1, 0]\n elif outsize == (4, 2):\n expect = numpy.array([\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n ])\n elif outsize == (3, 1):\n expect = numpy.array([\n [self.x[i, c, 0, 0]],\n [self.x[i, c, 0, 0]],\n [self.x[i, c, 1, 0]],\n ])\n else:\n raise ValueError('Unsupported outsize: {}'.format(outsize))\n testing.assert_allclose(expect, y_data[i, c])\n\n def test_forward_cpu(self):\n self.check_forward(self.x)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.x))\n\n def check_backward(self, x_data, y_grad):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n cover_all=self.cover_all)\n gradient_check.check_backward(\n f, x_data, y_grad, dtype=numpy.float64,\n **self.check_backward_options)\n\n def test_backward_cpu(self):\n self.check_backward(self.x, self.gy)\n\n @attr.gpu\n def test_backward_gpu(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))\n\n def check_double_backward(self, x_data, y_grad, x_grad_grad,\n use_cudnn='always'):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n cover_all=self.cover_all)\n with 
chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_double_backward(\n f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,\n **self.check_double_backward_options)\n\n def test_double_backward_cpu(self):\n self.check_double_backward(\n self.x, self.gy, self.ggx, 'never')\n\n @attr.gpu\n def test_double_backward_gpu(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))\n\n @attr.gpu\n def test_double_backward_gpu_non_contiguous(self):\n self.check_double_backward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))\n\n @attr.gpu\n def test_double_backward_gpu_no_cudnn(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),\n 'never')\n\n\[email protected](*testing.product_dict(\n [\n {'insize': (2, 1), 'outsize': (4, 2), 'ksize': 2, 'pad': 0},\n {'insize': (4, 5), 'outsize': (4, 6), 'ksize': 2, 'pad': 2},\n ],\n [\n {'dtype': numpy.float16},\n {'dtype': numpy.float32},\n {'dtype': numpy.float64},\n ],\n))\nclass TestIntegerScaleUnpooling2D(unittest.TestCase):\n\n def setUp(self):\n self.N = 2\n self.n_channels = 3\n inh, inw = self.insize\n self.x = pooling_nd_helper.shuffled_linspace(\n (self.N, self.n_channels, inh, inw), self.dtype)\n\n outh, outw = self.outsize or self.expected_outsize\n self.gy = numpy.random.uniform(\n -1, 1, (self.N, self.n_channels, outh, outw)).astype(self.dtype)\n self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}\n self.check_double_backward_options = {}\n if self.dtype == numpy.float16:\n self.check_backward_options = {'atol': 2e-3, 'rtol': 2e-2}\n self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}\n self.ggx = numpy.random.uniform(\n -1, 1, self.x.shape).astype(self.dtype)\n\n def check_forward(self, x_data):\n x = chainer.Variable(x_data)\n y = functions.unpooling_2d(\n x, self.ksize, outsize=self.outsize, pad=self.pad)\n self.assertEqual(y.data.dtype, self.dtype)\n y_data = cuda.to_cpu(y.data)\n\n self.assertEqual(self.gy.shape, y_data.shape)\n for i in six.moves.range(self.N):\n for c in six.moves.range(self.n_channels):\n outsize = self.outsize or self.expected_outsize\n assert y_data.shape[2:] == outsize\n if outsize == (4, 2):\n expect = numpy.array([\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],\n ])\n elif outsize == (4, 6):\n expect = numpy.array([\n [self.x[i, c, 1, 1], self.x[i, c, 1, 1],\n self.x[i, c, 1, 2], self.x[i, c, 1, 2],\n self.x[i, c, 1, 3], self.x[i, c, 1, 3]],\n [self.x[i, c, 1, 1], self.x[i, c, 1, 1],\n self.x[i, c, 1, 2], self.x[i, c, 1, 2],\n self.x[i, c, 1, 3], self.x[i, c, 1, 3]],\n [self.x[i, c, 2, 1], self.x[i, c, 2, 1],\n self.x[i, c, 2, 2], self.x[i, c, 2, 2],\n self.x[i, c, 2, 3], self.x[i, c, 2, 3]],\n [self.x[i, c, 2, 1], self.x[i, c, 2, 1],\n self.x[i, c, 2, 2], self.x[i, c, 2, 2],\n self.x[i, c, 2, 3], self.x[i, c, 2, 3]],\n ])\n else:\n raise ValueError('Unsupported outsize: {}'.format(outsize))\n testing.assert_allclose(expect, y_data[i, c])\n\n def test_forward_cpu(self):\n self.check_forward(self.x)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.x))\n\n def check_backward(self, x_data, y_grad):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n pad=self.pad)\n gradient_check.check_backward(\n 
f, x_data, y_grad, dtype=numpy.float64,\n **self.check_backward_options)\n\n def test_backward_cpu(self):\n self.check_backward(self.x, self.gy)\n\n @attr.gpu\n def test_backward_gpu(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))\n\n def check_double_backward(self, x_data, y_grad, x_grad_grad,\n use_cudnn='always'):\n def f(x):\n return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,\n pad=self.pad)\n with chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_double_backward(\n f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,\n **self.check_double_backward_options)\n\n def test_double_backward_cpu(self):\n self.check_double_backward(\n self.x, self.gy, self.ggx, 'never')\n\n @attr.gpu\n def test_double_backward_gpu(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))\n\n @attr.gpu\n def test_double_backward_gpu_non_contiguous(self):\n self.check_double_backward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))\n\n @attr.gpu\n def test_double_backward_gpu_no_cudnn(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),\n 'never')\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'h': [5],\n 'k': [3],\n 's': [3],\n 'p': [0],\n 'cover_all': [True, False],\n}))\nclass TestMaxPoolingUnpooling(unittest.TestCase):\n\n def check_left_inverse(self, xp, use_cudnn='never'):\n x = xp.arange(self.h * self.h).reshape(\n (1, 1, self.h, self.h)).astype(self.dtype)\n with chainer.using_config('use_cudnn', use_cudnn):\n y = chainer.functions.unpooling_2d(\n x, self.k, self.s, self.p, None, self.cover_all)\n x_ = chainer.functions.max_pooling_2d(\n y, self.k, self.s, self.p, self.cover_all).data\n\n self.assertEqual(x.shape, x_.shape)\n self.assertEqual(x.dtype, x_.dtype)\n chainer.testing.assert_allclose(x, x_)\n\n def test_left_inverse_cpu(self):\n self.check_left_inverse(numpy)\n\n @attr.gpu\n def test_left_inverse_cupy(self):\n self.check_left_inverse(cuda.cupy)\n\n @attr.gpu\n def test_left_inverse_cudnn(self):\n self.check_left_inverse(cuda.cupy, 'always')\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'h': [5],\n 'k': [3],\n 's': [3],\n 'p': [0],\n}))\nclass TestAveragePoolingUnpooling(unittest.TestCase):\n\n def check_left_inverse(self, xp, use_cudnn='never'):\n x = xp.arange(self.h * self.h).reshape(\n (1, 1, self.h, self.h)).astype(self.dtype)\n with chainer.using_config('use_cudnn', use_cudnn):\n # average_pooling_2d does not have cover_all option\n # as max_pooling_2d has.\n y = chainer.functions.unpooling_2d(\n x, self.k, self.s, self.p, None, False)\n x_ = chainer.functions.average_pooling_2d(\n y, self.k, self.s, self.p).data\n\n self.assertEqual(x.shape, x_.shape)\n self.assertEqual(x.dtype, x_.dtype)\n chainer.testing.assert_allclose(x, x_)\n\n def test_left_inverse_cpu(self):\n self.check_left_inverse(numpy)\n\n @attr.gpu\n def test_left_inverse_cupy(self):\n self.check_left_inverse(cuda.cupy)\n\n @attr.gpu\n def test_left_inverse_cudnn(self):\n self.check_left_inverse(cuda.cupy, 'always')\n\n\ntesting.run_module(__name__, __file__)\n" ]
[ [ "numpy.random.uniform", "numpy.array", "numpy.zeros" ] ]
Substancia/FDTD-Huygens-metasurface
[ "dfb46f43c0653b394b63e7af92a331ae4824d9be" ]
[ "quartz_sphere.py" ]
[ "from fdtd_venv import fdtd_mod as fdtd\nfrom numpy import arange, flip, meshgrid, array\nfrom matplotlib.pyplot import plot, show\n\ndef main():\n\tgrid = fdtd.Grid(shape=(200, 200, 1), grid_spacing=155e-9)\n\n\tlens_width = 10\n\tlens_order = 3\n\tlens_radius = 25\n\tx, y = arange(-90, 90, 1), arange(lens_radius-lens_order*lens_width/2, lens_radius, 1)\n\tX, Y = meshgrid(x, y)\n\tlens_mask = X**2 + Y**2 <= lens_radius**2\n\tfor j, col in enumerate(lens_mask.T):\n\t\tfor i, val in enumerate(flip(col)):\n\t\t\tif val:\n\t\t\t\tgrid[50+i%(lens_width//2):50+lens_width-i%(lens_width//2), j+10:j+11, 0] = fdtd.Object(permittivity=1.5**2, name=str(i)+\",\"+str(j))\n\t\t\t\tbreak\n\n\tgrid[25, 80:120, 0] = fdtd.LineSource(period=1550e-9/3e8, name=\"source\")\n\n\tgrid[30:130, 100, 0] = fdtd.LineDetector(name=\"LineDetector\")\n\tgrid[30:130, 75:125, 0] = fdtd.BlockDetector(name=\"BlockDetector\")\n\n\tgrid[0:10, :, :] = fdtd.PML(name=\"pml_xlow\")\n\tgrid[-10:, :, :] = fdtd.PML(name=\"pml_xhigh\")\n\tgrid[:, 0:10, :] = fdtd.PML(name=\"pml_ylow\")\n\tgrid[:, -10:, :] = fdtd.PML(name=\"pml_yhigh\")\n\n\tgrid.run(total_time=300)\n\tgrid.visualize(z=0, show=True)\n\t#E_val = array(grid.detector.detector_values()['E'])\n\t#arr = []\n\t#for i in range(100):\n\t\t#temp = E_val[:, i, 2]\n\t\t#arr.append(max(temp) - min(temp))\n\t#print(\"Max index:\", 30+arr.index(max(arr)))\n\t#plot(arange(30, 130, 1), arr)\n\t#show()\n\tfdtd.dB_map_2D(array(grid.detectors[1].detector_values()['E'][200:]))\n\nif __name__ == \"__main__\":\n\tmain()\n" ]
[ [ "numpy.arange", "numpy.meshgrid", "numpy.flip" ] ]
medicode/tensor2tensor
[ "3386fa537957fcf8133536322fcadec0630dde11" ]
[ "tensor2tensor/utils/usr_dir.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility to load code from an external user-supplied directory.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\nimport os\nimport sys\nimport tensorflow as tf\n\nINTERNAL_USR_DIR_PACKAGE = \"t2t_usr_dir_internal\"\n\n\ndef import_usr_dir(usr_dir):\n \"\"\"Import module at usr_dir, if provided.\"\"\"\n if not usr_dir:\n return\n if usr_dir == INTERNAL_USR_DIR_PACKAGE:\n # The package has been installed with pip under this name for Cloud ML\n # Engine so just import it.\n importlib.import_module(INTERNAL_USR_DIR_PACKAGE)\n return\n\n dir_path = os.path.abspath(os.path.expanduser(usr_dir).rstrip(\"/\"))\n containing_dir, module_name = os.path.split(dir_path)\n tf.logging.info(\"Importing user module %s from path %s\", module_name,\n containing_dir)\n sys.path.insert(0, containing_dir)\n importlib.import_module(module_name)\n sys.path.pop(0)\n" ]
[ [ "tensorflow.logging.info" ] ]
dmitryduev/broker
[ "7b9582fae6cd37bbd334bca228ef429d96e0e498", "7b9582fae6cd37bbd334bca228ef429d96e0e498" ]
[ "kowalski/alert_watcher_zuds.py", "kowalski/dev/ingest_known_lenses_20180901.py" ]
[ "import argparse\nimport os\nimport sys\nimport io\nimport time\nimport json\nfrom bson.json_util import dumps\nimport traceback\n\nimport confluent_kafka\nfrom ast import literal_eval\nimport avro.schema\nimport fastavro\nimport subprocess\nimport datetime\nimport multiprocessing\n# import threading\n\nimport pymongo\nimport pytz\nfrom numba import jit\nimport numpy as np\n\nfrom tensorflow.keras.models import load_model\nimport gzip\nimport io\nfrom astropy.io import fits\nfrom copy import deepcopy\n\n\n''' load config and secrets '''\nwith open('/app/config.json') as cjson:\n config = json.load(cjson)\n\nwith open('/app/secrets.json') as sjson:\n secrets = json.load(sjson)\n\nfor k in secrets:\n config[k].update(secrets.get(k, {}))\n\n\ndef utc_now():\n return datetime.datetime.now(pytz.utc)\n\n\ndef time_stamps():\n \"\"\"\n\n :return: local time, UTC time\n \"\"\"\n return datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S'), \\\n datetime.datetime.utcnow().strftime('%Y%m%d_%H:%M:%S')\n\n\n@jit\ndef deg2hms(x):\n \"\"\"Transform degrees to *hours:minutes:seconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [0, 360) to be written as a sexagesimal string.\n\n Returns\n -------\n out : str\n The input angle written as a sexagesimal string, in the\n form, hours:minutes:seconds.\n\n \"\"\"\n assert 0.0 <= x < 360.0, 'Bad RA value in degrees'\n # ac = Angle(x, unit='degree')\n # hms = str(ac.to_string(unit='hour', sep=':', pad=True))\n # print(str(hms))\n _h = np.floor(x * 12.0 / 180.)\n _m = np.floor((x * 12.0 / 180. - _h) * 60.0)\n _s = ((x * 12.0 / 180. - _h) * 60.0 - _m) * 60.0\n hms = '{:02.0f}:{:02.0f}:{:07.4f}'.format(_h, _m, _s)\n # print(hms)\n return hms\n\n\n@jit\ndef deg2dms(x):\n \"\"\"Transform degrees to *degrees:arcminutes:arcseconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [-90, 90] to be converted.\n\n Returns\n -------\n out : str\n The input angle as a string, written as degrees:minutes:seconds.\n\n \"\"\"\n assert -90.0 <= x <= 90.0, 'Bad Dec value in degrees'\n # ac = Angle(x, unit='degree')\n # dms = str(ac.to_string(unit='degree', sep=':', pad=True))\n # print(dms)\n _d = np.floor(abs(x)) * np.sign(x)\n _m = np.floor(np.abs(x - _d) * 60.0)\n _s = np.abs(np.abs(x - _d) * 60.0 - _m) * 60.0\n dms = '{:02.0f}:{:02.0f}:{:06.3f}'.format(_d, _m, _s)\n # print(dms)\n return dms\n\n\n@jit\ndef great_circle_distance(ra1_deg, dec1_deg, ra2_deg, dec2_deg):\n \"\"\"\n Distance between two points on the sphere\n :param ra1_deg:\n :param dec1_deg:\n :param ra2_deg:\n :param dec2_deg:\n :return: distance in degrees\n \"\"\"\n # this is orders of magnitude faster than astropy.coordinates.Skycoord.separation\n DEGRA = np.pi / 180.0\n ra1, dec1, ra2, dec2 = ra1_deg * DEGRA, dec1_deg * DEGRA, ra2_deg * DEGRA, dec2_deg * DEGRA\n delta_ra = np.abs(ra2 - ra1)\n distance = np.arctan2(np.sqrt((np.cos(dec2) * np.sin(delta_ra)) ** 2\n + (np.cos(dec1) * np.sin(dec2) - np.sin(dec1) * np.cos(dec2) * np.cos(\n delta_ra)) ** 2),\n np.sin(dec1) * np.sin(dec2) + np.cos(dec1) * np.cos(dec2) * np.cos(delta_ra))\n\n return distance * 180.0 / np.pi\n\n\n@jit\ndef in_ellipse(alpha, delta0, alpha1, delta01, d0, axis_ratio, PA0):\n \"\"\"\n Check if a given point (alpha, delta0)\n is within an ellipse specified by\n center (alpha1, delta01), maj_ax (d0), axis ratio and positional angle\n All angles are in decimal degrees\n Adapted from q3c: https://github.com/segasai/q3c/blob/master/q3cube.c\n :param alpha:\n :param delta0:\n :param alpha1:\n :param 
delta01:\n :param d0:\n :param axis_ratio:\n :param PA0:\n :return:\n \"\"\"\n DEGRA = np.pi / 180.0\n\n # convert degrees to radians\n d_alpha = (alpha1 - alpha) * DEGRA\n delta1 = delta01 * DEGRA\n delta = delta0 * DEGRA\n PA = PA0 * DEGRA\n d = d0 * DEGRA\n e = np.sqrt(1.0 - axis_ratio * axis_ratio)\n\n t1 = np.cos(d_alpha)\n t22 = np.sin(d_alpha)\n t3 = np.cos(delta1)\n t32 = np.sin(delta1)\n t6 = np.cos(delta)\n t26 = np.sin(delta)\n t9 = np.cos(d)\n t55 = np.sin(d)\n\n if (t3 * t6 * t1 + t32 * t26) < 0:\n return False\n\n t2 = t1 * t1\n\n t4 = t3 * t3\n t5 = t2 * t4\n\n t7 = t6 * t6\n t8 = t5 * t7\n\n t10 = t9 * t9\n t11 = t7 * t10\n t13 = np.cos(PA)\n t14 = t13 * t13\n t15 = t14 * t10\n t18 = t7 * t14\n t19 = t18 * t10\n\n t24 = np.sin(PA)\n\n t31 = t1 * t3\n\n t36 = 2.0 * t31 * t32 * t26 * t6\n t37 = t31 * t32\n t38 = t26 * t6\n t45 = t4 * t10\n\n t56 = t55 * t55\n t57 = t4 * t7\n t60 = -t8 + t5 * t11 + 2.0 * t5 * t15 - t5 * t19 - \\\n 2.0 * t1 * t4 * t22 * t10 * t24 * t13 * t26 - t36 + \\\n 2.0 * t37 * t38 * t10 - 2.0 * t37 * t38 * t15 - t45 * t14 - t45 * t2 + \\\n 2.0 * t22 * t3 * t32 * t6 * t24 * t10 * t13 - t56 + t7 - t11 + t4 - t57 + t57 * t10 + t19 - t18 * t45\n t61 = e * e\n t63 = t60 * t61 + t8 + t57 - t4 - t7 + t56 + t36\n\n return t63 > 0\n\n\n\"\"\"Utilities for manipulating Avro data and schemas.\n\"\"\"\n\n\ndef _loadSingleAvsc(file_path, names):\n \"\"\"Load a single avsc file.\n \"\"\"\n with open(file_path) as file_text:\n json_data = json.load(file_text)\n schema = avro.schema.SchemaFromJSONData(json_data, names)\n return schema\n\n\ndef combineSchemas(schema_files):\n \"\"\"Combine multiple nested schemas into a single schema.\n\n Parameters\n ----------\n schema_files : `list`\n List of files containing schemas.\n If nested, most internal schema must be first.\n\n Returns\n -------\n `dict`\n Avro schema\n \"\"\"\n known_schemas = avro.schema.Names()\n\n for s in schema_files:\n schema = _loadSingleAvsc(s, known_schemas)\n return schema.to_json()\n\n\ndef writeAvroData(json_data, json_schema):\n \"\"\"Encode json into Avro format given a schema.\n\n Parameters\n ----------\n json_data : `dict`\n The JSON data containing message content.\n json_schema : `dict`\n The writer Avro schema for encoding data.\n\n Returns\n -------\n `_io.BytesIO`\n Encoded data.\n \"\"\"\n bytes_io = io.BytesIO()\n fastavro.schemaless_writer(bytes_io, json_schema, json_data)\n return bytes_io\n\n\ndef readAvroData(bytes_io, json_schema):\n \"\"\"Read data and decode with a given Avro schema.\n\n Parameters\n ----------\n bytes_io : `_io.BytesIO`\n Data to be decoded.\n json_schema : `dict`\n The reader Avro schema for decoding data.\n\n Returns\n -------\n `dict`\n Decoded data.\n \"\"\"\n bytes_io.seek(0)\n message = fastavro.schemaless_reader(bytes_io, json_schema)\n return message\n\n\ndef readSchemaData(bytes_io):\n \"\"\"Read data that already has an Avro schema.\n\n Parameters\n ----------\n bytes_io : `_io.BytesIO`\n Data to be decoded.\n\n Returns\n -------\n `dict`\n Decoded data.\n \"\"\"\n bytes_io.seek(0)\n message = fastavro.reader(bytes_io)\n return message\n\n\nclass AlertError(Exception):\n \"\"\"Base class for exceptions in this module.\n \"\"\"\n pass\n\n\nclass EopError(AlertError):\n \"\"\"Exception raised when reaching end of partition.\n\n Parameters\n ----------\n msg : Kafka message\n The Kafka message result from consumer.poll().\n \"\"\"\n def __init__(self, msg):\n message = 'topic:%s, partition:%d, status:end, ' \\\n 'offset:%d, key:%s, time:%.3f\\n' \\\n % 
(msg.topic(), msg.partition(),\n msg.offset(), str(msg.key()), time.time())\n self.message = message\n\n def __str__(self):\n return self.message\n\n\nclass AlertConsumer(object):\n \"\"\"Creates an alert stream Kafka consumer for a given topic.\n\n Parameters\n ----------\n topic : `str`\n Name of the topic to subscribe to.\n schema_files : Avro schema files\n The reader Avro schema files for decoding data. Optional.\n **kwargs\n Keyword arguments for configuring confluent_kafka.Consumer().\n \"\"\"\n\n def __init__(self, topic, schema_files=None, **kwargs):\n\n # keep track of disconnected partitions\n self.num_disconnected_partitions = 0\n self.topic = topic\n\n def error_cb(err, _self=self):\n print(*time_stamps(), 'error_cb -------->', err)\n # print(err.code())\n if err.code() == -195:\n _self.num_disconnected_partitions += 1\n if _self.num_disconnected_partitions == _self.num_partitions:\n print(*time_stamps(), 'all partitions got disconnected, killing thread')\n sys.exit()\n else:\n print(*time_stamps(), '{:s}: disconnected from partition.'.format(_self.topic),\n 'total:', self.num_disconnected_partitions)\n\n # 'error_cb': error_cb\n kwargs['error_cb'] = error_cb\n\n self.consumer = confluent_kafka.Consumer(**kwargs)\n self.num_partitions = 0\n\n def on_assign(consumer, partitions, _self=self):\n # force-reset offsets when subscribing to a topic:\n for part in partitions:\n # -2 stands for beginning and -1 for end\n part.offset = -2\n # keep number of partitions. when reaching end of last partition, kill thread and start from beginning\n _self.num_partitions += 1\n print(consumer.get_watermark_offsets(part))\n\n self.consumer.subscribe([topic], on_assign=on_assign)\n # self.consumer.subscribe([topic])\n\n # fixme?\n # if schema_files is not None:\n # self.alert_schema = combineSchemas(schema_files)\n\n # MongoDB:\n self.config = config\n self.collection_alerts = 'ZUDS_alerts'\n self.collection_alerts_aux = 'ZUDS_alerts_aux'\n self.db = None\n self.connect_to_db()\n\n # indexes\n self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),\n ('candid', pymongo.DESCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('coordinates.radec_geojson', '2dsphere'),\n ('objectId', pymongo.DESCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('objectId', pymongo.ASCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('candid', pymongo.ASCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.ztfname', pymongo.ASCENDING)], background=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.jdstartstack', pymongo.DESCENDING),\n ('candidate.jdendstack', pymongo.ASCENDING)],\n background=True, sparse=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.jd', pymongo.DESCENDING),\n ('candidate.drb', pymongo.DESCENDING),\n ('candid', pymongo.DESCENDING)],\n background=True, sparse=True)\n self.db['db'][self.collection_alerts].create_index([('candidate.jd', 1),\n ('candidate.drb', 1),\n ('candidate.isdiffpos', 1),\n ('candidate.ndethist', 1)],\n name='jd__braai__magpsf__isdiffpos__ndethist',\n background=True, sparse=True)\n\n # ML models:\n self.ml_models = dict()\n for m in config['ml_models']:\n try:\n m_v = config[\"ml_models\"][m][\"version\"]\n self.ml_models[m] = {'model': load_model(f'/app/models/{m}_{m_v}.h5'),\n 'version': m_v}\n except Exception as e:\n print(*time_stamps(), f'Error loading ML 
model {m}')\n traceback.print_exc()\n print(e)\n continue\n\n def connect_to_db(self):\n \"\"\"\n Connect to mongo\n :return:\n \"\"\"\n\n _config = self.config\n\n try:\n # there's only one instance of DB, it's too big to be replicated\n _client = pymongo.MongoClient(host=_config['database']['host'],\n port=_config['database']['port'], connect=False)\n # grab main database:\n _db = _client[_config['database']['db']]\n except Exception as _e:\n raise ConnectionRefusedError\n try:\n # authenticate\n _db.authenticate(_config['database']['user'], _config['database']['pwd'])\n except Exception as _e:\n raise ConnectionRefusedError\n\n self.db = dict()\n self.db['client'] = _client\n self.db['db'] = _db\n\n def insert_db_entry(self, _collection=None, _db_entry=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _collection:\n :param _db_entry:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entry is not None, 'Must specify document'\n try:\n self.db['db'][_collection].insert_one(_db_entry)\n except Exception as _e:\n print(*time_stamps(), 'Error inserting {:s} into {:s}'.format(str(_db_entry['_id']), _collection))\n traceback.print_exc()\n print(_e)\n\n def insert_multiple_db_entries(self, _collection=None, _db_entries=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _db:\n :param _collection:\n :param _db_entries:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entries is not None, 'Must specify documents'\n try:\n # ordered=False ensures that every insert operation will be attempted\n # so that if, e.g., a document already exists, it will be simply skipped\n self.db['db'][_collection].insert_many(_db_entries, ordered=False)\n except pymongo.errors.BulkWriteError as bwe:\n print(*time_stamps(), bwe.details)\n except Exception as _e:\n traceback.print_exc()\n print(_e)\n\n def replace_db_entry(self, _collection=None, _filter=None, _db_entry=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _collection:\n :param _filter:\n :param _db_entry:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entry is not None, 'Must specify document'\n try:\n self.db['db'][_collection].replace_one(_filter, _db_entry, upsert=True)\n except Exception as _e:\n print(*time_stamps(), 'Error replacing {:s} in {:s}'.format(str(_db_entry['_id']), _collection))\n traceback.print_exc()\n print(_e)\n\n @staticmethod\n def alert_mongify(alert):\n\n doc = dict(alert)\n\n # let mongo create a unique id\n # candid+objectId is a unique combination:\n # doc['_id'] = f\"{alert['candid']}_{alert['objectId']}\"\n\n # placeholders for cross-matches and classifications\n # doc['cross_matches'] = dict()\n doc['classifications'] = dict()\n\n # GeoJSON for 2D indexing\n doc['coordinates'] = {}\n _ra = doc['candidate']['ra']\n _dec = doc['candidate']['dec']\n _radec = [_ra, _dec]\n # string format: H:M:S, D:M:S\n # tic = time.time()\n _radec_str = [deg2hms(_ra), deg2dms(_dec)]\n # print(time.time() - tic)\n # print(_radec_str)\n doc['coordinates']['radec_str'] = _radec_str\n # for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. 
in deg)\n _radec_geojson = [_ra - 180.0, _dec]\n doc['coordinates']['radec_geojson'] = {'type': 'Point',\n 'coordinates': _radec_geojson}\n # radians and degrees:\n # doc['coordinates']['radec_rad'] = [_ra * np.pi / 180.0, _dec * np.pi / 180.0]\n # doc['coordinates']['radec_deg'] = [_ra, _dec]\n\n light_curve = deepcopy(doc['light_curve'])\n doc.pop('light_curve', None)\n if light_curve is None:\n light_curve = []\n for lc in light_curve:\n if lc['flux'] > 0:\n lc['mag'] = -2.5 * np.log10(lc['flux']) + lc['zp']\n\n return doc, light_curve\n\n def poll(self, path_alerts=None, path_tess=None, datestr=None, save_packets=True):\n \"\"\"\n Polls Kafka broker to consume topic.\n :param path_alerts:\n :param path_tess:\n :param datestr:\n :return:\n \"\"\"\n # msg = self.consumer.poll(timeout=timeout)\n msg = self.consumer.poll()\n\n if msg is None:\n print(*time_stamps(), 'Caught error: msg is None')\n\n if msg.error():\n print('Caught error:', msg.error())\n # if msg.value() is not None:\n # print(*time_stamps(), msg.value())\n raise EopError(msg)\n\n elif msg is not None:\n # decode avro packet\n msg_decoded = self.decodeMessage(msg)\n for record in msg_decoded:\n\n candid = record['candid']\n objectId = record['objectId']\n\n print(*time_stamps(), self.topic, objectId, candid)\n\n # check that candid not in collection_alerts\n if self.db['db'][self.collection_alerts].count_documents({'candid': candid}, limit=1) == 0:\n # candid not in db, ingest\n\n if save_packets:\n # save avro packet to disk\n path_alert_dir = os.path.join(path_alerts, datestr)\n # mkdir if does not exist\n if not os.path.exists(path_alert_dir):\n os.makedirs(path_alert_dir)\n path_avro = os.path.join(path_alert_dir, f'{candid}.avro')\n print(*time_stamps(), f'saving {candid} to disk')\n with open(path_avro, 'wb') as f:\n f.write(msg.value())\n\n # ingest decoded avro packet into db\n alert, light_curve = self.alert_mongify(record)\n\n # alert filters:\n\n # ML models:\n scores = alert_filter__ml(record, ml_models=self.ml_models)\n alert['classifications'] = scores\n\n print(*time_stamps(), f'ingesting {alert[\"candid\"]} into db')\n self.insert_db_entry(_collection=self.collection_alerts, _db_entry=alert)\n\n # light_curve: pop nulls - save space\n light_curve = [{kk: vv for kk, vv in lc.items() if vv is not None} for lc in light_curve]\n\n # cross-match with external catalogs if objectId not in collection_alerts_aux:\n if self.db['db'][self.collection_alerts_aux].count_documents({'_id': objectId}, limit=1) == 0:\n # tic = time.time()\n xmatches = alert_filter__xmatch(self.db['db'], alert)\n # CLU cross-match:\n xmatches = {**xmatches, **alert_filter__xmatch_clu(self.db['db'], alert)}\n # alert['cross_matches'] = xmatches\n # toc = time.time()\n # print(f'xmatch for {alert[\"candid\"]} took {toc-tic:.2f} s')\n\n alert_aux = {'_id': objectId,\n 'cross_matches': xmatches,\n 'light_curve': light_curve}\n\n self.insert_db_entry(_collection=self.collection_alerts_aux, _db_entry=alert_aux)\n\n else:\n self.db['db'][self.collection_alerts_aux].update_one({'_id': objectId},\n {'$addToSet':\n {'light_curve':\n {'$each': light_curve}}},\n upsert=True)\n\n # dump packet as json to disk if in a public TESS sector\n if 'TESS' in alert['candidate']['programpi']:\n # put light_curve back\n alert['light_curve'] = light_curve\n\n # get cross-matches\n # xmatches = self.db['db'][self.collection_alerts_aux].find_one({'_id': objectId})\n xmatches = self.db['db'][self.collection_alerts_aux].find({'_id': objectId},\n {'cross_matches': 1},\n 
limit=1)\n xmatches = list(xmatches)[0]\n alert['cross_matches'] = xmatches['cross_matches']\n\n if save_packets:\n path_tess_dir = os.path.join(path_tess, datestr)\n # mkdir if does not exist\n if not os.path.exists(path_tess_dir):\n os.makedirs(path_tess_dir)\n\n print(*time_stamps(), f'saving {alert[\"candid\"]} to disk')\n try:\n with open(os.path.join(path_tess_dir, f\"{alert['candid']}.json\"), 'w') as f:\n f.write(dumps(alert))\n except Exception as e:\n print(time_stamps(), str(e))\n _err = traceback.format_exc()\n print(*time_stamps(), str(_err))\n\n def decodeMessage(self, msg):\n \"\"\"Decode Avro message according to a schema.\n\n Parameters\n ----------\n msg : Kafka message\n The Kafka message result from consumer.poll().\n\n Returns\n -------\n `dict`\n Decoded message.\n \"\"\"\n # print(msg.topic(), msg.offset(), msg.error(), msg.key(), msg.value())\n message = msg.value()\n # print(message)\n try:\n bytes_io = io.BytesIO(message)\n decoded_msg = readSchemaData(bytes_io)\n # print(decoded_msg)\n # decoded_msg = readAvroData(bytes_io, self.alert_schema)\n # print(decoded_msg)\n except AssertionError:\n # FIXME this exception is raised but not sure if it matters yet\n bytes_io = io.BytesIO(message)\n decoded_msg = None\n except IndexError:\n literal_msg = literal_eval(str(message, encoding='utf-8')) # works to give bytes\n bytes_io = io.BytesIO(literal_msg) # works to give <class '_io.BytesIO'>\n decoded_msg = readSchemaData(bytes_io) # yields reader\n except Exception:\n decoded_msg = message\n finally:\n return decoded_msg\n\n\ndef msg_text(message):\n \"\"\"Remove postage stamp cutouts from an alert message.\n \"\"\"\n message_text = {k: message[k] for k in message\n if k not in ['cutoutDifference', 'cutoutTemplate', 'cutoutScience']}\n return message_text\n\n\ndef write_stamp_file(stamp_dict, output_dir):\n \"\"\"Given a stamp dict that follows the cutout schema,\n write data to a file in a given directory.\n \"\"\"\n try:\n filename = stamp_dict['fileName']\n try:\n os.makedirs(output_dir)\n except OSError:\n pass\n out_path = os.path.join(output_dir, filename)\n with open(out_path, 'wb') as f:\n f.write(stamp_dict['stampData'])\n except TypeError:\n sys.stderr.write('%% Cannot get stamp\\n')\n return\n\n\ndef alert_filter(alert, stampdir=None):\n \"\"\"Filter to apply to each alert.\n See schemas: https://github.com/ZwickyTransientFacility/ztf-avro-alert\n \"\"\"\n data = msg_text(alert)\n if data: # Write your condition statement here\n print(data) # Print all main alert data to screen\n if stampdir is not None: # Collect all postage stamps\n write_stamp_file(\n alert.get('cutoutDifference'), stampdir)\n write_stamp_file(\n alert.get('cutoutTemplate'), stampdir)\n write_stamp_file(\n alert.get('cutoutScience'), stampdir)\n return\n\n\ndef make_triplet(alert, to_tpu: bool = False):\n \"\"\"\n Feed in alert packet\n \"\"\"\n cutout_dict = dict()\n\n for cutout in ('science', 'template', 'difference'):\n # cutout_data = loads(dumps([alert[f'cutout{cutout.capitalize()}']['stampData']]))[0]\n # cutout_data = alert[f'cutout{cutout.capitalize()}']['stampData']\n cutout_data = alert[f'cutout{cutout.capitalize()}']\n\n # unzip\n with gzip.open(io.BytesIO(cutout_data), 'rb') as f:\n with fits.open(io.BytesIO(f.read())) as hdu:\n data = hdu[0].data\n # replace nans with zeros\n cutout_dict[cutout] = np.nan_to_num(data)\n # L2-normalize\n cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])\n\n # pad to 63x63 if smaller\n shape = cutout_dict[cutout].shape\n if shape != (63, 
63):\n # print(f'Shape of {candid}/{cutout}: {shape}, padding to (63, 63)')\n cutout_dict[cutout] = np.pad(cutout_dict[cutout], [(0, 63 - shape[0]), (0, 63 - shape[1])],\n mode='constant', constant_values=1e-9)\n\n triplet = np.zeros((63, 63, 3))\n triplet[:, :, 0] = cutout_dict['science']\n triplet[:, :, 1] = cutout_dict['template']\n triplet[:, :, 2] = cutout_dict['difference']\n\n if to_tpu:\n # Edge TPUs require additional processing\n triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()\n\n return triplet\n\n\ndef alert_filter__ml(alert, ml_models: dict = None):\n \"\"\"Filter to apply to each alert.\n \"\"\"\n\n scores = dict()\n\n try:\n ''' braai '''\n triplet = make_triplet(alert)\n triplets = np.expand_dims(triplet, axis=0)\n braai = ml_models['braai']['model'].predict(x=triplets)[0]\n # braai = 1.0\n scores['braai'] = float(braai)\n scores['braai_version'] = ml_models['braai']['version']\n except Exception as e:\n print(*time_stamps(), str(e))\n\n return scores\n\n\n# cone search radius:\ncone_search_radius = float(config['xmatch']['cone_search_radius'])\n# convert to rad:\nif config['xmatch']['cone_search_unit'] == 'arcsec':\n cone_search_radius *= np.pi / 180.0 / 3600.\nelif config['xmatch']['cone_search_unit'] == 'arcmin':\n cone_search_radius *= np.pi / 180.0 / 60.\nelif config['xmatch']['cone_search_unit'] == 'deg':\n cone_search_radius *= np.pi / 180.0\nelif config['xmatch']['cone_search_unit'] == 'rad':\n cone_search_radius *= 1\nelse:\n raise Exception('Unknown cone search unit. Must be in [deg, rad, arcsec, arcmin]')\n\n\ndef alert_filter__xmatch(db, alert):\n \"\"\"\n Filter to apply to each alert.\n \"\"\"\n\n xmatches = dict()\n\n try:\n ra_geojson = float(alert['candidate']['ra'])\n # geojson-friendly ra:\n ra_geojson -= 180.0\n dec_geojson = float(alert['candidate']['dec'])\n\n ''' catalogs '''\n for catalog in config['xmatch']['catalogs']:\n catalog_filter = config['xmatch']['catalogs'][catalog]['filter']\n catalog_projection = config['xmatch']['catalogs'][catalog]['projection']\n\n object_position_query = dict()\n object_position_query['coordinates.radec_geojson'] = {\n '$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius]}}\n s = db[catalog].find({**object_position_query, **catalog_filter},\n {**catalog_projection})\n xmatches[catalog] = list(s)\n\n except Exception as e:\n print(*time_stamps(), str(e))\n\n return xmatches\n\n\n# cone search radius in deg:\ncone_search_radius_clu = 3.0\n# convert deg to rad:\ncone_search_radius_clu *= np.pi / 180.0\n\n\ndef alert_filter__xmatch_clu(database, alert, size_margin=3, clu_version='CLU_20190625'):\n \"\"\"\n Filter to apply to each alert.\n :param size_margin: multiply galaxy size by this much before looking for a match\n :param clu_version: CLU catalog version\n \"\"\"\n\n xmatches = dict()\n\n try:\n ra = float(alert['candidate']['ra'])\n dec = float(alert['candidate']['dec'])\n\n # geojson-friendly ra:\n ra_geojson = float(alert['candidate']['ra']) - 180.0\n dec_geojson = dec\n\n catalog_filter = {}\n catalog_projection = {\"_id\": 1, \"name\": 1, \"ra\": 1, \"dec\": 1,\n \"a\": 1, \"b2a\": 1, \"pa\": 1, \"z\": 1,\n \"sfr_fuv\": 1, \"mstar\": 1, \"sfr_ha\": 1,\n \"coordinates.radec_str\": 1}\n\n # first do a coarse search of everything that is around\n object_position_query = dict()\n object_position_query['coordinates.radec_geojson'] = {\n '$geoWithin': {'$centerSphere': [[ra_geojson, dec_geojson], cone_search_radius_clu]}}\n s = 
database[clu_version].find({**object_position_query, **catalog_filter},\n {**catalog_projection})\n galaxies = list(s)\n\n # these guys are very big, so check them separately\n M31 = {'_id': 596900, 'name': 'PGC2557',\n 'ra': 10.6847, 'dec': 41.26901, 'a': 6.35156, 'b2a': 0.32, 'pa': 35.0,\n 'sfr_fuv': None, 'mstar': 253816876.412914, 'sfr_ha': 0,\n 'coordinates': {'radec_geojson': [\"00:42:44.3503\", \"41:16:08.634\"]}\n }\n M33 = {'_id': 597543, 'name': 'PGC5818',\n 'ra': 23.46204, 'dec': 30.66022, 'a': 2.35983, 'b2a': 0.59, 'pa': 23.0,\n 'sfr_fuv': None, 'mstar': 4502777.420493, 'sfr_ha': 0,\n 'coordinates': {'radec_geojson': [\"01:33:50.8900\", \"30:39:36.800\"]}\n }\n\n # do elliptical matches\n matches = []\n\n for galaxy in galaxies + [M31, M33]:\n alpha1, delta01 = galaxy['ra'], galaxy['dec']\n d0, axis_ratio, PA0 = galaxy['a'], galaxy['b2a'], galaxy['pa']\n\n # no shape info for galaxy? replace with median values\n if d0 < -990:\n d0 = 0.0265889\n if axis_ratio < -990:\n axis_ratio = 0.61\n if PA0 < -990:\n PA0 = 86.0\n\n in_galaxy = in_ellipse(ra, dec, alpha1, delta01, size_margin * d0, axis_ratio, PA0)\n\n if in_galaxy:\n match = galaxy\n distance_arcsec = round(great_circle_distance(ra, dec, alpha1, delta01) * 3600, 2)\n match['coordinates']['distance_arcsec'] = distance_arcsec\n matches.append(match)\n\n xmatches[clu_version] = matches\n\n except Exception as e:\n print(*time_stamps(), str(e))\n\n return xmatches\n\n\ndef listener(topic, bootstrap_servers='', offset_reset='earliest',\n group=None, path_alerts=None, path_tess=None, save_packets=True):\n \"\"\"\n Listen to a topic\n :param topic:\n :param bootstrap_servers:\n :param offset_reset:\n :param group:\n :param path_alerts:\n :return:\n \"\"\"\n\n # def error_cb(err):\n # print(*time_stamps(), 'error_cb -------->', err)\n # # print(err.code())\n # if err.code() == -195:\n # print(*time_stamps(), 'got disconnected, killing thread')\n # sys.exit()\n\n # Configure consumer connection to Kafka broker\n conf = {'bootstrap.servers': bootstrap_servers,\n # 'error_cb': error_cb,\n 'default.topic.config': {'auto.offset.reset': offset_reset}}\n if group is not None:\n conf['group.id'] = group\n else:\n conf['group.id'] = os.environ['HOSTNAME'] if 'HOSTNAME' in os.environ else 'kowalski.caltech.edu'\n\n # make it unique:\n conf['group.id'] = '{:s}_{:s}'.format(conf['group.id'], datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f'))\n\n # Configure Avro reader schema\n schema_files = [\"ztf-avro-alert/schema/candidate.avsc\",\n \"ztf-avro-alert/schema/cutout.avsc\",\n \"ztf-avro-alert/schema/light_curve.avsc\",\n \"ztf-avro-alert/schema/alert.avsc\"]\n\n # date string:\n datestr = topic.split('_')[1]\n\n # Start alert stream consumer\n stream_reader = AlertConsumer(topic, schema_files, **conf)\n\n # todo: Subscribe alert filters to stream_readers\n # todo: they will be notified when an alert arrived/got x-matched\n\n while True:\n try:\n # poll!\n # print(*time_stamps(), 'Polling')\n stream_reader.poll(path_alerts=path_alerts, path_tess=path_tess,\n datestr=datestr, save_packets=save_packets)\n\n except EopError as e:\n # Write when reaching end of partition\n # sys.stderr.write(e.message)\n print(*time_stamps(), e.message)\n except IndexError:\n # sys.stderr.write('%% Data cannot be decoded\\n')\n print(*time_stamps(), '%% Data cannot be decoded\\n')\n except UnicodeDecodeError:\n # sys.stderr.write('%% Unexpected data format received\\n')\n print(*time_stamps(), '%% Unexpected data format received\\n')\n except 
KeyboardInterrupt:\n # sys.stderr.write('%% Aborted by user\\n')\n print(*time_stamps(), '%% Aborted by user\\n')\n sys.exit()\n except Exception as e:\n print(*time_stamps(), str(e))\n _err = traceback.format_exc()\n print(*time_stamps(), str(_err))\n sys.exit()\n\n\ndef main(_obs_date=None, _save_packets=True):\n\n topics_on_watch = dict()\n\n while True:\n\n try:\n if True:\n # get kafka topic names with kafka-topics command\n kafka_cmd = [config['kafka-topics']['cmd'],\n '--zookeeper', config['kafka-topics']['zookeeper'], '-list']\n # print(kafka_cmd)\n\n topics = subprocess.run(kafka_cmd, stdout=subprocess.PIPE).stdout.decode('utf-8').split('\\n')[:-1]\n # print(topics)\n\n if _obs_date is None:\n datestr = datetime.datetime.utcnow().strftime('%Y%m%d')\n else:\n datestr = _obs_date\n # as of 20180403 naming convention is ztf_%Y%m%d_programidN\n # topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t)]\n # ZUDS only\n topics_tonight = [t for t in topics if (datestr in t) and ('programid' in t) and ('zuds' in t)]\n print(*time_stamps(), topics_tonight)\n\n if False:\n # for testing\n topics_tonight = ['ztf_20180604_programid3']\n\n for t in topics_tonight:\n if t not in topics_on_watch:\n print(*time_stamps(), f'starting listener thread for {t}')\n offset_reset = config['kafka']['default.topic.config']['auto.offset.reset']\n bootstrap_servers = config['kafka']['bootstrap.servers']\n group = '{:s}'.format(config['kafka']['group'])\n # print(group)\n path_alerts = config['path']['path_alerts']\n path_tess = config['path']['path_tess']\n save_packets = _save_packets\n # topics_on_watch[t] = threading.Thread(target=listener,\n # args=(t, bootstrap_servers,\n # offset_reset, group, path_alerts))\n topics_on_watch[t] = multiprocessing.Process(target=listener,\n args=(t, bootstrap_servers,\n offset_reset, group,\n path_alerts, path_tess,\n save_packets))\n topics_on_watch[t].daemon = True\n topics_on_watch[t].start()\n\n else:\n print(*time_stamps(), f'performing thread health check for {t}')\n try:\n # if not topics_on_watch[t].isAlive():\n if not topics_on_watch[t].is_alive():\n print(*time_stamps(), f'{t} died, removing')\n # topics_on_watch[t].terminate()\n topics_on_watch.pop(t, None)\n else:\n print(*time_stamps(), f'{t} appears normal')\n except Exception as _e:\n print(*time_stamps(), 'Failed to perform health check', str(_e))\n pass\n\n except Exception as e:\n print(*time_stamps(), str(e))\n _err = traceback.format_exc()\n print(*time_stamps(), str(_err))\n\n if _obs_date is None:\n time.sleep(300)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Fetch AVRO packets from Kafka streams and ingest them into DB')\n parser.add_argument('--obsdate', help='observing date')\n parser.add_argument('--noio', help='reduce i/o - do not save packets', action='store_true')\n\n args = parser.parse_args()\n obs_date = args.obsdate\n save = False if args.noio else True\n # print(obs_date)\n\n main(_obs_date=obs_date, _save_packets=save)\n", "import csv\nimport os\nimport glob\nimport time\n# from astropy.coordinates import Angle\nimport numpy as np\nimport pandas as pd\nimport pymongo\nimport inspect\nimport json\nimport argparse\n# import timeout_decorator\nimport signal\nimport traceback\nimport datetime\nimport pytz\nfrom numba import jit\n# import fastavro as avro\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import ProcessPoolExecutor\nimport time\n\n\n''' load config and secrets '''\nwith open('/app/config.json') 
as cjson:\n config = json.load(cjson)\n\nwith open('/app/secrets.json') as sjson:\n secrets = json.load(sjson)\n\nfor k in secrets:\n config[k].update(secrets.get(k, {}))\n\n\ndef utc_now():\n return datetime.datetime.now(pytz.utc)\n\n\ndef connect_to_db():\n \"\"\" Connect to the mongodb database\n\n :return:\n \"\"\"\n try:\n # there's only one instance of DB, it's too big to be replicated\n _client = pymongo.MongoClient(host=config['database']['host'],\n port=config['database']['port'])\n # grab main database:\n _db = _client[config['database']['db']]\n except Exception as _e:\n raise ConnectionRefusedError\n try:\n # authenticate\n _db.authenticate(config['database']['user'], config['database']['pwd'])\n except Exception as _e:\n raise ConnectionRefusedError\n\n return _client, _db\n\n\ndef insert_db_entry(_db, _collection=None, _db_entry=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _collection:\n :param _db_entry:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entry is not None, 'Must specify document'\n try:\n _db[_collection].insert_one(_db_entry)\n except Exception as _e:\n print('Error inserting {:s} into {:s}'.format(str(_db_entry['_id']), _collection))\n traceback.print_exc()\n print(_e)\n\n\ndef insert_multiple_db_entries(_db, _collection=None, _db_entries=None):\n \"\"\"\n Insert a document _doc to collection _collection in DB.\n It is monitored for timeout in case DB connection hangs for some reason\n :param _db:\n :param _collection:\n :param _db_entries:\n :return:\n \"\"\"\n assert _collection is not None, 'Must specify collection'\n assert _db_entries is not None, 'Must specify documents'\n try:\n _db[_collection].insert_many(_db_entries, ordered=False)\n except pymongo.errors.BulkWriteError as bwe:\n print(bwe.details)\n except Exception as _e:\n traceback.print_exc()\n print(_e)\n\n\n@jit\ndef deg2hms(x):\n \"\"\"Transform degrees to *hours:minutes:seconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [0, 360) to be written as a sexagesimal string.\n\n Returns\n -------\n out : str\n The input angle written as a sexagesimal string, in the\n form, hours:minutes:seconds.\n\n \"\"\"\n assert 0.0 <= x < 360.0, 'Bad RA value in degrees'\n # ac = Angle(x, unit='degree')\n # hms = str(ac.to_string(unit='hour', sep=':', pad=True))\n # print(str(hms))\n _h = np.floor(x * 12.0 / 180.)\n _m = np.floor((x * 12.0 / 180. - _h) * 60.0)\n _s = ((x * 12.0 / 180. 
- _h) * 60.0 - _m) * 60.0\n hms = '{:02.0f}:{:02.0f}:{:07.4f}'.format(_h, _m, _s)\n # print(hms)\n return hms\n\n\n@jit\ndef deg2dms(x):\n \"\"\"Transform degrees to *degrees:arcminutes:arcseconds* strings.\n\n Parameters\n ----------\n x : float\n The degree value c [-90, 90] to be converted.\n\n Returns\n -------\n out : str\n The input angle as a string, written as degrees:minutes:seconds.\n\n \"\"\"\n assert -90.0 <= x <= 90.0, 'Bad Dec value in degrees'\n # ac = Angle(x, unit='degree')\n # dms = str(ac.to_string(unit='degree', sep=':', pad=True))\n # print(dms)\n _d = np.floor(abs(x)) * np.sign(x)\n _m = np.floor(np.abs(x - _d) * 60.0)\n _s = np.abs(np.abs(x - _d) * 60.0 - _m) * 60.0\n dms = '{:02.0f}:{:02.0f}:{:06.3f}'.format(_d, _m, _s)\n # print(dms)\n return dms\n\n\ndef process_file(_file, _collection, _batch_size=2048, verbose=False, _dry_run=False):\n\n # connect to MongoDB:\n if verbose:\n print('Connecting to DB')\n _client, _db = connect_to_db()\n if verbose:\n print('Successfully connected')\n\n print(f'processing {_file}')\n documents = []\n batch_num = 1\n\n try:\n df = pd.read_csv(_file)\n\n for index, row in df.iterrows():\n try:\n # nan -> None\n tmp = row.where((pd.notnull(row)), None)\n # convert to dict:\n doc = tmp.to_dict()\n\n doc['_id'] = doc['system_name']\n\n doc['coordinates'] = {}\n doc['coordinates']['epoch'] = 2015.5\n _ra = doc['ra']\n _dec = doc['dec']\n _radec = [_ra, _dec]\n # string format: H:M:S, D:M:S\n # tic = time.time()\n _radec_str = [deg2hms(_ra), deg2dms(_dec)]\n # print(time.time() - tic)\n # print(_radec_str)\n doc['coordinates']['radec_str'] = _radec_str\n # for GeoJSON, must be lon:[-180, 180], lat:[-90, 90] (i.e. in deg)\n _radec_geojson = [_ra - 180.0, _dec]\n doc['coordinates']['radec_geojson'] = {'type': 'Point',\n 'coordinates': _radec_geojson}\n # radians:\n doc['coordinates']['radec'] = [_ra * np.pi / 180.0, _dec * np.pi / 180.0]\n\n # print(doc['coordinates'])\n\n documents.append(doc)\n\n # time.sleep(1)\n\n # insert batch, then flush\n if len(documents) == _batch_size:\n print(f'inserting batch #{batch_num}')\n if not _dry_run:\n insert_multiple_db_entries(_db, _collection=_collection, _db_entries=documents)\n # flush:\n documents = []\n batch_num += 1\n\n except Exception as e:\n traceback.print_exc()\n print(e)\n continue\n\n except Exception as e:\n traceback.print_exc()\n print(e)\n\n # stuff left from the last file?\n if len(documents) > 0:\n print(f'inserting batch #{batch_num}')\n if not _dry_run:\n insert_multiple_db_entries(_db, _collection=_collection, _db_entries=documents)\n\n # disconnect from db:\n try:\n _client.close()\n finally:\n if verbose:\n print('Successfully disconnected from db')\n\n\nif __name__ == '__main__':\n ''' Create command line argument parser '''\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description='')\n\n parser.add_argument('--dryrun', action='store_true', help='enforce execution')\n\n args = parser.parse_args()\n\n dry_run = args.dryrun\n\n # connect to MongoDB:\n print('Connecting to DB')\n client, db = connect_to_db()\n print('Successfully connected')\n\n collection = 'Known_lenses_20180901'\n\n # create 2d index:\n print('Creating 2d index')\n if not dry_run:\n db[collection].create_index([('coordinates.radec_geojson', '2dsphere'),\n ('_id', pymongo.ASCENDING)], background=True)\n\n # number of records to insert\n batch_size = 4096\n\n _location = '/_tmp/'\n\n files = glob.glob(os.path.join(_location, 'known_lenses_wradii.csv'))\n\n 
print(f'# files to process: {len(files)}')\n\n # init threaded operations\n # pool = ThreadPoolExecutor(2)\n # pool = ProcessPoolExecutor(20)\n pool = ProcessPoolExecutor(1)\n\n # for ff in files[::-1]:\n for ff in sorted(files):\n pool.submit(process_file, _file=ff, _collection=collection, _batch_size=batch_size,\n verbose=True, _dry_run=dry_run)\n # process_file(_file=ff, _collection=collection, _batch_size=batch_size,\n # verbose=True, _dry_run=dry_run)\n\n # wait for everything to finish\n pool.shutdown(wait=True)\n\n print('All done')\n" ]
[ [ "numpy.rint", "numpy.sign", "numpy.zeros", "tensorflow.keras.models.load_model", "numpy.floor", "numpy.cos", "numpy.abs", "numpy.expand_dims", "numpy.log10", "numpy.sqrt", "numpy.sin", "numpy.pad", "numpy.nan_to_num", "numpy.linalg.norm" ], [ "numpy.sign", "pandas.read_csv", "pandas.notnull", "numpy.floor", "numpy.abs" ] ]
jshede/Cirq
[ "5db0f6aa8c009735a9ce0b0b7909ffe2532c396d" ]
[ "cirq/google/api/v1/programs.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nfrom typing import (Any, cast, Dict, Iterable, Optional, Sequence, Tuple,\n TYPE_CHECKING)\nimport numpy as np\nimport sympy\n\nfrom cirq import devices, ops, protocols, value\nfrom cirq.schedules import Schedule, ScheduledOperation\nfrom cirq.value import Timestamp\n\nif TYPE_CHECKING:\n import cirq\n from cirq.google import xmon_device\n\n\ndef _load_json_bool(b: Any):\n \"\"\"Converts a json field to bool. If already a bool, pass through.\"\"\"\n if isinstance(b, bool):\n return b\n return json.loads(b)\n\n\ndef gate_to_proto_dict(gate: 'cirq.Gate',\n qubits: Tuple['cirq.Qid', ...]) -> Dict:\n if isinstance(gate, ops.MeasurementGate):\n return _measure_to_proto_dict(gate, qubits)\n\n if isinstance(gate, ops.XPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _x_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.YPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _y_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.PhasedXPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _phased_x_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.ZPowGate):\n if len(qubits) != 1:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _z_to_proto_dict(gate, qubits[0])\n\n if isinstance(gate, ops.CZPowGate):\n if len(qubits) != 2:\n # coverage: ignore\n raise ValueError('Wrong number of qubits.')\n return _cz_to_proto_dict(gate, *qubits)\n\n raise ValueError(\"Don't know how to serialize this gate: {!r}\".format(gate))\n\n\ndef _x_to_proto_dict(gate: 'cirq.XPowGate', q: 'cirq.Qid') -> Dict:\n exp_w = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'axis_half_turns': _parameterized_value_to_proto_dict(0),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_w': exp_w}\n\n\ndef _y_to_proto_dict(gate: 'cirq.YPowGate', q: 'cirq.Qid') -> Dict:\n exp_w = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'axis_half_turns': _parameterized_value_to_proto_dict(0.5),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_w': exp_w}\n\n\ndef _phased_x_to_proto_dict(gate: 'cirq.PhasedXPowGate', q: 'cirq.Qid') -> Dict:\n exp_w = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'axis_half_turns':\n _parameterized_value_to_proto_dict(gate.phase_exponent),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_w': exp_w}\n\n\ndef _z_to_proto_dict(gate: 'cirq.ZPowGate', q: 'cirq.Qid') -> Dict:\n exp_z = {\n 'target': cast(devices.GridQubit, q).to_proto_dict(),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent),\n }\n return {'exp_z': exp_z}\n\n\ndef _cz_to_proto_dict(gate: 'cirq.CZPowGate', p: 'cirq.Qid',\n q: 'cirq.Qid') -> Dict:\n exp_11 = {\n 'target1': 
cast(devices.GridQubit, p).to_proto_dict(),\n 'target2': cast(devices.GridQubit, q).to_proto_dict(),\n 'half_turns': _parameterized_value_to_proto_dict(gate.exponent)\n }\n return {'exp_11': exp_11}\n\n\ndef _measure_to_proto_dict(gate: 'cirq.MeasurementGate',\n qubits: Sequence['cirq.Qid']):\n if len(qubits) == 0:\n raise ValueError('Measurement gate on no qubits.')\n\n invert_mask = None\n if gate.invert_mask:\n invert_mask = gate.invert_mask + (False,) * (gate.num_qubits() -\n len(gate.invert_mask))\n\n if invert_mask and len(invert_mask) != len(qubits):\n raise ValueError('Measurement gate had invert mask of length '\n 'different than number of qubits it acts on.')\n measurement = {\n 'targets': [cast(devices.GridQubit, q).to_proto_dict() for q in qubits],\n 'key': protocols.measurement_key(gate),\n }\n if invert_mask:\n measurement['invert_mask'] = [json.dumps(x) for x in invert_mask]\n return {'measurement': measurement}\n\n\ndef schedule_to_proto_dicts(schedule: Schedule) -> Iterable[Dict]:\n \"\"\"Convert a schedule into an iterable of proto dictionaries.\n\n Args:\n schedule: The schedule to convert to a proto dict. Must contain only\n gates that can be cast to xmon gates.\n\n Yields:\n A proto dictionary corresponding to an Operation proto.\n \"\"\"\n last_time_picos: Optional[int] = None\n for so in schedule.scheduled_operations:\n op = gate_to_proto_dict(\n cast(ops.GateOperation, so.operation).gate, so.operation.qubits)\n time_picos = so.time.raw_picos()\n if last_time_picos is None:\n op['incremental_delay_picoseconds'] = time_picos\n else:\n op['incremental_delay_picoseconds'] = time_picos - last_time_picos\n last_time_picos = time_picos\n yield op\n\n\ndef schedule_from_proto_dicts(\n device: 'xmon_device.XmonDevice',\n ops: Iterable[Dict],\n) -> Schedule:\n \"\"\"Convert proto dictionaries into a Schedule for the given device.\"\"\"\n scheduled_ops = []\n last_time_picos = 0\n for op in ops:\n delay_picos = 0\n if 'incremental_delay_picoseconds' in op:\n delay_picos = op['incremental_delay_picoseconds']\n time_picos = last_time_picos + delay_picos\n last_time_picos = time_picos\n xmon_op = xmon_op_from_proto_dict(op)\n scheduled_ops.append(\n ScheduledOperation.op_at_on(\n operation=xmon_op,\n time=Timestamp(picos=time_picos),\n device=device,\n ))\n return Schedule(device, scheduled_ops)\n\n\ndef pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:\n \"\"\"Pack measurement results into a byte string.\n\n Args:\n measurements: A sequence of tuples, one for each measurement, consisting\n of a string key and an array of boolean data. The data should be\n a 2-D array indexed by (repetition, qubit_index). 
All data for all\n measurements must have the same number of repetitions.\n\n Returns:\n Packed bytes, as described in the unpack_results docstring below.\n\n Raises:\n ValueError if the measurement data do not have the compatible shapes.\n \"\"\"\n if not measurements:\n return b''\n\n shapes = [(key, np.shape(data)) for key, data in measurements]\n if not all(len(shape) == 2 for _, shape in shapes):\n raise ValueError(\"Expected 2-D data: shapes={}\".format(shapes))\n\n reps = shapes[0][1][0]\n if not all(shape[0] == reps for _, shape in shapes):\n raise ValueError(\n \"Expected same reps for all keys: shapes={}\".format(shapes))\n\n bits = np.hstack([np.asarray(data, dtype=bool) for _, data in measurements])\n bits = bits.reshape(-1)\n\n # Pad length to multiple of 8 if needed.\n remainder = len(bits) % 8\n if remainder:\n bits = np.pad(bits, (0, 8 - remainder), 'constant')\n\n # Pack in little-endian bit order.\n bits = bits.reshape((-1, 8))[:, ::-1]\n byte_arr = np.packbits(bits, axis=1).reshape(-1)\n\n return byte_arr.tobytes()\n\n\ndef unpack_results(data: bytes, repetitions: int,\n key_sizes: Sequence[Tuple[str, int]]\n ) -> Dict[str, np.ndarray]:\n \"\"\"Unpack data from a bitstring into individual measurement results.\n\n Args:\n data: Packed measurement results, in the form <rep0><rep1>...\n where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...\n with bits packed in little-endian order in each byte.\n repetitions: number of repetitions.\n key_sizes: Keys and sizes of the measurements in the data.\n\n Returns:\n Dict mapping measurement key to a 2D array of boolean results. Each\n array has shape (repetitions, size) with size for that measurement.\n \"\"\"\n bits_per_rep = sum(size for _, size in key_sizes)\n total_bits = repetitions * bits_per_rep\n\n byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))\n bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)\n bits = bits[:total_bits].reshape((repetitions, bits_per_rep))\n\n results = {}\n ofs = 0\n for key, size in key_sizes:\n results[key] = bits[:, ofs:ofs + size]\n ofs += size\n\n return results\n\n\ndef is_native_xmon_op(op: 'cirq.Operation') -> bool:\n \"\"\"Check if the gate corresponding to an operation is a native xmon gate.\n\n Args:\n op: Input operation.\n\n Returns:\n True if the operation is native to the xmon, false otherwise.\n \"\"\"\n return (isinstance(op, ops.GateOperation) and is_native_xmon_gate(op.gate))\n\n\ndef is_native_xmon_gate(gate: 'cirq.Gate') -> bool:\n \"\"\"Check if a gate is a native xmon gate.\n\n Args:\n gate: Input gate.\n\n Returns:\n True if the gate is native to the xmon, false otherwise.\n \"\"\"\n return isinstance(gate,\n (ops.CZPowGate, ops.MeasurementGate, ops.PhasedXPowGate,\n ops.XPowGate, ops.YPowGate, ops.ZPowGate))\n\n\ndef xmon_op_from_proto_dict(proto_dict: Dict) -> 'cirq.Operation':\n \"\"\"Convert the proto dictionary to the corresponding operation.\n\n See protos in api/google/v1 for specification of the protos.\n\n Args:\n proto_dict: Dictionary representing the proto. 
Keys are always\n strings, but values may be types correspond to a raw proto type\n or another dictionary (for messages).\n\n Returns:\n The operation.\n\n Raises:\n ValueError if the dictionary does not contain required values\n corresponding to the proto.\n \"\"\"\n\n def raise_missing_fields(gate_name: str):\n raise ValueError('{} missing required fields: {}'.format(\n gate_name, proto_dict))\n\n param = _parameterized_value_from_proto_dict\n qubit = devices.GridQubit.from_proto_dict\n if 'exp_w' in proto_dict:\n exp_w = proto_dict['exp_w']\n if ('half_turns' not in exp_w or 'axis_half_turns' not in exp_w or\n 'target' not in exp_w):\n raise_missing_fields('ExpW')\n return ops.PhasedXPowGate(\n exponent=param(exp_w['half_turns']),\n phase_exponent=param(exp_w['axis_half_turns']),\n ).on(qubit(exp_w['target']))\n if 'exp_z' in proto_dict:\n exp_z = proto_dict['exp_z']\n if 'half_turns' not in exp_z or 'target' not in exp_z:\n raise_missing_fields('ExpZ')\n return ops.Z(qubit(exp_z['target']))**param(exp_z['half_turns'])\n if 'exp_11' in proto_dict:\n exp_11 = proto_dict['exp_11']\n if ('half_turns' not in exp_11 or 'target1' not in exp_11 or\n 'target2' not in exp_11):\n raise_missing_fields('Exp11')\n return ops.CZ(qubit(exp_11['target1']),\n qubit(exp_11['target2']))**param(exp_11['half_turns'])\n if 'measurement' in proto_dict:\n meas = proto_dict['measurement']\n invert_mask = cast(Tuple[Any, ...], ())\n if 'invert_mask' in meas:\n invert_mask = tuple(_load_json_bool(x) for x in meas['invert_mask'])\n if 'key' not in meas or 'targets' not in meas:\n raise_missing_fields('Measurement')\n return ops.MeasurementGate(\n num_qubits=len(meas['targets']),\n key=meas['key'],\n invert_mask=invert_mask).on(*[qubit(q) for q in meas['targets']])\n\n raise ValueError('invalid operation: {}'.format(proto_dict))\n\n\ndef _parameterized_value_from_proto_dict(message: Dict) -> value.TParamVal:\n parameter_key = message.get('parameter_key', None)\n if parameter_key:\n return sympy.Symbol(parameter_key)\n if 'raw' in message:\n return message['raw']\n raise ValueError('No value specified for parameterized float. '\n 'Expected \"raw\" or \"parameter_key\" to be set. '\n 'message: {!r}'.format(message))\n\n\ndef _parameterized_value_to_proto_dict(param: value.TParamVal) -> Dict:\n out = {} # type: Dict\n if isinstance(param, sympy.Symbol):\n out['parameter_key'] = str(param.free_symbols.pop())\n else:\n out['raw'] = float(param)\n return out\n" ]
[ [ "numpy.packbits", "numpy.asarray", "numpy.unpackbits", "numpy.shape", "numpy.pad", "numpy.frombuffer" ] ]
gideont/TensorFlow-Object-Detection-API-Tutorial-Train-Multiple-Objects-Windows-10
[ "f8b24ccba44e3a55cc20da2ed0ad44d7ad2216bf" ]
[ "detect_single_image.py" ]
[ "######## Image Object Detection Using Tensorflow-trained Classifier #########\n#\n# Author: Evan Juras\n# Date: 1/15/18\n# Description: \n# This program uses a TensorFlow-trained neural network to perform object detection.\n# It loads the classifier and uses it to perform object detection on an image.\n# It draws boxes, scores, and labels around the objects of interest in the image.\n\n## Some of the code is copied from Google's example at\n## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n\n## and some is copied from Dat Tran's example at\n## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py\n\n## but I changed it to make it more understandable to me.\n\n# Import packages\nimport os\n#os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n#os.environ['CUDA_VISIBLE_DEVICES'] = '1'\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'} to supress warnings\nimport cv2\nimport numpy as np\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nimport logging\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\nimport sys\nimport time\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n# Import utilites\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n# Name of the directory containing the object detection module we're using\nMODEL_NAME = 'inference_graph'\nIMAGE_NAME = 'test_orig.jpg'\nIMAGE_RESULT_NAME = 'test_result.jpg'\n\n# Grab path to current working directory\nCWD_PATH = os.getcwd()\n\n# patch tf1 into `utils.ops`\n#utils_ops.tf = tf.compat.v1\n\n# Patch the location of gfile\ntf.gfile = tf.io.gfile\n\n# Path to frozen detection graph .pb file, which contains the model that is used\n# for object detection.\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\n\n# Path to label map file\nPATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\n\n# Path to image\n#PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)\n\n# Number of classes the object detector can identify\nNUM_CLASSES = 90\n\n# Load the label map.\n# Label maps map indices to category names, so that when our convolution\n# network predicts `5`, we know that this corresponds to `king`.\n# Here we use internal utility functions, but anything that returns a\n# dictionary mapping integers to appropriate string labels would be fine\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n # CPU only\n #sess = tf.compat.v1.Session(graph=detection_graph)\n\n # GPU options to avoid GPU out-of-memory crash\n #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.5) \n #gpu_options = tf.GPUOptions(allow_growth = True)\n # for tf2\n gpu_options = tf.compat.v1.GPUOptions(allow_growth = True)\n sess = tf.compat.v1.Session(graph=detection_graph,config=tf.compat.v1.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n\n# 
Define input and output tensors (i.e. data) for the object detection classifier\n\n# Input tensor is the image\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n# Output tensors are the detection boxes, scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n# Each score represents level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\n# Number of objects detected\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\nstart = time.time()\n# Load image using OpenCV and\n# expand image dimensions to have shape: [1, None, None, 3]\n# i.e. a single-column array, where each item in the column has the pixel RGB value\nPATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)\nprint(\"Detecting objects in file:\", PATH_TO_IMAGE)\nimage = cv2.imread(PATH_TO_IMAGE)\nimage_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage_expanded = np.expand_dims(image_rgb, axis=0)\n\n# Perform the actual detection by running the model with the image as input\n(boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_expanded})\n\n# Draw the results of the detection (aka 'visulaize the results')\n\nvis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4,\n min_score_thresh=0.50)\n\nend = time.time()\nprint(end - start)\n# All the results have been drawn on image. Now display the image.\n#cv2.imshow('Object detector', image)\ncv2.imwrite(IMAGE_RESULT_NAME, image)\nprint(\"Saving result image to: \", IMAGE_RESULT_NAME)\n\n# Clean up\ncv2.destroyAllWindows()\n" ]
[ [ "tensorflow.compat.v1.GPUOptions", "numpy.squeeze", "tensorflow.io.gfile.GFile", "tensorflow.compat.v1.ConfigProto", "tensorflow.Graph", "numpy.expand_dims", "tensorflow.compat.v1.GraphDef", "tensorflow.import_graph_def", "tensorflow.compat.v1.logging.set_verbosity" ] ]
farzana0/pgm_graph_inference
[ "37f1ea68f191d4f3021e7fdc8dd246d945e37ead" ]
[ "experiments/saved_exp_res/exp_helpers.py" ]
[ "\"\"\"\n\nExperiment specifications:\nan experiment is defined by train,test dataset pair,\neach dataset is loaded from graphical_models/datasets.\nAuthors: [email protected]\n\n\"\"\"\n\nimport os\nimport numpy as np\n\nfrom graphical_models import BinaryMRF\nfrom inference import get_algorithm\nfrom graphical_models.data_gen import struct_names\nfrom constants import *\n\n\n# Give specs in form structure->size\n# when used for train, the same is model name\ndata_specs = {\n \"debug\": \n {\"star\": [5],\n \"fc\": []},\n \"larger_debug\": \n {\"star\": [10],\n \"fc\": []},\n}\n\n# add simple datasets\ndata_specs.update({struct+\"_small\": {struct: [9]} for struct in struct_names})\nassert \"star_small\" in data_specs\n\n# add compound datasets\ndata_specs.update({struct+\"_medium\": {struct: [15,16,17]} for struct in struct_names})\ndata_specs.update({\"trees_medium\": {\"star\": [15, 16, 17],\n \"path\": [15, 16, 17],\n },\n \"conn_medium\": {\"bipart\": [15, 16, 17],\n # \"tripart\": [15, 16, 17],\n \"fc\": [15, 16, 17],\n },\n })\ndata_specs.update({\"grid_large\":{\"grid\":[49]},\n \"path_large\": {\"path\": [9,10,100]},\n \"fc_large\": {\"fc\": [15,16,17]},\n \"barbell_large\": {\"barbell\": [15,16,17]},\n \"ladder_large\": {\"ladder\": [15,16,17]},\n \"random_tree_large\": {\"random_tree\": [15,16,17]},\n \"wheel_large\": {\"wheel\": [15,16,17,100]},\n })\n\n\n# Add experiments for part 2: Trees+BP\ndata_specs.update({\"trees_approx\": {\"random_tree\": [100]},\n })\n\n# Add experiments for part 2: NonTrees+MCMC\ndata_specs.update({\"nontrees_approx\": \n {\"barbell\": [100],\n \"fc\": [100]},\n \"barbell_approx\": \n {\"barbell\": [100]},\n \"fc_approx\": \n {\"fc\": [100]}\n })\n\n# Data loading ----------------------------------------------------------------\ndef get_dataset_by_name(specs_name, data_dir, mode=None):\n \"\"\"\n Assumes graphs live as\n graphical_models/datasets/{train/val/test} <-- data_dir\n |-- star/\n | |- 9/<file1.npy>, <file2.npy> ...\n | |- 10/\n |- 11/\n ... ...\n Loads all graphs of given size and structure,\n this needs to be updated in the future\n (so that we can train and test on the same structures)\n\n Arguments:\n specs_name - key to the data_specs dictionary\n data_dir - train or test directory\n mode - map or marginal\n \"\"\"\n if specs_name not in data_specs:\n raise ValueError(\"Specification {} not supported\".format(specs_name))\n specs = data_specs[specs_name]\n graphs = []\n for struct in specs:\n size_list = specs[struct]\n for size in size_list:\n # go to specified dir, load and append\n directory = os.path.join(data_dir, struct, str(size))\n\n for filename in os.listdir(directory):\n if filename.endswith(\".npy\"):\n path_to_graph = os.path.join(directory, filename)\n data_dict = np.load(path_to_graph, allow_pickle=True)[()] # funny indexing\n graph = BinaryMRF(data_dict[\"W\"], data_dict[\"b\"])\n graph.set_ground_truth(marginal_est=data_dict[\"marginal\"],\n map_est=data_dict[\"map\"])\n graph.struct = struct\n graphs.append(graph)\n\n if mode is not None:\n graphs = [g for g in graphs if getattr(g, mode) is not None]\n print(\"Loaded {} graphs\".format(len(graphs)))\n return graphs\n\n\n# Some simple checks ----------------------------------------------------------\nif __name__ == \"__main__\":\n train_data = get_dataset_by_name(\"debug\")\n print(train_data[0])\n print(\"W, b:\", train_data[0].W, train_data[0].b)\n print(\"Marginals:\", train_data[0].marginal)\n print(\"MAP:\", train_data[0].map)\n\n" ]
[ [ "numpy.load" ] ]
jasperroebroek/sklearn-quantile
[ "d357240527f32b04b0fec3dcd308bb23de517209" ]
[ "tests/test_weighted_quantile.py" ]
[ "import numpy as np\nimport pytest\n\nfrom sklearn_quantile.utils import weighted_quantile\n\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_almost_equal\nfrom numpy.testing import assert_raises\n\n\ndef test_quantile_equal_weights():\n rng = np.random.RandomState(0)\n x = rng.randn(10)\n weights = 0.1 * np.ones(10)\n\n # since weights are equal, quantiles lie in the midpoint.\n sorted_x = np.sort(x)\n expected = 0.5 * (sorted_x[1:] + sorted_x[:-1])\n actual = np.asarray([weighted_quantile(x, q, weights) for q in np.arange(0.1, 1.0, 0.1)])\n\n assert_array_almost_equal(expected, actual)\n\n # check quantiles at (0.05, 0.95) at intervals of 0.1\n actual = np.asarray([weighted_quantile(x, q, weights) for q in np.arange(0.05, 1.05, 0.1)])\n assert_array_almost_equal(sorted_x, actual)\n\n # it should be the same the calculated all quantiles at the same time instead of looping over them\n assert_array_almost_equal(actual, weighted_quantile(x, weights=weights, q=np.arange(0.05, 1.05, 0.1)))\n\n\ndef test_quantile_toy_data():\n x = [1, 2, 3]\n weights = [1, 4, 5]\n\n assert_equal(weighted_quantile(x, 0.0, weights), 1)\n assert_equal(weighted_quantile(x, 1.0, weights), 3)\n\n assert_equal(weighted_quantile(x, 0.05, weights), 1)\n assert_almost_equal(weighted_quantile(x, 0.30, weights), 2)\n assert_equal(weighted_quantile(x, 0.75, weights), 3)\n assert_almost_equal(weighted_quantile(x, 0.50, weights), 2.44, 2)\n\n\[email protected]('q', [0, 0.1, 0.5, 0.9, 1])\ndef test_zero_weights(q):\n x = [1, 2, 3, 4, 5]\n w = [0, 0, 0, 0.1, 0.1]\n\n assert_equal(\n weighted_quantile(x, q, w),\n weighted_quantile([4, 5], q, [0.1, 0.1])\n )\n\n\[email protected](\"keepdims\", [True, False])\ndef test_return_shapes(keepdims):\n rng = np.random.RandomState(0)\n x = rng.randn(100, 10, 20)\n weights = 0.01 * np.ones_like(x)\n\n # shape should be the same as the output of np.quantile. 
Without weights it is actually the same calculation\n assert (\n weighted_quantile(x, 0.5, weights, axis=0, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=0, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, 0.5, weights, axis=1, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=1, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, 0.5, weights, axis=2, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=2, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, (0.5, 0.8), weights, axis=0, keepdims=keepdims).shape ==\n np.quantile(x, (0.5, 0.8), axis=0, keepdims=keepdims).shape\n )\n if keepdims:\n assert (\n weighted_quantile(x, 0.5, weights, axis=None, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=None, keepdims=keepdims).shape\n )\n else:\n assert isinstance(weighted_quantile(x, 0.5, weights, axis=None, keepdims=keepdims), (np.float32, float))\n\n\[email protected](\"keepdims\", [True, False])\ndef test_return_shapes_empty_dims(keepdims):\n rng = np.random.RandomState(0)\n x = rng.randn(1, 100, 1)\n weights = 0.01 * np.ones_like(x)\n\n assert (\n weighted_quantile(x, 0.5, weights, axis=1, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=1, keepdims=keepdims).shape\n )\n assert (\n weighted_quantile(x, 0.5, weights=None, axis=1, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, axis=1, keepdims=keepdims).shape\n )\n\n if keepdims:\n assert (\n weighted_quantile(x, 0.5, weights, keepdims=keepdims).shape ==\n np.quantile(x, 0.5, keepdims=keepdims).shape\n )\n\n\ndef test_errors():\n rng = np.random.RandomState(0)\n x = rng.randn(100, 10, 20)\n weights = 0.01 * np.ones_like(x)\n\n # axis should be integer\n assert_raises(NotImplementedError, weighted_quantile, x, 0.5, weights, axis=(1, 2))\n" ]
[ [ "numpy.testing.assert_raises", "numpy.ones", "numpy.quantile", "numpy.ones_like", "numpy.random.RandomState", "numpy.arange", "numpy.testing.assert_array_almost_equal", "numpy.sort" ] ]
ghanashyamchalla/cis_interface
[ "7b59439276eacb66f1f6ea4177d3a85cc061eed5" ]
[ "yggdrasil/drivers/CModelDriver.py" ]
[ "import os\nimport re\nimport warnings\nimport copy\nimport shutil\nimport subprocess\nimport numpy as np\nimport sysconfig\nfrom collections import OrderedDict\nfrom yggdrasil import platform, tools\nfrom yggdrasil.drivers.CompiledModelDriver import (\n CompiledModelDriver, CompilerBase, ArchiverBase)\nfrom yggdrasil.metaschema.properties.ScalarMetaschemaProperties import (\n _valid_types)\nfrom yggdrasil.languages import get_language_dir\nfrom yggdrasil.config import ygg_cfg\nfrom numpy import distutils as numpy_distutils\n\n\n_default_internal_libtype = 'object'\n# if platform._is_win: # pragma: windows\n# _default_internal_libtype = 'static'\n\n\ndef get_OSX_SYSROOT():\n r\"\"\"Determin the path to the OSX SDK.\n\n Returns:\n str: Full path to the SDK directory if one is located. None\n otherwise.\n\n \"\"\"\n fname = None\n if platform._is_mac:\n try:\n xcode_dir = subprocess.check_output(\n 'echo \"$(xcode-select -p)\"', shell=True).decode(\"utf-8\").strip()\n except BaseException: # pragma: debug\n xcode_dir = None\n fname_try = []\n cfg_sdkroot = ygg_cfg.get('c', 'macos_sdkroot', None)\n if cfg_sdkroot:\n fname_try.append(cfg_sdkroot)\n if xcode_dir is not None:\n fname_base = os.path.join(xcode_dir, 'Platforms',\n 'MacOSX.platform', 'Developer',\n 'SDKs', 'MacOSX%s.sdk')\n fname_try += [\n fname_base % os.environ.get('MACOSX_DEPLOYMENT_TARGET', ''),\n fname_base % '',\n os.path.join(xcode_dir, 'SDKs', 'MacOSX.sdk')]\n if os.environ.get('SDKROOT', False):\n fname_try.insert(0, os.environ['SDKROOT'])\n for fcheck in fname_try:\n if os.path.isdir(fcheck):\n fname = fcheck\n break\n return fname\n\n\n_osx_sysroot = get_OSX_SYSROOT()\n\n\nclass CCompilerBase(CompilerBase):\n r\"\"\"Base class for C compilers.\"\"\"\n languages = ['c']\n default_executable_env = 'CC'\n default_flags_env = 'CFLAGS'\n default_flags = ['-g', '-Wall']\n # GCC & CLANG have similar call patterns\n linker_attributes = {'default_flags_env': 'LDFLAGS',\n 'search_path_envvar': ['LIBRARY_PATH', 'LD_LIBRARY_PATH']}\n search_path_envvar = ['C_INCLUDE_PATH']\n search_path_flags = ['-E', '-v', '-xc', '/dev/null']\n search_regex_begin = '#include \"...\" search starts here:'\n search_regex_end = 'End of search list.'\n search_regex = [r'(?:#include <...> search starts here:)|'\n r'(?: ([^\\n]+?)(?: \\(framework directory\\))?)\\n']\n\n @staticmethod\n def before_registration(cls):\n r\"\"\"Operations that should be performed to modify class attributes prior\n to registration including things like platform dependent properties and\n checking environment variables for default settings.\n \"\"\"\n if platform._is_mac:\n cls.linker_attributes = dict(cls.linker_attributes,\n search_path_flags=['-Xlinker', '-v'],\n search_regex=[r'\\t([^\\t\\n]+)\\n'],\n search_regex_begin='Library search paths:')\n elif platform._is_linux:\n cls.linker_attributes = dict(cls.linker_attributes,\n search_path_flags=['-Xlinker', '--verbose'],\n search_regex=[r'SEARCH_DIR\\(\"=([^\"]+)\"\\);'])\n CompilerBase.before_registration(cls)\n\n @classmethod\n def set_env(cls, *args, **kwargs):\n r\"\"\"Set environment variables required for compilation.\n\n Args:\n *args: Arguments are passed to the parent class's method.\n **kwargs: Keyword arguments are passed to the parent class's\n method.\n\n Returns:\n dict: Environment variables for the model process.\n\n \"\"\"\n out = super(CCompilerBase, cls).set_env(*args, **kwargs)\n if _osx_sysroot is not None:\n out['CONDA_BUILD_SYSROOT'] = _osx_sysroot\n out['SDKROOT'] = _osx_sysroot\n grp = 
re.search(r'MacOSX(?P<target>[0-9]+\\.[0-9]+)?',\n _osx_sysroot).groupdict()\n # This is only utilized on local installs where a\n # non-default SDK is installed in addition to the default\n if grp['target']: # pragma: debug\n out['MACOSX_DEPLOYMENT_TARGET'] = grp['target']\n return out\n \n @classmethod\n def call(cls, args, **kwargs):\n r\"\"\"Call the compiler with the provided arguments. For |yggdrasil| C\n models will always be linked using the C++ linker since some parts of\n the interface library are written in C++.\"\"\"\n if not kwargs.get('dont_link', False):\n kwargs.setdefault('linker_language', 'c++')\n return super(CCompilerBase, cls).call(args, **kwargs)\n \n\nclass GCCCompiler(CCompilerBase):\n r\"\"\"Interface class for gcc compiler/linker.\"\"\"\n toolname = 'gcc'\n platforms = ['MacOS', 'Linux', 'Windows']\n default_archiver = 'ar'\n\n\nclass ClangCompiler(CCompilerBase):\n r\"\"\"clang compiler on Apple Mac OS.\"\"\"\n toolname = 'clang'\n platforms = ['MacOS']\n default_archiver = 'libtool'\n flag_options = OrderedDict(list(CCompilerBase.flag_options.items())\n + [('sysroot', '--sysroot'),\n ('isysroot', {'key': '-isysroot',\n 'prepend': True}),\n ('mmacosx-version-min',\n '-mmacosx-version-min=%s')])\n\n\nclass MSVCCompiler(CCompilerBase):\n r\"\"\"Microsoft Visual Studio C Compiler.\"\"\"\n toolname = 'cl'\n languages = ['c', 'c++']\n platforms = ['Windows']\n default_flags_env = ['CFLAGS', 'CXXFLAGS']\n # TODO: Currently everything compiled as C++ on windows to allow use\n # of complex types. Use '/TC' instead of '/TP' for strictly C\n default_flags = ['/W4', # Display all errors\n '/Zi', # Symbolic debug in .pdb (implies debug)\n # '/MTd', # Use LIBCMTD.lib to create multithreaded .exe\n # '/Z7', # Symbolic debug in .obj (implies debug)\n \"/EHsc\", # Catch C++ exceptions only (C don't throw C++)\n '/TP', # Treat all files as C++\n \"/nologo\", # Suppress startup banner\n # Don't show errors from using scanf, strcpy, etc.\n \"-D_CRT_SECURE_NO_WARNINGS\"]\n output_key = '/Fo%s'\n output_first = True\n default_linker = 'LINK'\n default_archiver = 'LIB'\n linker_switch = '/link'\n search_path_envvar = 'INCLUDE'\n search_path_flags = None\n version_flags = []\n product_exts = ['.dir', '.ilk', '.pdb', '.sln', '.vcxproj', '.vcxproj.filters']\n combine_with_linker = True # Must be explicit; linker is separate .exe\n linker_attributes = dict(GCCCompiler.linker_attributes,\n default_executable=None,\n default_executable_env=None,\n default_flags_env=None,\n output_key='/OUT:%s',\n output_first=True,\n output_first_library=False,\n flag_options=OrderedDict(\n [('library_libs', ''),\n ('library_dirs', '/LIBPATH:%s')]),\n shared_library_flag='/DLL',\n search_path_envvar='LIB',\n search_path_flags=None)\n \n @classmethod\n def language_version(cls, **kwargs): # pragma: windows\n r\"\"\"Determine the version of this language.\n\n Args:\n **kwargs: Keyword arguments are passed to cls.call.\n\n Returns:\n str: Version of compiler/interpreter for this language.\n\n \"\"\"\n out = cls.call(cls.version_flags, skip_flags=True,\n allow_error=True, **kwargs)\n if 'Copyright' not in out: # pragma: debug\n raise RuntimeError(\"Version call failed: %s\" % out)\n return out.split('Copyright')[0]\n\n \n# C Archivers\nclass ARArchiver(ArchiverBase):\n r\"\"\"Archiver class for ar tool.\"\"\"\n toolname = 'ar'\n languages = ['c', 'c++']\n default_executable_env = 'AR'\n default_flags_env = None\n static_library_flag = 'rcs'\n output_key = ''\n output_first_library = True\n\n\nclass 
LibtoolArchiver(ArchiverBase):\n r\"\"\"Archiver class for libtool tool.\"\"\"\n toolname = 'libtool'\n languages = ['c', 'c++']\n default_executable_env = 'LIBTOOL'\n static_library_flag = '-static' # This is the default\n \n\nclass MSVCArchiver(ArchiverBase):\n r\"\"\"Microsoft Visual Studio C Archiver.\"\"\"\n toolname = 'LIB'\n languages = ['c', 'c++']\n platforms = ['Windows']\n static_library_flag = None\n output_key = '/OUT:%s'\n \n\n_top_lang_dir = get_language_dir('c')\n_incl_interface = _top_lang_dir\n_incl_seri = os.path.join(_top_lang_dir, 'serialize')\n_incl_comm = os.path.join(_top_lang_dir, 'communication')\n_python_inc = ygg_cfg.get('c', 'python_include', None)\nif (_python_inc is None) or (not os.path.isfile(_python_inc)): # pragma: no cover\n _python_inc = sysconfig.get_paths()['include']\nelse:\n _python_inc = os.path.dirname(_python_inc)\ntry:\n _python_lib = ygg_cfg.get('c', 'python_shared',\n ygg_cfg.get('c', 'python_static', None))\n if (_python_lib is None) or (not os.path.isfile(_python_lib)): # pragma: no cover\n _python_lib = tools.get_python_c_library(allow_failure=False)\nexcept BaseException: # pragma: debug\n warnings.warn(\"ERROR LOCATING PYTHON LIBRARY\")\n _python_lib = None\n_numpy_inc = numpy_distutils.misc_util.get_numpy_include_dirs()\n_numpy_lib = None\n\n\nclass CModelDriver(CompiledModelDriver):\n r\"\"\"Class for running C models.\"\"\"\n\n _schema_subtype_description = ('Model is written in C.')\n language = 'c'\n language_ext = ['.c', '.h']\n interface_library = 'ygg'\n supported_comms = ['ipc', 'zmq']\n supported_comm_options = {\n 'ipc': {'platforms': ['MacOS', 'Linux']},\n 'zmq': {'libraries': ['zmq', 'czmq']}}\n interface_dependencies = ['rapidjson']\n interface_directories = [_incl_interface]\n external_libraries = {\n 'rapidjson': {'include': os.path.join(os.path.dirname(tools.__file__),\n 'rapidjson', 'include',\n 'rapidjson', 'rapidjson.h'),\n 'libtype': 'header_only',\n 'language': 'c'},\n 'zmq': {'include': 'zmq.h',\n 'libtype': 'shared',\n 'language': 'c'},\n 'czmq': {'include': 'czmq.h',\n 'libtype': 'shared',\n 'language': 'c'},\n 'numpy': {'include': os.path.join(_numpy_inc[0], 'numpy',\n 'arrayobject.h'),\n 'libtype': 'header_only',\n 'language': 'c'},\n 'python': {'include': os.path.join(_python_inc, 'Python.h'),\n 'language': 'c'}}\n internal_libraries = {\n 'ygg': {'source': os.path.join(_incl_interface, 'YggInterface.c'),\n 'linker_language': 'c++', # Some dependencies are C++\n 'internal_dependencies': ['regex', 'datatypes'],\n 'external_dependencies': ['rapidjson',\n 'python', 'numpy'],\n 'include_dirs': [_incl_comm, _incl_seri],\n 'compiler_flags': []},\n 'regex_win32': {'source': 'regex_win32.cpp',\n 'directory': os.path.join(_top_lang_dir, 'regex'),\n 'language': 'c++',\n 'libtype': _default_internal_libtype,\n 'internal_dependencies': [],\n 'external_dependencies': []},\n 'regex_posix': {'source': 'regex_posix.h',\n 'directory': os.path.join(_top_lang_dir, 'regex'),\n 'language': 'c',\n 'libtype': 'header_only',\n 'internal_dependencies': [],\n 'external_dependencies': []},\n 'datatypes': {'directory': os.path.join(_top_lang_dir, 'datatypes'),\n 'language': 'c++',\n 'libtype': _default_internal_libtype,\n 'internal_dependencies': ['regex'],\n 'external_dependencies': ['rapidjson',\n 'python', 'numpy'],\n 'include_dirs': []}}\n type_map = {\n 'int': 'intX_t',\n 'float': 'double',\n 'string': 'string_t',\n 'array': 'json_array_t',\n 'object': 'json_object_t',\n 'boolean': 'bool',\n 'null': 'void*',\n 'uint': 'uintX_t',\n 
'complex': 'complex_X',\n 'bytes': 'char*',\n 'unicode': 'unicode_t',\n '1darray': '*',\n 'ndarray': '*',\n 'ply': 'ply_t',\n 'obj': 'obj_t',\n 'schema': 'schema_t',\n 'flag': 'int',\n 'class': 'python_class_t',\n 'function': 'python_function_t',\n 'instance': 'python_instance_t',\n 'any': 'generic_t'}\n function_param = {\n 'import': '#include \\\"{filename}\\\"',\n 'index': '{variable}[{index}]',\n 'interface': '#include \\\"{interface_library}\\\"',\n 'input': ('yggInput_t {channel} = yggInputType('\n '\\\"{channel_name}\\\", {channel_type});'),\n 'output': ('yggOutput_t {channel} = yggOutputType('\n '\\\"{channel_name}\\\", {channel_type});'),\n 'recv_heap': 'yggRecvRealloc',\n 'recv_stack': 'yggRecv',\n 'recv_function': 'yggRecvRealloc',\n 'send_function': 'yggSend',\n 'not_flag_cond': '{flag_var} < 0',\n 'flag_cond': '{flag_var} >= 0',\n 'declare': '{type_name} {variable};',\n 'init_array': 'init_json_array()',\n 'init_object': 'init_json_object()',\n 'init_schema': 'init_schema()',\n 'init_ply': 'init_ply()',\n 'init_obj': 'init_obj()',\n 'init_class': 'init_python()',\n 'init_function': 'init_python()',\n 'init_instance': 'init_generic()',\n 'init_any': 'init_generic()',\n 'copy_array': '{name} = copy_json_array({value});',\n 'copy_object': '{name} = copy_json_object({value});',\n 'copy_schema': '{name} = copy_schema({value});',\n 'copy_ply': '{name} = copy_ply({value});',\n 'copy_obj': '{name} = copy_obj({value});',\n 'copy_class': '{name} = copy_python({value});',\n 'copy_function': '{name} = copy_python({value});',\n 'copy_instance': '{name} = copy_generic({value});',\n 'copy_any': '{name} = copy_generic({value});',\n 'free_array': 'free_json_array({variable});',\n 'free_object': 'free_json_object({variable});',\n 'free_schema': 'free_schema({variable});',\n 'free_ply': 'free_ply({variable});',\n 'free_obj': 'free_obj({variable});',\n 'free_class': 'destroy_python({variable});',\n 'free_function': 'destroy_python({variable});',\n 'free_instance': 'free_generic({variable});',\n 'free_any': 'free_generic({variable});',\n 'print_float': 'printf(\"%f\\\\n\", {object});',\n 'print_int': 'printf(\"%i\\\\n\", {object});',\n 'print_uint': 'printf(\"%u\\\\n\", {object});',\n 'print_string': 'printf(\"%s\\\\n\", {object});',\n 'print_unicode': 'printf(\"%s\\\\n\", {object});',\n 'print_bytes': 'printf(\"%s\\\\n\", {object});',\n 'print_complex': 'print_complex({object});',\n 'print_array': 'display_json_array({object});',\n 'print_object': 'display_json_object({object});',\n 'print_schema': 'display_schema({object});',\n 'print_ply': 'display_ply({object});',\n 'print_obj': 'display_obj({object});',\n 'print_class': 'display_python({object});',\n 'print_function': 'display_python({object});',\n 'print_instance': 'display_generic({object});',\n 'print_any': 'display_generic({object});',\n 'assign': '{name} = {value};',\n 'assign_copy': 'memcpy({name}, {value}, {N}*sizeof({native_type}));',\n 'comment': '//',\n 'true': '1',\n 'false': '0',\n 'not': '!',\n 'and': '&&',\n 'indent': 2 * ' ',\n 'quote': '\\\"',\n 'print': 'printf(\\\"{message}\\\\n\\\");',\n 'fprintf': 'printf(\\\"{message}\\\\n\\\", {variables});',\n 'error': 'printf(\\\"{error_msg}\\\\n\\\"); return -1;',\n 'block_end': '}',\n 'line_end': ';',\n 'if_begin': 'if ({cond}) {{',\n 'if_elif': '}} else if ({cond}) {{',\n 'if_else': '}} else {{',\n 'for_begin': ('for ({iter_var} = {iter_begin}; {iter_var} < {iter_end}; '\n '{iter_var}++) {{'),\n 'while_begin': 'while ({cond}) {{',\n 'break': 'break;',\n 'exec_begin': 'int main() 
{',\n 'exec_end': ' return 0;\\n}',\n 'exec_prefix': '#include <stdbool.h>',\n 'free': 'if ({variable} != NULL) {{ free({variable}); {variable} = NULL; }}',\n 'function_def_begin': '{output_type} {function_name}({input_var}) {{',\n 'return': 'return {output_var};',\n 'function_def_regex': (\n r'(?P<flag_type>.+?)\\s*{function_name}\\s*'\n r'\\((?P<inputs>(?:[^{{])*?)\\)\\s*\\{{'\n r'(?P<body>(?:.*?\\n?)*?)'\n r'(?:(?:return *(?P<flag_var>.+?)?;(?:.*?\\n?)*?\\}})'\n r'|(?:\\}}))'),\n 'inputs_def_regex': (\n r'\\s*(?P<native_type>(?:[^\\s\\*])+(\\s+)?'\n r'(?P<ptr>\\*+)?)(?(ptr)(?(1)(?:\\s*)|(?:\\s+)))'\n r'(\\((?P<name_ptr>\\*+)?)?(?P<name>.+?)(?(4)(?:\\)))'\n r'(?P<shape>(?:\\[.+?\\])+)?\\s*(?:,|$)(?:\\n)?'),\n 'outputs_def_regex': (\n r'\\s*(?P<native_type>(?:[^\\s\\*])+(\\s+)?'\n r'(?P<ptr>\\*+)?)(?(ptr)(?(1)(?:\\s*)|(?:\\s+)))'\n r'(?P<name>.+?)(?P<shape>(?:\\[.+?\\])+)?\\s*(?:,|$)(?:\\n)?')}\n outputs_in_inputs = True\n include_channel_obj = True\n is_typed = True\n brackets = (r'{', r'}')\n\n @staticmethod\n def after_registration(cls, **kwargs):\n r\"\"\"Operations that should be performed to modify class attributes after\n registration.\"\"\"\n if cls.default_compiler is None:\n if platform._is_linux:\n cls.default_compiler = 'gcc'\n elif platform._is_mac:\n cls.default_compiler = 'clang'\n elif platform._is_win: # pragma: windows\n cls.default_compiler = 'cl'\n CompiledModelDriver.after_registration(cls, **kwargs)\n if kwargs.get('second_pass', False):\n return\n if _python_lib:\n if _python_lib.endswith(('.lib', '.a')):\n cls.external_libraries['python']['libtype'] = 'static'\n cls.external_libraries['python']['static'] = _python_lib\n else:\n cls.external_libraries['python']['libtype'] = 'shared'\n cls.external_libraries['python']['shared'] = _python_lib\n for x in ['zmq', 'czmq']:\n if x in cls.external_libraries:\n if platform._is_win: # pragma: windows\n cls.external_libraries[x]['libtype'] = 'static'\n # Platform specific regex internal library\n if platform._is_win: # pragma: windows\n regex_lib = cls.internal_libraries['regex_win32']\n else:\n regex_lib = cls.internal_libraries['regex_posix']\n cls.internal_libraries['regex'] = regex_lib\n # Platform specific internal library options\n cls.internal_libraries['ygg']['include_dirs'] += [_top_lang_dir]\n if platform._is_win: # pragma: windows\n stdint_win = os.path.join(_top_lang_dir, 'windows_stdint.h')\n assert(os.path.isfile(stdint_win))\n shutil.copy(stdint_win, os.path.join(_top_lang_dir, 'stdint.h'))\n cls.internal_libraries['datatypes']['include_dirs'] += [_top_lang_dir]\n if platform._is_linux:\n for x in ['ygg', 'datatypes']:\n if 'compiler_flags' not in cls.internal_libraries[x]:\n cls.internal_libraries[x]['compiler_flags'] = []\n if '-fPIC' not in cls.internal_libraries[x]['compiler_flags']:\n cls.internal_libraries[x]['compiler_flags'].append('-fPIC')\n \n @classmethod\n def configure(cls, cfg, macos_sdkroot=None):\n r\"\"\"Add configuration options for this language. This includes locating\n any required external libraries and setting option defaults.\n\n Args:\n cfg (YggConfigParser): Config class that options should be set for.\n macos_sdkroot (str, optional): Full path to the root directory for\n the MacOS SDK that should be used. 
Defaults to None and is\n ignored.\n\n Returns:\n list: Section, option, description tuples for options that could not\n be set.\n\n \"\"\"\n # Call __func__ to avoid direct invoking of class which dosn't exist\n # in after_registration where this is called\n out = CompiledModelDriver.configure.__func__(cls, cfg)\n # Change configuration to be directory containing include files\n rjlib = cfg.get(cls._language, 'rapidjson_include', None)\n if (rjlib is not None) and os.path.isfile(rjlib):\n cfg.set(cls._language, 'rapidjson_include',\n os.path.dirname(os.path.dirname(rjlib)))\n nplib = cfg.get(cls._language, 'numpy_include', None)\n if (nplib is not None) and os.path.isfile(nplib):\n cfg.set(cls._language, 'numpy_include',\n os.path.dirname(os.path.dirname(nplib)))\n if macos_sdkroot is None:\n macos_sdkroot = _osx_sysroot\n if macos_sdkroot is not None:\n if not os.path.isdir(macos_sdkroot): # pragma: debug\n raise ValueError(\"Path to MacOS SDK root directory \"\n \"does not exist: %s.\" % macos_sdkroot)\n cfg.set(cls._language, 'macos_sdkroot', macos_sdkroot)\n return out\n\n @classmethod\n def call_linker(cls, obj, language=None, **kwargs):\n r\"\"\"Link several object files to create an executable or library (shared\n or static), checking for errors.\n\n Args:\n obj (list): Object files that should be linked.\n language (str, optional): Language that should be used to link\n the files. Defaults to None and the language of the current\n driver is used.\n **kwargs: Additional keyword arguments are passed to run_executable.\n\n Returns:\n str: Full path to compiled source.\n\n \"\"\"\n if (((cls.language == 'c') and (language is None)\n and kwargs.get('for_model', False)\n and (not kwargs.get('skip_interface_flags', False)))):\n language = 'c++'\n kwargs.update(cls.update_linker_kwargs(**kwargs))\n kwargs['skip_interface_flags'] = True\n return super(CModelDriver, cls).call_linker(obj, language=language,\n **kwargs)\n \n @classmethod\n def update_ld_library_path(cls, env, paths_to_add=None, add_to_front=False):\n r\"\"\"Update provided dictionary of environment variables so that\n LD_LIBRARY_PATH includes the interface directory containing the interface\n libraries.\n\n Args:\n env (dict): Dictionary of enviroment variables to be updated.\n paths_to_add (list, optional): Paths that should be added. If not\n provided, defaults to [cls.get_language_dir()].\n add_to_front (bool, optional): If True, new paths are added to the\n front, rather than the end. 
Defaults to False.\n\n Returns:\n dict: Updated dictionary of environment variables.\n\n \"\"\"\n if paths_to_add is None:\n paths_to_add = [cls.get_language_dir()]\n if platform._is_linux:\n path_list = []\n prev_path = env.pop('LD_LIBRARY_PATH', '')\n if prev_path:\n path_list.append(prev_path)\n for x in paths_to_add:\n if x not in prev_path:\n if add_to_front:\n path_list.insert(0, x)\n else:\n path_list.append(x)\n if path_list:\n env['LD_LIBRARY_PATH'] = os.pathsep.join(path_list)\n return env\n\n def set_env(self, **kwargs):\n r\"\"\"Get environment variables that should be set for the model process.\n\n Args:\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n dict: Environment variables for the model process.\n\n \"\"\"\n out = super(CModelDriver, self).set_env(**kwargs)\n out = self.update_ld_library_path(out)\n if platform._is_win: # pragma: windows\n out.setdefault('PYTHONHOME', sysconfig.get_config_var('prefix'))\n out.setdefault('PYTHONPATH', os.pathsep.join([\n sysconfig.get_path('stdlib'), sysconfig.get_path('purelib'),\n os.path.join(sysconfig.get_config_var('prefix'), 'DLLs')]))\n return out\n \n @classmethod\n def parse_var_definition(cls, io, value, **kwargs):\n r\"\"\"Extract information about input/output variables from a\n string definition.\n\n Args:\n io (str): Description of variables contained in the provided\n string. Must be 'inputs' or 'outputs'.\n value (str): String containing one or more variable definitions.\n **kwargs: Additional keyword arguments are passed to the\n parent class's method.\n\n Returns:\n list: List of information about the variables contained in\n the provided string.\n\n Raises:\n AssertionError: If io is not 'inputs' or 'outputs'.\n NotImplementedError: If the def_regex for the specified\n io is not defined.\n\n \"\"\"\n out = super(CModelDriver, cls).parse_var_definition(io, value, **kwargs)\n io_map = {x['name']: x for x in out}\n for i, x in enumerate(out):\n if (x['name'] + '_length') in io_map:\n x['length_var'] = x['name'] + '_length'\n elif ('length_' + x['name']) in io_map:\n x['length_var'] = 'length_' + x['name']\n elif (((x['name'] + '_ndim') in io_map)\n and ((x['name'] + '_shape') in io_map)):\n x['ndim_var'] = x['name'] + '_ndim'\n x['shape_var'] = x['name'] + '_shape'\n x['datatype']['type'] = 'ndarray'\n elif ((('ndim_' + x['name']) in io_map)\n and (('shape_' + x['name']) in io_map)):\n x['ndim_var'] = 'ndim_' + x['name']\n x['shape_var'] = 'shape_' + x['name']\n x['datatype']['type'] = 'ndarray'\n elif 'shape' in x:\n x['datatype']['shape'] = [\n int(float(s.strip('[]')))\n for s in x.pop('shape').split('][')]\n assert(x['datatype']['subtype'] in _valid_types)\n if len(x['datatype']['shape']) == 1:\n x['datatype']['length'] = x['datatype'].pop(\n 'shape')[0]\n x['datatype']['type'] = '1darray'\n else:\n x['datatype']['type'] = 'ndarray'\n return out\n \n @classmethod\n def update_io_from_function(cls, model_file, model_function,\n inputs=[], outputs=[], contents=None,\n outputs_in_inputs=None):\n r\"\"\"Update inputs/outputs from the function definition.\n\n Args:\n model_file (str): Full path to the file containing the model\n function's declaration.\n model_function (str): Name of the model function.\n inputs (list, optional): List of model inputs including types.\n Defaults to [].\n outputs (list, optional): List of model outputs including types.\n Defaults to [].\n contents (str, optional): Contents of file to parse rather than\n re-reading the file. 
Defaults to None and is ignored.\n outputs_in_inputs (bool, optional): If True, the outputs are\n presented in the function definition as inputs. Defaults\n to False.\n\n Returns:\n dict, None: Flag variable used by the model. If None, the\n model does not use a flag variable.\n\n \"\"\"\n flag_var = super(CModelDriver, cls).update_io_from_function(\n model_file, model_function, inputs=inputs,\n outputs=outputs, contents=contents,\n outputs_in_inputs=outputs_in_inputs)\n # Add length_vars if missing for use by yggdrasil\n for x in inputs:\n for v in x['vars']:\n if cls.requires_length_var(v) and (not v.get('length_var', False)):\n v['length_var'] = {'name': v['name'] + '_length',\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True,\n 'dependent': True}\n elif cls.requires_shape_var(v):\n if not (v.get('ndim_var', False)\n and v.get('shape_var', False)): # pragma: debug\n raise RuntimeError(\"Uncomment logic that follows.\")\n # if not v.get('ndim_var', False):\n # v['ndim_var'] = {\n # 'name': v['name'] + '_ndim',\n # 'datatype': {'type': 'uint',\n # 'precision': 64},\n # 'is_length_var': True,\n # 'dependent': True}\n # if not v.get('shape_var', False):\n # v['shape_var'] = {\n # 'name': v['name'] + '_ndim',\n # 'datatype': {'type': '1darray',\n # 'subtype': 'uint',\n # 'precision': 64},\n # 'is_length_var': True,\n # 'dependent': True}\n for x in outputs:\n for v in x['vars']:\n if cls.requires_length_var(v) and (not v.get('length_var', False)):\n if v['datatype']['type'] in ['1darray', 'ndarray']: # pragma: debug\n raise RuntimeError(\"Length must be defined for arrays.\")\n elif v['datatype'].get('subtype', v['datatype']['type']) == 'bytes':\n v['length_var'] = 'strlen(%s)' % v['name']\n else:\n v['length_var'] = 'strlen4(%s)' % v['name']\n elif (cls.requires_shape_var(v)\n and not (v.get('ndim_var', False)\n and v.get('shape_var', False))): # pragma: debug\n raise RuntimeError(\"Shape must be defined for ND arrays.\")\n # Flag input variables for reallocation\n for x in inputs:\n allows_realloc = [cls.allows_realloc(v) for v in x['vars']]\n if all(allows_realloc):\n for v in x['vars']:\n if (((v['native_type'] not in ['char*', 'string_t',\n 'bytes_t', 'unicode_t'])\n and (not v.get('is_length_var', False))\n and (v['datatype']['type'] not in\n ['any', 'object', 'array', 'schema',\n 'instance', '1darray', 'ndarray'])\n and (cls.function_param['recv_function']\n == cls.function_param['recv_heap']))):\n v['allow_realloc'] = True\n for x in inputs + outputs:\n if x['datatype']['type'] == 'array':\n nvars_items = len(x['datatype'].get('items', []))\n nvars = sum([(not ix.get('is_length_var', False))\n for ix in x['vars']])\n if nvars_items == nvars:\n x['use_generic'] = False\n else:\n x['use_generic'] = True\n return flag_var\n \n @classmethod\n def input2output(cls, var):\n r\"\"\"Perform conversion necessary to turn a variable extracted from a\n function definition from an input to an output.\n\n Args:\n var (dict): Variable definition.\n\n Returns:\n dict: Updated variable definition.\n\n \"\"\"\n out = super(CModelDriver, cls).input2output(var)\n if out.get('ptr', ''):\n assert(out['native_type'].endswith('*'))\n out['ptr'] = out['ptr'][:-1]\n out['native_type'] = out['native_type'][:-1]\n out['datatype'] = cls.get_json_type(out['native_type'])\n if (((out['datatype']['type'] == '1darray')\n and var.get('ndim_var', False)\n and var.get('shape_var', False))):\n out['datatype']['type'] = 'ndarray'\n return out\n\n @classmethod\n def output2input(cls, var, 
in_definition=True):\n r\"\"\"Perform conversion necessary to turn an output variable\n into an corresponding input that can be used to format a\n function definition.\n\n Args:\n var (dict): Variable definition.\n in_definition (bool, optional): If True, the returned\n dictionary corresponds to an input variable in a\n function definition. If False, the returned value\n will correspond to an input to a function. Defaults to\n True.\n\n Returns:\n dict: Updated variable definition.\n\n \"\"\"\n out = super(CModelDriver, cls).output2input(var)\n if isinstance(var, dict):\n if in_definition:\n out = dict(out, name='*' + out['name'])\n if ((('shape' in out.get('datatype', {}))\n or ('length' in out.get('datatype', {})))):\n out['name'] = '(%s)' % out['name']\n else:\n out = dict(out, name='&' + out['name'])\n if ('shape' in out.get('datatype', {})) and (not platform._is_win):\n out['name'] += len(out['datatype']['shape']) * '[0]'\n return out\n \n @classmethod\n def allows_realloc(cls, var):\n r\"\"\"Determine if a variable allows the receive call to perform\n realloc.\n\n Args:\n var (dict): Dictionary of variable properties.\n\n Returns:\n bool: True if the variable allows realloc, False otherwise.\n\n \"\"\"\n if isinstance(var, dict):\n datatype = var.get('datatype', var)\n if ('shape' in datatype) or ('length' in datatype):\n return False\n return True\n \n @classmethod\n def requires_length_var(cls, var):\n r\"\"\"Determine if a variable requires a separate length variable.\n\n Args:\n var (dict): Dictionary of variable properties.\n\n Returns:\n bool: True if a length variable is required, False otherwise.\n\n \"\"\"\n if ((isinstance(var, dict)\n and ((cls.get_native_type(**var) in ['char*', 'string_t',\n 'bytes_t', 'unicode_t'])\n or var.get('datatype', {}).get(\n 'type', var.get('type', None)) in ['1darray'])\n and (not var.get('is_length_var', False))\n and ('length' not in var.get('datatype', {})))):\n return True\n return False\n \n @classmethod\n def requires_shape_var(cls, var):\n r\"\"\"Determine if a variable requires a separate shape variable.\n\n Args:\n var (dict): Dictionary of variable properties.\n\n Returns:\n bool: True if a shape variable is required, False otherwise.\n\n \"\"\"\n if ((isinstance(var, dict)\n and (var.get('datatype', {}).get(\n 'type', var.get('type', None)) == 'ndarray')\n and (not var.get('is_length_var', False))\n and ('shape' not in var.get('datatype', {})))):\n return True\n return False\n \n @classmethod\n def get_native_type(cls, **kwargs):\n r\"\"\"Get the native type.\n\n Args:\n type (str, optional): Name of |yggdrasil| extended JSON\n type or JSONSchema dictionary defining a datatype.\n **kwargs: Additional keyword arguments may be used in determining\n the precise declaration that should be used.\n\n Returns:\n str: The native type.\n\n \"\"\"\n out = super(CModelDriver, cls).get_native_type(**kwargs)\n if not ((out == '*') or ('X' in out) or (out == 'double')):\n return out\n from yggdrasil.metaschema.datatypes import get_type_class\n json_type = kwargs.get('datatype', kwargs.get('type', 'bytes'))\n if isinstance(json_type, str):\n json_type = {'type': json_type}\n assert(isinstance(json_type, dict))\n json_type = get_type_class(json_type['type']).normalize_definition(\n json_type)\n if out == '*':\n json_subtype = copy.deepcopy(json_type)\n json_subtype['type'] = json_subtype.pop('subtype')\n out = cls.get_native_type(datatype=json_subtype)\n if ('length' not in json_type) and ('shape' not in json_type):\n out += '*'\n elif 'X' in 
out:\n precision = json_type['precision']\n if json_type['type'] == 'complex':\n precision_map = {64: 'float',\n 128: 'double',\n 256: 'long_double'}\n if precision in precision_map:\n out = out.replace('X', precision_map[precision])\n else: # pragma: debug\n raise ValueError(\"Unsupported precision for complex types: %d\"\n % precision)\n else:\n out = out.replace('X', str(precision))\n elif out == 'double':\n if json_type['precision'] == 32:\n out = 'float'\n return out.replace(' ', '')\n \n @classmethod\n def get_json_type(cls, native_type):\n r\"\"\"Get the JSON type from the native language type.\n\n Args:\n native_type (str): The native language type.\n\n Returns:\n str, dict: The JSON type.\n\n \"\"\"\n out = {}\n regex_var = r'(?P<type>.+?(?P<precision>\\d*)(?:_t)?)\\s*(?P<pointer>\\**)'\n grp = re.fullmatch(regex_var, native_type).groupdict()\n if grp.get('precision', False):\n out['precision'] = int(grp['precision'])\n grp['type'] = grp['type'].replace(grp['precision'], 'X')\n if grp['type'] == 'char':\n out['type'] = 'bytes'\n out['precision'] = 0\n elif grp['type'] == 'void':\n out['type'] = 'null'\n elif grp['type'].startswith('complex'):\n out['type'] = 'complex'\n precision_map = {'long_double': 256,\n 'double': 128,\n 'float': 64}\n prec_str = grp['type'].split('complex_')[-1]\n if prec_str in precision_map:\n out['precision'] = precision_map[prec_str]\n else: # pragma: debug\n raise ValueError(\"Cannot determine precision for complex type '%s'\"\n % grp['type'])\n else:\n if grp['type'] == 'double':\n out['precision'] = 8 * 8\n elif grp['type'] == 'float':\n grp['type'] = 'double'\n out['precision'] = 4 * 8\n elif grp['type'] in ['int', 'uint']:\n grp['type'] += 'X_t'\n out['precision'] = 8 * np.dtype('intc').itemsize\n elif grp['type'] in ['bytes_t', 'string_t', 'unicode_t']:\n out['precision'] = 0\n out['type'] = super(CModelDriver, cls).get_json_type(grp['type'])\n if grp.get('pointer', False):\n nptr = len(grp['pointer'])\n if grp['type'] in ['char', 'void']:\n nptr -= 1\n if nptr > 0:\n out['subtype'] = out['type']\n out['type'] = '1darray'\n if out['type'] in _valid_types:\n out['subtype'] = out['type']\n out['type'] = 'scalar'\n return out\n \n @classmethod\n def format_function_param(cls, key, default=None, **kwargs):\n r\"\"\"Return the formatted version of the specified key.\n\n Args:\n key (str): Key in cls.function_param mapping that should be\n formatted.\n default (str, optional): Format that should be returned if key\n is not in cls.function_param. 
Defaults to None.\n **kwargs: Additional keyword arguments are used in formatting the\n request function parameter.\n\n Returns:\n str: Formatted string.\n\n Raises:\n NotImplementedError: If key is not in cls.function_param and default\n is not set.\n\n \"\"\"\n if (key == 'import') and ('filename' in kwargs):\n kwargs['filename'] = os.path.basename(kwargs['filename'])\n elif (key == 'interface') and ('interface_library' in kwargs):\n kwargs['interface_library'] = os.path.basename(\n kwargs['interface_library']).replace('.c', '.h')\n kwargs['default'] = default\n return super(CModelDriver, cls).format_function_param(key, **kwargs)\n \n @classmethod\n def write_model_function_call(cls, model_function, flag_var,\n inputs, outputs, **kwargs):\n r\"\"\"Write lines necessary to call the model function.\n\n Args:\n model_function (str): Handle of the model function that should be\n called.\n flag_var (str): Name of variable that should be used as a flag.\n inputs (list): List of dictionaries describing inputs to the model.\n outputs (list): List of dictionaries describing outputs from the model.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: Lines required to carry out a call to a model function in\n this language.\n\n \"\"\"\n new_inputs = copy.deepcopy(inputs)\n for x in new_inputs:\n for v in x['vars']:\n if v.get('allow_realloc', False):\n v['name'] = '*' + v['name']\n return super(CModelDriver, cls).write_model_function_call(\n model_function, flag_var, new_inputs, outputs, **kwargs)\n \n @classmethod\n def write_model_recv(cls, channel, recv_var, **kwargs):\n r\"\"\"Write a model receive call include checking the return flag.\n\n Args:\n channel (str): Name of variable that the channel being received from\n was stored in.\n recv_var (dict, list): Information of one or more variables that\n receieved information should be stored in.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: Lines required to carry out a receive call in this language.\n\n \"\"\"\n recv_var_str = recv_var\n if not isinstance(recv_var, str):\n recv_var_par = cls.channels2vars(recv_var)\n allows_realloc = [cls.allows_realloc(v)\n for v in recv_var_par]\n if all(allows_realloc):\n kwargs['alt_recv_function'] = cls.function_param['recv_heap']\n else:\n kwargs['alt_recv_function'] = cls.function_param['recv_stack']\n recv_var_str = cls.prepare_output_variables(\n recv_var_par, in_inputs=cls.outputs_in_inputs,\n for_yggdrasil=True)\n return super(CModelDriver, cls).write_model_recv(channel, recv_var_str, **kwargs)\n \n @classmethod\n def write_declaration(cls, var, **kwargs):\n r\"\"\"Return the lines required to declare a variable with a certain\n type.\n\n Args:\n var (dict, str): Name or information dictionary for the variable\n being declared.\n **kwargs: Addition keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: The lines declaring the variable.\n\n \"\"\"\n if isinstance(var, str): # pragma: no cover\n var = {'name': var}\n type_name = cls.get_native_type(**var)\n if var.get('allow_realloc', False):\n type_name += '*'\n var = dict(var, native_type=type_name)\n if ((type_name.endswith('*')\n or (type_name in ['bytes_t', 'string_t', 'unicode_t']))):\n kwargs.get('requires_freeing', []).append(var)\n kwargs.setdefault('value', 'NULL')\n elif var.get('is_length_var', False):\n kwargs.setdefault('value', '0')\n var = dict(var, name=cls.get_name_declare(var))\n out = 
super(CModelDriver, cls).write_declaration(var, **kwargs)\n for k in ['length', 'ndim', 'shape']:\n if ((isinstance(var.get(k + '_var', None), dict)\n and var[k + '_var'].get('dependent', False))):\n out += cls.write_declaration(var[k + '_var'])\n return out\n\n @classmethod\n def get_name_declare(cls, var):\n r\"\"\"Determine the name that should be used for declaration.\n\n Args:\n var (str, dict): Name of variable or dictionary of information.\n\n Returns:\n str: Modified name for declaration.\n\n \"\"\"\n if isinstance(var, str): # pragma: no cover\n return var\n assert(isinstance(var, dict))\n out = var['name']\n if 'length' in var.get('datatype', {}):\n out += '[%d]' % var['datatype']['length']\n elif 'shape' in var.get('datatype', {}):\n for s in var['datatype']['shape']:\n out += '[%d]' % s\n return out\n \n @classmethod\n def write_free(cls, var, **kwargs):\n r\"\"\"Return the lines required to free a variable with a certain type.\n\n Args:\n var (dict, str): Name or information dictionary for the variable\n being declared.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: The lines freeing the variable.\n\n \"\"\"\n out = []\n if isinstance(var, str):\n var = {'name': var}\n if ((isinstance(var.get('datatype', False), dict)\n and (('free_%s' % var['datatype']['type'])\n in cls.function_param))):\n if var.get('allow_realloc', False):\n out += super(CModelDriver, cls).write_free(\n var, **kwargs)\n var = {'name': var['name']}\n else:\n var = dict(var, name=('&' + var['name']))\n out += super(CModelDriver, cls).write_free(var, **kwargs)\n return out\n \n @classmethod\n def prepare_variables(cls, vars_list, in_definition=False,\n for_yggdrasil=False):\n r\"\"\"Concatenate a set of input variables such that it can be passed as a\n single string to the function_call parameter.\n\n Args:\n vars_list (list): List of variable dictionaries containing info\n (e.g. names) that should be used to prepare a string representing\n input/output to/from a function call.\n in_definition (bool, optional): If True, the returned sequence\n will be of the format required for specifying variables\n in a function definition. Defaults to False.\n for_yggdrasil (bool, optional): If True, the variables will be\n prepared in the formated expected by calls to yggdarsil\n send/recv methods. 
Defaults to False.\n\n Returns:\n str: Concatentated variables list.\n\n \"\"\"\n if not isinstance(vars_list, list):\n vars_list = [vars_list]\n new_vars_list = []\n for x in vars_list:\n if isinstance(x, str):\n new_vars_list.append(x)\n else:\n assert(isinstance(x, dict))\n if for_yggdrasil and x.get('is_length_var', False):\n continue\n new_vars_list.append(x)\n if for_yggdrasil:\n for k in ['length', 'ndim', 'shape']:\n kvar = k + '_var'\n if x.get(kvar, False):\n if ((x['name'].startswith('*')\n or x['name'].startswith('&'))):\n new_vars_list.append(\n dict(x[kvar],\n name=x['name'][0] + x[kvar]['name']))\n else:\n new_vars_list.append(x[kvar])\n if in_definition:\n new_vars_list2 = []\n for x in new_vars_list:\n if x['name'].startswith('*'):\n name = '%s%s* %s' % tuple(\n [cls.get_native_type(**x)]\n + x['name'].rsplit('*', 1))\n else:\n name = '%s %s' % (cls.get_native_type(**x), x['name'])\n new_var = dict(x, name=name)\n new_var['name'] = cls.get_name_declare(new_var)\n new_vars_list2.append(new_var)\n new_vars_list = new_vars_list2\n return super(CModelDriver, cls).prepare_variables(\n new_vars_list, in_definition=in_definition,\n for_yggdrasil=for_yggdrasil)\n \n @classmethod\n def prepare_output_variables(cls, vars_list, in_definition=False,\n in_inputs=False, for_yggdrasil=False):\n r\"\"\"Concatenate a set of output variables such that it can be passed as\n a single string to the function_call parameter.\n\n Args:\n vars_list (list): List of variable names to concatenate as output\n from a function call.\n in_definition (bool, optional): If True, the returned sequence\n will be of the format required for specifying output\n variables in a function definition. Defaults to False.\n in_inputs (bool, optional): If True, the output variables should\n be formated to be included as input variables. Defaults to\n False.\n for_yggdrasil (bool, optional): If True, the variables will be\n prepared in the formated expected by calls to yggdarsil\n send/recv methods. Defaults to False.\n\n Returns:\n str: Concatentated variables list.\n\n \"\"\"\n if not in_inputs:\n # If the output is a True output and not passed as an input\n # parameter, then the output should not include the type\n # information that is added if in_definition is True.\n in_definition = False\n return super(CModelDriver, cls).prepare_output_variables(\n vars_list, in_definition=in_definition, in_inputs=in_inputs,\n for_yggdrasil=for_yggdrasil)\n\n @classmethod\n def write_print_output_var(cls, var, in_inputs=False, **kwargs):\n r\"\"\"Get the lines necessary to print an output variable in this\n language.\n\n Args:\n var (dict): Variable information.\n in_inputs (bool, optional): If True, the output variable\n is passed in as an input variable to be populated.\n Defaults to False.\n **kwargs: Additional keyword arguments are passed to write_print_var.\n\n Returns:\n list: Lines printing the specified variable.\n\n \"\"\"\n if in_inputs and (cls.language != 'c++'):\n if isinstance(var, dict):\n var = dict(var, name='%s[0]' % var['name'])\n else:\n var = '%s[0]' % var\n return super(CModelDriver, cls).write_print_output_var(\n var, in_inputs=in_inputs, **kwargs)\n \n @classmethod\n def write_function_def(cls, function_name, dont_add_lengths=False,\n use_length_prefix=False, **kwargs):\n r\"\"\"Write a function definition.\n\n Args:\n function_name (str): Name fo the function being defined.\n dont_add_lengths (bool, optional): If True, length variables\n are not added for arrays. 
Defaults to False.\n use_length_prefix (bool, optional): If True and length variables\n are added, they will be named using prefixes. Otherwise,\n suffixes will be used. Defaults to False.\n **kwargs: Additional keyword arguments are passed to the\n parent class's method.\n\n Returns:\n list: Lines completing the function call.\n\n Raises:\n ValueError: If outputs_in_inputs is not True and more than\n one output variable is specified.\n\n \"\"\"\n if not dont_add_lengths:\n for io in ['input', 'output']:\n if io + '_var' in kwargs:\n io_var = cls.parse_var_definition(\n io + 's', kwargs.pop(io + '_var'))\n else:\n io_var = kwargs.get(io + 's', [])\n for x in io_var:\n if use_length_prefix:\n v_length = 'length_' + x['name']\n v_ndim = 'ndim_' + x['name']\n v_shape = 'shape_' + x['name']\n else:\n v_length = x['name'] + '_length'\n v_ndim = x['name'] + '_ndim'\n v_shape = x['name'] + '_shape'\n if x.get('is_length_var', False):\n continue\n if cls.requires_length_var(x):\n if not x.get('length_var', False):\n x['length_var'] = {\n 'name': v_length,\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n io_var.append(x['length_var'])\n elif cls.requires_shape_var(x):\n if not x.get('ndim_var', False):\n x['ndim_var'] = {\n 'name': v_ndim,\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n io_var.append(x['ndim_var'])\n if not x.get('shape_var', False):\n x['shape_var'] = {\n 'name': v_shape,\n 'datatype': {'type': '1darray',\n 'subtype': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n io_var.append(x['shape_var'])\n length_var = {\n 'name': v_length,\n 'datatype': {'type': 'uint',\n 'precision': 64},\n 'is_length_var': True}\n kwargs['function_contents'] = (\n cls.write_declaration(length_var)\n + kwargs.get('function_contents', []))\n kwargs[io + 's'] = io_var\n output_type = None\n if kwargs.get('outputs_in_inputs', False):\n output_type = cls.get_native_type(datatype='flag')\n else:\n if 'output_var' in kwargs:\n kwargs['outputs'] = cls.parse_var_definition(\n 'outputs', kwargs.pop('output_var'))\n outputs = kwargs.get('outputs', [])\n nout = len(outputs)\n if nout == 0:\n output_type = 'void'\n elif nout == 1:\n output_type = cls.get_native_type(**(outputs[0]))\n else: # pragma: debug\n raise ValueError(\"C does not support more than one \"\n \"output variable.\")\n kwargs['output_type'] = output_type\n return super(CModelDriver, cls).write_function_def(\n function_name, **kwargs)\n \n @classmethod\n def write_native_type_definition(cls, name, datatype, name_base=None,\n requires_freeing=None, no_decl=False,\n use_generic=False):\n r\"\"\"Get lines declaring the data type within the language.\n\n Args:\n name (str): Name of variable that definition should be stored in.\n datatype (dict): Type definition.\n requires_freeing (list, optional): List that variables requiring\n freeing should be appended to. Defaults to None.\n no_decl (bool, optional): If True, the variable is defined without\n declaring it (assumes that variable has already been declared).\n Defaults to False.\n use_generic (bool, optional): If True variables serialized\n and/or deserialized by the type will be assumed to be\n generic objects. 
Defaults to False.\n\n Returns:\n list: Lines required to define a type definition.\n\n \"\"\"\n out = []\n fmt = None\n keys = {}\n if use_generic:\n keys['use_generic'] = 'true'\n else:\n keys['use_generic'] = 'false'\n typename = datatype['type']\n if name_base is None:\n name_base = name\n if datatype['type'] == 'array':\n if 'items' in datatype:\n assert(isinstance(datatype['items'], list))\n keys['nitems'] = len(datatype['items'])\n keys['items'] = '%s_items' % name_base\n fmt = ('create_dtype_json_array({nitems}, {items}, '\n '{use_generic})')\n out += [('dtype_t** %s = '\n '(dtype_t**)malloc(%d*sizeof(dtype_t*));')\n % (keys['items'], keys['nitems'])]\n for i, x in enumerate(datatype['items']):\n # Prevent recusion\n x_copy = copy.deepcopy(x)\n x_copy.pop('items', None)\n x_copy.pop('properties', None)\n out += cls.write_native_type_definition(\n '%s[%d]' % (keys['items'], i), x_copy,\n name_base=('%s_item%d' % (name_base, i)),\n requires_freeing=requires_freeing, no_decl=True,\n use_generic=use_generic)\n assert(isinstance(requires_freeing, list))\n requires_freeing += [keys['items']]\n else:\n keys['use_generic'] = 'true'\n fmt = ('create_dtype_json_array(0, NULL, '\n '{use_generic})')\n elif datatype['type'] == 'object':\n keys['use_generic'] = 'true'\n if 'properties' in datatype:\n assert(isinstance(datatype['properties'], dict))\n keys['nitems'] = len(datatype['properties'])\n keys['keys'] = '%s_keys' % name_base\n keys['values'] = '%s_vals' % name_base\n fmt = ('create_dtype_json_object({nitems}, {keys}, '\n '{values}, {use_generic})')\n out += [('dtype_t** %s = '\n '(dtype_t**)malloc(%d*sizeof(dtype_t*));')\n % (keys['values'], keys['nitems']),\n ('char** %s = (char**)malloc(%d*sizeof(char*));')\n % (keys['keys'], keys['nitems'])]\n for i, (k, v) in enumerate(datatype['properties'].items()):\n # Prevent recusion\n v_copy = copy.deepcopy(v)\n v_copy.pop('items', None)\n v_copy.pop('properties', None)\n out += ['%s[%d] = \\\"%s\\\";' % (keys['keys'], i, k)]\n out += cls.write_native_type_definition(\n '%s[%d]' % (keys['values'], i), v_copy,\n name_base=('%s_prop%d' % (name_base, i)),\n requires_freeing=requires_freeing, no_decl=True,\n use_generic=use_generic)\n assert(isinstance(requires_freeing, list))\n requires_freeing += [keys['values'], keys['keys']]\n else:\n fmt = ('create_dtype_json_object(0, NULL, NULL, '\n '{use_generic})')\n elif datatype['type'] in ['ply', 'obj']:\n fmt = 'create_dtype_%s({use_generic})' % datatype['type']\n elif datatype['type'] == '1darray':\n fmt = ('create_dtype_1darray(\\\"{subtype}\\\", {precision}, {length}, '\n '\\\"{units}\\\", {use_generic})')\n for k in ['subtype', 'precision']:\n keys[k] = datatype[k]\n keys['length'] = datatype.get('length', '0')\n keys['units'] = datatype.get('units', '')\n elif datatype['type'] == 'ndarray':\n fmt = ('create_dtype_ndarray(\\\"{subtype}\\\", {precision},'\n ' {ndim}, {shape}, \\\"{units}\\\", {use_generic})')\n for k in ['subtype', 'precision']:\n keys[k] = datatype[k]\n if 'shape' in datatype:\n shape_var = '%s_shape' % name_base\n out += ['size_t %s[%d] = {%s};' % (\n shape_var, len(datatype['shape']),\n ', '.join([str(s) for s in datatype['shape']]))]\n keys['ndim'] = len(datatype['shape'])\n keys['shape'] = shape_var\n fmt = fmt.replace('create_dtype_ndarray',\n 'create_dtype_ndarray_arr')\n else:\n keys['ndim'] = 0\n keys['shape'] = 'NULL'\n keys['units'] = datatype.get('units', '')\n elif (typename == 'scalar') or (typename in _valid_types):\n fmt = ('create_dtype_scalar(\\\"{subtype}\\\", 
{precision}, '\n '\\\"{units}\\\", {use_generic})')\n keys['subtype'] = datatype.get('subtype', datatype['type'])\n keys['units'] = datatype.get('units', '')\n if keys['subtype'] in ['bytes', 'string', 'unicode']:\n keys['precision'] = datatype.get('precision', 0)\n else:\n keys['precision'] = datatype['precision']\n typename = 'scalar'\n elif datatype['type'] in ['boolean', 'null', 'number',\n 'integer', 'string']:\n fmt = 'create_dtype_default(\\\"{type}\\\", {use_generic})'\n keys['type'] = datatype['type']\n elif (typename in ['class', 'function']):\n fmt = 'create_dtype_pyobj(\\\"{type}\\\", {use_generic})'\n keys['type'] = typename\n elif typename == 'instance':\n keys['use_generic'] = 'true'\n # fmt = 'create_dtype_pyinst(NULL, NULL)'\n fmt = 'create_dtype_empty({use_generic})'\n elif typename == 'schema':\n keys['use_generic'] = 'true'\n fmt = 'create_dtype_schema({use_generic})'\n elif typename == 'any':\n keys['use_generic'] = 'true'\n fmt = 'create_dtype_empty({use_generic})'\n else: # pragma: debug\n raise ValueError(\"Cannot create C version of type '%s'\"\n % typename)\n def_line = '%s = %s;' % (name, fmt.format(**keys))\n if not no_decl:\n def_line = 'dtype_t* ' + def_line\n out.append(def_line)\n return out\n\n @classmethod\n def write_channel_def(cls, key, datatype=None, requires_freeing=None,\n use_generic=False, **kwargs):\n r\"\"\"Write an channel declaration/definition.\n\n Args:\n key (str): Entry in cls.function_param that should be used.\n datatype (dict, optional): Data type associated with the channel.\n Defaults to None and is ignored.\n requires_freeing (list, optional): List that variables requiring\n freeing should be appended to. Defaults to None.\n use_generic (bool, optional): If True variables serialized\n and/or deserialized by the channel will be assumed to be\n generic objects. Defaults to False.\n **kwargs: Additional keyword arguments are passed as parameters\n to format_function_param.\n\n Returns:\n list: Lines required to declare and define an output channel.\n\n \"\"\"\n out = []\n if (datatype is not None) and ('{channel_type}' in cls.function_param[key]):\n kwargs['channel_type'] = '%s_type' % kwargs['channel']\n out += cls.write_native_type_definition(\n kwargs['channel_type'], datatype,\n requires_freeing=requires_freeing,\n use_generic=use_generic)\n out += super(CModelDriver, cls).write_channel_def(key, datatype=datatype,\n **kwargs)\n return out\n\n @classmethod\n def write_assign_to_output(cls, dst_var, src_var,\n outputs_in_inputs=False,\n dont_add_lengths=False,\n use_length_prefix=False, **kwargs):\n r\"\"\"Write lines assigning a value to an output variable.\n\n Args:\n dst_var (str, dict): Name or information dictionary for\n variable being assigned to.\n src_var (str, dict): Name or information dictionary for\n value being assigned to dst_var.\n outputs_in_inputs (bool, optional): If True, outputs are passed\n as input parameters. In some languages, this means that a\n pointer or reference is passed (e.g. C) and so the assignment\n should be to the memory indicated rather than the variable.\n Defaults to False.\n dont_add_lengths (bool, optional): If True, length variables\n are not added for arrays. Defaults to False.\n use_length_prefix (bool, optional): If True and length variables\n are added, they will be named using prefixes. Otherwise,\n suffixes will be used. 
Defaults to False.\n **kwargs: Additional keyword arguments are passed to the parent\n class's method.\n\n Returns:\n list: Lines achieving assignment.\n\n \"\"\"\n out = []\n if cls.requires_length_var(dst_var):\n src_var_length = None\n dst_var_length = None\n if isinstance(src_var, dict):\n src_var_length = src_var.get('length_var', None)\n if isinstance(dst_var, dict):\n dst_var_length = dst_var.get('length_var', None)\n if not dont_add_lengths:\n if src_var_length is None:\n if use_length_prefix:\n src_var_length = 'length_' + src_var['name']\n else:\n src_var_length = src_var['name'] + '_length'\n if dst_var_length is None:\n if use_length_prefix:\n dst_var_length = 'length_' + dst_var['name']\n else:\n dst_var_length = dst_var['name'] + '_length'\n out += cls.write_assign_to_output(\n dst_var_length, src_var_length,\n outputs_in_inputs=outputs_in_inputs)\n elif src_var_length is None:\n if ((dst_var['datatype']['type']\n in ['1darray', 'ndarray'])): # pragma: debug\n raise RuntimeError(\"Length must be set in order \"\n \"to write array assignments.\")\n elif (dst_var['datatype'].get('subtype', dst_var['datatype']['type'])\n in ['bytes']):\n src_var_length = '(strlen(%s)+1)' % src_var['name']\n else:\n src_var_length = '(strlen4(%s)+1)' % src_var['name']\n src_var_dtype = cls.get_native_type(**src_var)\n if src_var_dtype in ['bytes_t', 'unicode_t', 'string_t']:\n src_var_dtype = 'char*'\n src_var_dtype = src_var_dtype.rsplit('*', 1)[0]\n out += cls.write_assign_to_output(\n dst_var['name'], 'value',\n outputs_in_inputs=outputs_in_inputs,\n replacement=('{name} = ({native_type}*)realloc({name}, '\n '{N}*sizeof({native_type}));'),\n native_type=src_var_dtype, N=src_var_length)\n kwargs.update(copy=True, native_type=src_var_dtype,\n N=src_var_length)\n elif cls.requires_shape_var(dst_var):\n if dont_add_lengths: # pragma: debug\n raise RuntimeError(\"Shape must be set in order \"\n \"to write ND array assignments.\")\n # Dimensions\n src_var_ndim = None\n dst_var_ndim = None\n if isinstance(src_var, dict):\n src_var_ndim = src_var.get('ndim_var', None)\n if isinstance(dst_var, dict):\n dst_var_ndim = dst_var.get('ndim_var', None)\n if src_var_ndim is None:\n if use_length_prefix:\n src_var_ndim = 'ndim_' + src_var['name']\n else:\n src_var_ndim = src_var['name'] + '_ndim'\n if dst_var_ndim is None:\n if use_length_prefix:\n dst_var_ndim = 'ndim_' + dst_var['name']\n else:\n dst_var_ndim = dst_var['name'] + '_ndim'\n if isinstance(src_var_ndim, str):\n src_var_ndim = {'name': src_var_ndim,\n 'datatype': {'type': 'uint',\n 'precision': 64}}\n if isinstance(dst_var_ndim, str):\n dst_var_ndim = {'name': dst_var_ndim,\n 'datatype': {'type': 'uint',\n 'precision': 64}}\n\n out += cls.write_assign_to_output(\n dst_var_ndim, src_var_ndim,\n outputs_in_inputs=outputs_in_inputs)\n # Shape\n src_var_shape = None\n dst_var_shape = None\n if isinstance(src_var, dict):\n src_var_shape = src_var.get('shape_var', None)\n if isinstance(dst_var, dict):\n dst_var_shape = dst_var.get('shape_var', None)\n if src_var_shape is None:\n if use_length_prefix:\n src_var_shape = 'shape_' + src_var['name']\n else:\n src_var_shape = src_var['name'] + '_shape'\n if dst_var_shape is None:\n if use_length_prefix:\n dst_var_shape = 'shape_' + dst_var['name']\n else:\n dst_var_shape = dst_var['name'] + '_shape'\n if isinstance(src_var_shape, str):\n src_var_shape = {'name': src_var_shape,\n 'datatype': {'type': '1darray',\n 'subtype': 'uint',\n 'precision': 64},\n 'length_var': src_var_ndim['name']}\n if 
isinstance(dst_var_shape, str):\n dst_var_shape = {'name': dst_var_shape,\n 'datatype': {'type': '1darray',\n 'subtype': 'uint',\n 'precision': 64},\n 'length_var': dst_var_ndim['name']}\n out += cls.write_assign_to_output(\n dst_var_shape, src_var_shape,\n outputs_in_inputs=outputs_in_inputs,\n dont_add_lengths=True)\n src_var_dtype = cls.get_native_type(**src_var).rsplit('*', 1)[0]\n if use_length_prefix:\n src_var_length = 'length_' + src_var['name']\n else:\n src_var_length = src_var['name'] + '_length'\n out += (('{length} = 1;\\n'\n 'size_t cdim;\\n'\n 'for (cdim = 0; cdim < {ndim}; cdim++) {{\\n'\n ' {length} = {length}*{shape}[cdim];\\n'\n '}}\\n').format(length=src_var_length,\n ndim=src_var_ndim['name'],\n shape=src_var_shape['name'])).splitlines()\n out += cls.write_assign_to_output(\n dst_var['name'], 'value',\n outputs_in_inputs=outputs_in_inputs,\n replacement=('{name} = ({native_type}*)realloc({name}, '\n '{N}*sizeof({native_type}));'),\n native_type=src_var_dtype, N=src_var_length)\n kwargs.update(copy=True, native_type=src_var_dtype,\n N=src_var_length)\n elif isinstance(dst_var, dict):\n if 'shape' in dst_var.get('datatype', {}):\n nele = 1\n for s in dst_var['datatype']['shape']:\n nele *= s\n kwargs.update(copy=True, N=nele,\n native_type=dst_var['datatype']['subtype'])\n elif 'length' in dst_var.get('datatype', {}):\n kwargs.update(copy=True, N=dst_var['datatype']['length'],\n native_type=dst_var['datatype']['subtype'])\n if outputs_in_inputs and (cls.language != 'c++'):\n if isinstance(dst_var, dict):\n dst_var = dict(dst_var,\n name='%s[0]' % dst_var['name'])\n else:\n dst_var = '%s[0]' % dst_var\n if ((outputs_in_inputs and isinstance(dst_var, dict)\n and isinstance(dst_var['datatype'], dict)\n and ('copy_' + dst_var['datatype']['type']\n in cls.function_param))):\n kwargs['copy'] = True\n out += super(CModelDriver, cls).write_assign_to_output(\n dst_var, src_var, outputs_in_inputs=outputs_in_inputs,\n **kwargs)\n return out\n" ]
[ [ "numpy.dtype", "numpy.distutils.misc_util.get_numpy_include_dirs" ] ]
marcoabrate/transformers
[ "3f77c26d74e1282955fefa8dfff2451e44f6d4a9" ]
[ "src/transformers/trainer.py" ]
[ "# coding=utf-8\n# Copyright 2020-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.\n\"\"\"\n\nimport collections\nimport inspect\nimport math\nimport os\nimport re\nimport shutil\nimport time\nimport warnings\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\n\n# Integrations must be imported before ML frameworks:\nfrom .integrations import ( # isort: split\n default_hp_search_backend,\n get_reporting_integration_callbacks,\n hp_params,\n is_fairscale_available,\n is_optuna_available,\n is_ray_tune_available,\n run_hp_search_optuna,\n run_hp_search_ray,\n init_deepspeed,\n)\n\nimport numpy as np\nimport torch\nfrom packaging import version\nfrom torch import nn\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import RandomSampler, SequentialSampler\n\nfrom .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator\nfrom .file_utils import (\n WEIGHTS_NAME,\n is_apex_available,\n is_datasets_available,\n is_in_notebook,\n is_sagemaker_distributed_available,\n is_torch_tpu_available,\n)\nfrom .modeling_utils import PreTrainedModel\nfrom .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING\nfrom .optimization import Adafactor, AdamW, get_scheduler\nfrom .tokenization_utils_base import PreTrainedTokenizerBase\nfrom .trainer_callback import (\n CallbackHandler,\n DefaultFlowCallback,\n PrinterCallback,\n ProgressCallback,\n TrainerCallback,\n TrainerControl,\n TrainerState,\n)\nfrom .trainer_pt_utils import (\n DistributedLengthGroupedSampler,\n DistributedTensorGatherer,\n LabelSmoother,\n LengthGroupedSampler,\n SequentialDistributedSampler,\n distributed_broadcast_scalars,\n distributed_concat,\n nested_concat,\n nested_detach,\n nested_numpify,\n nested_xla_mesh_reduce,\n reissue_pt_warnings,\n)\nfrom .trainer_utils import (\n PREFIX_CHECKPOINT_DIR,\n BestRun,\n EvalPrediction,\n HPSearchBackend,\n PredictionOutput,\n TrainOutput,\n default_compute_objective,\n default_hp_space,\n set_seed,\n speed_metrics,\n)\nfrom .training_args import ParallelMode, TrainingArguments\nfrom .utils import logging\n\n\n_is_native_amp_available = False\n\nDEFAULT_CALLBACKS = [DefaultFlowCallback]\nDEFAULT_PROGRESS_CALLBACK = ProgressCallback\n\nif is_in_notebook():\n from .utils.notebook import NotebookProgressCallback\n\n DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback\n\nif is_apex_available():\n from apex import amp\n\nif version.parse(torch.__version__) >= version.parse(\"1.6\"):\n _is_native_amp_available = True\n from torch.cuda.amp import autocast\n\nif is_datasets_available():\n import datasets\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n import 
torch_xla.distributed.parallel_loader as pl\n\nif is_fairscale_available():\n from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\n from fairscale.optim import OSS\n from fairscale.optim.grad_scaler import ShardedGradScaler\n\nif is_sagemaker_distributed_available():\n import smdistributed.dataparallel.torch.distributed as dist\n from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP\nelse:\n import torch.distributed as dist\n\nif TYPE_CHECKING:\n import optuna\n\nlogger = logging.get_logger(__name__)\n\n\ndef _model_unwrap(model: nn.Module) -> nn.Module:\n # since there could be multiple levels of wrapping, unwrap recursively\n if hasattr(model, \"module\"):\n return _model_unwrap(model.module)\n else:\n return model\n\n\nclass Trainer:\n \"\"\"\n Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.\n\n Args:\n model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):\n The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.\n\n .. note::\n\n :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`\n provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as\n they work the same way as the 🤗 Transformers models.\n args (:class:`~transformers.TrainingArguments`, `optional`):\n The arguments to tweak for training. Will default to a basic instance of\n :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in\n the current directory if not provided.\n data_collator (:obj:`DataCollator`, `optional`):\n The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.\n Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of\n :func:`~transformers.DataCollatorWithPadding` otherwise.\n train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):\n The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the\n maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an\n interrupted training or reuse the fine-tuned model.\n model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):\n A function that instantiates the model to be used. If provided, each call to\n :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.\n\n The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be\n able to choose different architectures according to hyper parameters (such as layer count, sizes of inner\n layers, dropout probabilities etc).\n compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):\n The function that will be used to compute metrics at evaluation. 
Must take a\n :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.\n callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):\n A list of callbacks to customize the training loop. Will add those to the list of default callbacks\n detailed in :doc:`here <callback>`.\n\n If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.\n optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple\n containing the optimizer and the scheduler to use. Will default to an instance of\n :class:`~transformers.AdamW` on your model and a scheduler given by\n :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.\n\n Important attributes:\n\n - **model** -- Always points to the core model. If using a transformers model, it will be a\n :class:`~transformers.PreTrainedModel` subclass.\n - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the\n original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,\n the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the\n inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.\n - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from\n data parallelism, this means some of the model layers are split on different GPUs).\n \"\"\"\n\n def __init__(\n self,\n model: Union[PreTrainedModel, torch.nn.Module] = None,\n args: TrainingArguments = None,\n data_collator: Optional[DataCollator] = None,\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Dataset] = None,\n tokenizer: Optional[\"PreTrainedTokenizerBase\"] = None,\n model_init: Callable[[], PreTrainedModel] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n callbacks: Optional[List[TrainerCallback]] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),\n ):\n if args is None:\n output_dir = \"tmp_trainer\"\n logger.info(f\"No `TrainingArguments` passed, using `output_dir={output_dir}`.\")\n args = TrainingArguments(output_dir=output_dir)\n self.args = args\n # Seed must be set before instantiating the model when using model\n set_seed(self.args.seed)\n self.hp_name = None\n self.deepspeed = None\n\n if model is None:\n if model_init is not None:\n self.model_init = model_init\n model = self.call_model_init()\n else:\n raise RuntimeError(\"`Trainer` requires either a `model` or `model_init` argument\")\n else:\n if model_init is not None:\n warnings.warn(\n \"`Trainer` requires either a `model` or `model_init` argument, but not both. \"\n \"`model_init` will overwrite your model when calling the `train` method. 
This will become a fatal error in the next release.\",\n FutureWarning,\n )\n self.model_init = model_init\n\n if hasattr(model, \"is_parallelizable\") and model.is_parallelizable and model.model_parallel:\n self.is_model_parallel = True\n else:\n self.is_model_parallel = False\n\n default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)\n self.data_collator = data_collator if data_collator is not None else default_collator\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.tokenizer = tokenizer\n\n # Model parallel\n if not self.is_model_parallel:\n model = model.to(args.device)\n else:\n # Force n_gpu to 1 to avoid DataParallel.\n self.args._n_gpu = 1\n\n # later use `self.model is self.model_wrapped` to check if it's wrapped or not\n self.model_wrapped = model\n self.model = model\n\n self.compute_metrics = compute_metrics\n self.optimizer, self.lr_scheduler = optimizers\n if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):\n raise RuntimeError(\n \"Passing a `model_init` is incompatible with providing the `optimizers` argument.\"\n \"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.\"\n )\n default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)\n callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks\n self.callback_handler = CallbackHandler(\n callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler\n )\n self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)\n\n # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.\n self._loggers_initialized = False\n\n # Create output directory if needed\n if self.is_world_process_zero():\n os.makedirs(self.args.output_dir, exist_ok=True)\n if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):\n # Set an xla_device flag on the model's config.\n # We'll find a more elegant and not need to do this in the future.\n self.model.config.xla_device = True\n if not callable(self.data_collator) and callable(getattr(self.data_collator, \"collate_batch\", None)):\n raise ValueError(\"The `data_collator` should be a simple callable (function, class with `__call__`).\")\n\n if args.max_steps > 0:\n logger.info(\"max_steps is given, it will override any value given in num_train_epochs\")\n\n # Enforce rules on using datasets with no __len__\n if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:\n raise ValueError(\"train_dataset does not implement __len__, max_steps has to be specified\")\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n if is_datasets_available():\n if isinstance(train_dataset, datasets.Dataset):\n self._remove_unused_columns(self.train_dataset, description=\"training\")\n if isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(self.eval_dataset, description=\"evaluation\")\n\n # Setup Sharded DDP training\n self.sharded_dpp = False\n if args.sharded_ddp:\n if args.deepspeed:\n raise ValueError(\n \"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags.\"\n )\n\n if args.local_rank == -1:\n raise ValueError(\"Using sharded DDP only works in distributed training.\")\n elif not is_fairscale_available():\n raise 
ImportError(\"Sharded DDP training requires fairscale: `pip install fairscale`.\")\n else:\n self.sharded_dpp = True\n\n # Mixed precision setup\n self.use_apex = False\n self.use_amp = False\n self.fp16_backend = None\n\n if args.fp16:\n if args.fp16_backend == \"auto\":\n self.fp16_backend = \"amp\" if _is_native_amp_available else \"apex\"\n else:\n self.fp16_backend = args.fp16_backend\n logger.info(f\"Using {self.fp16_backend} fp16 backend\")\n\n if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16\n if self.fp16_backend == \"amp\":\n self.use_amp = True\n self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()\n else:\n if not is_apex_available():\n raise ImportError(\n \"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.\"\n )\n self.use_apex = True\n\n # Label smoothing\n if self.args.label_smoothing_factor != 0:\n self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)\n else:\n self.label_smoother = None\n\n self.state = TrainerState()\n self.control = TrainerControl()\n # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the\n # state at each call to self.log.\n self._total_flos = None\n self.hp_search_backend = None\n self.use_tune_checkpoints = False\n default_label_names = (\n [\"start_positions\", \"end_positions\"]\n if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()\n else [\"labels\"]\n )\n self.label_names = default_label_names if self.args.label_names is None else self.args.label_names\n self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)\n\n def add_callback(self, callback):\n \"\"\"\n Add a callback to the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will instantiate a member of that class.\n \"\"\"\n self.callback_handler.add_callback(callback)\n\n def pop_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.\n\n If the callback is not found, returns :obj:`None` (and no error is raised).\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will pop the first member of that class found in the list of callbacks.\n\n Returns:\n :class:`~transformer.TrainerCallback`: The callback removed, if found.\n \"\"\"\n return self.callback_handler.pop_callback(callback)\n\n def remove_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will remove the first member of that class found in the list of callbacks.\n \"\"\"\n self.callback_handler.remove_callback(callback)\n\n def _remove_unused_columns(self, dataset: \"datasets.Dataset\", description: Optional[str] = None):\n if not self.args.remove_unused_columns:\n return\n # Inspect model forward signature to keep only the arguments it accepts.\n signature = inspect.signature(self.model.forward)\n 
signature_columns = list(signature.parameters.keys())\n # Labels may be named label or label_ids, the default data collator handles that.\n signature_columns += [\"label\", \"label_ids\"]\n columns = [k for k in signature_columns if k in dataset.column_names]\n ignored_columns = list(set(dataset.column_names) - set(signature_columns))\n dset_description = \"\" if description is None else f\"in the {description} set \"\n logger.info(\n f\"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}.\"\n )\n dataset.set_format(type=dataset.format[\"type\"], columns=columns)\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:\n if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(\n self.train_dataset, collections.abc.Sized\n ):\n return None\n\n # Gather the number of processes and this process index.\n if self.args.parallel_mode == ParallelMode.TPU:\n num_processes = xm.xrt_world_size()\n process_index = xm.get_ordinal()\n elif (\n self.args.parallel_mode == ParallelMode.DISTRIBUTED\n or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED\n ):\n num_processes = dist.get_world_size()\n process_index = dist.get_rank()\n else:\n num_processes = 1\n process_index = 0\n\n # Build the sampler.\n if self.args.group_by_length:\n if num_processes <= 1:\n return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)\n else:\n return DistributedLengthGroupedSampler(\n self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index\n )\n\n else:\n if num_processes <= 1:\n return RandomSampler(self.train_dataset)\n else:\n return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)\n\n def get_train_dataloader(self) -> DataLoader:\n \"\"\"\n Returns the training :class:`~torch.utils.data.DataLoader`.\n\n Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted\n to distributed training if necessary) otherwise.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n train_sampler = self._get_train_sampler()\n\n return DataLoader(\n self.train_dataset,\n batch_size=self.args.train_batch_size,\n sampler=train_sampler,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:\n if is_torch_tpu_available():\n return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())\n elif self.args.local_rank != -1:\n return SequentialDistributedSampler(eval_dataset)\n else:\n return SequentialSampler(eval_dataset)\n\n def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:\n \"\"\"\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not\n accepted by the ``model.forward()`` method are automatically removed. 
It must implement :obj:`__len__`.\n \"\"\"\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(eval_dataset, description=\"evaluation\")\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n eval_sampler = self._get_eval_sampler(eval_dataset)\n\n return DataLoader(\n eval_dataset,\n sampler=eval_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:\n \"\"\"\n Returns the test :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n \"\"\"\n if not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):\n self._remove_unused_columns(test_dataset, description=\"test\")\n test_sampler = self._get_eval_sampler(test_dataset)\n\n # We use the same batch_size as for eval.\n return DataLoader(\n test_dataset,\n sampler=test_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def create_optimizer_and_scheduler(self, num_training_steps: int):\n \"\"\"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.\n \"\"\"\n if self.optimizer is None:\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer_cls = Adafactor if self.args.adafactor else AdamW\n if self.args.adafactor:\n optimizer_cls = Adafactor\n optimizer_kwargs = {\"scale_parameter\": False, \"relative_step\": False}\n else:\n optimizer_cls = AdamW\n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n }\n optimizer_kwargs[\"lr\"] = self.args.learning_rate\n if self.sharded_dpp:\n self.optimizer = OSS(\n params=optimizer_grouped_parameters,\n optim=optimizer_cls,\n **optimizer_kwargs,\n )\n else:\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n\n if self.lr_scheduler is None:\n self.lr_scheduler = get_scheduler(\n self.args.lr_scheduler_type,\n self.optimizer,\n num_warmup_steps=self.args.warmup_steps,\n num_training_steps=num_training_steps,\n )\n\n def num_examples(self, dataloader: DataLoader) -> int:\n \"\"\"\n Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.\n\n Will raise an exception if the underlying dataset dese not implement method :obj:`__len__`\n \"\"\"\n return len(dataloader.dataset)\n\n def _hp_search_setup(self, trial: Union[\"optuna.Trial\", Dict[str, Any]]):\n \"\"\" HP search setup code \"\"\"\n self._trial = trial\n\n if self.hp_search_backend is None or trial is None:\n return\n\n params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial\n for key, value in params.items():\n if not hasattr(self.args, key):\n raise AttributeError(\n f\"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`.\"\n )\n old_attr = getattr(self.args, key, None)\n # Casting value to the proper type\n if old_attr is not None:\n value = type(old_attr)(value)\n setattr(self.args, key, value)\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n logger.info(\"Trial:\", trial.params)\n\n def _report_to_hp_search(\n self, trial: Union[\"optuna.Trial\", Dict[str, Any]], epoch: int, metrics: Dict[str, float]\n ):\n if self.hp_search_backend is None or trial is None:\n return\n self.objective = self.compute_objective(metrics.copy())\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n import optuna\n\n trial.report(self.objective, epoch)\n if trial.should_prune():\n raise optuna.TrialPruned()\n elif self.hp_search_backend == HPSearchBackend.RAY:\n from ray import tune\n\n if self.state.global_step % self.args.save_steps == 0:\n self._tune_save_checkpoint()\n tune.report(objective=self.objective, **metrics)\n\n def _tune_save_checkpoint(self):\n from ray import tune\n\n if not self.use_tune_checkpoints:\n return\n with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:\n self.args.output_dir = checkpoint_dir\n output_dir = os.path.join(self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\")\n self.save_model(output_dir)\n if self.is_world_process_zero():\n 
self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n\n def call_model_init(self, trial=None):\n model_init_argcount = len(inspect.signature(self.model_init).parameters)\n if model_init_argcount == 0:\n model = self.model_init()\n elif model_init_argcount == 1:\n model = self.model_init(trial)\n else:\n raise RuntimeError(\"model_init should have 0 or 1 argument.\")\n\n if model is None:\n raise RuntimeError(\"model_init should not return None.\")\n\n return model\n\n def train(\n self,\n resume_from_checkpoint: Optional[str] = None,\n trial: Union[\"optuna.Trial\", Dict[str, Any]] = None,\n **kwargs,\n ):\n \"\"\"\n Main training entry point.\n\n Args:\n resume_from_checkpoint (:obj:`str`, `optional`):\n Local path to a saved checkpoint as saved by a previous instance of :class:`~transformers.Trainer`. If\n present, training will resume from the model/optimizer/scheduler states loaded here.\n trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):\n The trial run or the hyperparameter dictionary for hyperparameter search.\n kwargs:\n Additional keyword arguments used to hide deprecated arguments\n \"\"\"\n if \"model_path\" in kwargs:\n resume_from_checkpoint = kwargs.pop(\"model_path\")\n warnings.warn(\n \"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` \"\n \"instead.\",\n FutureWarning,\n )\n if len(kwargs) > 0:\n raise TypeError(f\"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.\")\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n model_reloaded = False\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(self.args.seed)\n self.model = self.call_model_init(trial)\n model_reloaded = True\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Load potential model checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):\n logger.info(f\"Loading model from {resume_from_checkpoint}).\")\n if isinstance(self.model, PreTrainedModel):\n self.model = self.model.from_pretrained(resume_from_checkpoint)\n model_reloaded = True\n else:\n state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))\n self.model.load_state_dict(state_dict)\n\n # If model was re-initialized, put it on the right device and update self.model_wrapped\n if model_reloaded:\n if not self.is_model_parallel:\n self.model = self.model.to(self.args.device)\n self.model_wrapped = self.model\n\n # Keeping track whether we can can len() on the dataset or not\n train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if self.args.max_steps > 0:\n max_steps = self.args.max_steps\n 
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(\n self.args.max_steps % num_update_steps_per_epoch > 0\n )\n else:\n max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(self.args.num_train_epochs)\n else:\n # see __init__. max_steps is set when the dataset has no __len__\n max_steps = self.args.max_steps\n num_train_epochs = 1\n num_update_steps_per_epoch = max_steps\n\n if self.args.deepspeed:\n model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)\n self.model = model.module\n self.model_wrapped = model # will get further wrapped in DDP\n self.deepspeed = model # DeepSpeedEngine object\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n else:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n model = self.model_wrapped\n\n # Mixed precision training with apex (torch < 1.6)\n if self.use_apex:\n model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)\n\n # Multi-gpu training (should be after apex fp16 initialization)\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if self.sharded_dpp:\n model = ShardedDDP(model, self.optimizer)\n elif is_sagemaker_distributed_available():\n model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)\n elif self.args.local_rank != -1:\n if self.args.ddp_find_unused_parameters is not None:\n find_unused_parameters = self.args.ddp_find_unused_parameters\n elif isinstance(model, PreTrainedModel):\n # find_unused_parameters breaks checkpointing as per\n # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021\n find_unused_parameters = not getattr(model.config, \"gradient_checkpointing\", False)\n else:\n find_unused_parameters = True\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.args.local_rank],\n output_device=self.args.local_rank,\n find_unused_parameters=find_unused_parameters,\n )\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.\n\n # Train!\n if is_torch_tpu_available():\n world_size = xm.xrt_world_size()\n elif self.args.local_rank != -1:\n world_size = dist.get_world_size()\n else:\n world_size = 1\n\n total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size\n num_examples = (\n self.num_examples(train_dataloader)\n if train_dataset_is_sized\n else total_train_batch_size * self.args.max_steps\n )\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {self.args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(\n os.path.join(resume_from_checkpoint, \"trainer_state.json\")\n ):\n self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, \"trainer_state.json\"))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not self.args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not self.args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch.\"\n )\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n self.state.trial_params = hp_params(trial) if trial is not None else None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(self.args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n self._total_flos = self.state.total_flos\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not self.args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(\n self.args.device\n )\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if self.args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else 
self.args.max_steps\n self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)\n\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)\n\n if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss += self.training_step(model, inputs)\n else:\n tr_loss += self.training_step(model, inputs)\n self._total_flos += self.floating_point_ops(inputs)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= self.args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.use_amp:\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(self.args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n torch.nn.utils.clip_grad_norm_(\n amp.master_params(self.optimizer) if self.use_apex else model.parameters(),\n self.args.max_grad_norm,\n )\n\n # Optimizer step\n if self.deepspeed:\n self.deepspeed.step()\n elif is_torch_tpu_available():\n xm.optimizer_step(self.optimizer)\n elif self.use_amp:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n self.lr_scheduler.step()\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n\n self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n logger.info(\n f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\"\n )\n if isinstance(self.model, PreTrainedModel):\n self.model = self.model.from_pretrained(self.state.best_model_checkpoint)\n if not self.is_model_parallel:\n self.model = self.model.to(self.args.device)\n else:\n state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))\n self.model.load_state_dict(state_dict)\n\n if self.deepspeed:\n self.deepspeed.load_checkpoint(\n self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False\n )\n\n metrics = speed_metrics(\"train\", start_time, self.state.max_steps)\n if self._total_flos is not None:\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n\n return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)\n\n def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):\n if self.control.should_log:\n logs: Dict[str, float] = {}\n tr_loss_scalar = tr_loss.item()\n # reset tr_loss to zero\n tr_loss -= tr_loss\n\n logs[\"loss\"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)\n # backward compatibility for pytorch schedulers\n logs[\"learning_rate\"] = (\n self.lr_scheduler.get_last_lr()[0]\n if version.parse(torch.__version__) >= version.parse(\"1.4\")\n else self.lr_scheduler.get_lr()[0]\n )\n self._total_loss_scalar += tr_loss_scalar\n self._globalstep_last_logged = self.state.global_step\n\n self.log(logs)\n\n metrics = None\n if self.control.should_evaluate:\n metrics = self.evaluate()\n self._report_to_hp_search(trial, epoch, metrics)\n\n if self.control.should_save:\n self._save_checkpoint(model, trial, metrics=metrics)\n self.control = self.callback_handler.on_save(self.args, self.state, self.control)\n\n def _save_checkpoint(self, model, trial, metrics=None):\n # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we\n # want to save.\n assert _model_unwrap(model) is self.model, \"internal model should be a reference to self.model\"\n\n # Save model checkpoint\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n\n if self.hp_search_backend is not None and trial is not None:\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n run_id = trial.number\n else:\n from ray import tune\n\n run_id = tune.get_trial_id()\n run_name = self.hp_name(trial) if self.hp_name is not None else f\"run-{run_id}\"\n output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)\n else:\n output_dir = os.path.join(self.args.output_dir, checkpoint_folder)\n\n self.store_flos()\n\n self.save_model(output_dir)\n if self.deepspeed:\n self.deepspeed.save_checkpoint(output_dir)\n\n # Save optimizer and scheduler\n if self.sharded_dpp:\n self.optimizer.consolidate_state_dict()\n\n if is_torch_tpu_available():\n xm.rendezvous(\"saving_optimizer_states\")\n xm.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, 
\"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n elif self.is_world_process_zero() and not self.deepspeed:\n # deepspeed.save_checkpoint above saves model/optim/sched\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n\n # Determine the new best metric / best model checkpoint\n if metrics is not None and self.args.metric_for_best_model is not None:\n metric_to_check = self.args.metric_for_best_model\n if not metric_to_check.startswith(\"eval_\"):\n metric_to_check = f\"eval_{metric_to_check}\"\n metric_value = metrics[metric_to_check]\n\n operator = np.greater if self.args.greater_is_better else np.less\n if (\n self.state.best_metric is None\n or self.state.best_model_checkpoint is None\n or operator(metric_value, self.state.best_metric)\n ):\n self.state.best_metric = metric_value\n self.state.best_model_checkpoint = output_dir\n\n # Save the Trainer state\n if self.is_world_process_zero():\n self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n\n # Maybe delete some older checkpoints.\n if self.is_world_process_zero():\n self._rotate_checkpoints(use_mtime=True)\n\n def _load_optimizer_and_scheduler(self, checkpoint):\n \"\"\"If optimizer and scheduler states exist, load them.\"\"\"\n if checkpoint is None:\n return\n\n if os.path.isfile(os.path.join(checkpoint, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(checkpoint, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n if is_torch_tpu_available():\n # On TPU we have to take some extra precautions to properly load the states on the right device.\n optimizer_state = torch.load(os.path.join(checkpoint, \"optimizer.pt\"), map_location=\"cpu\")\n with warnings.catch_warnings(record=True) as caught_warnings:\n lr_scheduler_state = torch.load(os.path.join(checkpoint, \"scheduler.pt\"), map_location=\"cpu\")\n reissue_pt_warnings(caught_warnings)\n\n xm.send_cpu_data_to_device(optimizer_state, self.args.device)\n xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)\n\n self.optimizer.load_state_dict(optimizer_state)\n self.lr_scheduler.load_state_dict(lr_scheduler_state)\n else:\n self.optimizer.load_state_dict(\n torch.load(os.path.join(checkpoint, \"optimizer.pt\"), map_location=self.args.device)\n )\n with warnings.catch_warnings(record=True) as caught_warnings:\n self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, \"scheduler.pt\")))\n reissue_pt_warnings(caught_warnings)\n\n if self.deepspeed:\n # Not sure how to check if there is a saved deepspeed checkpoint, but since it just return None if it fails to find a deepspeed checkpoint this is sort of a check-n-load function\n self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)\n\n def hyperparameter_search(\n self,\n hp_space: Optional[Callable[[\"optuna.Trial\"], Dict[str, float]]] = None,\n compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,\n n_trials: int = 20,\n direction: str = \"minimize\",\n backend: Optional[Union[\"str\", HPSearchBackend]] = None,\n hp_name: Optional[Callable[[\"optuna.Trial\"], str]] = None,\n **kwargs,\n ) -> BestRun:\n \"\"\"\n Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. 
The optimized quantity is determined by\n :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is\n provided, the sum of all metrics otherwise.\n\n .. warning::\n\n To use this method, you need to have provided a ``model_init`` when initializing your\n :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible\n with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the\n method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.\n\n Args:\n hp_space (:obj:`Callable[[\"optuna.Trial\"], Dict[str, float]]`, `optional`):\n A function that defines the hyperparameter search space. Will default to\n :func:`~transformers.trainer_utils.default_hp_space_optuna` or\n :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.\n compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):\n A function computing the objective to minimize or maximize from the metrics returned by the\n :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.\n n_trials (:obj:`int`, `optional`, defaults to 100):\n The number of trial runs to test.\n direction(:obj:`str`, `optional`, defaults to :obj:`\"minimize\"`):\n Whether to optimize greater or lower objects. Can be :obj:`\"minimize\"` or :obj:`\"maximize\"`, you should\n pick :obj:`\"minimize\"` when optimizing the validation loss, :obj:`\"maximize\"` when optimizing one or\n several metrics.\n backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):\n The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which\n one is installed. If both are installed, will default to optuna.\n kwargs:\n Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For\n more information see:\n\n - the documentation of `optuna.create_study\n <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__\n - the documentation of `tune.run\n <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__\n\n Returns:\n :class:`transformers.trainer_utils.BestRun`: All the information about the best run.\n \"\"\"\n if backend is None:\n backend = default_hp_search_backend()\n if backend is None:\n raise RuntimeError(\n \"At least one of optuna or ray should be installed. \"\n \"To install optuna run `pip install optuna`.\"\n \"To install ray run `pip install ray[tune]`.\"\n )\n backend = HPSearchBackend(backend)\n if backend == HPSearchBackend.OPTUNA and not is_optuna_available():\n raise RuntimeError(\"You picked the optuna backend, but it is not installed. Use `pip install optuna`.\")\n if backend == HPSearchBackend.RAY and not is_ray_tune_available():\n raise RuntimeError(\n \"You picked the Ray Tune backend, but it is not installed. 
Use `pip install 'ray[tune]'`.\"\n )\n self.hp_search_backend = backend\n if self.model_init is None:\n raise RuntimeError(\n \"To use hyperparameter search, you need to pass your model through a model_init function.\"\n )\n\n self.hp_space = default_hp_space[backend] if hp_space is None else hp_space\n self.hp_name = hp_name\n self.compute_objective = default_compute_objective if compute_objective is None else compute_objective\n\n run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray\n best_run = run_hp_search(self, n_trials, direction, **kwargs)\n\n self.hp_search_backend = None\n return best_run\n\n def log(self, logs: Dict[str, float]) -> None:\n \"\"\"\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n \"\"\"\n if self.state.epoch is not None:\n logs[\"epoch\"] = round(self.state.epoch, 2)\n\n output = {**logs, **{\"step\": self.state.global_step}}\n self.state.log_history.append(output)\n self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)\n\n def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:\n \"\"\"\n Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and\n handling potential state.\n \"\"\"\n for k, v in inputs.items():\n if isinstance(v, torch.Tensor):\n inputs[k] = v.to(self.args.device)\n\n if self.args.past_index >= 0 and self._past is not None:\n inputs[\"mems\"] = self._past\n\n return inputs\n\n def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:\n \"\"\"\n Perform a training step on a batch of inputs.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to train.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n\n Return:\n :obj:`torch.Tensor`: The tensor with training loss on this batch.\n \"\"\"\n\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if self.use_amp:\n with autocast():\n loss = self.compute_loss(model, inputs)\n else:\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.args.gradient_accumulation_steps > 1:\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.use_amp:\n self.scaler.scale(loss).backward()\n elif self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.deepspeed:\n self.deepspeed.backward(loss)\n else:\n loss.backward()\n\n return loss.detach()\n\n def compute_loss(self, model, inputs, return_outputs=False):\n \"\"\"\n How the loss is computed by Trainer. 
By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n \"\"\"\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n # Save past state if it exists\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n loss = self.label_smoother(outputs, labels)\n else:\n # We don't use .loss here since the model may return tuples instead of ModelOutput.\n loss = outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n return (loss, outputs) if return_outputs else loss\n\n def is_local_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several\n machines) main process.\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=True)\n else:\n return self.args.local_rank in [-1, 0]\n\n def is_world_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the global main process (when training in a distributed fashion on several\n machines, this is only going to be :obj:`True` for one process).\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=False)\n else:\n return self.args.local_rank == -1 or dist.get_rank() == 0\n\n def save_model(self, output_dir: Optional[str] = None):\n \"\"\"\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n\n Will only save from the world_master process (unless in TPUs).\n \"\"\"\n\n if is_torch_tpu_available():\n self._save_tpu(output_dir)\n elif self.is_world_process_zero():\n self._save(output_dir)\n\n # If on sagemaker and we are saving the main model (not a checkpoint so output_dir=None), save a copy to\n # SM_MODEL_DIR for easy deployment.\n if output_dir is None and os.getenv(\"SM_MODEL_DIR\") is not None:\n self.save_model(output_dir=os.getenv(\"SM_MODEL_DIR\"))\n\n def _save_tpu(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if xm.is_master_ordinal():\n os.makedirs(output_dir, exist_ok=True)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n xm.rendezvous(\"saving_checkpoint\")\n if not isinstance(self.model, PreTrainedModel):\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir)\n if self.tokenizer is not None and self.is_world_process_zero():\n self.tokenizer.save_pretrained(output_dir)\n\n def _save(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel):\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n torch.save(state_dict, os.path.join(output_dir, 
WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir)\n if self.tokenizer is not None and self.is_world_process_zero():\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n def store_flos(self):\n # Storing the number of floating-point operations that went into the model\n if self._total_flos is not None:\n if self.args.local_rank != -1:\n self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()\n else:\n self.state.total_flos = self._total_flos\n\n def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f\"{checkpoint_prefix}-*\")]\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(f\".*{checkpoint_prefix}-([0-9]+)\", path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n # Make sure we don't delete the best model.\n if self.state.best_model_checkpoint is not None:\n best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))\n checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (\n checkpoints_sorted[-1],\n checkpoints_sorted[best_model_index],\n )\n return checkpoints_sorted\n\n def _rotate_checkpoints(self, use_mtime=False) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n def evaluate(\n self,\n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init :obj:`compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,\n columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the\n :obj:`__len__` method.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. 
For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if self.compute_metrics is None else None,\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n\n n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))\n self.log(output.metrics)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)\n return output.metrics\n\n def predict(\n self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = \"eval\"\n ) -> PredictionOutput:\n \"\"\"\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method\n will also return metrics, like in :obj:`evaluate()`.\n\n Args:\n test_dataset (:obj:`Dataset`):\n Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n .. note::\n\n If your predictions or labels have different sequence length (for instance because you're doing dynamic\n padding in a token classification task) the predictions will be padded (on the right) to allow for\n concatenation into one array. 
The padding index is -100.\n\n Returns: `NamedTuple` A namedtuple with the following keys:\n\n - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.\n - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).\n - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset\n contained labels).\n \"\"\"\n if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n\n test_dataloader = self.get_test_dataloader(test_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n test_dataloader, description=\"Prediction\", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix\n )\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))\n return output\n\n def prediction_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.\n\n Works both with or without labels.\n \"\"\"\n if not isinstance(dataloader.dataset, collections.abc.Sized):\n raise ValueError(\"dataset must implement __len__\")\n prediction_loss_only = (\n prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only\n )\n\n model = self.model\n # multi-gpu eval\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n # Note: in torch.distributed mode, there's no point in wrapping the model\n # inside a DistributedDataParallel as we'll be under `no_grad` anyways.\n\n batch_size = dataloader.batch_size\n num_examples = self.num_examples(dataloader)\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", num_examples)\n logger.info(\" Batch size = %d\", batch_size)\n losses_host: torch.Tensor = None\n preds_host: Union[torch.Tensor, List[torch.Tensor]] = None\n labels_host: Union[torch.Tensor, List[torch.Tensor]] = None\n\n world_size = 1\n if is_torch_tpu_available():\n world_size = xm.xrt_world_size()\n elif self.args.local_rank != -1:\n world_size = dist.get_world_size()\n world_size = max(1, world_size)\n\n eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)\n if not prediction_loss_only:\n preds_gatherer = DistributedTensorGatherer(world_size, num_examples)\n labels_gatherer = DistributedTensorGatherer(world_size, num_examples)\n\n model.eval()\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)\n\n if self.args.past_index >= 0:\n self._past = None\n\n self.callback_handler.eval_dataloader = dataloader\n\n for step, inputs in enumerate(dataloader):\n loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)\n if loss is not None:\n losses = loss.repeat(batch_size)\n losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)\n if logits is not None:\n preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)\n if labels is not None:\n labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(self.args, self.state, 
self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, labels_host = None, None, None\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n eval_loss = eval_losses_gatherer.finalize()\n preds = preds_gatherer.finalize() if not prediction_loss_only else None\n label_ids = labels_gatherer.finalize() if not prediction_loss_only else None\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n\n if eval_loss is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = eval_loss.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def _gather_and_numpify(self, tensors, name):\n \"\"\"\n Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before\n concatenating them to `gathered`\n \"\"\"\n if tensors is None:\n return\n if is_torch_tpu_available():\n tensors = nested_xla_mesh_reduce(tensors, name)\n elif self.args.local_rank != -1:\n tensors = distributed_concat(tensors)\n\n return nested_numpify(tensors)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on :obj:`model` using obj:`inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to evaluate.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (:obj:`bool`):\n Whether or not to return the loss only.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n has_labels = all(inputs.get(k) is not None for k in self.label_names)\n inputs = self._prepare_inputs(inputs)\n if ignore_keys is None:\n if hasattr(self.model, \"config\"):\n ignore_keys = getattr(self.model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n with torch.no_grad():\n if has_labels:\n loss, outputs = self.compute_loss(model, inputs, return_outputs=True)\n loss = loss.mean().detach()\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + [\"loss\"])\n else:\n logits = outputs[1:]\n else:\n loss = None\n if self.use_amp:\n with autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)\n else:\n logits = outputs\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index - 1]\n\n if prediction_loss_only:\n return (loss, None, None)\n\n logits = nested_detach(logits)\n if len(logits) == 1:\n logits = logits[0]\n\n if has_labels:\n labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))\n if len(labels) == 1:\n labels = labels[0]\n else:\n labels = None\n\n return (loss, logits, labels)\n\n def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):\n \"\"\"\n For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of\n floating point operations for every backward + forward pass. If using another model, either implement such a\n method in the model or subclass and override this method.\n\n Args:\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n Returns:\n :obj:`int`: The number of floating-point operations.\n \"\"\"\n if hasattr(self.model, \"floating_point_ops\"):\n return self.model.floating_point_ops(inputs)\n else:\n return 0\n" ]
[ [ "torch.cuda.amp.GradScaler", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.utils.data.distributed.DistributedSampler", "torch.utils.data.sampler.SequentialSampler", "torch.no_grad", "torch.tensor", "torch.utils.data.dataloader.DataLoader", "torch.utils.data.sampler.RandomSampler", "torch.cuda.amp.autocast", "torch.nn.parallel.DistributedDataParallel", "torch.distributed.get_local_rank", "torch.nn.DataParallel", "torch.cat" ] ]
arpitdm/nifty
[ "763792d2ddc72f2af8c6d1372c5ed8d04c741ae1" ]
[ "models/fairgnn.py" ]
[ "import torch.nn as nn\nfrom models import *\nimport torch\nimport gc\n\ndef get_model(nfeat, args):\n if args.model == \"gcn\":\n model = GCN_Body(nfeat,args.num_hidden,args.dropout)\n elif args.model == \"gat\":\n heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]\n model = GAT_body(args.num_layers,nfeat,args.num_hidden,heads,args.dropout,args.attn_drop,args.negative_slope,args.residual)\n else:\n print(\"Model not implement\")\n return\n\n return model\n\nclass FairGNN(nn.Module):\n\n def __init__(self, nfeat, args):\n super(FairGNN,self).__init__()\n\n nhid = args.num_hidden\n dropout = args.dropout\n self.estimator = GCN(nfeat,args.hidden,1,dropout)\n self.GNN = get_model(nfeat,args)\n self.classifier = nn.Linear(nhid,1)\n self.adv = nn.Linear(nhid,1)\n\n # G_params = list(self.GNN.parameters()) + list(self.classifier.parameters()) + list(self.estimator.parameters())\n # self.optimizer_G = torch.optim.Adam(G_params, lr = args.lr, weight_decay = args.weight_decay)\n # self.optimizer_A = torch.optim.Adam(self.adv.parameters(), lr = args.lr, weight_decay = args.weight_decay)\n\n self.args = args\n # self.criterion = nn.BCEWithLogitsLoss()\n\n self.G_loss = 0\n self.A_loss = 0\n\n def forward(self, x, edge_index):\n s = self.estimator(x, edge_index)\n z = self.GNN(x, edge_index)\n y = self.classifier(z)\n return y, s, z\n \n def optimize(self,g,x,labels,idx_train,sens,idx_sens_train):\n self.train()\n\n ### update E, G\n self.adv.requires_grad_(False)\n self.optimizer_G.zero_grad()\n\n s = self.estimator(g,x)\n h = self.GNN(g,x)\n y = self.classifier(h)\n\n s_g = self.adv(h)\n\n s_score = torch.sigmoid(s.detach())\n # s_score = (s_score > 0.5).float()\n s_score[idx_sens_train]=sens[idx_sens_train].unsqueeze(1).float()\n y_score = torch.sigmoid(y)\n self.cov = torch.abs(torch.mean((s_score - torch.mean(s_score)) * (y_score - torch.mean(y_score))))\n \n self.cls_loss = self.criterion(y[idx_train],labels[idx_train].unsqueeze(1).float())\n self.adv_loss = self.criterion(s_g,s_score)\n \n self.G_loss = self.cls_loss + self.args.alpha * self.cov - self.args.beta * self.adv_loss\n self.G_loss.backward()\n self.optimizer_G.step()\n\n ## update Adv\n self.adv.requires_grad_(True)\n self.optimizer_A.zero_grad()\n s_g = self.adv(h.detach())\n self.A_loss = self.criterion(s_g,s_score)\n self.A_loss.backward()\n self.optimizer_A.step()\n\n\n" ]
[ [ "torch.sigmoid", "torch.nn.Linear", "torch.mean" ] ]
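The FairGNN record above alternates two updates: a generator step that minimises classification loss plus a covariance penalty minus the adversary loss (with the adversary frozen), and an adversary step on detached representations. The sketch below reproduces that training loop with plain linear layers standing in for the GNN body and synthetic tensors standing in for graph data; all tensor shapes, the alpha/beta values, and the single-step structure are assumptions for illustration.

import torch
import torch.nn as nn

torch.manual_seed(0)
x = torch.randn(32, 8)                          # stand-in node features
labels = torch.randint(0, 2, (32, 1)).float()   # stand-in class labels
sens = torch.randint(0, 2, (32, 1)).float()     # stand-in sensitive attribute

encoder = nn.Linear(8, 16)                      # stands in for the GNN body
classifier = nn.Linear(16, 1)
adv = nn.Linear(16, 1)
criterion = nn.BCEWithLogitsLoss()
opt_g = torch.optim.Adam(list(encoder.parameters()) + list(classifier.parameters()), lr=1e-3)
opt_a = torch.optim.Adam(adv.parameters(), lr=1e-3)
alpha, beta = 4.0, 0.01

# Generator step: classification loss + covariance penalty - adversary loss,
# with the adversary's parameters frozen (gradients still flow through it into h).
adv.requires_grad_(False)
opt_g.zero_grad()
h = encoder(x)
y = classifier(h)
s_g = adv(h)
y_score = torch.sigmoid(y)
cov = torch.abs(torch.mean((sens - torch.mean(sens)) * (y_score - torch.mean(y_score))))
g_loss = criterion(y, labels) + alpha * cov - beta * criterion(s_g, sens)
g_loss.backward()
opt_g.step()

# Adversary step on detached representations, mirroring optimize() above.
adv.requires_grad_(True)
opt_a.zero_grad()
a_loss = criterion(adv(h.detach()), sens)
a_loss.backward()
opt_a.step()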
buchgr/tensorflow
[ "2938772a08ed02ced4663ca38168ab3f82e8f81b" ]
[ "tensorflow/python/keras/saving/model_architectures.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for saving/loading function for keras Model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom tensorflow.python import keras\n\n# Declaring namedtuple()\nModelFn = collections.namedtuple('ModelFn',\n ['model', 'input_shape', 'target_shape'])\n\n\ndef basic_sequential():\n \"\"\"Basic sequential model.\"\"\"\n model = keras.Sequential([\n keras.layers.Dense(3, activation='relu', input_shape=(3,)),\n keras.layers.Dense(2, activation='softmax'),\n ])\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef basic_sequential_deferred():\n \"\"\"Sequential model with deferred input shape.\"\"\"\n model = keras.Sequential([\n keras.layers.Dense(3, activation='relu'),\n keras.layers.Dense(2, activation='softmax'),\n ])\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef stacked_rnn():\n \"\"\"Stacked RNN model.\"\"\"\n inputs = keras.Input((None, 3))\n layer = keras.layers.RNN([keras.layers.LSTMCell(2) for _ in range(3)])\n x = layer(inputs)\n outputs = keras.layers.Dense(2)(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 4, 3), (None, 2))\n\n\ndef lstm():\n \"\"\"LSTM model.\"\"\"\n inputs = keras.Input((None, 3))\n x = keras.layers.LSTM(4, return_sequences=True)(inputs)\n x = keras.layers.LSTM(3, return_sequences=True)(x)\n x = keras.layers.LSTM(2, return_sequences=False)(x)\n outputs = keras.layers.Dense(2)(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 4, 3), (None, 2))\n\n\ndef multi_input_multi_output():\n \"\"\"Multi-input Multi-ouput model.\"\"\"\n body_input = keras.Input(shape=(None,), name='body')\n tags_input = keras.Input(shape=(2,), name='tags')\n\n x = keras.layers.Embedding(10, 4)(body_input)\n body_features = keras.layers.LSTM(5)(x)\n x = keras.layers.concatenate([body_features, tags_input])\n\n pred_1 = keras.layers.Dense(2, activation='sigmoid', name='priority')(x)\n pred_2 = keras.layers.Dense(3, activation='softmax', name='department')(x)\n\n model = keras.Model(\n inputs=[body_input, tags_input], outputs=[pred_1, pred_2])\n return ModelFn(model, [(None, 1), (None, 2)], [(None, 2), (None, 3)])\n\n\ndef nested_sequential_in_functional():\n \"\"\"A sequential model nested in a functional model.\"\"\"\n inner_model = keras.Sequential([\n keras.layers.Dense(3, activation='relu', input_shape=(3,)),\n keras.layers.Dense(2, activation='relu'),\n ])\n\n inputs = keras.Input(shape=(3,))\n x = inner_model(inputs)\n outputs = keras.layers.Dense(2, activation='softmax')(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef seq_to_seq():\n \"\"\"Sequence to sequence model.\"\"\"\n num_encoder_tokens = 3\n num_decoder_tokens = 3\n latent_dim = 2\n encoder_inputs = keras.Input(shape=(None, 
num_encoder_tokens))\n encoder = keras.layers.LSTM(latent_dim, return_state=True)\n _, state_h, state_c = encoder(encoder_inputs)\n encoder_states = [state_h, state_c]\n decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))\n decoder_lstm = keras.layers.LSTM(\n latent_dim, return_sequences=True, return_state=True)\n decoder_outputs, _, _ = decoder_lstm(\n decoder_inputs, initial_state=encoder_states)\n decoder_dense = keras.layers.Dense(num_decoder_tokens, activation='softmax')\n decoder_outputs = decoder_dense(decoder_outputs)\n model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n return ModelFn(\n model, [(None, 2, num_encoder_tokens), (None, 2, num_decoder_tokens)],\n (None, 2, num_decoder_tokens))\n\n\ndef shared_layer_functional():\n \"\"\"Shared layer in a functional model.\"\"\"\n main_input = keras.Input(shape=(10,), dtype='int32', name='main_input')\n x = keras.layers.Embedding(\n output_dim=5, input_dim=4, input_length=10)(main_input)\n lstm_out = keras.layers.LSTM(3)(x)\n auxiliary_output = keras.layers.Dense(\n 1, activation='sigmoid', name='aux_output')(lstm_out)\n auxiliary_input = keras.Input(shape=(5,), name='aux_input')\n x = keras.layers.concatenate([lstm_out, auxiliary_input])\n x = keras.layers.Dense(2, activation='relu')(x)\n main_output = keras.layers.Dense(\n 1, activation='sigmoid', name='main_output')(x)\n model = keras.Model(\n inputs=[main_input, auxiliary_input],\n outputs=[main_output, auxiliary_output])\n return ModelFn(model, [(None, 10), (None, 5)], [(None, 1), (None, 1)])\n\n\ndef shared_sequential():\n \"\"\"Shared sequential model in a functional model.\"\"\"\n inner_model = keras.Sequential([\n keras.layers.Conv2D(2, 3, activation='relu'),\n keras.layers.Conv2D(2, 3, activation='relu'),\n ])\n inputs_1 = keras.Input((5, 5, 3))\n inputs_2 = keras.Input((5, 5, 3))\n x1 = inner_model(inputs_1)\n x2 = inner_model(inputs_2)\n x = keras.layers.concatenate([x1, x2])\n outputs = keras.layers.GlobalAveragePooling2D()(x)\n model = keras.Model([inputs_1, inputs_2], outputs)\n return ModelFn(model, [(None, 5, 5, 3), (None, 5, 5, 3)], (None, 4))\n\n\nclass _MySubclassModel(keras.Model):\n \"\"\"A subclass model.\"\"\"\n\n def __init__(self):\n super(_MySubclassModel, self).__init__(name='my_subclass_model')\n self.dense1 = keras.layers.Dense(8, activation='relu')\n self.dense2 = keras.layers.Dense(2, activation='softmax')\n self.bn = keras.layers.BatchNormalization()\n self.dp = keras.layers.Dropout(0.5)\n\n def call(self, inputs, **kwargs):\n x = self.dense1(inputs)\n x = self.dp(x)\n x = self.bn(x)\n return self.dense2(x)\n\n\ndef nested_subclassed_model():\n \"\"\"A subclass model nested in another subclass model.\"\"\"\n\n class NestedSubclassModel(keras.Model):\n \"\"\"A nested subclass model.\"\"\"\n\n def __init__(self):\n super(NestedSubclassModel, self).__init__()\n self.dense1 = keras.layers.Dense(4, activation='relu')\n self.dense2 = keras.layers.Dense(2, activation='relu')\n self.bn = keras.layers.BatchNormalization()\n self.inner_subclass_model = _MySubclassModel()\n\n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.bn(x)\n x = self.inner_subclass_model(x)\n return self.dense2(x)\n\n return ModelFn(NestedSubclassModel(), (None, 3), (None, 2))\n\n\ndef nested_subclassed_in_functional_model():\n \"\"\"A subclass model nested in a functional model.\"\"\"\n inner_subclass_model = _MySubclassModel()\n inputs = keras.Input(shape=(3,))\n x = inner_subclass_model(inputs)\n x = keras.layers.BatchNormalization()(x)\n 
outputs = keras.layers.Dense(2, activation='softmax')(x)\n model = keras.Model(inputs, outputs)\n return ModelFn(model, (None, 3), (None, 2))\n\n\ndef nested_functional_in_subclassed_model():\n \"\"\"A functional model nested in a subclass model.\"\"\"\n def get_functional_model():\n inputs = keras.Input(shape=(4,))\n x = keras.layers.Dense(4, activation='relu')(inputs)\n x = keras.layers.BatchNormalization()(x)\n outputs = keras.layers.Dense(2)(x)\n return keras.Model(inputs, outputs)\n\n class NestedFunctionalInSubclassModel(keras.Model):\n \"\"\"A functional nested in subclass model.\"\"\"\n\n def __init__(self):\n super(NestedFunctionalInSubclassModel, self).__init__(\n name='nested_functional_in_subclassed_model')\n self.dense1 = keras.layers.Dense(4, activation='relu')\n self.dense2 = keras.layers.Dense(2, activation='relu')\n self.inner_functional_model = get_functional_model()\n\n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.inner_functional_model(x)\n return self.dense2(x)\n return ModelFn(NestedFunctionalInSubclassModel(), (None, 3), (None, 2))\n\n\ndef shared_layer_subclassed_model():\n \"\"\"Shared layer in a subclass model.\"\"\"\n\n class SharedLayerSubclassModel(keras.Model):\n \"\"\"A subclass model with shared layers.\"\"\"\n\n def __init__(self):\n super(SharedLayerSubclassModel, self).__init__(\n name='shared_layer_subclass_model')\n self.dense = keras.layers.Dense(3, activation='relu')\n self.dp = keras.layers.Dropout(0.5)\n self.bn = keras.layers.BatchNormalization()\n\n def call(self, inputs):\n x = self.dense(inputs)\n x = self.dp(x)\n x = self.bn(x)\n return self.dense(x)\n return ModelFn(SharedLayerSubclassModel(), (None, 3), (None, 3))\n\n\ndef functional_with_keyword_args():\n \"\"\"A functional model with keyword args.\"\"\"\n inputs = keras.Input(shape=(3,))\n x = keras.layers.Dense(4)(inputs)\n x = keras.layers.BatchNormalization()(x)\n outputs = keras.layers.Dense(2)(x)\n\n model = keras.Model(inputs, outputs, name='m', trainable=False)\n return ModelFn(model, (None, 3), (None, 2))\n\n\nALL_MODELS = [\n ('basic_sequential', basic_sequential),\n ('basic_sequential_deferred', basic_sequential_deferred),\n ('stacked_rnn', stacked_rnn),\n ('lstm', lstm),\n ('multi_input_multi_output', multi_input_multi_output),\n ('nested_sequential_in_functional', nested_sequential_in_functional),\n ('seq_to_seq', seq_to_seq),\n ('shared_layer_functional', shared_layer_functional),\n ('shared_sequential', shared_sequential),\n ('nested_subclassed_model', nested_subclassed_model),\n ('nested_subclassed_in_functional_model',\n nested_subclassed_in_functional_model),\n ('nested_functional_in_subclassed_model',\n nested_functional_in_subclassed_model),\n ('shared_layer_subclassed_model', shared_layer_subclassed_model),\n ('functional_with_keyword_args', functional_with_keyword_args)\n]\n\n\ndef get_models(exclude_models=None):\n \"\"\"Get all models excluding the specificed ones.\"\"\"\n models = [model for model in ALL_MODELS\n if model[0] not in exclude_models]\n return models\n" ]
[ [ "tensorflow.python.keras.Input", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.layers.Embedding", "tensorflow.python.keras.layers.concatenate", "tensorflow.python.keras.layers.LSTM", "tensorflow.python.keras.layers.LSTMCell", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.Dropout", "tensorflow.python.keras.Model", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.layers.GlobalAveragePooling2D" ] ]
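Every builder in the record above returns a ModelFn namedtuple of (model, input_shape, target_shape). A small sketch of that convention using the public tf.keras API (rather than the internal tensorflow.python import in the record) is below; the function name basic_functional and the shape check are assumptions added for illustration.

import collections
import numpy as np
import tensorflow as tf

ModelFn = collections.namedtuple('ModelFn', ['model', 'input_shape', 'target_shape'])

def basic_functional():
    # Same (model, input_shape, target_shape) convention as the record above.
    inputs = tf.keras.Input(shape=(3,))
    x = tf.keras.layers.Dense(3, activation='relu')(inputs)
    outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
    return ModelFn(tf.keras.Model(inputs, outputs), (None, 3), (None, 2))

fn = basic_functional()
out = fn.model(np.zeros((5, 3), dtype='float32'))
assert tuple(out.shape)[1:] == fn.target_shape[1:]   # batch-free output shape matches (2,)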
gvashishtha/azureml-examples
[ "dc7ee4c01410757beeaa52a4f696882ca38e0be7" ]
[ "code/deployment/triton/bidaf_utils.py" ]
[ "\"\"\"score_bidaf.py\n\nScoring script for use with the Bi-directional Attention Flow model from the ONNX model zoo.\nhttps://github.com/onnx/models/tree/master/text/machine_comprehension/bidirectional_attention_flow\n\"\"\"\n\nimport json\nimport nltk\nimport numpy as np\nimport os\n\nfrom nltk import word_tokenize\nfrom utils import get_model_info, parse_model_http, triton_init, triton_infer\nfrom tritonclientutils import triton_to_np_dtype\n\n\ndef preprocess(text, dtype):\n \"\"\"Tokenizes text for use in the bidirectional attention flow model\n\n Parameters\n ---------\n text : str\n Text to be tokenized\n\n dtype : numpy datatype\n Datatype of the resulting array\n\n Returns\n ---------\n (np.array(), np.array())\n Tuple containing two numpy arrays with the tokenized\n words and chars, respectively.\n \n From: https://github.com/onnx/models/tree/master/text/machine_comprehension/bidirectional_attention_flow # noqa\n \"\"\"\n nltk.download(\"punkt\")\n tokens = word_tokenize(text)\n # split into lower-case word tokens, in numpy array with shape of (seq, 1)\n words = np.array([w.lower() for w in tokens], dtype=dtype).reshape(-1, 1)\n # split words into chars, in numpy array with shape of (seq, 1, 1, 16)\n chars = [[c for c in t][:16] for t in tokens]\n chars = [cs + [\"\"] * (16 - len(cs)) for cs in chars]\n chars = np.array(chars, dtype=dtype).reshape(-1, 1, 1, 16)\n return words, chars\n\n\ndef postprocess(context_words, answer):\n \"\"\"Post-process results to show the chosen result\n\n Parameters\n --------\n context_words : str\n Original context\n\n answer : InferResult\n Triton inference result containing start and\n end positions of desired answer\n\n Returns\n --------\n Numpy array containing the words from the context that\n answer the given query.\n \"\"\"\n\n start = answer.as_numpy(\"start_pos\")[0]\n end = answer.as_numpy(\"end_pos\")[0]\n print(f\"start is {start}, end is {end}\")\n return [w.encode() for w in context_words[start : end + 1].reshape(-1)]\n" ]
[ [ "numpy.array" ] ]
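The preprocess() in the record above tokenises text with nltk and returns a (seq, 1) word array plus a (seq, 1, 1, 16) character array padded to 16 characters per token. The sketch below reproduces that layout with plain str.split() so it runs without downloading the nltk 'punkt' data; the whitespace tokenisation and the simple_preprocess name are assumptions, not the record's actual pipeline.

import numpy as np

def simple_preprocess(text, dtype=np.object_):
    tokens = text.split()
    # words: shape (seq, 1); chars: each token truncated/padded to 16 chars, shape (seq, 1, 1, 16)
    words = np.array([w.lower() for w in tokens], dtype=dtype).reshape(-1, 1)
    chars = [[c for c in t][:16] for t in tokens]
    chars = [cs + [''] * (16 - len(cs)) for cs in chars]
    chars = np.array(chars, dtype=dtype).reshape(-1, 1, 1, 16)
    return words, chars

words, chars = simple_preprocess('A quick brown fox jumps over the lazy dog')
print(words.shape, chars.shape)   # (9, 1) (9, 1, 1, 16)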
wi1k1n/nrf-accelerations
[ "3075d63177e8ac04ee91784d5b0c56379335740f" ]
[ "util/visualize_light_samples.py" ]
[ "import argparse, sys, os, os.path as op, json, subprocess\nimport numpy as np\nimport open3d as o3d\nimport plotly.graph_objects as go\n\nPATH = 'D:\\\\edu\\\\UniBonn\\\\Study\\\\thesis\\\\codes\\\\NSVF\\\\'\n# PATH2MODEL = 'D:\\\\edu\\\\UniBonn\\\\Study\\\\thesis\\\\codes\\\\blender\\\\projects\\\\brdf_sphere\\\\brdf_sphere.ply'\n\nplotData = []\n\n\nlight_start = np.load(op.join(PATH, 'light_start.npy'))\nlight_dirs = np.load(op.join(PATH, 'light_dirs.npy'))\nhits = np.load(op.join(PATH, 'hits.npy'))\n\n# sample_xyz = ray_start + ray_dir * sampled_depth\n# sample_xyz = sample_xyz[np.tile(sample_mask, sample_mask + (3,))].reshape(sample_xyz.shape)\n\n# light_start = light_start[39, :25, :]\n# light_dirs = light_dirs[39, :25, :]\n\nlight_start = light_start[:5, ...]\nlight_dirs = light_dirs[:5, ...]\nhits = hits[:5, ...]\n\nfor i, ls in enumerate(light_start):\n\tcv = ls[hits[i] > 0]\n\tplotData.append(go.Scatter3d(x=cv[:, 0], y=cv[:, 1], z=cv[:, 2],\n\t\t\t\t\t\t\t\tname='v{}'.format(i),\n\t\t\t\t\t\t\t\tmarker=dict(size=1, color=\"blue\"),\n\t\t\t\t\t\t\t\tmode='markers')\n\t\t\t\t\t)\nfor i, d in enumerate(light_dirs):\n\tcd = d[hits[i] > 0]\n\tcd /= np.linalg.norm(cd, axis=0)\n\tcv = light_start[i][hits[i] > 0]\n\tcvt = cv + cd\n\tfor j, cp in enumerate(cv):\n\t\tplotData.append(go.Scatter3d(\n\t\t\t\t\t\t\tx=[cp[0], cvt[j, 0]],\n\t\t\t\t\t\t\ty=[cp[1], cvt[j, 1]],\n\t\t\t\t\t\t\tz=[cp[2], cvt[j, 2]],\n\t\t\t\t\t\t\tname='pts',\n\t\t\t\t\t\t\tmarker=dict(size=1, color=\"red\"),\n\t\t\t\t\t\t\tmode='lines')\n\t\t\t\t\t\t)\n\nfig = go.Figure(data=plotData)\nprint('Saving to {0}'.format(os.path.abspath('visualize_light_samples.html')))\nfig.write_html('visualize_light_samples.html', auto_open=True)" ]
[ [ "numpy.linalg.norm" ] ]
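The record above renders ray origins as 3D markers and their direction vectors as line segments with plotly. A compact sketch of the same plot with synthetic data is below; the random light_start/light_dirs/hits arrays and the output filename are assumptions, and the sketch normalises each direction vector individually (axis=1), whereas the record divides by a per-column norm (axis=0).

import numpy as np
import plotly.graph_objects as go

# Synthetic stand-ins for light_start / light_dirs / hits
rng = np.random.default_rng(0)
light_start = rng.normal(size=(20, 3))
light_dirs = rng.normal(size=(20, 3))
light_dirs /= np.linalg.norm(light_dirs, axis=1, keepdims=True)  # unit directions per ray
hits = rng.integers(0, 2, size=20)

pts = light_start[hits > 0]
tips = pts + light_dirs[hits > 0]

data = [go.Scatter3d(x=pts[:, 0], y=pts[:, 1], z=pts[:, 2],
                     mode='markers', marker=dict(size=2, color='blue'), name='origins')]
for p, t in zip(pts, tips):
    data.append(go.Scatter3d(x=[p[0], t[0]], y=[p[1], t[1]], z=[p[2], t[2]],
                             mode='lines', line=dict(color='red'), showlegend=False))
go.Figure(data=data).write_html('light_rays.html')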
evenrus/myeloma_SNV
[ "b8faa365babcc5583bc7b8431e4c5053acb35cb9" ]
[ "myeloma_snv/commands.py" ]
[ "\"\"\"variants_process main command.\"\"\"\n\nimport timeit\nimport re\nfrom datetime import datetime\nfrom os.path import join\nimport pandas as pd\nimport numpy as np\nimport pybedtools as pyb\n\nSTART = timeit.default_timer()\n\n## IMPORT VARIANTS FILE\ndef import_variants(path):\n \"\"\"\n Determine filetype and import, returns pandas dataFrame\n \"\"\"\n if re.search('.csv$', path):\n try:\n variants = pd.read_csv(\n filepath_or_buffer=path,\n comment='#',\n low_memory=False)\n except NameError:\n raise Exception(f'Error when importing file {path}')\n\n print(f'Loaded file containing {variants.shape[0]} '\n f'variant calls. Processing...')\n return(variants)\n elif re.search('.tsv.gz$', path):\n try:\n variants = pd.read_csv(\n filepath_or_buffer=path,\n compression='gzip',\n sep='\\t',\n comment='#',\n low_memory=False)\n except NameError:\n raise Exception(f'Error when importing file {path}')\n print(f'Loaded file containing {variants.shape[0]} '\n f'variant calls. Processing...')\n return(variants)\n else:\n raise Exception(f'Input file {path} has unsupported '\n f'extension: try .csv or .tsv.gz')\n\n## ANNOTATION FUNCTIONS\ndef annotate_cosmic(variants):\n \"\"\"\n Generate columns:\n HEME_EXACT: Number of exact matches for hematopoietic and\n lymphoid tissue in cosmic.\n ANY_EXACT_POS: YES/NO for any EXACT or POS match in cosmic.\n \"\"\"\n heme_exact = []\n cosmic = variants['COSMIC'].tolist()\n search_1 = 'HAEMATOPOIETIC_AND_LYMPHOID_TISSUE'\n search_2 = r'(?<=HAEMATOPOIETIC_AND_LYMPHOID_TISSUE=)\\w+'\n for entry in cosmic:\n if pd.isnull(entry):\n heme_exact.append(None)\n else:\n first = entry.split('|')[0]\n if re.search('^GENOMIC_EXACT', first):\n if re.search(search_1, first):\n count = re.search(search_2, first)[0]\n heme_exact.append(count)\n else:\n heme_exact.append(None)\n else:\n heme_exact.append(None)\n variants['HEME_EXACT'] = heme_exact\n any_exact_pos = []\n for entry in cosmic:\n if pd.isnull(entry):\n any_exact_pos.append(0)\n elif re.search(\n 'GENOMIC_EXACT', entry) or re.search(\n 'GENOMIC_POS', entry):\n any_exact_pos.append(1)\n else:\n any_exact_pos.append(0)\n variants['ANY_EXACT_POS'] = any_exact_pos\n return(variants)\n\ndef annotate_genefreq(variants, genes):\n \"\"\"\n Generate column:\n MAX_MUTFREQ: Maximal mutation frequency in gene\n as previously published in large MM studies.\n \"\"\"\n freqlist = pd.read_excel(io=genes)\n freqlist['MAX_MUTFREQ'] = round(\n freqlist.filter(regex='freq').max(axis=1), 1)\n freqlist = freqlist[['GENE', 'MAX_MUTFREQ']]\n variants = pd.merge(variants, freqlist, how='left')\n return(variants)\n\ndef annotate_maf(variants):\n \"\"\"\n Generate column:\n MAX_MAF: Maximal MAF of variant in any normal database\n \"\"\"\n variants['MAX_MAF'] = 0 # Sets variable to 0 if frequency is not reported\n variants['MAX_MAF'] = variants.filter(regex='MAF').max(axis=1)\n return(variants)\n\ndef annotate_normals(variants, path_normals):\n \"\"\"\n Annotates variants with internal normal controls:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n VAF: Median VAF\n Q25: 25th VAF-quartile\n Q75: 75th VAF-quartile\n Positions: START position\n Change: REF > ALT\n \"\"\"\n normals = pd.read_csv(\n filepath_or_buffer=path_normals)\n\n normals = normals.set_index(['CHR','START'])\n \n def annot_row(row, data):\n thres = 10\n chrom = str(row['CHR'])\n start = row['START']\n po = (chrom, start) in data.index\n close = data.ix[(chrom, start-thres):(chrom, 
start+thres)]\n if po:\n pos = data.loc[(chrom, start)]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['count'].iloc[0],\n exact['MEDIAN_VAF'].iloc[0],\n exact['VAF_Q25'].iloc[0],\n exact['VAF_Q75'].iloc[0],\n start,\n exact['REF'].iloc[0] + '>' + exact['ALT'].iloc[0]\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n ', '.join(pos['count'].astype(str)),\n ', '.join(pos['MEDIAN_VAF'].astype(str)),\n ', '.join(pos['VAF_Q25'].astype(str)),\n ', '.join(pos['VAF_Q75'].astype(str)),\n ', '.join([str(i) for i in pos.index.\\\n get_level_values('START').tolist()]),\n ', '.join([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'], pos['ALT'])])\n ]\n return pd.Series(pos_out)\n elif close.shape[0] > 0:\n cl_out = ['genomic_close',\n ', '.join(close['count'].astype(str).tolist()),\n ', '.join(close['MEDIAN_VAF'].astype(str).tolist()),\n ', '.join(close['VAF_Q25'].astype(str).tolist()),\n ', '.join(close['VAF_Q75'].astype(str).tolist()),\n ', '.join([str(i) for i in close.index.\\\n get_level_values('START').tolist()]),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())]))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*7)\n\n out_names = [\"_Class\", \"_Frequency\", \"_VAF\", \"_Q25\", \"_Q75\",\n \"_Position\", \"_Change\"]\n out_names = ['Normals' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, normals),\n axis=1)\n return(variants)\n\ndef annotate_mmrf(variants, path_mmrf):\n \"\"\"\n Annotates variants with MMRF data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n VAF: Median VAF\n Q25: 25th VAF-quartile\n Q75: 75th VAF-quartile\n Positions: START position\n Change: REF > ALT\n \"\"\"\n mmrf = pd.read_csv(filepath_or_buffer=path_mmrf, sep='\\t')\n mmrf = mmrf[[\"#CHROM\", \"POS\", \"REF\", \"ALT\", \"GEN[1].AR\"]]\n mmrf = mmrf.drop_duplicates() ## What are these duplicates?\n mmrf.columns = [\"CHR\", \"START\", \"REF\", \"ALT\", \"TARGET_VAF\"]\n\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n exact['TARGET_VAF'].median(),\n exact['TARGET_VAF'].quantile(q=0.25),\n exact['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())]))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n pos['TARGET_VAF'].median(),\n pos['TARGET_VAF'].quantile(q=0.25),\n pos['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())]))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .median().astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 
'REF'])['TARGET_VAF'] \\\n .quantile(q=0.25).astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .quantile(q=0.75).astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())]))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*7)\n\n out_names = [\"_Class\", \"_Frequency\", \"_VAF\", \"_Q25\", \"_Q75\",\n \"_Position\", \"_Change\"]\n out_names = ['MMRF' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, mmrf),\n axis=1)\n return(variants)\n\ndef annotate_bolli(variants, path_bolli):\n \"\"\"\n Annotates variants with Bolli data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n Positions: START position\n Change: REF > ALT\n Annotation: Manual annotation category.\n \"\"\"\n bolli = pd.read_csv(filepath_or_buffer=path_bolli, sep='\\t')\n bolli = bolli[[\"CHR\", \"START\", \"WT\", \"MT\", \"Variant_class\"]]\n bolli.columns = [\"CHR\", \"START\", \"REF\", \"ALT\", \"ANNOTATION\"]\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())])),\n ', '.join(set(exact['ANNOTATION']))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())])),\n ', '.join(set(pos['ANNOTATION']))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())])),\n ', '.join(set(close['ANNOTATION']))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*5)\n\n out_names = [\"_Class\", \"_Frequency\",\n \"_Position\", \"_Change\", \"_Annotation\"]\n out_names = ['Bolli' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, bolli),\n axis=1)\n return(variants)\n\ndef annotate_lohr(variants, lohr_path):\n \"\"\"\n Annotates variants with lohr data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n Positions: START position\n Change: REF > ALT\n \"\"\"\n lohr = pd.read_csv(filepath_or_buffer=lohr_path, sep='\\t')\n lohr = lohr[[\"Chromosome\", \"Start_Position\", \"Reference_Allele\",\n \"Tumor_Seq_Allele2\"]]\n lohr.columns = [\"CHR\", \"START\", \"REF\", \"ALT\"]\n\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = 
subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())]))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())]))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())]))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*4)\n\n out_names = [\"_Class\", \"_Frequency\", \n \"_Position\", \"_Change\"]\n out_names = ['Lohr' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, lohr),\n axis=1)\n return(variants)\n\ndef annotate_mytype(variants, path_mytype):\n \"\"\"\n Annotates variants with previous myTYPE data:\n Class: Close (chr, start within 10 bp),\n Pos (chr, start),\n Exact (chr, start, ref, alt)\n Frequency: Number of matches\n VAF: Median VAF\n Q25: 25th VAF-quartile\n Q75: 75th VAF-quartile\n Positions: START position\n Change: REF > ALT\n Annotation: Manual annotation category.\n \"\"\"\n mytype = pd.read_csv(filepath_or_buffer=path_mytype, sep=',')\n mytype = mytype[[\"CHR\", \"START\", \"REF\", \"ALT\",\n \"MANUAL_ANNOTATION\", \"TARGET_VAF\"]]\n mytype.columns = [\"CHR\", \"START\", \"REF\", \"ALT\",\n \"ANNOTATION\", \"TARGET_VAF\"]\n\n def annot_row(row, data):\n thres = 10\n subdat = data[data['CHR'].astype(str) == str(row['CHR'])]\n po = row['START'] in subdat['START'].as_matrix().astype(int)\n close = (abs(subdat['START'].as_matrix() \\\n .astype(int) - row['START']) < thres)\n if po:\n pos = subdat[subdat['START'] == row['START']]\n exact = pos[(pos['REF'] == row['REF']) & (pos['ALT'] == row['ALT'])]\n if len(exact) > 0:\n ex_out = ['genomic_exact',\n exact['REF'].count(),\n exact['TARGET_VAF'].median(),\n exact['TARGET_VAF'].quantile(q=0.25),\n exact['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(exact['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(exact['REF'].tolist(), exact['ALT'].tolist())])),\n ', '.join(set(exact['ANNOTATION']))\n ]\n return pd.Series(ex_out)\n else:\n pos_out = ['genomic_pos',\n pos['REF'].count(),\n pos['TARGET_VAF'].median(),\n pos['TARGET_VAF'].quantile(q=0.25),\n pos['TARGET_VAF'].quantile(q=0.75),\n ', '.join(set(pos['START'].astype(str))),\n ', '.join(set([str(a) + '>' + str(b) for a, b in \\\n zip(pos['REF'].tolist(), pos['ALT'].tolist())])),\n ', '.join(set(pos['ANNOTATION']))\n ]\n return pd.Series(pos_out)\n elif close.any():\n close = subdat[close]\n cl_out = ['genomic_close',\n ', '.join(close.groupby(['ALT', 'REF']).size() \\\n .astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .median().astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .quantile(q=0.25).astype(str).tolist()),\n ', '.join(close.groupby(['ALT', 'REF'])['TARGET_VAF'] \\\n .quantile(q=0.75).astype(str).tolist()),\n ', '.join(set(close['START'].astype(str))),\n ', '.join(set([str(a) + 
'>' + str(b) for a, b in \\\n zip(close['REF'].tolist(), close['ALT'].tolist())])),\n ', '.join(set(close['ANNOTATION']))\n ]\n return pd.Series(cl_out)\n else:\n return pd.Series([None]*8)\n\n out_names = [\"_Class\", \"_Frequency\", \"_VAF\", \"_Q25\", \"_Q75\",\n \"_Position\", \"_Change\", \"_Annotation\"]\n out_names = ['myTYPE' + s for s in out_names]\n\n variants[out_names] = variants.apply(lambda row: annot_row(row, mytype),\n axis=1)\n return(variants)\n\ndef annotate_known(variants, mytype):\n \"\"\"\n Generate columns:\n KNOWN_MM = 1 if previously found in MM. Includes any match in MMRF,\n Bolli and Lohr, and UNKNOWN/LIKELY/ONCOGENIC by mytype\n \"\"\"\n\n # Only run function if data is passed to the optional variable \"mytype\"\n if mytype:\n mytype_annot = variants['myTYPE_Annotation'].tolist()\n myTYPE_somatic = []\n for entry in mytype_annot:\n if pd.isnull(entry):\n myTYPE_somatic.append(0)\n else:\n search_1 = re.search('ONCOGENIC', entry)\n search_2 = re.search('LIKELY', entry)\n search_3 = re.search('UNKNOWN', entry)\n if search_1 or search_2 or search_3:\n myTYPE_somatic.append(1)\n else:\n myTYPE_somatic.append(0)\n variants['myTYPE_somatic'] = myTYPE_somatic\n else:\n variants['myTYPE_somatic'] = 0\n\n # Define column KNOWN_MM based on annotation data\n variants['KNOWN_MM'] = np.where((variants['myTYPE_somatic'] == 1) |\n (variants['MMRF_Class'].notnull()) |\n (variants['Bolli_Class'].notnull()) |\n (variants['Lohr_Class'].notnull()), 1, 0)\n\n variants = variants.drop('myTYPE_somatic', axis=1)\n return(variants)\n\n## APPLY FLAGS FOR FILTERING\ndef filter_panel(variants, genes_bed):\n \"\"\"\n Filter MFLAG_PANEL: 1 if variant is not in BED file of regions to keep\n \"\"\"\n variants_bed = variants[[\"CHR\", \"START\", \"END\", \"ID_VARIANT\"]]\n # Turning variants file into bed format\n variants_bed = pyb.BedTool.from_dataframe(variants_bed)\n # Import list of genes in panel as bed format\n genes = pyb.BedTool(genes_bed)\n # Bed file with intersection of panel and input file\n variants_inter = variants_bed.intersect(genes, u=True)\n # Empty list for names of variants in intersection bed file\n flaglist = []\n\n # If bed file is not empty\n if not variants_inter.head(n=1, as_string=True) == '':\n # Convert intersect bed file to data frame; subset col with variant ID\n flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']\n # Flag variant if ID is not in overlap list\n variants['MFLAG_PANEL'] = np.where(variants.ID_VARIANT.isin(flaglist), 0, 1)\n return(variants)\n\ndef filter_drop(variants, genes_drop):\n \"\"\"\n Filter MFLAG_DROP: 1 if variant is in list of genes to drop.\n \"\"\"\n drop = pd.read_excel(io=genes_drop)['GENE']\n variants['MFLAG_DROP'] = np.where(variants.GENE.isin(drop), 1, 0)\n return(variants)\n\ndef filter_igh(variants, igh_path):\n \"\"\"\n Filter MFLAG_IGH: 1 if variant in IGH locus\n \"\"\"\n variants_bed = variants[[\"CHR\", \"START\", \"END\", \"ID_VARIANT\"]]\n variants_bed = pyb.BedTool.from_dataframe(variants_bed)\n igh = pyb.BedTool(igh_path)\n variants_inter = variants_bed.intersect(igh, u=True)\n flaglist = []\n if not variants_inter.head(n=1, as_string=True) == '':\n flaglist = pyb.BedTool.to_dataframe(variants_inter)['name']\n variants['MFLAG_IGH'] = np.where(variants.ID_VARIANT.isin(flaglist), 1, 0)\n return(variants)\n\ndef filter_maf(variants):\n \"\"\"\n Filter MFLAG_MAF: 1 if variant MAF > 3 % in exac/1000genomes\n \"\"\"\n variants['MFLAG_MAF'] = np.where(variants['MAX_MAF'] > 0.03, 1, 0)\n return(variants)\n\ndef 
filter_maf_cosmic(variants, mode):\n \"\"\"\n Filter MFLAG_MAFCOS: 1 if variant has >0.1 % MAF and not in COSMIC\n For SNVs: Only counts exact and pos as in cosmic\n For Indels: Counts all COSMIC.\n \"\"\"\n if mode == 'snv':\n variants['MFLAG_MAFCOS'] = np.where(\n (variants['MAX_MAF'] > 0.001) &\n (variants['ANY_EXACT_POS'] == 0), 1, 0)\n if mode == 'indel':\n variants['MFLAG_MAFCOS'] = np.where(\n (variants['MAX_MAF'] > 0.001) &\n (variants['COSMIC'].isnull()), 1, 0)\n return(variants)\n\ndef filter_nonpass(variants, mode):\n \"\"\"\n Filter MFLAG_MAF: 1 if NON-PASS AND not in cosmic or previously known in MM\n Counts SNVs and Indels as \"in cosmic\" like for MAFCOS flag.\n For SNV: Only removes missense mutations with this flag\n \"\"\"\n if mode == 'snv':\n drop = ['non_synonymous_codon']\n variants['MFLAG_NONPASS'] = np.where(\n (variants['FILTER'] != \"PASS\") &\n (variants['EFFECT'].isin(drop)) &\n (variants['ANY_EXACT_POS'] == 0) &\n (variants['KNOWN_MM'] == 0), 1, 0)\n return(variants)\n variants['MFLAG_NONPASS'] = np.where(\n (variants['FILTER'] != \"PASS\") &\n (variants['COSMIC'].isnull()) &\n (variants['KNOWN_MM'] == 0), 1, 0)\n return(variants)\n\ndef filter_normals(variants):\n \"\"\"\n Filter MFLAG_NORM: 1 if variant has genomic exact or pos in normals\n \"\"\"\n match = ['genomic_exact', 'genomic_pos']\n variants['MFLAG_NORM'] = np.where(variants['Normals_Class'] \\\n .isin(match), 1, 0)\n return(variants)\n\ndef filter_vaf(variants):\n \"\"\"\n Filter MFLAG_VAF: 1 if target VAF < 1 %\n \"\"\"\n variants['MFLAG_VAF'] = np.where(\n (variants['TARGET_VAF'] < 0.01) & (variants['FILTER'] != 'PASS'), 1, 0)\n return(variants)\n\ndef filter_bidir(variants):\n \"\"\"\n Filter MFLAG_BIDIR: 1 if BIDIR = 0\n \"\"\"\n variants['MFLAG_BIDIR'] = np.where(variants['BIDIR'] == 0, 1, 0)\n return(variants)\n\n## FILTER AND EXPORT\ndef namecore(infile):\n \"\"\"\n Returns the \"core\" of the input file name, for use in output files.\n \"\"\"\n name = infile.split('/')[-1]\n if re.search('.csv$', name):\n return(re.sub('.csv$', '', name))\n return(re.sub('.tsv.gz$', '', name))\n\ndef filter_export(variants, outdir, name, mode):\n \"\"\"\n Function properties:\n 1. Filters variants into \"good\" or \"bad\" based on flags.\n 2. Writes files with good and bad variants.\n 3. 
Creates processing summary report.\n \"\"\"\n # Filtering\n good = variants[variants.filter(regex='MFLAG').sum(axis=1) == 0]\n bad = variants[variants.filter(regex='MFLAG').sum(axis=1) > 0]\n\n # Define output names\n date = str(datetime.today()).split()[0].split(\"-\")\n name = '_'.join([name, '_'.join(date)])\n goodname = join(outdir, name + '_goodcalls.csv')\n badname = join(outdir, name + '_badcalls.csv')\n textname = join(outdir, name + '_report.txt')\n\n # Export files\n good.to_csv(\n path_or_buf=goodname,\n index=False)\n bad.to_csv(\n path_or_buf=badname,\n index=False)\n\n # Summary report\n stop = timeit.default_timer()\n\n with open(textname, 'w') as f:\n # Call the \"Version\" file for version info?\n f.write(\n f'Somatic variant processing for myTYPE\\nv.1.0\\n '\n f'Completed time: {str(datetime.today()).split(\".\")[0]}\\n')\n f.write(f'Run time: {round(stop-START, 3)}\\n')\n f.write(f'####\\nMode: {mode}\\n')\n f.write(f'Imported calls: {variants.shape[0]}\\n')\n f.write('Flagging variants for filtering:\\n')\n f.write(f'MFLAG_PANEL: Variant not in BED file of '\n f'regions to keep: {variants[\"MFLAG_PANEL\"].sum()}\\n')\n f.write(f'MFLAG_DROP: Variant in excluded gene: '\n f'{variants[\"MFLAG_DROP\"].sum()}\\n')\n f.write(f'MFLAG_IGH: In IGH locus: {variants[\"MFLAG_IGH\"].sum()}\\n')\n f.write(f'MFLAG_MAF: MAF > 3 % in exac/1000genomes: '\n f'{variants[\"MFLAG_MAF\"].sum()}\\n')\n f.write(f'MFLAG_MAFCOS: MAF > 0.1 % and not in COSMIC '\n f'(exact/pos): {variants[\"MFLAG_MAFCOS\"].sum()}\\n')\n f.write(f'MFLAG_NONPASS: NON-PASS IF not in cosmic, previously '\n f'known in MM, not stopgain, splicesite..: '\n f'{variants[\"MFLAG_NONPASS\"].sum()}\\n')\n f.write(f'MFLAG_NORM: Variant exact or pos in >0 good normals: '\n f'{variants[\"MFLAG_NORM\"].sum()}\\n')\n f.write(f'MFLAG_VAF: Remove NON-PASS calls with target '\n f'VAF < 1 %: {variants[\"MFLAG_VAF\"].sum()}\\n')\n f.write(f'MFLAG_BIDIR: Remove variants BIDIR = 0 (only reads '\n f'on one strand): {variants[\"MFLAG_BIDIR\"].sum(0)}\\n')\n f.write(f'Removing calls with >= 1 MFLAG: {bad.shape[0]}\\n')\n f.write(f'Calls passed filters: {good.shape[0]}\\n')\n return()\n\n# Main Function\ndef process(\n mode,\n infile,\n outdir,\n genes,\n genes_drop,\n genes_bed,\n igh,\n mmrf,\n bolli,\n lohr,\n normals,\n mytype):\n \"\"\"Main function to process myTYPE SNV and indel output\"\"\"\n ## IMPORTING DATA\n variants = import_variants(infile)\n\n ## ANNOTATIONS\n variants = annotate_cosmic(variants)\n if genes:\n # Only runs if a path was passed to optional argument \"gene\"\n variants = annotate_genefreq(variants, genes)\n # Replace this with mutation frequency from MMRF? 
(and other raw data?)\n variants = annotate_maf(variants)\n variants = annotate_normals(variants, normals)\n variants = annotate_mmrf(variants, mmrf)\n variants = annotate_bolli(variants, bolli)\n variants = annotate_lohr(variants, lohr)\n if mytype:\n # Only runs if a path was passed to optional argument \"mytype\"\n variants = annotate_mytype(variants, mytype)\n variants = annotate_known(variants, mytype)\n\n ## FILTERS\n variants = filter_panel(variants, genes_bed)\n if genes_drop:\n variants = filter_drop(variants, genes_drop)\n variants = filter_igh(variants, igh)\n variants = filter_maf(variants)\n variants = filter_maf_cosmic(variants, mode)\n variants = filter_nonpass(variants, mode)\n variants = filter_normals(variants)\n variants = filter_vaf(variants)\n variants = filter_bidir(variants)\n\n ## OUTPUT\n name = namecore(infile)\n filter_export(variants, outdir, name, mode)\n print('Variant processing complete')\n return(variants) # Added this here - may be necessary for test?\n" ]
[ [ "pandas.Series", "pandas.read_csv", "pandas.read_excel", "pandas.merge", "pandas.isnull", "numpy.where" ] ]
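The annotation functions in the record above all apply the same per-row matching scheme against a reference table: genomic_exact (chr, start, ref, alt), genomic_pos (chr, start), or genomic_close (start within 10 bp). The sketch below shows that logic on a tiny DataFrame using boolean indexing instead of the .ix and .as_matrix() calls in the record, which are removed in current pandas; the toy tables and the classify name are assumptions for illustration.

import pandas as pd
import numpy as np

variants = pd.DataFrame({'CHR': ['1', '2'], 'START': [100, 500],
                         'REF': ['A', 'G'], 'ALT': ['T', 'C']})
reference = pd.DataFrame({'CHR': ['1', '1', '2'], 'START': [100, 95, 800],
                          'REF': ['A', 'C', 'G'], 'ALT': ['T', 'A', 'C']})

def classify(row, data, thres=10):
    # Match order mirrors the record: exact, then position, then within `thres` bp.
    sub = data[data['CHR'] == str(row['CHR'])]
    exact = sub[(sub['START'] == row['START']) &
                (sub['REF'] == row['REF']) & (sub['ALT'] == row['ALT'])]
    if len(exact):
        return 'genomic_exact'
    if (sub['START'] == row['START']).any():
        return 'genomic_pos'
    if (np.abs(sub['START'] - row['START']) < thres).any():
        return 'genomic_close'
    return None

variants['Class'] = variants.apply(lambda r: classify(r, reference), axis=1)
print(variants)   # row 0 -> genomic_exact, row 1 -> None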
rubyvanrooyen/katdal
[ "e90bca3c2cd6305492d03ddc9aa48e67c1800428" ]
[ "katdal/h5datav2.py" ]
[ "################################################################################\n# Copyright (c) 2011-2021, National Research Foundation (SARAO)\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy\n# of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\n\"\"\"Data accessor class for HDF5 files produced by KAT-7 correlator.\"\"\"\n\nimport logging\nimport pathlib\nimport secrets\n\nimport h5py\nimport katpoint\nimport numpy as np\n\nfrom .categorical import CategoricalData, sensor_to_categorical\nfrom .dataset import (DEFAULT_SENSOR_PROPS, DEFAULT_VIRTUAL_SENSORS,\n BrokenFile, DataSet, Subarray, WrongVersion,\n _robust_target, _selection_to_list)\nfrom .flags import DESCRIPTIONS as FLAG_DESCRIPTIONS\nfrom .flags import NAMES as FLAG_NAMES\nfrom .lazy_indexer import LazyIndexer, LazyTransform\nfrom .sensordata import RecordSensorGetter, SensorCache, to_str\nfrom .spectral_window import SpectralWindow\n\nlogger = logging.getLogger(__name__)\n\n# Simplify the scan activities to derive the basic state of the antenna (slewing, scanning, tracking, stopped)\nSIMPLIFY_STATE = {'scan_ready': 'slew', 'scan': 'scan', 'scan_complete': 'scan', 'track': 'track', 'slew': 'slew'}\n\nSENSOR_PROPS = dict(DEFAULT_SENSOR_PROPS)\nSENSOR_PROPS.update({\n '*activity': {'greedy_values': ('slew', 'stop'), 'initial_value': 'slew',\n 'transform': lambda act: SIMPLIFY_STATE.get(act, 'stop')},\n '*target': {'initial_value': '', 'transform': _robust_target},\n # These float sensors are actually categorical by nature as they represent user settings\n 'RFE/center-frequency-hz': {'categorical': True},\n 'RFE/rfe7.lo1.frequency': {'categorical': True},\n '*attenuation': {'categorical': True},\n '*attenuator.horizontal': {'categorical': True},\n '*attenuator.vertical': {'categorical': True},\n})\n\nSENSOR_ALIASES = {\n 'nd_coupler': 'rfe3.rfe15.noise.coupler.on',\n 'nd_pin': 'rfe3.rfe15.noise.pin.on',\n}\n\n\ndef _calc_azel(cache, name, ant):\n \"\"\"Calculate virtual (az, el) sensors from actual ones in sensor cache.\"\"\"\n base_name = 'pos.actual-scan-azim' if name.endswith('az') else 'pos.actual-scan-elev'\n real_sensor = f'Antennas/{ant}/{base_name}'\n cache[name] = sensor_data = katpoint.deg2rad(cache.get(real_sensor))\n return sensor_data\n\n\nVIRTUAL_SENSORS = dict(DEFAULT_VIRTUAL_SENSORS)\nVIRTUAL_SENSORS.update({'Antennas/{ant}/az': _calc_azel, 'Antennas/{ant}/el': _calc_azel})\n\nWEIGHT_NAMES = ('precision',)\nWEIGHT_DESCRIPTIONS = ('visibility precision (inverse variance, i.e. 
1 / sigma^2)',)\n\n# -------------------------------------------------------------------------------------------------\n# -- Utility functions\n# -------------------------------------------------------------------------------------------------\n\n\ndef get_single_value(group, name):\n \"\"\"Return single value from attribute or dataset with given name in group.\n\n If `name` is an attribute of the HDF5 group `group`, it is returned,\n otherwise it is interpreted as an HDF5 dataset of `group` and the last value\n of `name` is returned. This is meant to retrieve static configuration values\n that potentially get set more than once during capture initialisation, but\n then does not change during actual capturing.\n\n Parameters\n ----------\n group : :class:`h5py.Group` object\n HDF5 group to query\n name : string\n Name of HDF5 attribute or dataset to query\n\n Returns\n -------\n value : object\n Attribute or last value of dataset\n\n \"\"\"\n attrs = group.attrs\n value = attrs[name] if name in attrs else group[name][-1]\n return to_str(value)\n\n\ndef dummy_dataset(name, shape, dtype, value):\n \"\"\"Dummy HDF5 dataset containing a single value.\n\n This creates a dummy HDF5 dataset in memory containing a single value. It\n can have virtually unlimited size as the dataset is highly compressed.\n\n Parameters\n ----------\n name : string\n Name of dataset\n shape : sequence of int\n Shape of dataset\n dtype : :class:`numpy.dtype` object or equivalent\n Type of data stored in dataset\n value : object\n All elements in the dataset will equal this value\n\n Returns\n -------\n dataset : :class:`h5py.Dataset` object\n Dummy HDF5 dataset\n\n \"\"\"\n # It is important to randomise the filename as h5py does not allow two writable file objects with the same name\n # Without this randomness katdal can only open one file requiring a dummy dataset\n dummy_file = h5py.File(f'{name}_{secrets.token_hex(8)}.h5', 'x', driver='core', backing_store=False)\n return dummy_file.create_dataset(name, shape=shape, maxshape=shape,\n dtype=dtype, fillvalue=value, compression='gzip')\n\n# -------------------------------------------------------------------------------------------------\n# -- CLASS : H5DataV2\n# -------------------------------------------------------------------------------------------------\n\n\nclass H5DataV2(DataSet):\n \"\"\"Load HDF5 format version 2 file produced by KAT-7 correlator.\n\n For more information on attributes, see the :class:`DataSet` docstring.\n\n Parameters\n ----------\n filename : string\n Name of HDF5 file\n ref_ant : string, optional\n Name of reference antenna, used to partition data set into scans\n (default is first antenna in use)\n time_offset : float, optional\n Offset to add to all correlator timestamps, in seconds\n mode : string, optional\n HDF5 file opening mode (e.g. 
'r+' to open file in write mode)\n quicklook : {False, True}\n True if synthesised timestamps should be used to partition data set even\n if real timestamps are irregular, thereby avoiding the slow loading of\n real timestamps at the cost of slightly inaccurate label borders\n keepdims : {False, True}, optional\n Force vis / weights / flags to be 3-dimensional, regardless of selection\n kwargs : dict, optional\n Extra keyword arguments, typically meant for other formats and ignored\n\n Attributes\n ----------\n file : :class:`h5py.File` object\n Underlying HDF5 file, exposed via :mod:`h5py` interface\n\n \"\"\"\n\n def __init__(self, filename, ref_ant='', time_offset=0.0, mode='r',\n quicklook=False, keepdims=False, **kwargs):\n # The closest thing to a capture block ID is the Unix timestamp in the original filename\n # There is only one (unnamed) output stream, so leave off the stream name\n cbid = pathlib.Path(filename).stem\n DataSet.__init__(self, cbid, ref_ant, time_offset, url=filename)\n\n # Load file\n self.file, self.version = H5DataV2._open(filename, mode)\n f = self.file\n\n # Load main HDF5 groups\n data_group, sensors_group, config_group = f['Data'], f['MetaData/Sensors'], f['MetaData/Configuration']\n markup_group = f['Markup']\n # Get observation script parameters, with defaults\n for k, v in config_group['Observation'].items():\n # For KAT-7 (v2.1) data, strip the 'script_' prefix from most parameters\n k = k if self.version > '2.1' or k in ('script_name', 'script_arguments') else k[7:]\n self.obs_params[str(k)] = to_str(v)\n self.observer = self.obs_params.get('observer', '')\n self.description = self.obs_params.get('description', '')\n self.experiment_id = self.obs_params.get('experiment_id', '')\n # Get script log from History group\n self.obs_script_log = f['History/script_log']['log'].tolist()\n\n # ------ Extract timestamps ------\n\n self.dump_period = get_single_value(config_group['Correlator'], 'int_time')\n # Obtain visibility data and timestamps\n self._vis = data_group['correlator_data']\n self._timestamps = data_group['timestamps']\n num_dumps = len(self._timestamps)\n if num_dumps != self._vis.shape[0]:\n raise BrokenFile(f'Number of timestamps received from k7_capture ({num_dumps}) '\n f'differs from number of dumps in data ({self._vis.shape[0]})')\n # Discard the last sample if the timestamp is a duplicate (caused by stop packet in k7_capture)\n num_dumps = (num_dumps - 1) if num_dumps > 1 and (self._timestamps[-1] == self._timestamps[-2]) else num_dumps\n # Do quick test for uniform spacing of timestamps (necessary but not sufficient)\n expected_dumps = (self._timestamps[num_dumps - 1] - self._timestamps[0]) / self.dump_period + 1\n # The expected_dumps should always be an integer (like num_dumps), unless the timestamps and/or dump period\n # are messed up in the file, so the threshold of this test is a bit arbitrary (e.g. 
could use > 0.5)\n irregular = abs(expected_dumps - num_dumps) >= 0.01\n if irregular:\n # Warn the user, as this is anomalous\n logger.warning(\"Irregular timestamps detected in file '%s': expected %.3f dumps \"\n \"based on dump period and start/end times, got %d instead\",\n filename, expected_dumps, num_dumps)\n if quicklook:\n logger.warning(\"Quicklook option selected - partitioning data based on synthesised timestamps instead\")\n if not irregular or quicklook:\n # Estimate timestamps by assuming they are uniformly spaced (much quicker than loading them from file).\n # This is useful for the purpose of segmenting data set, where accurate timestamps are not that crucial.\n # The real timestamps are still loaded when the user explicitly asks for them.\n data_timestamps = self._timestamps[0] + self.dump_period * np.arange(num_dumps)\n else:\n # Load the real timestamps instead (could take several seconds on a large data set)\n data_timestamps = self._timestamps[:num_dumps]\n # Move timestamps from start of each dump to the middle of the dump\n data_timestamps += 0.5 * self.dump_period + self.time_offset\n if data_timestamps[0] < 1e9:\n logger.warning(\"File '%s' has invalid first correlator timestamp (%f)\", filename, data_timestamps[0])\n self._time_keep = np.ones(num_dumps, dtype=np.bool)\n self.start_time = katpoint.Timestamp(data_timestamps[0] - 0.5 * self.dump_period)\n self.end_time = katpoint.Timestamp(data_timestamps[-1] + 0.5 * self.dump_period)\n self._keepdims = keepdims\n\n # ------ Extract flags ------\n\n # Check if flag group is present, else use dummy flag data\n self._flags = markup_group['flags'] if 'flags' in markup_group else \\\n dummy_dataset('dummy_flags', shape=self._vis.shape[:-1], dtype=np.uint8, value=0)\n # Obtain flag descriptions from file or recreate default flag description table\n self._flags_description = to_str(markup_group['flags_description'][:]) \\\n if 'flags_description' in markup_group else np.array(list(zip(FLAG_NAMES, FLAG_DESCRIPTIONS)))\n self._flags_select = np.array([0], dtype=np.uint8)\n self._flags_keep = 'all'\n\n # ------ Extract weights ------\n\n # Check if weight group present, else use dummy weight data\n self._weights = markup_group['weights'] if 'weights' in markup_group else \\\n dummy_dataset('dummy_weights', shape=self._vis.shape[:-1], dtype=np.float32, value=1.0)\n # Obtain weight descriptions from file or recreate default weight description table\n self._weights_description = to_str(markup_group['weights_description'][:]) \\\n if 'weights_description' in markup_group else np.array(list(zip(WEIGHT_NAMES, WEIGHT_DESCRIPTIONS)))\n self._weights_select = []\n self._weights_keep = 'all'\n\n # ------ Extract sensors ------\n\n # Populate sensor cache with all HDF5 datasets below sensor group that fit the description of a sensor\n cache = {}\n\n def register_sensor(name, obj):\n \"\"\"A sensor is defined as a non-empty dataset with expected dtype.\"\"\"\n if isinstance(obj, h5py.Dataset) and obj.shape != () and \\\n obj.dtype.names == ('timestamp', 'value', 'status'):\n # Rename pedestal sensors from the old regime to become sensors of the corresponding antenna\n name = ('Antennas/ant' + name[13:]) if name.startswith('Pedestals/ped') else name\n cache[name] = RecordSensorGetter(obj, name)\n sensors_group.visititems(register_sensor)\n # Use estimated data timestamps for now, to speed up data segmentation\n self.sensor = SensorCache(cache, data_timestamps, self.dump_period, keep=self._time_keep,\n props=SENSOR_PROPS, 
virtual=VIRTUAL_SENSORS, aliases=SENSOR_ALIASES)\n\n # ------ Extract subarrays ------\n\n # By default, only pick antennas that were in use by the script\n script_ants = to_str(config_group['Observation'].attrs['script_ants']).split(',')\n self.ref_ant = script_ants[0] if not ref_ant else ref_ant\n # Original list of correlation products as pairs of input labels\n corrprods = get_single_value(config_group['Correlator'], 'bls_ordering')\n if len(corrprods) != self._vis.shape[2]:\n # Apply k7_capture baseline mask after the fact, in the hope that it fixes correlation product mislabelling\n corrprods = np.array([cp for cp in corrprods if cp[0][:-1] in script_ants and cp[1][:-1] in script_ants])\n # If there is still a mismatch between labels and data shape, file is considered broken (maybe bad labels?)\n if len(corrprods) != self._vis.shape[2]:\n raise BrokenFile('Number of baseline labels (containing expected antenna names) '\n 'received from correlator (%d) differs from number of baselines in data (%d)' %\n (len(corrprods), self._vis.shape[2]))\n else:\n logger.warning('Reapplied k7_capture baseline mask to fix unexpected number of baseline labels')\n # All antennas in configuration as katpoint Antenna objects\n ants = [katpoint.Antenna(to_str(config_group['Antennas'][name].attrs['description']))\n for name in config_group['Antennas']]\n self.subarrays = [Subarray(ants, corrprods)]\n self.sensor['Observation/subarray'] = CategoricalData(self.subarrays, [0, len(data_timestamps)])\n self.sensor['Observation/subarray_index'] = CategoricalData([0], [0, len(data_timestamps)])\n # Store antenna objects in sensor cache too, for use in virtual sensor calculations\n for ant in ants:\n self.sensor[f'Antennas/{ant.name}/antenna'] = CategoricalData([ant], [0, len(data_timestamps)])\n # Extract array reference from first antenna (first 5 fields of description)\n array_ant_fields = ['array'] + ants[0].description.split(',')[1:5]\n array_ant = katpoint.Antenna(','.join(array_ant_fields))\n self.sensor['Antennas/array/antenna'] = CategoricalData([array_ant], [0, len(data_timestamps)])\n\n # ------ Extract spectral windows / frequencies ------\n\n # Ideally we would like to use calculated center-frequency-hz sensor produced by k7_capture (better for nband)\n if self.version >= '2.1':\n centre_freq = self.sensor.get('RFE/center-frequency-hz')\n else:\n # Fall back to basic RFE7 LO frequency, as this supported multiple spectral windows before k7_capture did\n # This assumes WBC mode, though (NBC modes only fully supported since HDF5 v2.1)\n centre_freq = self.sensor.get('RFE/rfe7.lo1.frequency')\n centre_freq.unique_values = [freq - 4200e6 for freq in centre_freq.unique_values]\n num_chans = get_single_value(config_group['Correlator'], 'n_chans')\n if num_chans != self._vis.shape[1]:\n raise BrokenFile(f'Number of channels received from correlator ({num_chans}) '\n f'differs from number of channels in data ({self._vis.shape[1]})')\n bandwidth = get_single_value(config_group['Correlator'], 'bandwidth')\n channel_width = bandwidth / num_chans\n try:\n mode = self.sensor.get('DBE/dbe.mode').unique_values[0]\n except (KeyError, IndexError):\n # Guess the mode for version 2.0 files that haven't been re-augmented\n mode = 'wbc' if num_chans <= 1024 else 'wbc8k' if bandwidth > 200e6 else 'nbc'\n self.spectral_windows = [SpectralWindow(spw_centre, channel_width, num_chans, mode)\n for spw_centre in centre_freq.unique_values]\n self.sensor['Observation/spw'] = CategoricalData([self.spectral_windows[idx] for idx in 
centre_freq.indices],\n centre_freq.events)\n self.sensor['Observation/spw_index'] = CategoricalData(centre_freq.indices, centre_freq.events)\n\n # ------ Extract scans / compound scans / targets ------\n\n # Use the activity sensor of reference antenna to partition the data set into scans (and to set their states)\n scan = self.sensor.get(f'Antennas/{self.ref_ant}/activity')\n # If the antenna starts slewing on the second dump, incorporate the first dump into the slew too.\n # This scenario typically occurs when the first target is only set after the first dump is received.\n # The workaround avoids putting the first dump in a scan by itself, typically with an irrelevant target.\n if len(scan) > 1 and scan.events[1] == 1 and scan[1] == 'slew':\n scan.events, scan.indices = scan.events[1:], scan.indices[1:]\n scan.events[0] = 0\n # Use labels to partition the data set into compound scans\n label = sensor_to_categorical(markup_group['labels']['timestamp'], to_str(markup_group['labels']['label'][:]),\n data_timestamps, self.dump_period, **SENSOR_PROPS['Observation/label'])\n # Discard empty labels (typically found in raster scans, where first scan has proper label and rest are empty)\n # However, if all labels are empty, keep them, otherwise whole data set will be one pathological compscan...\n if len(label.unique_values) > 1:\n label.remove('')\n # Create duplicate scan events where labels are set during a scan (i.e. not at start of scan)\n # ASSUMPTION: Number of scans >= number of labels (i.e. each label should introduce a new scan)\n scan.add_unmatched(label.events)\n self.sensor['Observation/scan_state'] = scan\n self.sensor['Observation/scan_index'] = CategoricalData(list(range(len(scan))), scan.events)\n # Move proper label events onto the nearest scan start\n # ASSUMPTION: Number of labels <= number of scans (i.e. only a single label allowed per scan)\n label.align(scan.events)\n # If one or more scans at start of data set have no corresponding label, add a default label for them\n if label.events[0] > 0:\n label.add(0, '')\n self.sensor['Observation/label'] = label\n self.sensor['Observation/compscan_index'] = CategoricalData(list(range(len(label))), label.events)\n # Use the target sensor of reference antenna to set the target for each scan\n target = self.sensor.get(f'Antennas/{self.ref_ant}/target')\n # Move target events onto the nearest scan start\n # ASSUMPTION: Number of targets <= number of scans (i.e. only a single target allowed per scan)\n target.align(scan.events)\n self.sensor['Observation/target'] = target\n self.sensor['Observation/target_index'] = CategoricalData(target.indices, target.events)\n # Set up catalogue containing all targets in file, with reference antenna as default antenna\n self.catalogue.add(target.unique_values)\n self.catalogue.antenna = self.sensor[f'Antennas/{self.ref_ant}/antenna'][0]\n # Ensure that each target flux model spans all frequencies in data set if possible\n self._fix_flux_freq_range()\n\n # Avoid storing reference to self in transform closure below, as this hinders garbage collection\n dump_period, time_offset = self.dump_period, self.time_offset\n # Restore original (slow) timestamps so that subsequent sensors (e.g. 
pointing) will have accurate values\n extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)\n self.sensor.timestamps = LazyIndexer(self._timestamps, keep=slice(num_dumps), transforms=[extract_time])\n # Apply default selection and initialise all members that depend on selection in the process\n self.select(spw=0, subarray=0, ants=script_ants)\n\n @staticmethod\n def _open(filename, mode='r'):\n \"\"\"Open file and do basic version and augmentation sanity check.\"\"\"\n f = h5py.File(filename, mode)\n version = to_str(f.attrs.get('version', '1.x'))\n if not version.startswith('2.'):\n raise WrongVersion(f\"Attempting to load version '{version}' file with version 2 loader\")\n if 'augment_ts' not in f.attrs:\n raise BrokenFile('HDF5 file not augmented - please run '\n 'k7_augment.py (provided by katcapture package)')\n return f, version\n\n @staticmethod\n def _get_ants(filename):\n \"\"\"Quick look function to get the list of antennas in a data file.\n\n This is intended to be called without createing a full katdal object.\n\n Parameters\n ----------\n filename : string\n Data file name\n\n Returns\n -------\n antennas : list of :class:'katpoint.Antenna' objects\n\n \"\"\"\n f, version = H5DataV2._open(filename)\n config_group = f['MetaData/Configuration']\n all_ants = [ant for ant in config_group['Antennas']]\n script_ants = to_str(config_group['Observation'].attrs.get('script_ants'))\n script_ants = script_ants.split(',') if script_ants else all_ants\n return [katpoint.Antenna(to_str(config_group['Antennas'][ant].attrs['description']))\n for ant in script_ants if ant in all_ants]\n\n @staticmethod\n def _get_targets(filename):\n \"\"\"Quick look function to get the list of targets in a data file.\n\n This is intended to be called without createing a full katdal object.\n\n Parameters\n ----------\n filename : string\n Data file name\n\n Returns\n -------\n targets : :class:'katpoint.Catalogue' object\n All targets in file\n\n \"\"\"\n f, version = H5DataV2._open(filename)\n # Use the delay-tracking centre as the one and only target\n # Try two different sensors for the DBE target\n try:\n target_list = f['MetaData/Sensors/DBE/target']\n except Exception:\n # Since h5py errors have varied over the years, we need Exception\n target_list = f['MetaData/Sensors/Beams/Beam0/target']\n all_target_strings = [to_str(target_data[1]) for target_data in target_list]\n return katpoint.Catalogue(np.unique(all_target_strings))\n\n def __str__(self):\n \"\"\"Verbose human-friendly string representation of data set.\"\"\"\n descr = [super().__str__()]\n # append the process_log, if it exists, for non-concatenated h5 files\n if 'process_log' in self.file['History']:\n descr.append('-------------------------------------------------------------------------------')\n descr.append('Process log:')\n for proc in self.file['History']['process_log']:\n # proc has a structured dtype and to_str doesn't work on it, so\n # we have to to_str each element.\n param_list = f'{to_str(proc[0]):>15}:'\n for param in to_str(proc[1]).split(','):\n param_list += f' {param}'\n descr.append(param_list)\n return '\\n'.join(descr)\n\n @property\n def _weights_keep(self):\n known_weights = [row[0] for row in getattr(self, '_weights_description', [])]\n return [known_weights[ind] for ind in self._weights_select]\n\n @_weights_keep.setter\n def _weights_keep(self, names):\n known_weights = [row[0] for row in getattr(self, '_weights_description', [])]\n # Ensure a sequence of weight names\n 
names = _selection_to_list(names, all=known_weights)\n # Create index list for desired weights\n selection = []\n for name in names:\n try:\n selection.append(known_weights.index(name))\n except ValueError:\n logger.warning(\"%r is not a legitimate weight type for this file, \"\n \"supported ones are %s\", name, known_weights)\n if known_weights and not selection:\n logger.warning('No valid weights were selected - setting all weights to 1.0 by default')\n self._weights_select = selection\n\n @property\n def _flags_keep(self):\n if not hasattr(self, '_flags_description'):\n return []\n known_flags = [row[0] for row in self._flags_description]\n # The KAT-7 flagger uses the np.packbits convention (bit 0 = MSB) so don't flip\n selection = np.unpackbits(self._flags_select)\n assert len(known_flags) == len(selection), \\\n f'Expected {len(selection)} flag types in file, got {self._flags_description}'\n return [name for name, bit in zip(known_flags, selection) if bit]\n\n @_flags_keep.setter\n def _flags_keep(self, names):\n if not hasattr(self, '_flags_description'):\n self._flags_select = np.array([0], dtype=np.uint8)\n return\n known_flags = [row[0] for row in self._flags_description]\n # Ensure `names` is a sequence of valid flag names (or an empty list)\n names = _selection_to_list(names, all=known_flags)\n # Create boolean list for desired flags\n selection = np.zeros(8, dtype=np.uint8)\n assert len(known_flags) == len(selection), \\\n f'Expected {len(selection)} flag types in file, got {self._flags_description}'\n for name in names:\n try:\n selection[known_flags.index(name)] = 1\n except ValueError:\n logger.warning(\"%r is not a legitimate flag type for this file, \"\n \"supported ones are %s\", name, known_flags)\n # Pack index list into bit mask\n # The KAT-7 flagger uses the np.packbits convention (bit 0 = MSB) so don't flip\n flagmask = np.packbits(selection)\n if known_flags and not flagmask:\n logger.warning('No valid flags were selected - setting all flags to False by default')\n self._flags_select = flagmask\n\n @property\n def timestamps(self):\n \"\"\"Visibility timestamps in UTC seconds since Unix epoch.\n\n The timestamps are returned as an array indexer of float64, shape (*T*,),\n with one timestamp per integration aligned with the integration\n *midpoint*. To get the data array itself from the indexer `x`, do `x[:]`\n or perform any other form of indexing on it.\n\n \"\"\"\n # Avoid storing reference to self in transform closure below, as this hinders garbage collection\n dump_period, time_offset = self.dump_period, self.time_offset\n extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)\n return LazyIndexer(self._timestamps, keep=self._time_keep, transforms=[extract_time])\n\n def _vislike_indexer(self, dataset, extractor):\n \"\"\"Lazy indexer for vis-like datasets (vis / weights / flags).\n\n This operates on datasets with shape (*T*, *F*, *B*) and potentially\n different dtypes. 
The data type conversions are all left to the provided\n extractor transform, while this method takes care of the common\n selection issues, such as preserving singleton dimensions and dealing\n with duplicate final dumps.\n\n Parameters\n ----------\n dataset : :class:`h5py.Dataset` object or equivalent\n Underlying vis-like dataset on which lazy indexing will be done\n extractor : function, signature ``data = f(data, keep)``\n Transform to apply to data (`keep` is user-provided 2nd-stage index)\n\n Returns\n -------\n indexer : :class:`LazyIndexer` object\n Lazy indexer with appropriate selectors and transforms included\n\n \"\"\"\n # Create first-stage index from dataset selectors\n time_keep = self._time_keep\n # If there is a duplicate final dump, these lengths don't match -> ignore last dump in file\n if len(time_keep) == len(dataset) - 1:\n time_keep = np.zeros(len(dataset), dtype=np.bool)\n time_keep[:len(self._time_keep)] = self._time_keep\n stage1 = (time_keep, self._freq_keep, self._corrprod_keep)\n\n def _force_3dim(data, keep):\n \"\"\"Keep singleton dimensions in stage 2 (i.e. final) indexing.\"\"\"\n # Ensure that keep tuple has length of 3 (truncate or pad with blanket slices as necessary)\n keep = keep[:3] + (slice(None),) * (3 - len(keep))\n # Final indexing ensures that returned data are always 3-dimensional (i.e. keep singleton dimensions)\n keep_singles = [(np.newaxis if np.isscalar(dim_keep) else slice(None))\n for dim_keep in keep]\n return data[tuple(keep_singles)]\n force_3dim = LazyTransform('force_3dim', _force_3dim)\n transforms = [extractor, force_3dim] if self._keepdims else [extractor]\n return LazyIndexer(dataset, stage1, transforms)\n\n @property\n def vis(self):\n r\"\"\"Complex visibility data as a function of time, frequency and baseline.\n\n The visibility data are returned as an array indexer of complex64, shape\n (*T*, *F*, *B*), with time along the first dimension, frequency along the\n second dimension and correlation product (\"baseline\") index along the\n third dimension. The returned array always has all three dimensions,\n even for scalar (single) values. The number of integrations *T* matches\n the length of :meth:`timestamps`, the number of frequency channels *F*\n matches the length of :meth:`freqs` and the number of correlation\n products *B* matches the length of :meth:`corr_products`. To get the\n data array itself from the indexer `x`, do `x[:]` or perform any other\n form of indexing on it. Only then will data be loaded into memory.\n\n The sign convention of the imaginary part is consistent with an\n electric field of :math:`e^{i(\\omega t - jz)}` i.e. phase that\n increases with time.\n \"\"\"\n extract = LazyTransform('extract_vis',\n # Discard the 4th / last dimension as this is subsumed in complex view\n # The visibilities are conjugated due to using the lower sideband\n lambda vis, keep: vis.view(np.complex64)[..., 0].conjugate(),\n lambda shape: shape[:-1], np.complex64)\n return self._vislike_indexer(self._vis, extract)\n\n @property\n def weights(self):\n \"\"\"Visibility weights as a function of time, frequency and baseline.\n\n The weights data are returned as an array indexer of float32, shape\n (*T*, *F*, *B*), with time along the first dimension, frequency along the\n second dimension and correlation product (\"baseline\") index along the\n third dimension. 
The number of integrations *T* matches the length of\n :meth:`timestamps`, the number of frequency channels *F* matches the\n length of :meth:`freqs` and the number of correlation products *B*\n matches the length of :meth:`corr_products`. To get the data array\n itself from the indexer `x`, do `x[:]` or perform any other form of\n indexing on it. Only then will data be loaded into memory.\n\n \"\"\"\n # We currently only cater for a single weight type (i.e. either select it or fall back to 1.0)\n def transform(weights, keep):\n return weights.astype(np.float32) if self._weights_select else \\\n np.ones_like(weights, dtype=np.float32)\n extract = LazyTransform('extract_weights', transform, dtype=np.float32)\n return self._vislike_indexer(self._weights, extract)\n\n @property\n def flags(self):\n \"\"\"Flags as a function of time, frequency and baseline.\n\n The flags data are returned as an array indexer of bool, shape\n (*T*, *F*, *B*), with time along the first dimension, frequency along the\n second dimension and correlation product (\"baseline\") index along the\n third dimension. The number of integrations *T* matches the length of\n :meth:`timestamps`, the number of frequency channels *F* matches the\n length of :meth:`freqs` and the number of correlation products *B*\n matches the length of :meth:`corr_products`. To get the data array\n itself from the indexer `x`, do `x[:]` or perform any other form of\n indexing on it. Only then will data be loaded into memory.\n\n \"\"\"\n def transform(flags, keep):\n \"\"\"Use flagmask to blank out the flags we don't want.\"\"\"\n # Then convert uint8 to bool -> if any flag bits set, flag is set\n return np.bool_(np.bitwise_and(self._flags_select, flags))\n extract = LazyTransform('extract_flags', transform, dtype=np.bool)\n return self._vislike_indexer(self._flags, extract)\n\n @property\n def temperature(self):\n \"\"\"Air temperature in degrees Celsius.\"\"\"\n return self.sensor['Enviro/asc.air.temperature']\n\n @property\n def pressure(self):\n \"\"\"Barometric pressure in millibars.\"\"\"\n return self.sensor['Enviro/asc.air.pressure']\n\n @property\n def humidity(self):\n \"\"\"Relative humidity as a percentage.\"\"\"\n return self.sensor['Enviro/asc.air.relative-humidity']\n\n @property\n def wind_speed(self):\n \"\"\"Wind speed in metres per second.\"\"\"\n return self.sensor['Enviro/asc.wind.speed']\n\n @property\n def wind_direction(self):\n \"\"\"Wind direction as an azimuth angle in degrees.\"\"\"\n return self.sensor['Enviro/asc.wind.direction']\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.packbits", "numpy.ones_like", "numpy.arange", "numpy.unpackbits", "numpy.array", "numpy.isscalar", "numpy.bitwise_and", "numpy.unique" ] ]
Statfactory/ColdBrew
[ "ee16eee73e8dc89646abd6ee3e19858e49c6ffb7" ]
[ "cortado/cutcovfactor.py" ]
[ "from cortado.abstractfactor import AbstractFactor\nimport numpy as np\nfrom cortado.seq import Seq\nfrom cortado.funcslicer import FuncSlicer\nfrom cortado.consts import HEADLENGTH, SLICELEN, MISSINGLEVEL\nfrom numba import jit\nfrom numba.typed import Dict\nfrom numba import types\n\n@jit(nopython=True, cache=False)\ndef g_leftclosed(slice, buf, cuts, k):\n def f(x):\n if np.isnan(x):\n return 0\n if x == np.PINF:\n return k\n else:\n i = np.searchsorted(cuts, x, side='right') \n return i\n\n for i in range(len(slice)):\n buf[i] = f(slice[i])\n if len(buf) == len(slice):\n return buf\n else:\n return buf[:len(slice)]\n\n@jit(nopython=True, cache=False)\ndef g_rightclosed(slice, buf, cuts):\n def f(x):\n if np.isnan(x):\n return 0\n if x == np.NINF:\n return 1\n else:\n i = np.searchsorted(cuts, x, side='left') \n return i\n\n for i in range(len(slice)):\n buf[i] = f(slice[i])\n if len(buf) == len(slice):\n return buf\n else:\n return buf[:len(slice)]\n\nclass CutCovFactor(AbstractFactor):\n\n def __init__(self, covariate, cuts, rightclosed = False):\n self.covariate = covariate\n self.cuts = cuts\n \n assert cuts[0] == np.NINF and cuts[-1] == np.PINF\n levelcount = len(cuts) - 1\n if rightclosed:\n levels = [MISSINGLEVEL] + [\"{z}{x},{y}]\".format(x=str(cuts[i]), y=str(cuts[i + 1]), z=\"[\" if i == 0 else \"(\") for i in range(levelcount)]\n else:\n levels = [MISSINGLEVEL] + [\"[{x},{y}{z}\".format(x=str(cuts[i]), y=str(cuts[i + 1]), z=\"]\" if i == (levelcount - 1) else \")\") for i in range(levelcount)]\n dtype = np.uint8 if levelcount <= 256 else np.uint16\n\n \n def slicer(start, length, slicelen):\n length = min(len(self) - start, length)\n slicelen = min(length, slicelen)\n buf = np.empty(slicelen, dtype = dtype)\n if rightclosed:\n return Seq.map((lambda s: g_rightclosed(s, buf, cuts)), covariate.slicer(start, length, slicelen))\n else:\n return Seq.map((lambda s: g_leftclosed(s, buf, cuts, levelcount - 1)), covariate.slicer(start, length, slicelen))\n\n self._levels = levels\n self._slicer = FuncSlicer(slicer, dtype)\n\n @property\n def name(self):\n return self.covariate.name\n\n def __len__(self):\n return len(self.covariate)\n\n @property\n def isordinal(self):\n return True\n\n @property\n def levels(self):\n return self._levels\n\n @property\n def slicer(self):\n return self._slicer" ]
[ [ "numpy.empty", "numpy.searchsorted", "numpy.isnan" ] ]
primasanjaya/muat-github
[ "4603c6c960188643fb38d8dba82e0dcc1ba00b40", "4603c6c960188643fb38d8dba82e0dcc1ba00b40" ]
[ "main_old.py", "dataset/pcawgtcga_dataloader.py" ]
[ "# make deterministic\r\nfrom mingpt.utils import set_seed\r\nset_seed(42)\r\n#frompc\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport math\r\nfrom torch.utils.data import Dataset\r\n\r\nfrom mingpt.model import *\r\n\r\nfrom mingpt.trainer import Trainer, TrainerConfig\r\nfrom mingpt.utils import sample\r\n\r\nimport logging\r\nlogging.basicConfig(\r\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\r\n datefmt=\"%m/%d/%Y %H:%M:%S\",\r\n level=logging.INFO,\r\n)\r\nimport pdb\r\n\r\nfrom dataset.tcga_dataset import TCGA\r\nfrom dataset.tcga_conv_dataset import TCGAConv\r\nfrom dataset.pcawg_conv_dataset import *\r\nfrom dataset.pcawg_dataset import PCAWG\r\nfrom dataset.pcawg_emb_dataset import PCAWGEmb\r\nfrom dataset.pcawg_sepdataset import PCAWGSep\r\nfrom dataset.pcawg_2stream import PCAWG2Stream\r\nfrom dataset.tcgadisttabletoemb_dataset import TCGADist\r\nfrom dataset.tcgamutdist_dataset import TCGAMutDist\r\nfrom dataset.tcgamutdistasone_dataset import TCGAMutDistasOne\r\nfrom dataset.tcgapcawg_dataset import TCGAPCAWG\r\nfrom dataset.newtcgapcawg_dataset import NewTCGAPCAWG\r\nfrom dataset.finaltcgapcawg_dataset import FinalTCGAPCAWG\r\n\r\nfrom mingpt.bert import *\r\nfrom preprocessing.dmm.dmm import *\r\nfrom preprocessing.fromvcffiles import *\r\n\r\nimport argparse\r\nimport os\r\nimport pandas as pd\r\n\r\ndef translate_args(args):\r\n\r\n cwd = os.getcwd()\r\n args.cwd = cwd\r\n\r\n args.mutation_coding = cwd + '/preprocessing/dmm/data/mutation_codes_sv.tsv'\r\n args.input = args.data_dir\r\n\r\n args.output = cwd + '/data/raw/out/00b9d0e6-69dc-4345-bffd-ce32880c8eef.consensus.20160830.somatic.snv_mnv.tsv.gz' \r\n\r\n args.reference = '/csc/epitkane/data/ref_genomes/hs37d5_1000GP/hs37d5_1000GP.fa'\r\n args.context = 8\r\n\r\n args.sample_id = 'submitted_sample_id'\r\n\r\n args.tmp = cwd + '/data/raw/tmp/'\r\n args.verbose = 1\r\n args.generate_negatives = 1\r\n args.report_interval = 100000\r\n\r\n return args\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser(description='TCGA / PEACOCK experiment')\r\n\r\n # DATASET\r\n parser.add_argument('--cwd', type=str,help='project dir')\r\n\r\n parser.add_argument('--dataset', type=str, default='pcawg',\r\n help='dataset')\r\n # MODEL\r\n parser.add_argument('--arch', type=str, default=None,\r\n help='architecture')\r\n # DIRECTORY\r\n parser.add_argument('--data-dir', type=str, default=None,\r\n help='data directory')\r\n parser.add_argument('--crossdata-dir', type=str, default=None,\r\n help='data directory')\r\n parser.add_argument('--adddata-dir', type=str, default=None,\r\n help='data directory')\r\n\r\n parser.add_argument('--n-class', type=int, default=None,\r\n help='number of class')\r\n\r\n parser.add_argument('--batch-size', type=int, default=1,\r\n help='batch size')\r\n\r\n parser.add_argument('--block-size', type=int, default=1000,\r\n help='block of sequence')\r\n\r\n parser.add_argument('--context-length', type=int, default=256,\r\n help='length of sequence')\r\n parser.add_argument('--n-layer', type=int, default=1,\r\n help='attention layer')\r\n parser.add_argument('--n-head', type=int, default=8,\r\n help='attention head')\r\n parser.add_argument('--n-emb', type=int, default=128,\r\n help='embedding dimension')\r\n parser.add_argument('--n-vocab-type', type=int, default=1,\r\n help='embedding dimension')\r\n\r\n parser.add_argument('--tag', type=str, default='myexperiment',\r\n help='dataset')\r\n \r\n 
parser.add_argument('--train', action='store_true', default=False)\r\n parser.add_argument('--predict', action='store_true', default=False)\r\n parser.add_argument('--trainbp', action='store_true', default=False)\r\n parser.add_argument('--vis-weight', action='store_true', default=False)\r\n parser.add_argument('--top-weight', action='store_true', default=False)\r\n\r\n parser.add_argument('--visval', action='store_true', default=False)\r\n\r\n\r\n parser.add_argument('--single-predict', action='store_true', default=False)\r\n parser.add_argument('--create-dataset', action='store_true', default=False)\r\n parser.add_argument('--two-streams', action='store_true', default=False)\r\n parser.add_argument('--three-streams', action='store_true', default=False)\r\n\r\n parser.add_argument('--filter', action='store_true', default=False)\r\n\r\n parser.add_argument('--bert', action='store_true', default=False)\r\n parser.add_argument('--withclass', action='store_true', default=False)\r\n parser.add_argument('--default', action='store_true', default=False)\r\n parser.add_argument('--addposition', action='store_true', default=False)\r\n parser.add_argument('--oneDhot', action='store_true', default=False)\r\n parser.add_argument('--addorder', action='store_true', default=False)\r\n parser.add_argument('--addtoken', action='store_true', default=False)\r\n parser.add_argument('--addtriplet', action='store_true', default=False)\r\n parser.add_argument('--addtriplettoken', action='store_true', default=False)\r\n parser.add_argument('--addgestoken', action='store_true', default=False)\r\n parser.add_argument('--addrt', action='store_true', default=False)\r\n parser.add_argument('--addlongcontext', action='store_true', default=False)\r\n parser.add_argument('--tokenizedlongcontext', action='store_true', default=False)\r\n parser.add_argument('--ohlongcontext', action='store_true', default=False)\r\n parser.add_argument('--flattenohlongcontext', action='store_true', default=False)\r\n parser.add_argument('--addpostoken', action='store_true', default=False)\r\n parser.add_argument('--addrttoken', action='store_true', default=False)\r\n parser.add_argument('--balance', action='store_true', default=False)\r\n\r\n parser.add_argument('--l1', action='store_true', default=False)\r\n\r\n parser.add_argument('--fold', type=int, default=1, \r\n help='number of mutation')\r\n\r\n parser.add_argument('--output-mode', type=str, default='token',help='dataset')\r\n\r\n parser.add_argument('--rbm', action='store_true', default=False)\r\n\r\n parser.add_argument('--newtraining', action='store_true', default=False)\r\n parser.add_argument('--newpredict', action='store_true', default=False)\r\n parser.add_argument('--newpredict2', action='store_true', default=False)\r\n parser.add_argument('--normal', action='store_true', default=False)\r\n\r\n parser.add_argument('--freezeemb', action='store_true', default=False)\r\n\r\n parser.add_argument('--predictvis', action='store_true', default=False)\r\n\r\n parser.add_argument('--crossdata', action='store_true', default=False)\r\n\r\n parser.add_argument('--nummut', type=int, default=0,\r\n help='number of mutation')\r\n parser.add_argument('--frac', type=float, default=0,\r\n help='frac')\r\n\r\n parser.add_argument('--mutratio', type=str, default='',\r\n help='mutation ratio')\r\n\r\n parser.add_argument('--spectral', action='store_true', default=False)\r\n parser.add_argument('--finalpredict', action='store_true', default=False)\r\n\r\n 
parser.add_argument('--finalpredictnewdata', action='store_true', default=False)\r\n parser.add_argument('--single-pred-vcf', action='store_true', default=False)\r\n\r\n\r\n parser.add_argument('--vis-attention', action='store_true', default=False)\r\n\r\n\r\n #dmm_parser\r\n parser.add_argument('-v', '--verbose', type=int, help='Try to be more verbose')\r\n parser.add_argument('--mutation-coding', help='Mutation coding table (\"ref alt code\"/line) [{}]'.format(\\\r\n defaults['mutation_coding']), metavar='fn')\r\n parser.add_argument('--config', help='Read parameters from a JSON file')\r\n parser.add_argument('--data-config',\r\n help='Column specification for --input, --validation and --aux-data [{}]'.format(\\\r\n defaults['data_config']))\r\n parser.add_argument('--random-seed', default=None, type=int, metavar='seed')\r\n parser.add_argument('--tmp')\r\n \r\n parser.add_argument('-i', '--input', action='append', metavar='dir(s)',\r\n help='Either a directory with vcf/maf[.gz] files or a vcf/maf[.gz] file (-i may be given more than once)')\r\n parser.add_argument('-o', '--output', metavar='fn', help='Preprocessed mutation data')\r\n parser.add_argument('-r', '--reference', metavar='ref', help='Reference genome (fasta) [{}]'.format(\\\r\n defaults['reference']))\r\n parser.add_argument('-k', '--context', help='Sequence context length (power of 2) [{}]'.format(\\\r\n defaults['context']), metavar='bp', type=int,default=8)\r\n parser.add_argument('-e', '--errors', metavar='fn',\r\n help='File where to log errors [{}]'.format(defaults['errors']))\r\n parser.add_argument('--no-ref-preload', help='Use samtools to read reference on demand (slow but fast startup) [false]',\r\n action='store_true')\r\n parser.add_argument('--no-filter', help='Process all variants [default=only PASS/. variants]',\r\n action='store_true')\r\n parser.add_argument('--sample-id', help='Sample identifier column name in MAF file')\r\n parser.add_argument('-n', '--generate_negatives', help='Ratio of negative to positive examples [{}]. Two passes on data are required for n>0.'.format(\\\r\n defaults['negative_ratio']), type=float)\r\n parser.add_argument('--median-variant-type-negatives', action='store_true',\r\n help='Generate median number of each variant type as negative examples for each sample')\r\n parser.add_argument('--median-variant-type-file', help='Load median variant numbers from a file')\r\n parser.add_argument('--negative-generation-mode', help='[generate] output in one go (default), [augment] input files or [process] augmented files', default='generate')\r\n parser.add_argument('--info-column', help='Input column name to write toutputo output (MAF input only). 
May be specified more than once.', action='append')\r\n parser.add_argument('--report-interval', help='Interval to report number of variants processed',\r\n type=int)\r\n parser.add_argument('--array-jobs', help='How many array jobs in total', type=int)\r\n parser.add_argument('--array-index', help='Index of this job', type=int)\r\n parser.add_argument('--nope', help='Only one variant per output sequence', action='store_true')\r\n parser.add_argument('--no-overwrite', help='Do not overwrite if output exists', action='store_true')\r\n\r\n\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef get_dataloader(args,train_val,load):\r\n\r\n if args.dataset == 'finalpcawg' or args.dataset == 'wgspcawg':\r\n if train_val=='training':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='training', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir\r\n )\r\n\r\n elif train_val=='validation':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='validation', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir)\r\n\r\n elif args.dataset == 'finaltcga' or args.dataset == 'westcga':\r\n if train_val=='training':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='training', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir)\r\n\r\n elif train_val=='validation':\r\n dataloader_class = FinalTCGAPCAWG(dataset_name = args.dataset, \r\n data_dir=args.data_dir, \r\n mode='validation', \r\n curr_fold=args.fold, \r\n block_size=args.block_size, \r\n load=False,\r\n mutratio = args.mutratio,\r\n addtriplettoken=args.addtriplettoken,\r\n addpostoken=args.addpostoken,\r\n addgestoken=args.addgestoken,\r\n addrt=args.addrt,\r\n nummut = args.nummut,\r\n frac = args.frac,\r\n crossdata = args.crossdata,\r\n crossdatadir = args.crossdata_dir,\r\n adddatadir = args.adddata_dir)\r\n \r\n return dataloader_class\r\n\r\ndef get_model(args,mconf):\r\n if args.arch == 'GPTConv':\r\n model = GPTConv(mconf)\r\n elif args.arch == 'GPTConvDeeper':\r\n model = GPTConvDeeper(mconf)\r\n elif args.arch == 'GPTNonPosition':\r\n model = GPTNonPosition(mconf)\r\n elif args.arch == 'CTransformer':\r\n model = CTransformer(mconf)\r\n elif args.arch == 'ConvTransformer':\r\n model = ConvTransformer(mconf)\r\n elif args.arch == 'Conv2DTransformer':\r\n model = Conv2DTransform\r\n elif args.arch == 'Transformer2Stream':\r\n model = Transformer2Stream(mconf)\r\n elif args.arch 
== 'CTransformerDNN':\r\n model = CTransformerDNN(mconf)\r\n elif args.arch == 'CTransformerMutDist':\r\n model = CTransformerMutDist(mconf)\r\n elif args.arch == 'SimpleAttention':\r\n model = SimpleAttention(mconf)\r\n elif args.arch == 'BertForSequenceClassification':\r\n model = BertForSequenceClassification(mconf)\r\n elif args.arch == 'BertwithPosition':\r\n model = BertwithPosition(mconf)\r\n elif args.arch == 'CTransformerWithPaddingIDX':\r\n model = CTransformerWithPaddingIDX(mconf)\r\n elif args.arch == 'Conv2DTransformerOnehot':\r\n model = Conv2DTransformerOnehot(mconf)\r\n elif args.arch == 'CTransformerWithPaddingIDXandfirstvec':\r\n model = CTransformerWithPaddingIDXandfirstvec(mconf)\r\n elif args.arch == 'Conv2DTransformerOnehotDeeper':\r\n model = Conv2DTransformerOnehotDeeper(mconf)\r\n elif args.arch == 'DNNTransformerOnehotDeeper':\r\n model = DNNTransformerOnehotDeeper(mconf)\r\n elif args.arch == 'CTransformerWithPosition':\r\n model = CTransformerWithPosition(mconf)\r\n elif args.arch == 'CTransformerWithPositionConcate':\r\n model = CTransformerWithPositionConcate(mconf)\r\n elif args.arch == 'DNNTransformerOnehotDeeperwithPosition':\r\n model = DNNTransformerOnehotDeeperwithPosition(mconf)\r\n elif args.arch == 'DNNTransformerOnehotDeeperwithPositionwithOrder':\r\n model = DNNTransformerOnehotDeeperwithPositionwithOrder(mconf)\r\n elif args.arch == 'CTransformerDNNWithPositionConcateToken':\r\n model = CTransformerDNNWithPositionConcateToken(mconf)\r\n elif args.arch == 'CTransformerDNNWithPositionConcateTokenSep':\r\n model = CTransformerDNNWithPositionConcateTokenSep(mconf)\r\n elif args.arch == 'CTransformerRBMWithPositionConcate':\r\n model = CTransformerRBMWithPositionConcate(mconf)\r\n elif args.arch == 'TripletPositionTokenandOnehot':\r\n model = TripletPositionTokenandOnehot(mconf) \r\n elif args.arch == 'PositionToken':\r\n model = PositionToken(mconf) \r\n elif args.arch == 'TripletPositionTokenandOnehotConcAfter':\r\n model = TripletPositionTokenandOnehotConcAfter(mconf)\r\n elif args.arch == 'TripletPositionRTToken':\r\n model = TripletPositionRTToken(mconf)\r\n elif args.arch == 'FullConvTransformer':\r\n model = FullConvTransformer(mconf)\r\n elif args.arch == 'TripletPositionTokenBest':\r\n model = TripletPositionTokenBest(mconf)\r\n elif args.arch == 'TripletPositionTokenRT':\r\n model = TripletPositionTokenRT(mconf)\r\n elif args.arch == 'EmbFC':\r\n model = EmbFC(mconf) \r\n elif args.arch == 'TripletPositionTokenOldBest':\r\n model = TripletPositionTokenOldBest(mconf)\r\n elif args.arch == 'CTransformerPCAWGtoTCGA_TPGES':\r\n model = CTransformerPCAWGtoTCGA_TPGES(mconf)\r\n elif args.arch == 'CTransformerPCAWGtoTCGA_T':\r\n model = CTransformerPCAWGtoTCGA_T(mconf)\r\n elif args.arch == 'TripletPosition':\r\n model = TripletPosition(mconf) \r\n elif args.arch == 'TripletPositionGES':\r\n model = TripletPositionGES(mconf)\r\n elif args.arch == 'TripletPositionGESRT':\r\n model = TripletPositionGESRT (mconf) \r\n elif args.arch == 'TripletPositionF':\r\n model = TripletPositionF(mconf) \r\n elif args.arch == 'TripletPositionGESF':\r\n model = TripletPositionGESF(mconf)\r\n elif args.arch == 'CTransformerF':\r\n model = CTransformerF(mconf)\r\n elif args.arch == 'EmbFCPos':\r\n model = EmbFCPos(mconf)\r\n elif args.arch == 'EmbFCPosGES':\r\n model = EmbFCPosGES(mconf)\r\n\r\n return model\r\n\r\ndef fold_split(args):\r\n\r\n num_class = os.listdir(args.data_dir)\r\n class_name = [i for i in num_class if len(i.split('.'))==1]\r\n class_name = 
sorted(class_name)\r\n\r\n num_samples = []\r\n\r\n for i in class_name:\r\n ns = len(os.listdir(args.data_dir+i))\r\n num_samples.append(ns)\r\n\r\n d = {'class_name':class_name,'n_samples':num_samples}\r\n pd_class_info = pd.DataFrame(d)\r\n \r\n folds=10\r\n\r\n class_used = pd_class_info.loc[pd_class_info['n_samples']>=folds]\r\n class_used = class_used.rename_axis('class_index').reset_index()\r\n class_used.to_csv(args.data_dir + 'sample_info_' + args.dataset + '.csv', index=False)\r\n\r\n num_class=len(class_used)\r\n\r\n tuple_list = []\r\n\r\n for nm_class in class_used['class_name']:\r\n num_sample = class_used.loc[class_used['class_name']==nm_class]['n_samples'].values[0]\r\n class_idx = class_used.loc[class_used['class_name']==nm_class]['class_index'].values[0]\r\n samples = os.listdir(args.data_dir+nm_class)\r\n count_split = 0\r\n\r\n for i in range(0,num_sample):\r\n count_split = count_split+1\r\n if count_split > folds:\r\n count_split = 1\r\n\r\n tuple_onerow = tuple([nm_class,class_idx,samples[i],count_split])\r\n tuple_list.append(tuple_onerow)\r\n \r\n all_split = pd.DataFrame(tuple_list,columns = ['class_name','class_index','name_samples','split'])\r\n\r\n test_split = pd.DataFrame(columns = all_split.columns)\r\n train_split = pd.DataFrame(columns = all_split.columns)\r\n validation_split = pd.DataFrame(columns = all_split.columns)\r\n\r\n for i in range(1,folds):\r\n test = all_split.loc[all_split['split']==i]\r\n train = all_split.loc[all_split['split']!=i]\r\n split_min = i + 1\r\n if split_min >= folds:\r\n split_min = 1\r\n validation = train.loc[train['split']==split_min]\r\n train = train.loc[train['split']!=split_min]\r\n train['split'] = i\r\n validation['split'] = i\r\n\r\n test_split = test_split.append(test)\r\n validation_split = validation_split.append(validation)\r\n train_split = train_split.append(train)\r\n\r\n train_split.to_csv(args.data_dir + 'train_split.csv', index=False)\r\n validation_split.to_csv(args.data_dir + 'validation_split.csv', index=False)\r\n test_split.to_csv(args.data_dir + 'test_split.csv', index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n best_accuracy=0\r\n\r\n args = get_args()\r\n\r\n if args.train:\r\n\r\n #class_info = fold_split(args)\r\n\r\n block_size = args.block_size # spatial extent of the model for its context\r\n train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)\r\n\r\n if args.bert:\r\n if args.default:\r\n mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class)\r\n else:\r\n if args.addposition:\r\n mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type,position_size=train_dataset.position_size)\r\n else: \r\n mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{args.block_size:.0f}_nl{args.n_layer:.0f}_nh{args.n_head:.0f}_ne{args.n_emb:.0f}_cl{args.context_length:.0f}/\"\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=0.001,\r\n lr_decay=True, warmup_tokens=1*150, 
final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n trainer.bert_train()\r\n\r\n if args.rbm:\r\n num_class=args.n_class\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.multi_stream_rbm(len(output_mode))\r\n else:\r\n trainer.basic_train()\r\n else:\r\n num_class=args.n_class\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.multi_stream(len(output_mode))\r\n else:\r\n trainer.basic_train()\r\n\r\n if args.newtraining:\r\n block_size = args.block_size # spatial extent of the model for its context\r\n train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)\r\n\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n \r\n if args.addpostoken:\r\n mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size,rt_size = train_dataset.rt_size)\r\n\r\n if args.addgestoken:\r\n mconf = 
GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size, ges_size = train_dataset.ges_size,rt_size = train_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,\r\n num_workers=1,string_logs=string_logs, args=args)\r\n trainer = Trainer(model, train_dataset, validation_dataset, tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n trainer.dynamic_stream()\r\n\r\n \r\n if args.predict:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.predict_multi_stream(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n if args.newpredict:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, 
warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.visval:\r\n trainer.vis_embed()\r\n\r\n if args.crossdata:\r\n trainer.newpredict_dynamic_streamc(args.predictvis)\r\n else:\r\n trainer.newpredict_dynamic_stream(args.predictvis)\r\n\r\n if args.finalpredict:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n train_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n\r\n num_class=args.n_class\r\n \r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,\r\n position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.vis_attention:\r\n trainer = Trainer(model, None,[train_dataset, validation_dataset], tconf) \r\n\r\n trainer.visualize_attention(args.vis_attention)\r\n\r\n else: \r\n if args.visval: \r\n trainer.vis_embed()\r\n \r\n if args.predictvis:\r\n trainer = Trainer(model, None,[train_dataset,validation_dataset], tconf) \r\n\r\n trainer.finalpredict_dynamic_stream(args.predictvis,args.adddata_dir)\r\n\r\n if args.finalpredictnewdata:\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n \r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,\r\n position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.vis_attention:\r\n trainer = Trainer(model, None,[validation_dataset], tconf) \r\n trainer.visualize_attention(args.vis_attention)\r\n\r\n else: \r\n if args.visval: \r\n trainer.vis_embed()\r\n \r\n if args.predictvis:\r\n trainer = Trainer(model, None,[validation_dataset], tconf) \r\n\r\n trainer.finalpredict_newdata(args.predictvis,args.adddata_dir)\r\n\r\n if args.newpredict2:\r\n\r\n class_info = fold_split(args)\r\n block_size 
= args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size, rt_size = validation_dataset.rt_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset], tconf)\r\n\r\n if args.visval:\r\n trainer.vis_embed2()\r\n\r\n trainer.newpredict_dynamic_stream(args.predictvis)\r\n\r\n if args.single_predict:\r\n\r\n block_size = args.block_size\r\n num_class=args.n_class\r\n\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n\r\n test_dataset = SinglePrediction(data_dir = args.data_dir)\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n model = get_model(args,mconf)\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-4,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[test_dataset], tconf)\r\n\r\n trainer.single_predict()\r\n\r\n if args.vis_weight:\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n 
num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[validation_dataset,test_dataset], tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.predict_vis(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n if args.top_weight:\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)\r\n\r\n output_mode = args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.topweight_vis(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n if args.single_pred_vcf:\r\n\r\n args = translate_args(args)\r\n\r\n #cmd_preprocess(args)\r\n preprocessing_fromdmm(args)\r\n\r\n pdb.set_trace()\r\n\r\n class_info = fold_split(args)\r\n block_size = args.block_size # spatial extent of the model for its context\r\n\r\n training_dataset = get_dataloader(args=args,train_val='training',load=True)\r\n validation_dataset = get_dataloader(args=args,train_val='validation',load=True)\r\n test_dataset = get_dataloader(args=args,train_val='testing',load=True)\r\n\r\n num_class=args.n_class\r\n\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)\r\n\r\n if args.addposition:\r\n mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,\r\n n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)\r\n\r\n model = get_model(args,mconf)\r\n\r\n string_logs = f\"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/\"\r\n\r\n tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,\r\n lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,\r\n num_workers=20,string_logs=string_logs, args=args)\r\n\r\n trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)\r\n\r\n output_mode = 
args.output_mode.split(\"_\")\r\n\r\n if len(output_mode)>1:\r\n trainer.topweight_vis(len(output_mode))\r\n else:\r\n trainer.predict()\r\n\r\n\r\n\r\n\r\n\r\n ", "\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\nimport math\r\nfrom torch.utils.data import Dataset\r\nimport os\r\nimport pandas as pd\r\nimport pdb\r\nimport numpy as np\r\nimport math\r\nimport pickle\r\nimport random\r\nfrom sklearn.utils import shuffle\r\n\r\nclass TCGAPCAWG_Dataloader(Dataset):\r\n\r\n def __init__(self, dataset_name = None, \r\n data_dir=None, \r\n mode='training', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n addtriplettoken=False,\r\n addpostoken=False,\r\n addgestoken=False,\r\n addrt=False,\r\n nummut = 0,\r\n frac = 0,\r\n crossdata=False,\r\n crossdatadir=None,\r\n pcawg2tgca_class=False,\r\n tcga2pcawg_class=False,\r\n mutratio = '1-0-0-0-0',\r\n adddatadir = None,\r\n input_filename=None,\r\n args = None,\r\n gx_dir=None,\r\n addepigen=False):\r\n\r\n self.dataset_name = dataset_name\r\n self.data_dir=data_dir\r\n self.mode=mode\r\n self.curr_fold=int(curr_fold)\r\n self.block_size=block_size\r\n self.load=load\r\n self.addtriplettoken=addtriplettoken\r\n self.addpostoken=addpostoken\r\n self.addrt=addrt\r\n self.nummut = nummut\r\n self.frac = frac\r\n self.addgestoken = addgestoken\r\n self.crossdata= crossdata\r\n self.crossdatadir = crossdatadir\r\n self.adddatadir = adddatadir\r\n self.args = args\r\n self.gx_dir = gx_dir\r\n self.mutratio = mutratio\r\n\r\n self.pcawg2tgca_class=pcawg2tgca_class\r\n self.tcga2pcawg_class=tcga2pcawg_class\r\n\r\n self.newformat = True\r\n self.newformat = False\r\n\r\n self.NiSi = False\r\n self.SNV = False\r\n self.indel = False\r\n self.SVMEI = False\r\n self.Normal = False\r\n\r\n self.dnn_input = 1\r\n\r\n if self.args == None:\r\n self.single_pred_vcf = False\r\n self.cwd = str(os.path.abspath('..')) + '/'\r\n else:\r\n self.single_pred_vcf = self.args.single_pred_vcf\r\n self.cwd = self.args.cwd\r\n\r\n\r\n self.input_filename = input_filename\r\n\r\n if self.nummut > 0 :\r\n self.block_size = self.nummut\r\n\r\n if self.dataset_name == 'pcawg':\r\n if self.args.multi_pred_vcf:\r\n fulltuple = []\r\n for idx in range(len(input_filename)):\r\n va = input_filename[idx]\r\n onetup = (va[:-4],'',1,1)\r\n #print(onetup)\r\n fulltuple.append(onetup)\r\n self.validation_fold = pd.DataFrame(fulltuple,columns =['samples', 'nm_class', 'slices','fold'])\r\n self.test_fold = self.validation_fold\r\n self.newformat = True\r\n \r\n if self.single_pred_vcf:\r\n self.onlyfilename = self.args.input_filename[:-4]\r\n onetup = [(self.onlyfilename,'',1,1)]\r\n self.validation_fold = pd.DataFrame(onetup,columns =['samples', 'nm_class', 'slices','fold'])\r\n self.test_fold = self.validation_fold\r\n \r\n self.newformat = True\r\n '''\r\n else:\r\n if self.newformat:\r\n self.training_fold = pd.read_csv('./dataset_utils/pcawg_train.csv',index_col=0)\r\n self.training_fold = self.training_fold.loc[self.training_fold['fold']==self.curr_fold]\r\n self.validation_fold = pd.read_csv('./dataset_utils/pcawg_val.csv',index_col=0)\r\n self.validation_fold = self.validation_fold.loc[self.validation_fold['fold']==self.curr_fold]\r\n else:\r\n self.training_fold = pd.read_csv('./oldformat/pcawg_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./oldformat/pcawg_valfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n '''\r\n elif self.dataset_name == 'tcga':\r\n self.training_fold = 
pd.read_csv('./dataset_utils/tcga_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./dataset_utils/tcga_valfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n elif self.dataset_name == 'westcga':\r\n self.training_fold = pd.read_csv('./dataset_utils/tcgawes_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./dataset_utils/tcgawes_valfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n elif self.dataset_name == 'wgspcawg':\r\n self.training_fold = pd.read_csv('./dataset_utils/pcawgwgs_trainfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n self.validation_fold = pd.read_csv('./dataset_utils/pcawgwgs_valfold' + str(self.curr_fold) + '.csv',index_col=0)\r\n\r\n if self.dataset_name == 'wgsgx':\r\n self.gx = pd.read_csv(self.gx_dir + 'PCAWG_gene_expression.tsv',sep='\\t',index_col=0)\r\n #self.gx = self.gx.iloc[:,-100:]\r\n\r\n self.training_fold = pd.read_csv(self.cwd + 'dataset_utils/wgsgx_train.csv',index_col=0)\r\n self.training_fold = self.training_fold.loc[self.training_fold['fold'] == self.curr_fold]\r\n\r\n self.validation_fold = pd.read_csv(self.cwd + 'dataset_utils/wgsgx_val.csv',index_col=0)\r\n self.validation_fold = self.validation_fold.loc[self.validation_fold['fold'] == self.curr_fold] \r\n self.newformat = True\r\n\r\n self.dnn_input = len(self.gx.iloc[0,:-2])\r\n #pdb.set_trace()\r\n\r\n if self.adddatadir is not None:\r\n adddata = pd.DataFrame(columns=self.validation_fold.columns)\r\n adddata.columns = self.validation_fold.columns\r\n\r\n folder = os.listdir(self.adddatadir)\r\n\r\n for i in folder:\r\n\r\n samples = os.listdir(self.adddatadir + i )\r\n for j in samples:\r\n if j[0:3] == 'new':\r\n counter = pd.read_csv(self.adddatadir + i + '/count_new_' + j[4:],index_col=0)\r\n\r\n listall = [i,j[4:]] + counter['0'].values.tolist() + [1]\r\n\r\n pds = pd.DataFrame(listall)\r\n pds = pds.T\r\n pds.columns=self.validation_fold.columns\r\n\r\n adddata = adddata.append(pds)\r\n\r\n adddata = adddata.reset_index(drop=True)\r\n\r\n self.adddata = adddata\r\n\r\n #self.validation_fold = self.validation_fold.append(self.adddata)\r\n self.validation_fold = self.adddata\r\n self.data_dir = self.adddatadir\r\n\r\n if self.single_pred_vcf:\r\n samples_names = input_filename[:-4]\r\n pd_count = pd.read_csv(args.tmp_dir + 'count_' + input_filename[:-4] + '.csv', index_col=0)['0'].to_list()\r\n onerow = ['',samples_names] + pd_count + [1]\r\n pd_data = pd.DataFrame(onerow).T\r\n pd_data.columns = ['nm_class','samples','NiSi','SNV','indel','SVMEI','Normal','fold']\r\n self.validation_fold = pd_data\r\n self.test_fold = self.validation_fold\r\n\r\n self.load_classinfo()\r\n\r\n self.vocab_mutation = pd.read_csv(self.cwd + 'extfile/dictMutation.csv',index_col=0)\r\n self.allSNV_index = 0\r\n\r\n if self.mutratio is not None:\r\n self.mutratio = mutratio.split('-')\r\n self.mutratio = [float(i) for i in self.mutratio]\r\n \r\n if self.mutratio[0]>0:\r\n self.NiSi = True \r\n if self.mutratio[1]>0:\r\n self.SNV = True\r\n if self.mutratio[2]>0:\r\n self.indel = True\r\n if self.mutratio[3]>0:\r\n self.SVMEI = True\r\n if self.mutratio[4]>0:\r\n self.Normal = True\r\n\r\n vocabsize = 0\r\n if self.NiSi:\r\n vocabsize = len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='NiSi'])\r\n if self.SNV:\r\n vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='SNV'])\r\n if self.indel:\r\n vocabsize = vocabsize + 
len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='indel']) \r\n if self.SVMEI:\r\n vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ'].isin(['MEI','SV'])])\r\n if self.Normal:\r\n vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='Normal'])\r\n\r\n self.vocab_size = vocabsize + 1\r\n #print(self.vocab_size)\r\n\r\n #pdb.set_trace()\r\n\r\n self.pd_position_vocab = pd.read_csv(self.cwd + 'extfile/dictChpos.csv',index_col=0)\r\n self.pd_ges_vocab = pd.read_csv(self.cwd + 'extfile/dictGES.csv',index_col=0)\r\n\r\n self.position_size = len(self.pd_position_vocab) + 1\r\n self.ges_size = len(self.pd_ges_vocab) + 1\r\n \r\n self.rt_size = 1\r\n\r\n self.midstring = '.' + self.dataset_name + str(mutratio) + str(int(self.addtriplettoken)) + str(int(self.addpostoken)) + str(int(self.addgestoken)) + str(int(self.addrt)) + '/' \r\n \r\n if self.mode == 'validation' or self.mode == 'testing':\r\n if self.crossdata:\r\n os.makedirs(self.crossdatadir + self.midstring, exist_ok=True)\r\n self.data_dir = self.crossdatadir\r\n #pdb.set_trace()\r\n \r\n else:\r\n os.makedirs(self.data_dir + self.midstring, exist_ok=True)\r\n\r\n def load_classinfo(self):\r\n if self.dataset_name == 'pcawg':\r\n pd_data = pd.read_csv(self.cwd + 'dataset_utils/classinfo_pcawg.csv',index_col = 0)\r\n self.pd_class_info = pd.DataFrame(pd_data)\r\n elif self.dataset_name == 'wgsgx':\r\n pd_data = pd.read_csv(self.cwd + 'dataset_utils/classinfo_wgsgx.csv',index_col = 0)\r\n self.pd_class_info = pd.DataFrame(pd_data)\r\n else:\r\n num_class = os.listdir(self.data_dir)\r\n name_class = [i for i in num_class if len(i.split('.'))==1]\r\n name_class = sorted(name_class)\r\n n_samples = []\r\n for idx,nm_class in enumerate(name_class):\r\n samples = os.listdir(self.data_dir+nm_class)\r\n samples = [x for x in samples if x[:10]=='count_new_']\r\n n_samples.append(len(samples))\r\n data = list(zip(name_class, np.arange(len(name_class)),n_samples)) \r\n self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])\r\n\r\n def get_data(self,idx):\r\n \r\n if self.mode=='training':\r\n instances=self.training_fold.iloc[idx] \r\n elif self.mode=='validation':\r\n instances=self.validation_fold.iloc[idx]\r\n elif self.mode == 'testing':\r\n instances=self.test_fold.iloc[idx]\r\n\r\n if self.newformat:\r\n samples = instances['samples'] + '.csv'\r\n target_name = instances['nm_class']\r\n\r\n if self.single_pred_vcf:\r\n pd_row = pd.read_csv(self.data_dir +'/count_' + samples,index_col=0).T\r\n row_count = pd_row.values[0]\r\n else:\r\n pd_row = pd.read_csv(self.data_dir + target_name +'/count_' + samples,index_col=0).T\r\n row_count = pd_row.values[0]\r\n \r\n else:\r\n target_name = instances['nm_class']\r\n samples = instances[1]\r\n row_count = instances[['NiSi','SNV','indel','SVMEI','Normal']].to_numpy()\r\n\r\n if self.mutratio is not None:\r\n avail_count = np.asarray(self.mutratio) * self.block_size \r\n \r\n diff = avail_count - row_count\r\n pos = diff>0\r\n avail_count1 = row_count * pos\r\n diff = row_count > avail_count\r\n\r\n avail_count2 = avail_count * diff\r\n avail_count3 = avail_count1 + avail_count2\r\n shadowavail_count3 = avail_count3\r\n shadowavail_count3[0] = row_count[0]\r\n\r\n if sum(shadowavail_count3) > self.block_size:\r\n diff = self.block_size - sum(avail_count3) \r\n shadowavail_count3[0] = diff + avail_count3[0]\r\n \r\n avail_count2 = shadowavail_count3.astype(int)\r\n\r\n if avail_count2[0]<0:\r\n \r\n secondmax = 
avail_count2[np.argmax(avail_count2)]\r\n avail_count2 = avail_count2 * 0.7\r\n\r\n avail_count = avail_count2\r\n\r\n diff = avail_count - row_count\r\n pos = diff>0\r\n avail_count1 = row_count * pos\r\n diff = row_count > avail_count\r\n\r\n avail_count2 = avail_count * diff\r\n avail_count3 = avail_count1 + avail_count2\r\n shadowavail_count3 = avail_count3\r\n shadowavail_count3[0] = row_count[0]\r\n\r\n if sum(shadowavail_count3) > self.block_size:\r\n diff = self.block_size - sum(avail_count3) \r\n shadowavail_count3[0] = diff + avail_count3[0]\r\n \r\n avail_count2 = shadowavail_count3.astype(int)\r\n\r\n avail_count = avail_count2\r\n\r\n \r\n def grab(pd_input,grabcol):\r\n return pd_input[grabcol]\r\n\r\n def allgrab(grabcol):\r\n \r\n\r\n if self.NiSi:\r\n #pdb.set_trace()\r\n if self.newformat:\r\n pd_nisi = pd.read_csv(self.data_dir + target_name + '/' + 'SNV_' + samples,index_col=0)\r\n else:\r\n pd_nisi = pd.read_csv(self.data_dir + target_name + '/' + 'NiSi_new_' + samples,index_col=0)\r\n pd_nisi = pd_nisi.sample(n = avail_count[0], replace = False)\r\n pd_nisi = grab(pd_nisi,grabcol)\r\n\r\n if self.SNV:\r\n if self.newformat:\r\n pd_SNV = pd.read_csv(self.data_dir + target_name + '/' + 'MNV_' + samples,index_col=0)\r\n else:\r\n pd_SNV = pd.read_csv(self.data_dir + target_name + '/' + 'SNV_new_' + samples,index_col=0)\r\n pd_SNV = pd_SNV.sample(n = avail_count[1], replace = False)\r\n pd_SNV = grab(pd_SNV,grabcol)\r\n pd_nisi = pd_nisi.append(pd_SNV)\r\n\r\n if self.indel:\r\n pd_indel = pd.read_csv(self.data_dir + target_name + '/' + 'indel_' + samples,index_col=0)\r\n pd_indel = pd_indel.sample(n = avail_count[2], replace = False)\r\n pd_indel = grab(pd_indel,grabcol)\r\n pd_nisi = pd_nisi.append(pd_indel)\r\n \r\n if self.SVMEI:\r\n if self.newformat:\r\n pd_meisv = pd.read_csv(self.data_dir + target_name + '/' + 'MEISV_' + samples,index_col=0)\r\n else:\r\n pd_meisv = pd.read_csv(self.data_dir + target_name + '/' + 'MEISV_new_' + samples,index_col=0)\r\n pd_meisv = pd_meisv.sample(n = avail_count[3], replace = False)\r\n pd_meisv = grab(pd_meisv,grabcol)\r\n pd_nisi = pd_nisi.append(pd_meisv)\r\n\r\n if self.Normal:\r\n if self.newformat:\r\n pd_normal = pd.read_csv(self.data_dir + target_name + '/' + 'Neg_' + samples,index_col=0)\r\n else:\r\n pd_normal = pd.read_csv(self.data_dir + target_name + '/' + 'Normal_new_' + samples,index_col=0)\r\n pd_normal = pd_normal.sample(n = avail_count[4], replace = False)\r\n pd_normal = grab(pd_normal,grabcol)\r\n pd_nisi = pd_nisi.append(pd_normal) \r\n\r\n pd_nisi = pd_nisi.fillna(0)\r\n return pd_nisi\r\n\r\n pd_nisi = pd.DataFrame()\r\n if self.addtriplettoken:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken'])\r\n else:\r\n filename = self.data_dir + self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken'])\r\n pd_nisi = pd_nisi.dropna()\r\n pd_nisi.to_csv(filename) \r\n \r\n else:\r\n pd_nisi = allgrab(['triplettoken'])\r\n pd_nisi.to_csv(filename)\r\n\r\n if self.addpostoken:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken','postoken'])\r\n else:\r\n #pdb.set_trace()\r\n filename = self.data_dir + self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken','postoken'])\r\n pdb.set_trace()\r\n pd_nisi.to_csv(filename)\r\n else:\r\n pd_nisi = 
allgrab(['triplettoken','postoken'])\r\n pd_nisi.to_csv(filename)\r\n \r\n if self.addgestoken:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken'])\r\n else:\r\n filename = self.data_dir + self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken'])\r\n pd_nisi.to_csv(filename)\r\n\r\n else:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken'])\r\n pd_nisi.to_csv(filename)\r\n\r\n if self.addrt:\r\n if self.mode=='training' :\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken','rt'])\r\n else:\r\n filename = self.data_dir + self.midstring + 'val_' + samples\r\n if os.path.isfile(filename):\r\n try:\r\n pd_nisi = pd.read_csv(filename,index_col=0)\r\n except:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken','rt'])\r\n pd_nisi.to_csv(filename)\r\n\r\n else:\r\n pd_nisi = allgrab(['triplettoken','postoken','gestoken','rt'])\r\n pd_nisi.to_csv(filename)\r\n\r\n #pdb.set_trace()\r\n pd_nisi = pd_nisi.dropna()\r\n \r\n if self.nummut > 0:\r\n if self.nummut < len(pd_nisi):\r\n pd_nisi = pd_nisi.sample(n = self.nummut, replace = False)\r\n else:\r\n pd_nisi = pd_nisi.sample(n = len(pd_nisi), replace = False)\r\n \r\n #pdb.set_trace()\r\n\r\n if self.frac > 0:\r\n pd_nisi = pd_nisi.sample(frac = self.frac)\r\n\r\n if self.mode =='training':\r\n pd_nisi = pd_nisi.sample(frac = 1)\r\n\r\n #pdb.set_trace()\r\n\r\n np_triplettoken = pd_nisi.to_numpy() \r\n\r\n is_padding = False\r\n if len(pd_nisi) < self.block_size:\r\n mins = self.block_size - len(np_triplettoken)\r\n is_padding = True\r\n \r\n datanumeric = []\r\n #pdb.set_trace()\r\n for i in pd_nisi.columns:\r\n np_data = pd_nisi[i].to_numpy() \r\n if is_padding:\r\n np_data = np.copy(np.pad(np_data, ((0, mins)), mode='constant', constant_values=0))\r\n \r\n if i == 'rt':\r\n tensordata = torch.tensor(np.round(np_data, 1), dtype=torch.half)\r\n #tensordata = np.round(np_data, 3)\r\n\r\n if len(np_data) > self.block_size:\r\n np_data = np.asarray(np_data[:self.block_size],dtype=int)\r\n tensordata = torch.tensor(np_data, dtype=torch.long)\r\n else:\r\n np_data = np.asarray(np_data,dtype=int)\r\n tensordata = torch.tensor(np_data, dtype=torch.long)\r\n datanumeric.append(tensordata)\r\n \r\n datastring = samples\r\n\r\n if self.dataset_name=='wgsgx':\r\n #pdb.set_trace()\r\n gx_data = self.gx.loc[self.gx['samples']==samples[:-4]]\r\n gx_data = gx_data.iloc[:,:-2].values\r\n tensorgx_data = torch.tensor(gx_data, dtype=torch.float)\r\n\r\n datanumeric.append(tensorgx_data)\r\n\r\n #print(datanumeric)\r\n data=[datastring,datanumeric]\r\n #pdb.set_trace()\r\n\r\n if target_name != '':\r\n if self.crossdata:\r\n #pdb.set_trace()\r\n target = self.pd_class_infoto.loc[self.pd_class_infoto['class_name']==target_name].class_index.values[0]\r\n else:\r\n target = self.pd_class_info.loc[self.pd_class_info['class_name']==target_name].class_index.values[0]\r\n target = target.astype(np.int16)\r\n target = torch.tensor(target, dtype=torch.long)\r\n else:\r\n target = ''\r\n\r\n\r\n if self.adddatadir is not None:\r\n return data,[target,target_name]\r\n else: \r\n return data,target\r\n\r\n def __len__(self):\r\n\r\n if self.mode=='training':\r\n return len(self.training_fold)\r\n elif self.mode=='validation':\r\n return len(self.validation_fold)\r\n elif self.mode=='testing':\r\n return len(self.test_fold)\r\n\r\n def __getitem__(self, idx): \r\n\r\n 
data,target = self.get_data(idx)\r\n\r\n return data, target\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n #dataloader = PCAWG(dataset_name = 'PCAWG', data_dir='/csc/epitkane/projects/PCAWG/shuffled_samples/', mode='training',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True)\r\n\r\n #dataloader = PCAWG(dataset_name = 'pcawg_mut3_comb0', data_dir='/csc/epitkane/projects/PCAWG20191001/data/modified_data/train/all24classes/', mode='training',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True,ncontext=3,addposition=False,filter=False,topk=5000)\r\n #dataloaderVal = PCAWG(dataset_name = 'pcawg_mut3_comb0', data_dir='/csc/epitkane/projects/PCAWG20191001/data/modified_data/train/all24classes/', mode='validation',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True,ncontext=3,addposition=False,filter=False,topk=5000)\r\n #/csc/epitkane/projects/tcga/new23classes/\r\n #/csc/epitkane/projects/PCAWG20191001/data/modified_data/train/new24classes/\r\n\r\n #G:/experiment/data/new24classes/\r\n '''\r\n dataloaderVal = FinalTCGAPCAWG(dataset_name = 'finalpcawg', \r\n data_dir='G:/experiment/data/new24classes/', \r\n mode='validation', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n mutratio = '0.3-0.3-0.3-0-0',\r\n addtriplettoken=False,\r\n addpostoken=False,\r\n addgestoken=True,\r\n addrt=False,\r\n nummut = 0,\r\n frac = 0,\r\n adddatadir='G:/experiment/data/icgc/')\r\n\r\n #pdb.set_trace()\r\n data,target = dataloaderVal.__getitem__(0)\r\n pdb.set_trace()\r\n\r\n for k in range(0,len(dataloaderVal)):\r\n print(k)\r\n data,target = dataloaderVal.__getitem__(k)\r\n '''\r\n\r\n\r\n\r\n '''\r\n WGS GX\r\n '''\r\n\r\n #/scratch/project_2001668/data/pcawg\r\n\r\n dataloaderVal = TCGAPCAWG_Dataloader(dataset_name = 'wgsgx', \r\n data_dir='/scratch/project_2001668/data/pcawg/allclasses/newformat/', \r\n mode='training', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n addtriplettoken=True,\r\n addpostoken=False,\r\n addgestoken=False,\r\n addrt=False,\r\n nummut = 0,\r\n frac = 0,\r\n mutratio = '1-0-0-0-0',\r\n adddatadir = None,\r\n input_filename=None,\r\n args = None,\r\n gx_dir = '/scratch/project_2001668/data/pcawg/PCAWG_geneexp/')\r\n \r\n data,target = dataloaderVal.__getitem__(0)\r\n pdb.set_trace()\r\n\r\n '''\r\n fold = [1,2,3,4,5,6,7,8,9,10]\r\n mutratios = ['1-0-0-0-0','0.5-0.5-0-0-0','0.4-0.3-0.3-0-0','0.3-0.3-0.20-0.20-0','0.25-0.25-0.25-0.15-0.1']\r\n\r\n retrieve = ['addtriplettoken','addpostoken','addgestoken','addrt']\r\n\r\n for fo in fold:\r\n for i in retrieve:\r\n if i == 'addtriplettoken':\r\n addtriplettoken = True\r\n else:\r\n addtriplettoken = False\r\n \r\n if i == 'addpostoken':\r\n addpostoken = True\r\n else:\r\n addpostoken = False\r\n\r\n if i == 'addgestoken':\r\n addgestoken = True\r\n else:\r\n addgestoken = False\r\n\r\n if i == 'addrt':\r\n addrt = True\r\n else:\r\n addrt = False\r\n\r\n for j in mutratios:\r\n dataloaderVal = FinalTCGAPCAWG(dataset_name = 'finalpcawg', \r\n data_dir='G:/experiment/data/new24classes/', \r\n mode='validation', \r\n curr_fold=1, \r\n block_size=5000, \r\n load=False,\r\n mutratio = j,\r\n addtriplettoken=addtriplettoken,\r\n addpostoken=addpostoken,\r\n addgestoken=addgestoken,\r\n addrt=addrt,\r\n nummut = 0,\r\n frac = 0)\r\n for k in range(0,len(dataloaderVal)):\r\n print(str(fo) + ' ' + str(k) + ' ' + i + ' ' + j + ' ' + str(addtriplettoken) + str(addpostoken) + str(addgestoken) + str(addrt))\r\n data,target = dataloaderVal.__getitem__(k)\r\n 
pdb.set_trace()\r\n\r\n dataloaderVal = TCGA(dataset_name = 'tcga_emb', data_dir='/csc/epitkane/projects/tcga/all23classes/', mode='validation',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True,ncontext=64,addposition=True,filter=True,block_size=300,withclass=True,twostream=False)\r\n\r\n for i in range(len(dataloaderVal)):\r\n data,target = dataloaderVal.__getitem__(i)\r\n\r\n dataloaderVal = TCGA(dataset_name = 'tcga_emb', data_dir='/csc/epitkane/projects/tcga/all23classes/', mode='testing',portion = [8,1,1], folds=10, curr_fold=1,load=True,load_token=True,ncontext=64,addposition=True,filter=True,block_size=300,loaddist=False,withclass=True,twostream=False)\r\n\r\n for i in range(len(dataloaderVal)):\r\n data,target = dataloaderVal.__getitem__(i)\r\n \r\n pdb.set_trace()\r\n '''\r\n\r\n " ]
[ [ "pandas.DataFrame" ], [ "pandas.read_csv", "pandas.DataFrame", "torch.tensor", "numpy.asarray", "numpy.argmax", "numpy.round", "numpy.pad" ] ]
Bartosz-D3V/ml-dataset-analysis
[ "cb2458dcb7cecba01f52be5b12e816ca00ce7da4" ]
[ "bike-sharing-demand/one_hot_encoder_transformer.py" ]
[ "import pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass OneHotEncoderTransformer(BaseEstimator, TransformerMixin):\n\n def __init__(self, columns) -> None:\n self.columns = columns\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n X = pd.get_dummies(X, columns=self.columns)\n return X\n" ]
[ [ "pandas.get_dummies" ] ]
payoto/graphcore_examples
[ "46d2b7687b829778369fc6328170a7b14761e5c6", "46d2b7687b829778369fc6328170a7b14761e5c6" ]
[ "applications/tensorflow/detection/yolov3/log.py", "applications/pytorch/miniDALL-E/train.py" ]
[ "# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nLogging utilities.\n\"\"\"\n\nimport csv\nimport datetime\nimport json\nimport logging\nimport os\nimport random\nimport subprocess\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import pywrap_tensorflow\n\n# Set Python logger\n# Match TensorFlow's default logging format.\nlogFormatter = logging.Formatter(\n '%(asctime)s.%(msecs)06d: %(levelname)-1.1s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logFormatter)\nlogger.addHandler(consoleHandler)\n\n\ndef get_logger():\n return logger\n\n\ndef set_log_file_path(log_file_path):\n global logger\n fileHandler = logging.FileHandler(log_file_path)\n fileHandler.setFormatter(logFormatter)\n logger.addHandler(fileHandler)\n\n\ndef add_arguments(parser):\n group = parser.add_argument_group('Logging')\n group.add_argument('--log-dir', type=str, default=\"./logs/\",\n help=\"Log and weights save directory\")\n group.add_argument('--name-suffix', type=str,\n help=\"Suffix added to name string\")\n group.add_argument('--steps-per-logs', type=int, default=1,\n help=\"Logs per epoch (if number of epochs specified)\")\n group.add_argument('--steps-per-tensorboard', type=int, default=0,\n help='Number of steps between saving statistics to TensorBoard. 
0 to disable.')\n return parser\n\n\ndef set_defaults(opts):\n name = opts['name']\n\n if opts[\"name_suffix\"]:\n name = name + \"_\" + opts[\"name_suffix\"]\n\n if opts.get(\"poplar_version\"):\n v = opts['poplar_version']\n # name += \"_v\" + v[v.find(\"version \") + 8: v.rfind(' ')]\n name += \"_v\" + v[v.find(\"version \") + 8: v.find(' (')]\n\n # We want this to be random even if random seeds have been set so that we don't overwrite\n # when re-running with the same seed\n random_state = random.getstate()\n random.seed()\n random.setstate(random_state)\n\n # System time with milliseconds\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]\n name += \"_{}\".format(time)\n\n if not os.path.isdir(opts[\"save_path\"]):\n os.makedirs(opts[\"save_path\"], exist_ok=True)\n\n opts[\"logs_path\"] = os.path.join(opts[\"save_path\"], name)\n opts[\"checkpoint_path\"] = os.path.join(opts[\"save_path\"], name, 'ckpt')\n\n if not os.path.isdir(opts[\"logs_path\"]):\n os.makedirs(opts[\"logs_path\"], exist_ok=True)\n\n set_log_file_path(os.path.join(opts['logs_path'], 'log.txt'))\n\n with open(os.path.join(opts[\"logs_path\"], 'arguments.json'), 'w') as fp:\n json.dump(opts, fp, sort_keys=True, indent=4, separators=(',', ': '))\n return opts\n\n\ndef write_to_csv(d, write_header, training, logs_path):\n if logs_path:\n filename = 'training.csv' if training else 'validation.csv'\n with open(os.path.join(logs_path, filename), 'a+') as f:\n w = csv.DictWriter(f, d.keys())\n if write_header:\n w.writeheader()\n w.writerow(d)\n\n\ndef print_trainable_variables(logs_path):\n logger.info('Trainable Variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n logger.info(variable)\n variable_parameters = 1\n for DIM in variable.get_shape():\n variable_parameters *= DIM.value\n total_parameters += variable_parameters\n logger.info('Total Parameters:' + str(total_parameters) + '\\n')\n\n\ndef make_histogram(values, bins=512):\n # From https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\n # License: BSD License 2.0\n # Author Michael Gygli\n\n # Logs the histogram of a list/vector of values.\n # Convert to a numpy array\n values = np.array(values)\n\n # Create histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill fields of histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values**2))\n\n # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]\n # See https://github.com/tensorflow/tensorflow/blob/r2.6/tensorflow/core/framework/summary.proto#L30\n # Thus, we drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n return hist\n # return tf.Summary.Value(tag=tag, histo=hist)\n\n\ndef save_model_statistics(checkpoint_path, summary_writer, step=0):\n initializers = load_initializers_from_checkpoint(checkpoint_path)\n summary = tf.Summary()\n for name, np_weight in initializers.items():\n name = name.replace(\":\", \"_\")\n tensor = np_weight.astype(np.float32)\n if not np.any(np.isnan(tensor)):\n summary.value.add(tag=name, histo=make_histogram(tensor))\n summary.value.add(tag=f\"L2/{name}\", simple_value=np.linalg.norm(tensor))\n\n summary_writer.add_summary(summary, 
step)\n summary_writer.flush()\n\n\ndef load_initializers_from_checkpoint(checkpoint_path):\n initializers = {}\n reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\n var_to_map = reader.get_variable_to_shape_map()\n for key, dim in var_to_map.items():\n if key == 'global_step':\n continue\n # if reader.get_tensor(key).dtype.name == 'float16':\n # int_data = np.asarray(reader.get_tensor(key), np.int32)\n # np_weight = int_data.view(dtype=np.float16).reshape(dim)\n # else:\n np_weight = reader.get_tensor(key)\n initializers[key] = np_weight\n return initializers\n\n\ndef get_git_revision():\n return subprocess.check_output([\"git\", \"describe\", \"--always\", \"--dirty\"]).strip().decode()\n", "# Copyright (c) 2021 Graphcore Ltd. All rights reserved.\n# Copyright (c) 2021 lucidrains\n\n# This file has been modified by Graphcore\n\n\nimport argparse\nfrom pathlib import Path\nimport datetime\nimport time\nfrom glob import glob\nimport os\nimport shutil\nfrom log import Logger\nimport torch\nimport poptorch\nimport popart\nimport wandb # Quit early if user doesn't have wandb installed.\nfrom poptorch.optim import Adam, AdamW\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom models.dalle import default\nfrom models import VQGanVAE, WrappedDALLE\nfrom models.loader import TextImageDataset\nfrom models.tokenizer import SimpleTokenizer, YttmTokenizer\nfrom args import parse_args\n\n\n# helpers\n\n\ndef exists(val):\n return val is not None\n\n\ndef get_trainable_params(model, weight_decay=0):\n # Do not apply weight_decay for one-dimensional parameters\n regularized_params = []\n non_regularized_params = []\n for param in model.parameters():\n if param.requires_grad:\n if len(param.shape) == 1:\n non_regularized_params.append(param)\n else:\n regularized_params.append(param)\n\n params = [\n {\"params\": regularized_params, \"weight_decay\": weight_decay},\n {\"params\": non_regularized_params, \"weight_decay\": 0}\n ]\n\n return params\n\n\ndef cp_path_to_dir(cp_path, tag):\n \"\"\"Convert a checkpoint path to a directory with `tag` inserted.\n If `cp_path` is already a directory, return it unchanged.\n \"\"\"\n if not isinstance(cp_path, Path):\n cp_path = Path(cp_path)\n if cp_path.is_dir():\n return cp_path\n path_sans_extension = cp_path.parent / cp_path.stem\n cp_dir = Path(f'{path_sans_extension}-{tag}-cp')\n return cp_dir\n\n\ndef main(args):\n if not args.synthetic_data:\n assert Path(args.input_folder).exists(), f'The path {args.input_folder} was not found.'\n\n abs_pathd = os.path.abspath(args.checkpoint_output_dir)\n os.makedirs(abs_pathd, exist_ok=True)\n log = Logger(abs_pathd+\"/\"+datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')+'.log',\n level='INFO')\n\n # tokenizer\n\n if exists(args.bpe_path):\n klass = YttmTokenizer\n tokenizer = klass(args.bpe_path)\n else:\n tokenizer = SimpleTokenizer()\n\n # reconstitute vae\n if exists(args.pretrained_checkpoint):\n dalle_path = Path(args.pretrained_checkpoint)\n\n assert dalle_path.exists(), 'DALL-E model file does not exist'\n loaded_obj = torch.load(str(dalle_path), map_location='cpu')\n\n dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']\n opt_state = loaded_obj.get('opt_state')\n scheduler_state = loaded_obj.get('scheduler_state')\n\n vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)\n\n dalle_params = dict(\n **dalle_params\n )\n resume_epoch = loaded_obj.get('epoch', 0)\n else:\n print('using pretrained VAE for encoding images to 
tokens')\n vae_params = None\n\n vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)\n\n dalle_params = dict(\n num_text_tokens=tokenizer.vocab_size,\n text_seq_len=args.text_seq_len,\n dim=args.hidden_size,\n depth=args.num_hidden_layers,\n heads=args.num_attention_heads,\n dim_head=args.dim_head,\n loss_img_weight=args.loss_img_weight,\n attn_types=tuple(args.attn_types.split(',')),\n ff_dropout=args.ff_dropout,\n attn_dropout=args.attn_dropout,\n sandwich_norm=args.sandwich_norm,\n embedding_ipu_id=args.embedding_ipu_id,\n embedding_serialization_factor=args.embedding_serialization_factor,\n layers_per_ipu=args.layers_per_ipu,\n cls_ipu_id=args.cls_ipu_id,\n fp16=args.fp16\n )\n resume_epoch = 0\n\n\n # create dataset and dataloader\n\n ds = TextImageDataset(\n args.input_folder,\n text_len=args.text_seq_len,\n image_size=vae.image_size,\n resize_ratio=1.0,\n truncate_captions=args.truncate_captions,\n tokenizer=tokenizer,\n shuffle=True,\n synthetic=args.synthetic_data,\n fp16=args.fp16\n )\n\n assert len(ds) > 0, 'dataset is empty'\n print(f'{len(ds)} image-text pairs found for training')\n\n\n opts = poptorch.Options()\n opts.autoRoundNumIPUs(True)\n opts.deviceIterations(args.batches_per_step)\n opts.replicationFactor(args.replication_factor)\n opts.Training.gradientAccumulation(args.gradient_accumulation)\n opts.Training.accumulationAndReplicationReductionType(poptorch.ReductionType.Mean)\n opts.Precision.enableStochasticRounding(args.stochastic_rounding)\n opts.anchorMode(poptorch.AnchorMode.Final)\n opts.TensorLocations.setOptimizerLocation(\n poptorch.TensorLocationSettings().useOnChipStorage(True))\n\n if args.enable_rts:\n opts.TensorLocations.setOptimizerLocation(\n poptorch.TensorLocationSettings().useReplicatedTensorSharding(True).minElementsForReplicatedTensorSharding(args.replication_factor))\n\n opts.randomSeed(args.random_seed)\n opts.setExecutionStrategy(\n poptorch.PipelinedExecution(poptorch.AutoStage.AutoIncrement))\n\n mem_prop = {\n f'IPU{i}': args.matmul_proportion[i]\n for i in range(args.ipus_per_replica)\n }\n opts.setAvailableMemoryProportion(mem_prop)\n\n # PopART options\n opts._Popart.set(\"disableGradAccumulationTensorStreams\", True)\n opts._Popart.set(\"outlineThreshold\", 10.0)\n\n if args.enable_half_partials:\n opts.Precision.setPartialsType(torch.float16)\n else:\n opts.Precision.setPartialsType(torch.float32)\n\n dl = poptorch.DataLoader(options=opts, dataset=ds, batch_size=args.batch_size, num_workers=args.dataloader_workers,\n persistent_workers=True, shuffle=True, drop_last=True, sampler=None)\n steps_per_epoch = len(dl)\n\n # initialize DALL-E\n\n dalle = WrappedDALLE(vae=vae, **dalle_params)\n\n # if using fp16:\n if args.fp16:\n dalle = dalle.half()\n\n if exists(args.pretrained_checkpoint):\n dalle.load_state_dict(weights)\n\n # optimizer\n first_order_type = torch.float16 if args.enable_half_first_order_momentum else torch.float32\n accum_type = torch.float16 if args.fp16 else torch.float32\n if args.optimizer == \"Adam\":\n opt = Adam(get_trainable_params(dalle, args.weight_decay), lr=args.learning_rate, eps=1e-6, loss_scaling=args.loss_scaling,\n accum_type=accum_type, first_order_momentum_accum_type=first_order_type, second_order_momentum_accum_type=torch.float32)\n elif args.optimizer == \"AdamW\":\n opt = AdamW(get_trainable_params(dalle, args.weight_decay), lr=args.learning_rate, eps=1e-6, loss_scaling=args.loss_scaling,\n accum_type=accum_type, first_order_momentum_accum_type=first_order_type, 
second_order_momentum_accum_type=torch.float32)\n else:\n raise ValueError(\"Unknown Optimizer:\", args.optimizer)\n if exists(args.pretrained_checkpoint) and opt_state:\n opt.load_state_dict(opt_state)\n poptorch_dalle = poptorch.trainingModel(dalle,\n options=opts,\n optimizer=opt)\n if args.lr_decay:\n scheduler = ReduceLROnPlateau(\n opt,\n mode=\"min\",\n factor=0.5,\n patience=10,\n cooldown=10,\n min_lr=1e-6,\n verbose=True,\n )\n if exists(args.pretrained_checkpoint) and scheduler_state:\n scheduler.load_state_dict(scheduler_state)\n else:\n scheduler = None\n\n # experiment tracker\n\n model_config = dict(\n depth=args.num_hidden_layers,\n heads=args.num_attention_heads,\n dim_head=args.dim_head\n )\n\n if args.wandb:\n run = wandb.init(\n project=args.wandb_project_name,\n entity=None,\n resume=False,\n config=model_config,\n settings=wandb.Settings(console='off')\n )\n\n\n def save_model(path, epoch=0):\n if not path:\n return\n\n save_obj = {\n 'hparams': dalle_params,\n 'vae_params': vae_params,\n 'epoch': epoch,\n }\n\n save_obj = {\n **save_obj,\n 'weights': dalle.state_dict(),\n 'opt_state': opt.state_dict(),\n }\n save_obj['scheduler_state'] = (scheduler.state_dict() if scheduler else None)\n filename = f\"dalle_{epoch}.pt\"\n save_path = os.path.join(path, filename)\n torch.save(save_obj, save_path)\n\n # Compile model\n log.logger.info(\"---------- Compilation Started ---------\")\n start_compile = time.perf_counter()\n text, images = next(iter(dl))\n poptorch_dalle.compile(text, images)\n duration_compilation = time.perf_counter() - start_compile\n log.logger.info(f\"Compiled model in {duration_compilation} secs\")\n log.logger.info(\"---------------------------------------\")\n\n # Training loop\n log.logger.info(\"---------- Training Started -----------\")\n\n save_model(args.checkpoint_output_dir, epoch=resume_epoch)\n global_batch_size = args.batch_size * args.gradient_accumulation * args.replication_factor\n samples_per_step = global_batch_size * args.batches_per_step\n training_steps = args.epochs * steps_per_epoch\n start_train = time.perf_counter()\n start_step = time.perf_counter()\n for epoch in range(resume_epoch, args.epochs):\n for i, (text, images) in enumerate(dl):\n current_step = i + epoch * steps_per_epoch\n loss = poptorch_dalle(text, images)\n # Average loss across replicas\n if args.replication_factor == 1:\n mean_loss = loss\n else:\n mean_loss = loss.mean()\n step_length = time.perf_counter() - start_step\n step_throughput = samples_per_step / step_length\n msg = (\"Epoch: {:.2f}/{} \"\n \"Step: {}/{} \"\n \"Lr: {:.6f} \"\n \"Loss: {:.3f} \"\n \"Throughput: {:.2f} samples/sec\"\n ).format(epoch, args.epochs,\n current_step, training_steps,\n opt.param_groups[0]['lr'],\n mean_loss.item(),\n step_throughput)\n log.logger.info(msg)\n if args.wandb:\n wandb.log({\"LR\": opt.param_groups[0]['lr'],\n \"Throughput\": step_throughput,\n \"Loss\": mean_loss.item()})\n\n start_step = time.perf_counter()\n if i % args.checkpoint_save_steps == 0:\n save_model(args.checkpoint_output_dir, epoch=epoch)\n\n if args.lr_decay:\n scheduler.step(mean_loss)\n\n save_model(args.checkpoint_output_dir, epoch=epoch)\n\n if args.wandb:\n wandb.finish()\n\n stop_train = time.perf_counter()\n log.logger.info(\"---------------------------------------\")\n\n log.logger.info(\"---------- Training Metrics -----------\")\n log.logger.info(f\"global_batch_size: {global_batch_size}\")\n log.logger.info(f\"batches_per_step: {args.batches_per_step}\")\n 
log.logger.info(f\"training_steps: {training_steps}\")\n duration_run = stop_train - start_train\n num_samples = samples_per_step * training_steps\n log.logger.info(f\"Training time: {duration_run:.3f} secs\")\n log.logger.info(\"Throughput: {:5f} samples/sec.\".format(num_samples / duration_run))\n log.logger.info(\"---------------------------------------\")\n\nif __name__ == \"__main__\":\n # argument parsing\n args = parse_args()\n\n torch.manual_seed(args.random_seed)\n main(args)\n" ]
[ [ "numpy.sum", "numpy.histogram", "tensorflow.trainable_variables", "numpy.max", "tensorflow.HistogramProto", "tensorflow.pywrap_tensorflow.NewCheckpointReader", "numpy.min", "numpy.prod", "numpy.array", "numpy.linalg.norm", "numpy.isnan", "tensorflow.Summary" ], [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.manual_seed", "torch.save" ] ]
rehohoho/mmsegmentation
[ "a73ae7a421e07741fda62c9d81b335cbc4b7f7d6" ]
[ "mmseg/models/decode_heads/knet_head.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer\nfrom mmcv.cnn.bricks.transformer import (FFN, TRANSFORMER_LAYER,\n MultiheadAttention,\n build_transformer_layer)\n\nfrom mmseg.models.builder import HEADS, build_head\nfrom mmseg.models.decode_heads.decode_head import BaseDecodeHead\nfrom mmseg.utils import get_root_logger\n\n\n@TRANSFORMER_LAYER.register_module()\nclass KernelUpdator(nn.Module):\n \"\"\"Dynamic Kernel Updator in Kernel Update Head.\n\n Args:\n in_channels (int): The number of channels of input feature map.\n Default: 256.\n feat_channels (int): The number of middle-stage channels in\n the kernel updator. Default: 64.\n out_channels (int): The number of output channels.\n gate_sigmoid (bool): Whether use sigmoid function in gate\n mechanism. Default: True.\n gate_norm_act (bool): Whether add normalization and activation\n layer in gate mechanism. Default: False.\n activate_out: Whether add activation after gate mechanism.\n Default: False.\n norm_cfg (dict | None): Config of norm layers.\n Default: dict(type='LN').\n act_cfg (dict): Config of activation layers.\n Default: dict(type='ReLU').\n \"\"\"\n\n def __init__(\n self,\n in_channels=256,\n feat_channels=64,\n out_channels=None,\n gate_sigmoid=True,\n gate_norm_act=False,\n activate_out=False,\n norm_cfg=dict(type='LN'),\n act_cfg=dict(type='ReLU', inplace=True),\n ):\n super(KernelUpdator, self).__init__()\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.out_channels_raw = out_channels\n self.gate_sigmoid = gate_sigmoid\n self.gate_norm_act = gate_norm_act\n self.activate_out = activate_out\n self.act_cfg = act_cfg\n self.norm_cfg = norm_cfg\n self.out_channels = out_channels if out_channels else in_channels\n\n self.num_params_in = self.feat_channels\n self.num_params_out = self.feat_channels\n self.dynamic_layer = nn.Linear(\n self.in_channels, self.num_params_in + self.num_params_out)\n self.input_layer = nn.Linear(self.in_channels,\n self.num_params_in + self.num_params_out,\n 1)\n self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1)\n self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1)\n if self.gate_norm_act:\n self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1]\n\n self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]\n self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]\n self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]\n self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]\n\n self.activation = build_activation_layer(act_cfg)\n\n self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1)\n self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]\n\n def forward(self, update_feature, input_feature):\n \"\"\"Forward function of KernelUpdator.\n\n Args:\n update_feature (torch.Tensor): Feature map assembled from\n each group. 
It would be reshaped with last dimension\n shape: `self.in_channels`.\n input_feature (torch.Tensor): Intermediate feature\n with shape: (N, num_classes, conv_kernel_size**2, channels).\n Returns:\n Tensor: The output tensor of shape (N*C1/C2, K*K, C2), where N is\n the number of classes, C1 and C2 are the feature map channels of\n KernelUpdateHead and KernelUpdator, respectively.\n \"\"\"\n\n update_feature = update_feature.reshape(-1, self.in_channels)\n num_proposals = update_feature.size(0)\n # dynamic_layer works for\n # phi_1 and psi_3 in Eq.(4) and (5) of K-Net paper\n parameters = self.dynamic_layer(update_feature)\n param_in = parameters[:, :self.num_params_in].view(\n -1, self.feat_channels)\n param_out = parameters[:, -self.num_params_out:].view(\n -1, self.feat_channels)\n\n # input_layer works for\n # phi_2 and psi_4 in Eq.(4) and (5) of K-Net paper\n input_feats = self.input_layer(\n input_feature.reshape(num_proposals, -1, self.feat_channels))\n input_in = input_feats[..., :self.num_params_in]\n input_out = input_feats[..., -self.num_params_out:]\n\n # `gate_feats` is F^G in K-Net paper\n gate_feats = input_in * param_in.unsqueeze(-2)\n if self.gate_norm_act:\n gate_feats = self.activation(self.gate_norm(gate_feats))\n\n input_gate = self.input_norm_in(self.input_gate(gate_feats))\n update_gate = self.norm_in(self.update_gate(gate_feats))\n if self.gate_sigmoid:\n input_gate = input_gate.sigmoid()\n update_gate = update_gate.sigmoid()\n param_out = self.norm_out(param_out)\n input_out = self.input_norm_out(input_out)\n\n if self.activate_out:\n param_out = self.activation(param_out)\n input_out = self.activation(input_out)\n\n # Gate mechanism. Eq.(5) in original paper.\n # param_out has shape (batch_size, feat_channels, out_channels)\n features = update_gate * param_out.unsqueeze(\n -2) + input_gate * input_out\n\n features = self.fc_layer(features)\n features = self.fc_norm(features)\n features = self.activation(features)\n\n return features\n\n\[email protected]_module()\nclass KernelUpdateHead(nn.Module):\n \"\"\"Kernel Update Head in K-Net.\n\n Args:\n num_classes (int): Number of classes. Default: 150.\n num_ffn_fcs (int): The number of fully-connected layers in\n FFNs. Default: 2.\n num_heads (int): The number of parallel attention heads.\n Default: 8.\n num_mask_fcs (int): The number of fully connected layers for\n mask prediction. Default: 3.\n feedforward_channels (int): The hidden dimension of FFNs.\n Defaults: 2048.\n in_channels (int): The number of channels of input feature map.\n Default: 256.\n out_channels (int): The number of output channels.\n Default: 256.\n dropout (float): The Probability of an element to be\n zeroed in MultiheadAttention and FFN. 
Default 0.0.\n act_cfg (dict): Config of activation layers.\n Default: dict(type='ReLU').\n ffn_act_cfg (dict): Config of activation layers in FFN.\n Default: dict(type='ReLU').\n conv_kernel_size (int): The kernel size of convolution in\n Kernel Update Head for dynamic kernel updation.\n Default: 1.\n feat_transform_cfg (dict | None): Config of feature transform.\n Default: None.\n kernel_init (bool): Whether initiate mask kernel in mask head.\n Default: False.\n with_ffn (bool): Whether add FFN in kernel update head.\n Default: True.\n feat_gather_stride (int): Stride of convolution in feature transform.\n Default: 1.\n mask_transform_stride (int): Stride of mask transform.\n Default: 1.\n kernel_updator_cfg (dict): Config of kernel updator.\n Default: dict(\n type='DynamicConv',\n in_channels=256,\n feat_channels=64,\n out_channels=256,\n act_cfg=dict(type='ReLU', inplace=True),\n norm_cfg=dict(type='LN')).\n \"\"\"\n\n def __init__(self,\n num_classes=150,\n num_ffn_fcs=2,\n num_heads=8,\n num_mask_fcs=3,\n feedforward_channels=2048,\n in_channels=256,\n out_channels=256,\n dropout=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_act_cfg=dict(type='ReLU', inplace=True),\n conv_kernel_size=1,\n feat_transform_cfg=None,\n kernel_init=False,\n with_ffn=True,\n feat_gather_stride=1,\n mask_transform_stride=1,\n kernel_updator_cfg=dict(\n type='DynamicConv',\n in_channels=256,\n feat_channels=64,\n out_channels=256,\n act_cfg=dict(type='ReLU', inplace=True),\n norm_cfg=dict(type='LN'))):\n super(KernelUpdateHead, self).__init__()\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.fp16_enabled = False\n self.dropout = dropout\n self.num_heads = num_heads\n self.kernel_init = kernel_init\n self.with_ffn = with_ffn\n self.conv_kernel_size = conv_kernel_size\n self.feat_gather_stride = feat_gather_stride\n self.mask_transform_stride = mask_transform_stride\n\n self.attention = MultiheadAttention(in_channels * conv_kernel_size**2,\n num_heads, dropout)\n self.attention_norm = build_norm_layer(\n dict(type='LN'), in_channels * conv_kernel_size**2)[1]\n self.kernel_update_conv = build_transformer_layer(kernel_updator_cfg)\n\n if feat_transform_cfg is not None:\n kernel_size = feat_transform_cfg.pop('kernel_size', 1)\n transform_channels = in_channels\n self.feat_transform = ConvModule(\n transform_channels,\n in_channels,\n kernel_size,\n stride=feat_gather_stride,\n padding=int(feat_gather_stride // 2),\n **feat_transform_cfg)\n else:\n self.feat_transform = None\n\n if self.with_ffn:\n self.ffn = FFN(\n in_channels,\n feedforward_channels,\n num_ffn_fcs,\n act_cfg=ffn_act_cfg,\n dropout=dropout)\n self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]\n\n self.mask_fcs = nn.ModuleList()\n for _ in range(num_mask_fcs):\n self.mask_fcs.append(\n nn.Linear(in_channels, in_channels, bias=False))\n self.mask_fcs.append(\n build_norm_layer(dict(type='LN'), in_channels)[1])\n self.mask_fcs.append(build_activation_layer(act_cfg))\n\n self.fc_mask = nn.Linear(in_channels, out_channels)\n\n def init_weights(self):\n \"\"\"Use xavier initialization for all weight parameter and set\n classification head bias as a specific value when use focal loss.\"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n # adopt the default initialization for\n # the weight and bias of the layer norm\n pass\n if self.kernel_init:\n logger = get_root_logger()\n logger.info(\n 'mask kernel in mask head is normal 
initialized by std 0.01')\n nn.init.normal_(self.fc_mask.weight, mean=0, std=0.01)\n\n def forward(self, x, proposal_feat, mask_preds, mask_shape=None):\n \"\"\"Forward function of Dynamic Instance Interactive Head.\n\n Args:\n x (Tensor): Feature map from FPN with shape\n (batch_size, feature_dimensions, H , W).\n proposal_feat (Tensor): Intermediate feature get from\n diihead in last stage, has shape\n (batch_size, num_proposals, feature_dimensions)\n mask_preds (Tensor): mask prediction from the former stage in shape\n (batch_size, num_proposals, H, W).\n\n Returns:\n Tuple: The first tensor is predicted mask with shape\n (N, num_classes, H, W), the second tensor is dynamic kernel\n with shape (N, num_classes, channels, K, K).\n \"\"\"\n N, num_proposals = proposal_feat.shape[:2]\n if self.feat_transform is not None:\n x = self.feat_transform(x)\n\n C, H, W = x.shape[-3:]\n\n mask_h, mask_w = mask_preds.shape[-2:]\n if mask_h != H or mask_w != W:\n gather_mask = F.interpolate(\n mask_preds, (H, W), align_corners=False, mode='bilinear')\n else:\n gather_mask = mask_preds\n\n sigmoid_masks = gather_mask.softmax(dim=1)\n\n # Group Feature Assembling. Eq.(3) in original paper.\n # einsum is faster than bmm by 30%\n x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, x)\n\n # obj_feat in shape [B, N, C, K, K] -> [B, N, C, K*K] -> [B, N, K*K, C]\n proposal_feat = proposal_feat.reshape(N, num_proposals,\n self.in_channels,\n -1).permute(0, 1, 3, 2)\n obj_feat = self.kernel_update_conv(x_feat, proposal_feat)\n\n # [B, N, K*K, C] -> [B, N, K*K*C] -> [N, B, K*K*C]\n obj_feat = obj_feat.reshape(N, num_proposals, -1).permute(1, 0, 2)\n obj_feat = self.attention_norm(self.attention(obj_feat))\n # [N, B, K*K*C] -> [B, N, K*K*C]\n obj_feat = obj_feat.permute(1, 0, 2)\n\n # obj_feat in shape [B, N, K*K*C] -> [B, N, K*K, C]\n obj_feat = obj_feat.reshape(N, num_proposals, -1, self.in_channels)\n\n # FFN\n if self.with_ffn:\n obj_feat = self.ffn_norm(self.ffn(obj_feat))\n\n mask_feat = obj_feat\n\n for reg_layer in self.mask_fcs:\n mask_feat = reg_layer(mask_feat)\n\n # [B, N, K*K, C] -> [B, N, C, K*K]\n mask_feat = self.fc_mask(mask_feat).permute(0, 1, 3, 2)\n\n if (self.mask_transform_stride == 2 and self.feat_gather_stride == 1):\n mask_x = F.interpolate(\n x, scale_factor=0.5, mode='bilinear', align_corners=False)\n H, W = mask_x.shape[-2:]\n else:\n mask_x = x\n # group conv is 5x faster than unfold and uses about 1/5 memory\n # Group conv vs. unfold vs. concat batch, 2.9ms :13.5ms :3.8ms\n # Group conv vs. unfold vs. 
concat batch, 278 : 1420 : 369\n # but in real training group conv is slower than concat batch\n # so we keep using concat batch.\n # fold_x = F.unfold(\n # mask_x,\n # self.conv_kernel_size,\n # padding=int(self.conv_kernel_size // 2))\n # mask_feat = mask_feat.reshape(N, num_proposals, -1)\n # new_mask_preds = torch.einsum('bnc,bcl->bnl', mask_feat, fold_x)\n # [B, N, C, K*K] -> [B*N, C, K, K]\n mask_feat = mask_feat.reshape(N, num_proposals, C,\n self.conv_kernel_size,\n self.conv_kernel_size)\n # [B, C, H, W] -> [1, B*C, H, W]\n new_mask_preds = []\n for i in range(N):\n new_mask_preds.append(\n F.conv2d(\n mask_x[i:i + 1],\n mask_feat[i],\n padding=int(self.conv_kernel_size // 2)))\n\n new_mask_preds = torch.cat(new_mask_preds, dim=0)\n new_mask_preds = new_mask_preds.reshape(N, num_proposals, H, W)\n if self.mask_transform_stride == 2:\n new_mask_preds = F.interpolate(\n new_mask_preds,\n scale_factor=2,\n mode='bilinear',\n align_corners=False)\n\n if mask_shape is not None and mask_shape[0] != H:\n new_mask_preds = F.interpolate(\n new_mask_preds,\n mask_shape,\n align_corners=False,\n mode='bilinear')\n\n return new_mask_preds, obj_feat.permute(0, 1, 3, 2).reshape(\n N, num_proposals, self.in_channels, self.conv_kernel_size,\n self.conv_kernel_size)\n\n\[email protected]_module()\nclass IterativeDecodeHead(BaseDecodeHead):\n \"\"\"K-Net: Towards Unified Image Segmentation.\n\n This head is the implementation of\n `K-Net: <https://arxiv.org/abs/2106.14855>`_.\n\n Args:\n num_stages (int): The number of stages (kernel update heads)\n in IterativeDecodeHead. Default: 3.\n kernel_generate_head:(dict): Config of kernel generate head which\n generate mask predictions, dynamic kernels and class predictions\n for next kernel update heads.\n kernel_update_head (dict): Config of kernel update head which refine\n dynamic kernels and class predictions iteratively.\n\n \"\"\"\n\n def __init__(self, num_stages, kernel_generate_head, kernel_update_head,\n **kwargs):\n super(BaseDecodeHead, self).__init__(**kwargs)\n assert num_stages == len(kernel_update_head)\n self.num_stages = num_stages\n self.kernel_generate_head = build_head(kernel_generate_head)\n self.kernel_update_head = nn.ModuleList()\n self.align_corners = self.kernel_generate_head.align_corners\n self.num_classes = self.kernel_generate_head.num_classes\n self.input_transform = self.kernel_generate_head.input_transform\n self.ignore_index = self.kernel_generate_head.ignore_index\n\n for head_cfg in kernel_update_head:\n self.kernel_update_head.append(build_head(head_cfg))\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n feats = self.kernel_generate_head._forward_feature(inputs)\n sem_seg = self.kernel_generate_head.cls_seg(feats)\n seg_kernels = self.kernel_generate_head.conv_seg.weight.clone()\n seg_kernels = seg_kernels[None].expand(\n feats.size(0), *seg_kernels.size())\n\n stage_segs = [sem_seg]\n for i in range(self.num_stages):\n sem_seg, seg_kernels = self.kernel_update_head[i](feats,\n seg_kernels,\n sem_seg)\n stage_segs.append(sem_seg)\n if self.training:\n return stage_segs\n # only return the prediction of the last stage during testing\n return stage_segs[-1]\n\n def losses(self, seg_logit, seg_label):\n losses = dict()\n for i, logit in enumerate(seg_logit):\n loss = self.kernel_generate_head.losses(logit, seg_label)\n for k, v in loss.items():\n losses[f'{k}.s{i}'] = v\n\n return losses\n" ]
[ [ "torch.nn.init.xavier_uniform_", "torch.nn.Linear", "torch.nn.init.normal_", "torch.nn.ModuleList", "torch.einsum", "torch.cat", "torch.nn.functional.interpolate" ] ]
tsutterley/captoolkit
[ "314c4d34f49012c25286478c943b0ab13c893c62" ]
[ "captoolkit/readgla12.py" ]
[ "#!/usr/bin/env python\r\n\"\"\"\r\n Reads GLA12 Release 634 HDF5.\r\n \r\n Reads several files in parallel if njobs > 1 is specified.\r\n \r\n Extracts a subset of the data based on a mask.tif file.\r\n \r\n Example:\r\n \r\n python readgla.py /mnt/devon-r0/shared_data/icesat/GLAH12.034/ /mnt/devon-r0/shared_data/icesat/grounded/ /mnt/devon-r0/shared_data/masks/ANT_groundedice_240m.tif 3031 A 600 1\r\n \r\n See full GLA12 parameters at:\r\n \r\n http://nsidc.org/data/docs/daac/glas_altimetry/data-dictionary-glah12.html\r\n \r\n Notes:\r\n \r\n For previous releases the path of some fields have changed!\r\n \r\n Corrections applied by default (i.e. data come corrected):\r\n \r\n instrument corrections - was applied\r\n atmospheric delays (wet/dry tropo) - was applied\r\n tides and load - was applied\r\n GC offset - was applied\r\n \r\n saturation (d_satElevCorr) - was NOT applied [1]\r\n inter-campaign bias - was NOT applied\r\n \r\n [1] If it is invalid, then the elevation should not be used.\r\n The saturation correction flag (sat_corr_flg) is an important\r\n flag to understand the possible quality of the elevation data.\r\n \r\n To REMOVE the tide and load cor, and APPLY saturation cor:\r\n \r\n elev_retide = d_elev + d_ocElv + d_ldElv + d_satElevCorr\r\n \r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport h5py\r\nimport pyproj\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom joblib import Parallel, delayed\r\nfrom gdalconst import *\r\nfrom osgeo import gdal, osr\r\nfrom scipy.ndimage import map_coordinates\r\n\r\n\r\ndef geotiffread(ifile,metaData):\r\n \"\"\"Read raster from file.\"\"\"\r\n \r\n file = gdal.Open(ifile, GA_ReadOnly)\r\n \r\n projection = file.GetProjection()\r\n src = osr.SpatialReference()\r\n src.ImportFromWkt(projection)\r\n proj = src.ExportToWkt()\r\n \r\n Nx = file.RasterXSize\r\n Ny = file.RasterYSize\r\n \r\n trans = file.GetGeoTransform()\r\n \r\n dx = trans[1]\r\n dy = trans[5]\r\n \r\n if metaData == \"A\":\r\n \r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n \r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n \r\n X = trans[0] + (Xp+0.5)*trans[1] + (Yp+0.5)*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + (Xp+0.5)*trans[4] + (Yp+0.5)*trans[5]\r\n \r\n if metaData == \"P\":\r\n \r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n \r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n \r\n X = trans[0] + Xp*trans[1] + Yp*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + Xp*trans[4] + Yp*trans[5]\r\n\r\n band = file.GetRasterBand(1)\r\n\r\n Z = band.ReadAsArray()\r\n \r\n dx = np.abs(dx)\r\n dy = np.abs(dy)\r\n \r\n return X, Y, Z, dx, dy, proj\r\n\r\n\r\ndef bilinear2d(xd,yd,data,xq,yq, **kwargs):\r\n \"\"\"Bilinear interpolation from grid.\"\"\"\r\n \r\n xd = np.flipud(xd)\r\n yd = np.flipud(yd)\r\n data = np.flipud(data)\r\n \r\n xd = xd[0,:]\r\n yd = yd[:,0]\r\n \r\n nx, ny = xd.size, yd.size\r\n (x_step, y_step) = (xd[1]-xd[0]), (yd[1]-yd[0])\r\n \r\n assert (ny, nx) == data.shape\r\n assert (xd[-1] > xd[0]) and (yd[-1] > yd[0])\r\n \r\n if np.size(xq) == 1 and np.size(yq) > 1:\r\n xq = xq*ones(yq.size)\r\n elif np.size(yq) == 1 and np.size(xq) > 1:\r\n yq = yq*ones(xq.size)\r\n \r\n xp = (xq-xd[0])*(nx-1)/(xd[-1]-xd[0])\r\n yp = (yq-yd[0])*(ny-1)/(yd[-1]-yd[0])\r\n\r\n coord = np.vstack([yp,xp])\r\n \r\n zq = map_coordinates(data, coord, **kwargs)\r\n \r\n return zq\r\n\r\n\r\ndef wrap_to_180(lon):\r\n \"\"\"Wrapps longitude to -180 to 180 degrees.\"\"\"\r\n lon[lon>180] -= 360.\r\n return lon\r\n\r\n\r\ndef list_files(path, endswith='.h5'):\r\n \"\"\" List files in 
dir recursively. \"\"\"\r\n return [os.path.join(dpath, f)\r\n for dpath, dnames, fnames in os.walk(path)\r\n for f in fnames if f.endswith(endswith)]\r\n\r\ndef track_type(time, lat):\r\n \"\"\"\r\n Determines ascending and descending tracks.\r\n Defines unique tracks as segments with time breaks > tmax,\r\n and tests whether lat increases or decreases w/time.\r\n \"\"\"\r\n \r\n # Generate track segment\r\n tracks = np.zeros(lat.shape)\r\n \r\n # Set values for segment\r\n tracks[0:np.argmax(np.abs(lat))] = 1\r\n\r\n # Output index array\r\n i_asc = np.zeros(tracks.shape, dtype=bool)\r\n\r\n # Loop trough individual tracks\r\n for track in np.unique(tracks):\r\n \r\n # Get all points from an individual track\r\n i_track, = np.where(track == tracks)\r\n\r\n # Test tracks length\r\n if len(i_track) < 2:\r\n continue\r\n \r\n # Test if lat increases (asc) or decreases (des) w/time\r\n i_min = time[i_track].argmin()\r\n i_max = time[i_track].argmax()\r\n lat_diff = lat[i_track][i_max] - lat[i_track][i_min]\r\n \r\n # Determine track type\r\n if lat_diff > 0:\r\n i_asc[i_track] = True\r\n\r\n # Output index vector's\r\n return i_asc, np.invert(i_asc)\r\n\r\n\r\nindir = sys.argv[1] # input dir\r\noutdir = sys.argv[2] # output dir\r\nfmask = sys.argv[3] # geotiff file with mask\r\nproj = str(sys.argv[4]) # epsg number\r\nmeta = sys.argv[5] # \"A\" or \"P\"\r\nindex = int(sys.argv[6]) # mission index\r\nnjobs = int(sys.argv[7]) # number of parallel jobs\r\n\r\n# Generate file list\r\nfiles = list_files(indir, endswith='.H5')\r\n\r\nprint(('input dir:', indir))\r\nprint(('output dir:', outdir))\r\nprint(('mask file:', fmask))\r\nprint(('epsg num:', proj))\r\nprint(('metadata:', meta))\r\nprint(('njobs:', njobs))\r\nprint(('# files:', len(files)))\r\n\r\n\r\n# Projection - unprojected lat/lon\r\nprojGeo = pyproj.Proj(\"+init=EPSG:4326\")\r\n\r\n# Make pyproj format\r\nprojection = '+init=EPSG:' + proj\r\n\r\n# Projection - prediction grid\r\nprojGrd = pyproj.Proj(projection)\r\n\r\niter = 1\r\nindex = 600\r\n\r\n# Test for mask\r\nif fmask != 'None':\r\n \r\n # Read in masking grid\r\n (Xm, Ym, Zm, dX, dY, Proj) = geotiffread(fmask, meta)\r\n\r\ndef main(fname):\r\n \r\n print(('readg:', fname, '...'))\r\n \r\n global iter\r\n \r\n f = h5py.File(fname)\r\n \r\n d = {} # Dictionary for input fields\r\n \r\n d['t_sec'] = f['Data_40HZ/Time/d_UTCTime_40'] # [secs since 2000-01-01 12:00:00 UTC]\r\n \r\n d['lat'] = f['Data_40HZ/Geolocation/d_lat'] # [deg]\r\n d['lon'] = f['Data_40HZ/Geolocation/d_lon'] # [deg]\r\n\r\n d['num_pk'] = f['Data_40HZ/Waveform/i_numPk'] # Num Peaks found in the Return\r\n d['gain'] = f['Data_40HZ/Waveform/i_gval_rcv'] # counts [unitless]\r\n d['rec_nrg'] = f['Data_40HZ/Reflectivity/d_RecNrgAll'] # [joules]\r\n d['tx_nrg'] = f['Data_40HZ/Transmit_Energy/d_TxNrg'] # [joules]\r\n \r\n d['h_sat'] = f['Data_40HZ/Elevation_Corrections/d_satElevCorr'] # saturation cor [m]\r\n d['h_gc'] = f['Data_40HZ/Elevation_Corrections/d_GmC'] # GC-offset cor [m]\r\n d['h_dry'] = f['Data_40HZ/Elevation_Corrections/d_dTrop'] # dry tropo [m]\r\n d['h_wet'] = f['Data_40HZ/Elevation_Corrections/d_wTrop'] # wet tropo [m]\r\n \r\n d['h_sol'] = f['Data_40HZ/Geophysical/d_erElv'] # solid tide [m]\r\n d['h_geo'] = f['Data_40HZ/Geophysical/d_poTide'] # geoc pole tide [m]\r\n d['h_equi'] = f['Data_40HZ/Geophysical/d_eqElv'] # equilib tide [m]\r\n d['h_ellip'] = f['Data_40HZ/Geophysical/d_deltaEllip'] # h_TP - h_WGS84 [m]\r\n d['h_tide'] = f['Data_40HZ/Geophysical/d_ocElv'] # ocean tide [m]\r\n d['h_load'] = 
f['Data_40HZ/Geophysical/d_ldElv'] # load tide [m]\r\n \r\n d['h_cor'] = f['Data_40HZ/Elevation_Surfaces/d_elev'] # corrected height [m]\r\n d['misfit'] = f['Data_40HZ/Elevation_Surfaces/d_IceSVar'] # gaussian misfit [volts] [2]\r\n \r\n d['rec_ndx'] = f['Data_40HZ/Time/i_rec_ndx'] # record index\r\n d['shot_count'] = f['Data_40HZ/Time/i_shot_count'] # shot index within record\r\n \r\n # Elevation quality flag: 0=valid, 1=not_valid\r\n d['use_flg'] = f['Data_40HZ/Quality/elev_use_flg']\r\n \r\n # Cloud contamination flag: 0=false, 1=true\r\n d['cloud_flg'] = f['Data_40HZ/Elevation_Flags/elv_cloud_flg']\r\n \r\n # Attitude quality flag: 0=good, 50=warning, 100=bad, 127=not_valid\r\n d['att_flg'] = f['Data_40HZ/Quality/sigma_att_flg']\r\n \r\n # Saturation Correction Flag:\r\n # 0=not_saturated, 1=inconsequential, 2=applicable 3=not_computed 4=not_applicable\r\n d['sat_flg'] = f['Data_40HZ/Quality/sat_corr_flg']\r\n \r\n # 1Hz Track\r\n track_01Hz = f['Data_1HZ/Geolocation/i_track'][:]\r\n \r\n # Get unique track numbers\r\n track_id = np.unique(track_01Hz)\r\n \r\n # Initialize vector\r\n track_40Hz = np.empty((0,1), dtype='int')\r\n \r\n # Construct 40 Hz track vector - IMPROVE! SLOW WAY OF DOING IT\r\n for i in range(len(track_01Hz)):\r\n \r\n # Create 40 Hz vector\r\n track_40Hz = np.vstack((track_40Hz, np.ones((40,1),dtype='int') * track_01Hz[i]))\r\n\r\n # Construct cycle vector\r\n #cycle = int(fname[fname.rfind('/') + 1:].split('_')[3]) * np.ones(track_40Hz.shape)\r\n \r\n # Induvidual track identifier\r\n #d['orbit'] = np.char.add(cycle.astype('int').astype('str'), track_40Hz.astype('int').astype('str')).astype('int')\r\n \r\n '''\r\n [2] For postprocessing: The RMS error converged to about 0.25 m after\r\n removing the data with the 5% highest waveform misfits in each campaign, so we\r\n adopted that as a data-editing threshold, retaining 95% of the original data.\r\n Also, filter out cloud-contaminated points using the 'cloud_flg' param.\r\n '''\r\n \r\n # Wrap longitude to -180/180 degrees\r\n d['lon'] = wrap_to_180(d['lon'][:])\r\n \r\n # Reproject coordinates\r\n lon, lat = d['lon'][:], d['lat'][:]\r\n \r\n # Converte to Stereographical coordinates\r\n (x, y) = pyproj.transform(projGeo, projGrd, lon, lat)\r\n \r\n # Test for mask\r\n if fmask != 'None':\r\n \r\n # Interpolation of grid to points for masking\r\n Ii = bilinear2d(Xm, Ym, Zm, x.T, y.T, order=1)\r\n \r\n # Set all NaN's to zero\r\n Ii[np.isnan(Ii)] = 0\r\n \r\n # Convert to boolean\r\n mask = Ii == 1\r\n \r\n else:\r\n \r\n # Select all data\r\n mask = np.ones(lat.shape, dtype='bool')\r\n \r\n # Parameters for selecting valid pts\r\n h_cor = d['h_cor'][:]\r\n h_sat = d['h_sat'][:]\r\n use_flg = d['use_flg'][:]\r\n sat_flg = d['sat_flg'][:]\r\n att_flg = d['att_flg'][:]\r\n num_pk = d['num_pk'][:]\r\n \r\n # Get index of valid pts\r\n idx, = np.where(\r\n (mask == 1) &\r\n (np.abs(h_cor) < 1e10) &\r\n (np.abs(h_sat) < 1e10) &\r\n (np.abs(lat) <= 90) &\r\n (np.abs(lon) <= 180) &\r\n (use_flg == 0) &\r\n (sat_flg <= 2) &\r\n (att_flg == 0) &\r\n (num_pk == 1))\r\n\r\n # Check if no valid pts\r\n if len(idx) == 0:\r\n print(('no valid pts:', fname))\r\n return\r\n\r\n # Keep only valid pts (and load to memory)\r\n for k in list(d.keys()):\r\n \r\n # Edit all the fields\r\n d[k] = d[k][:][idx]\r\n \r\n # Unapply tides (retide)\r\n d['h_cor'] += d['h_tide'] + d['h_load']\r\n \r\n # Apply saturation cor\r\n d['h_cor'] += d['h_sat']\r\n \r\n # Convert ellipsoid: h_TP -> h_WGS84\r\n d['h_cor'] -= d['h_ellip']\r\n \r\n 
#FIXME: THIS IS NOT ORBIT NUMBER (ONE ID FOR EACH TRACK)!!!\r\n # Combine rec_ndx and shot_count to uniquely identify each GLAS laser shot\r\n #d['orbit'] = np.char.add(d['rec_ndx'].astype('str'),\r\n # d['shot_count'].astype('str')).astype('int')\r\n\r\n # Compute correct time - add back 'year 2000 + 12 hours' in secs\r\n d['t_sec'] += (2000 * 365.25 * 24 * 3600.) + (12 * 3600.)\r\n \r\n # Compute time in decimal years\r\n d['t_year'] = d['t_sec'] / (365.25 * 24 * 3600.)\r\n \r\n # Compute time since 1970 - remove year 1970 in secs\r\n d['t_sec'] -= 1970 * 365.25 * 24 * 3600.\r\n \r\n # Change path and/or name of read file\r\n name, ext = os.path.splitext(os.path.basename(fname))\r\n \r\n # Clip track vector\r\n tracks_40Hz = track_40Hz[idx]\r\n \r\n # Compute unique tracks\r\n tracks = np.unique(tracks_40Hz)\r\n \r\n # Create orbit array\r\n d['orbit'] = np.ones(d['lat'][:].shape) * np.nan\r\n \r\n # Select fields to save\r\n out = ['orbit',\r\n 't_sec',\r\n 't_year',\r\n 'lon',\r\n 'lat',\r\n 'h_cor',\r\n 'h_dry',\r\n 'h_ellip',\r\n 'h_equi',\r\n 'h_gc',\r\n 'h_geo',\r\n 'h_sat',\r\n 'h_sol',\r\n 'h_wet',\r\n 'gain',\r\n 'misfit',\r\n 'tx_nrg',\r\n 'rec_nrg',\r\n 'cloud_flg',]\r\n\r\n # Loop through tracks\r\n for i in range(len(tracks)):\r\n \r\n # Get index of induvidual tracks\r\n ind = (tracks_40Hz == tracks[i]).reshape(d['lat'][:].shape)\r\n \r\n # Set track datum identifier for each track\r\n (dec,year)=np.modf(d['t_year'][ind][0])\r\n month = np.round(dec * 12, decimals=0)\r\n day = np.round(dec * 365.25, decimals=0)\r\n \r\n # Datum string\r\n date = str(int(year))+'_'+str(int(month)).zfill(2)+'_'+str(int(day)).zfill(3)\r\n \r\n # Separate tracks\r\n (i_asc, i_des) = track_type(d['t_sec'][ind], d['lat'][ind])\r\n \r\n # Save ascending track\r\n if len(d['lat'][ind][i_asc]) > 0:\r\n \r\n # Psudo orbit number generation\r\n d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')\r\n \r\n # Orbit type identifier\r\n str_orb = 'READ_A'\r\n \r\n # Track number string\r\n str_trknum = '_'+str(int(iter)).zfill(6)+'_'\r\n\r\n # Fullname of output file\r\n outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)\r\n\r\n # Save data\r\n with h5py.File(outfile, 'w') as fout:\r\n [fout.create_dataset(k, data=d[k][ind][i_asc]) for k in out]\r\n\r\n # Update counter\r\n iter += 1\r\n print(('output file:', outfile))\r\n \r\n # Save descending track\r\n if len(d['lat'][ind][i_des]) > 0:\r\n \r\n # Psudo orbit number generation\r\n d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')\r\n \r\n # Orbit type identifier\r\n str_orb = 'READ_D'\r\n \r\n # Track number string\r\n str_trknum = '_'+str(int(iter)).zfill(6)+'_'\r\n \r\n # Fullname of output file\r\n outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)\r\n \r\n # Save data\r\n with h5py.File(outfile, 'w') as fout:\r\n [fout.create_dataset(k, data=d[k][ind][i_des]) for k in out]\r\n \r\n # Update counter\r\n iter += 1\r\n\r\n print(('output file:', outfile))\r\n\r\n f.close()\r\n\r\nif njobs == 1:\r\n print('running sequential code ...')\r\n [main(f) for f in files]\r\n\r\nelse:\r\n print(('running parallel code (%d jobs) ...' % njobs))\r\n from joblib import Parallel, delayed\r\n Parallel(n_jobs=njobs, verbose=5)(delayed(main)(f) for f in files)\r\n" ]
[ [ "numpy.vstack", "numpy.ones", "numpy.flipud", "numpy.zeros", "numpy.empty", "numpy.invert", "numpy.modf", "numpy.abs", "numpy.size", "scipy.ndimage.map_coordinates", "numpy.arange", "numpy.isnan", "numpy.round", "numpy.where", "numpy.meshgrid", "numpy.unique" ] ]
mathyouf/GPT-Games
[ "bf6e558bf6ec92d1fba97770587610da0f3447eb" ]
[ "src/interactive_conditional_samples.py" ]
[ "#!/usr/bin/env python3\n\nimport fire\nimport json\nimport os\nimport re\nimport numpy as np\nimport tensorflow as tf\n\nimport model, sample, encoder\n\ndef modify_raw_text(raw_text, interviewer, interviewee):\n return interviewer+\": \\\"\" + raw_text + \"\\\" \"+ interviewee +\":\\\"\"\n\ndef interact_model(\n model_name='124M',\n seed=None,\n nsamples=1,\n batch_size=1,\n length=None,\n temperature=1,\n top_k=0,\n top_p=1,\n models_dir='models',\n):\n \"\"\"\n Interactively run the model\n :model_name=124M : String, which model to use\n :seed=None : Integer seed for random number generators, fix seed to reproduce\n results\n :nsamples=1 : Number of samples to return total\n :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.\n :length=None : Number of tokens in generated text, if None (default), is\n determined by model hyperparameters\n :temperature=1 : Float value controlling randomness in boltzmann\n distribution. Lower temperature results in less random completions. As the\n temperature approaches zero, the model will become deterministic and\n repetitive. Higher temperature results in more random completions.\n :top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n considered for each step (token), resulting in deterministic completions,\n while 40 means 40 words are considered at each step. 0 (default) is a\n special setting meaning no restrictions. 40 generally is a good value.\n :models_dir : path to parent folder containing model subfolders\n (i.e. contains the <model_name> folder)\n \"\"\"\n models_dir = os.path.expanduser(os.path.expandvars(models_dir))\n if batch_size is None:\n batch_size = 1\n assert nsamples % batch_size == 0\n\n enc = encoder.get_encoder(model_name, models_dir)\n hparams = model.default_hparams()\n with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if length is None:\n length = hparams.n_ctx // 2\n elif length > hparams.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n with tf.Session(graph=tf.Graph()) as sess:\n context = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n output = sample.sample_sequence(\n hparams=hparams, length=length,\n context=context,\n batch_size=batch_size,\n temperature=temperature, top_k=top_k, top_p=top_p\n )\n\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))\n saver.restore(sess, ckpt)\n interviewer = input(\"What is your name? \")\n interviewee = input(\"Who are you talking to? 
\")\n previous_memory = \"\"\n while True:\n raw_text = input(interviewer+\" >>> \")\n while not raw_text:\n print('Prompt should not be empty!')\n raw_text = input(interviewer+\" >>> \")\n raw_text = modify_raw_text(raw_text, interviewer, interviewee)\n previous_memory += raw_text\n response = re.match(r'(.*?)\"', '495839045')\n while not response:\n context_tokens = enc.encode(previous_memory)\n generated = 0\n for _ in range(nsamples // batch_size):\n out = sess.run(output, feed_dict={\n context: [context_tokens for _ in range(batch_size)]\n })[:, len(context_tokens):]\n for i in range(batch_size):\n generated += 1\n text = enc.decode(out[i])\n response = re.match(r'(.*?)\"\\'', text)\n if response:\n match = re.match(r'(.*?)\"', text).group(0)\n # print(\"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40)\n # print(\"Raw Input:\", previous_memory)\n print(interviewee+\" >>> \",match[:-1])\n # print(\"=\" * 80)\n previous_memory += match + \" \"\n\nif __name__ == '__main__':\n fire.Fire(interact_model)\n\n" ]
[ [ "tensorflow.placeholder", "numpy.random.seed", "tensorflow.Graph", "tensorflow.set_random_seed", "tensorflow.train.Saver" ] ]
rparini/cxroots
[ "037247fc47b29781b9cc66857a8395283e8ecc86" ]
[ "cxroots/tests/test_deriv.py" ]
[ "import pytest\nimport numpy as np\nfrom numpy import cos, sin\n\nfrom cxroots import Circle, Rectangle\nfrom cxroots import CxDerivative\n\[email protected]('C', [\n pytest.param(Circle(0, 2), id='circle'),\n pytest.param(Rectangle([-1.5,1.5],[-2,2]), id='rect'),\n pytest.param(None, id='default')\n])\ndef test_CxDerivative(C):\n f = lambda z: z**10 - 2*z**5 + sin(z)*cos(z/2)\n df = lambda z: 10*(z**9 - z**4) + cos(z)*cos(z/2) - 0.5*sin(z)*sin(z/2)\n\n z = np.array([-1.234, 0.3+1j, 0.1j, -0.9-0.5j])\n\n assert CxDerivative(f, z, n=1, contour=C) == pytest.approx(df(z))\n\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.cos" ] ]
kingagla/reviews_classification
[ "9bf9636035bf14fb3ce151d075a6c04f4cdbfde6" ]
[ "scripts/models/01_prepare_and_save_models.py" ]
[ "import os\nimport pickle\nimport pandas as pd\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.models import Sequential\nfrom scripts.settings import *\nfrom scripts.utils import create_dir\n\n\ndef prepare_for_learning(file_path, model_path, n_samples=5000, use_neutral=False):\n # load data\n rev_vec = pd.read_pickle(file_path)\n # remove neutral if not used\n if not use_neutral:\n rev_vec = rev_vec[rev_vec['Information'] != 'neu']\n # use only part of available data\n rev_vec = rev_vec.sample(n_samples)\n # save indices of training and validation set\n pickle.dump(rev_vec.index, open(learning_index_path, 'wb'))\n\n X, y = rev_vec[[col for col in rev_vec.columns if col.startswith('Vec')]], rev_vec['Information']\n le = LabelEncoder()\n le.fit(y.values.reshape(-1, 1))\n create_dir(os.path.dirname(model_path))\n pickle.dump(le, open(model_path, 'wb'))\n return rev_vec, X, y\n\n\ndef classification_report_to_excel(y_test, y_pred, excel_path):\n cr = classification_report(y_test, y_pred, output_dict=True)\n create_dir(os.path.dirname(excel_path))\n pd.DataFrame(cr).T.to_excel(excel_path)\n\n\ndef neural_network():\n model = Sequential()\n model.add(Dense(256, input_dim=1024, activation='relu', use_bias=True,\n kernel_initializer='random_normal'))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation='relu', use_bias=True, kernel_initializer='random_normal'))\n model.add(Dropout(0.5))\n model.add(Dense(16, activation='relu', use_bias=True, kernel_initializer='random_normal'))\n model.add(Dense(1, activation='sigmoid', use_bias=True, kernel_initializer='random_normal'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['acc'])\n return model\n\n\ndef fit_and_save_model(X_train, y_train, model, model_path, network=False):\n # create directory for model\n create_dir(os.path.dirname(model_path))\n\n if network:\n checkpoint = ModelCheckpoint(model_path, monitor='val_acc', verbose=1, save_best_only=True)\n model.fit(X_train, y_train, epochs=150, batch_size=512, validation_split=0.2, callbacks=[checkpoint])\n else:\n model.fit(X_train, y_train)\n pickle.dump(model, open(model_path, 'wb'))\n\n\ndef main():\n rev_vec, X, y = prepare_for_learning(rev_path,\n os.path.join(model_dir, label_encoder_file),\n n_samples=5000,\n use_neutral=False)\n le_path = os.path.join(model_dir, label_encoder_file)\n le = pickle.load(open(le_path, 'rb'))\n y = le.transform(y)\n # learn random forest\n rf = RandomForestClassifier(n_estimators=100, max_depth=5,\n min_samples_leaf=2,\n class_weight='balanced', criterion='entropy')\n fit_and_save_model(X, y, rf, os.path.join(model_dir, random_forest_file), network=False)\n\n # use DBSCAN to find negative\n dbs = DBSCAN(eps=0.01, min_samples=2)\n pickle.dump(dbs, open(os.path.join(model_dir, dbscan_file), 'wb'))\n\n # use neural network\n network = neural_network()\n fit_and_save_model(X, y, network, os.path.join(model_dir, network_file), network=True)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.keras.models.Sequential", "pandas.read_pickle", "tensorflow.keras.layers.Dropout", "sklearn.metrics.classification_report", "sklearn.cluster.DBSCAN", "pandas.DataFrame", "sklearn.ensemble.RandomForestClassifier", "sklearn.preprocessing.LabelEncoder", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.ModelCheckpoint" ] ]
Jarino/cgp-optimization
[ "3b50813a591c3535c7846b7e8acf5f5959122d02" ]
[ "tengp_eval/optimizers/sa.py" ]
[ "from configparser import ConfigParser\n\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport pygmo as pg\n\nfrom tengp.individual import IndividualBuilder, NPIndividual\nfrom tengp import Parameters, FunctionSet\nfrom tengp_eval.coevolution import TrainersSet, GaPredictors\n\n\ndef fitness_function(individual, x, y):\n output = individual.transform(x)\n try:\n #return adjusted_r2_score(y, output, len(x), len(individual.genes))\n return mean_squared_error(output, y)\n except ValueError:\n return 10e10\n\nclass cost_function:\n def __init__(self, X, Y, params, bounds):\n self.params = params\n self.bounds = bounds\n self.X = X\n self.Y = Y\n\n def fitness(self, x):\n individual = NPIndividual(list(x), self.bounds, self.params)\n\n fitness = fitness_function(individual, self.X, self.Y)\n\n return [fitness]\n\n\n def get_bounds(self):\n return self.bounds\n\ndef define_cgp_system(n_nodes, n_inputs, n_outputs, funset, max_back):\n \"\"\"\n define CCGP system\n\n Return:\n IndividualBuilder object\n Parameters\n bounds (tuple)\n \"\"\"\n params = Parameters(n_inputs, n_outputs, 1, n_nodes, funset, real_valued=True, max_back=max_back)\n ib = IndividualBuilder(params)\n bounds = ib.create().bounds\n return ib, params, bounds\n\ndef run_benchmark_coevolution(cp, x_train, y_train, funset):\n ib, params, bounds = define_cgp_system(\n cp.getint('CGPPARAMS', 'n_nodes'),\n x_train.shape[1] if len(x_train.shape) > 1 else 1,\n y_train.shape[1] if len(y_train.shape) > 1 else 1,\n funset,\n cp.getint('CGPPARAMS', 'max_back'))\n\n # setup the coevolution elements\n ts = TrainersSet(ib, 16, fitness_function, x_train, y_train)\n predictors = GaPredictors(x_train, y_train, 10, 24)\n predictors.evaluate_fitness(ts)\n x_reduced, y_reduced = predictors.best_predictors_data()\n\n GENS_STEP = 50\n\n cf = cost_function(x_reduced, y_reduced, params, bounds)\n prob = pg.problem(cf)\n algo = pg.algorithm(pg.pso(\n gen=GENS_STEP,\n omega=cp.getfloat('OPTIMPARAMS', 'omega'),\n eta1=cp.getfloat('OPTIMPARAMS', 'eta1'),\n eta2=cp.getfloat('OPTIMPARAMS', 'eta2'),\n memory=True))\n algo.set_verbosity(1)\n pop = pg.population(prob, cp.getint('DEFAULT', 'population_size'))\n n_gens = GENS_STEP\n\n\n while n_gens < 500:\n\n pop = algo.evolve(pop)\n\n # calculate exact fitness of champion and\n # add it to the trainers set\n champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)\n try:\n champion.fitness = fitness_function(champion, x_train, y_train)\n ts.add_trainer(champion)\n except ValueError:\n print('unsuccessful adding of champion')\n\n # update random population\n ts.update_random_population()\n\n predictors.predictors_evolution_step(ts)\n print('changing the subset, best predictor: ', predictors.best_predictor.fitness)\n\n x_reduced, y_reduced = predictors.best_predictors_data()\n pop.problem.extract(object).X = x_reduced\n pop.problem.extract(object).Y = y_reduced\n n_gens += GENS_STEP\n\n uda = algo.extract(pg.pso)\n\n champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)\n champion.fitness = fitness_function(champion, x_train, y_train)\n\n\n fitnesses = [x[2] for x in uda.get_log()]\n fitnesses.append(champion.fitness)\n return fitnesses\n\n\ndef run_benchmark(cp, x_train, y_train, funset):\n ib, params, bounds = define_cgp_system(\n cp.getint('CGPPARAMS', 'n_nodes'),\n x_train.shape[1] if len(x_train.shape) > 1 else 1,\n y_train.shape[1] if len(y_train.shape) > 1 else 1,\n funset,\n cp.getint('CGPPARAMS', 'max_back'))\n cf = cost_function(x_train, y_train, params, 
bounds)\n prob = pg.problem(cf)\n\n algo = pg.algorithm(pg.simulated_annealing(\n Ts=cp.getfloat('OPTIMPARAMS', 'Ts'),\n Tf=cp.getfloat('OPTIMPARAMS', 'Tf'),\n n_T_adj=cp.getint('OPTIMPARAMS', 'n_T_adj'),\n n_range_adj=cp.getint('OPTIMPARAMS', 'n_range_adj'),\n bin_size=cp.getint('OPTIMPARAMS', 'bin_size'),\n start_range=cp.getfloat('OPTIMPARAMS', 'start_range')))\n\n algo.set_verbosity(100)\n pop = pg.population(prob, 1)\n pop = algo.evolve(pop)\n uda = algo.extract(pg.simulated_annealing)\n\n return [x[2] for x in uda.get_log()]\n\nRUNNERS = [run_benchmark]\n" ]
[ [ "sklearn.metrics.mean_squared_error" ] ]
THU-DA-6D-Pose-Group/Self6D-Diff-Renderer
[ "408330a9c7d7010a5af0a5b0b469f1ef695d18de", "408330a9c7d7010a5af0a5b0b469f1ef695d18de" ]
[ "core/dr_utils/dib_renderer_x/utils/sphericalcoord.py", "core/dr_utils/dib_renderer_x/renderer/texrender_batch.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport numpy as np\n\n\n##################################################################\n# symmetric over z axis\ndef get_spherical_coords_z(X):\n # X is N x 3\n rad = np.linalg.norm(X, axis=1)\n # Inclination\n theta = np.arccos(X[:, 2] / rad)\n # Azimuth\n phi = np.arctan2(X[:, 1], X[:, 0])\n\n # Normalize both to be between [-1, 1]\n vv = (theta / np.pi) * 2 - 1\n uu = ((phi + np.pi) / (2 * np.pi)) * 2 - 1\n # Return N x 2\n return np.stack([uu, vv], 1)\n\n\n# symmetric over x axis\ndef get_spherical_coords_x(X):\n # X is N x 3\n rad = np.linalg.norm(X, axis=1)\n # Inclination\n # y == 1\n # cos = 0\n # y == -1\n # cos = pi\n theta = np.arccos(X[:, 0] / rad)\n # Azimuth\n phi = np.arctan2(X[:, 2], X[:, 1])\n\n # Normalize both to be between [-1, 1]\n uu = (theta / np.pi) * 2 - 1\n vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1\n # Return N x 2\n return np.stack([uu, vv], 1)\n\n\n# symmetric spherical projection\ndef get_symmetric_spherical_tex_coords(vertex_pos, symmetry_axis=1, up_axis=2, front_axis=0):\n # vertex_pos is N x 3\n length = np.linalg.norm(vertex_pos, axis=1)\n # Inclination\n theta = np.arccos(vertex_pos[:, front_axis] / length)\n # Azimuth\n phi = np.abs(np.arctan2(vertex_pos[:, symmetry_axis], vertex_pos[:, up_axis]))\n\n # Normalize both to be between [-1, 1]\n uu = (theta / np.pi) * 2 - 1\n # vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1\n vv = (phi / np.pi) * 2 - 1\n # Return N x 2\n return np.stack([uu, vv], 1)\n\n\n#########################################################################\nif __name__ == \"__main__\":\n\n from utils.utils_mesh import loadobj, savemeshtes\n import cv2\n\n p, f = loadobj(\"2.obj\")\n uv = get_spherical_coords_x(p)\n uv[:, 0] = -uv[:, 0]\n\n uv[:, 1] = -uv[:, 1]\n uv = (uv + 1) / 2\n savemeshtes(p, uv, f, \"./2_x.obj\")\n\n tex = np.zeros(shape=(256, 512, 3), dtype=np.uint8)\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = (10, 200)\n fontScale = 5\n fontColor = (0, 255, 255)\n lineType = 2\n\n cv2.putText(tex, \"Hello World!\", bottomLeftCornerOfText, font, fontScale, fontColor, lineType)\n cv2.imshow(\"\", tex)\n cv2.waitKey()\n cv2.imwrite(\"2_x.png\", np.transpose(tex, [1, 0, 2]))\n", "from __future__ import print_function\nfrom __future__ import division\n\nfrom ..rasterizer import linear_rasterizer\nfrom ..utils import datanormalize\nfrom .fragment_shaders.frag_tex import fragmentshader\nfrom 
.vertex_shaders.perpsective import perspective_projection\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\n\n##################################################################\nclass TexRenderBatch(nn.Module):\n def __init__(self, height, width, filtering=\"nearest\"):\n super(TexRenderBatch, self).__init__()\n\n self.height = height\n self.width = width\n self.filtering = filtering\n\n def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ft_fx3=None):\n \"\"\"\n points: b x [points_1xpx3, faces_fx3]\n cameras: [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]\n uv_bxpx2: b x [1xpx2]\n texture_bx3xthxtw: b x [1x3xthxtw]\n ft_fx3: b x [fx3]\n \"\"\"\n b = len(points)\n assert b > 0, b\n points3d_1xfx9_list = []\n points2d_1xfx6_list = []\n normalz_1xfx1_list = []\n normal1_1xfx3_list = []\n uv_1xfx9_list = []\n\n single_intrinsic = True\n if cameras[2].ndim == 3:\n assert cameras[2].shape[0] == b\n single_intrinsic = False\n\n for i in range(b):\n ##############################################################\n # first, MVP projection in vertexshader\n points_1xpx3, faces_fx3 = points[i]\n if single_intrinsic:\n cam_params = [cameras[0][i : i + 1], cameras[1][i : i + 1], cameras[2]]\n else:\n cam_params = [cameras[0][i : i + 1], cameras[1][i : i + 1], cameras[2][i]]\n # use faces_fx3 as ft_fx3 if not given\n if ft_fx3 is None:\n ft_fx3_single = faces_fx3\n else:\n ft_fx3_single = ft_fx3[i]\n\n points3d_1xfx9, points2d_1xfx6, normal_1xfx3 = perspective_projection(points_1xpx3, faces_fx3, cam_params)\n\n ################################################################\n # normal\n\n # decide which faces are front and which faces are back\n normalz_1xfx1 = normal_1xfx3[:, :, 2:3]\n # normalz_bxfx1 = torch.abs(normalz_bxfx1)\n\n # normalize normal\n normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)\n\n ############################################################\n # second, rasterization\n uv_1xpx2 = uv_bxpx2[i]\n\n c0 = uv_1xpx2[:, ft_fx3_single[:, 0], :]\n c1 = uv_1xpx2[:, ft_fx3_single[:, 1], :]\n c2 = uv_1xpx2[:, ft_fx3_single[:, 2], :]\n mask = torch.ones_like(c0[:, :, :1])\n uv_1xfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)\n\n # append data\n points3d_1xfx9_list.append(points3d_1xfx9)\n points2d_1xfx6_list.append(points2d_1xfx6)\n normalz_1xfx1_list.append(normalz_1xfx1)\n normal1_1xfx3_list.append(normal1_1xfx3)\n uv_1xfx9_list.append(uv_1xfx9)\n\n # put the object with larger depth earlier\n\n # imrender = torch.empty((1, self.height, self.width, 3), device=device, dtype=torch.float32)\n # improb_1xhxwx1 = torch.empty((1, self.height, self.width, 1), device=device, dtype=torch.float32)\n # fg_mask = torch.empty((1, self.height, self.width, 1), device=device, dtype=torch.float32)\n ren_ims = []\n ren_masks = []\n ren_probs = []\n for i in range(b):\n imfeat, improb_1xhxwx1_i = linear_rasterizer(\n self.width,\n self.height,\n points3d_1xfx9_list[i],\n points2d_1xfx6_list[i],\n normalz_1xfx1_list[i],\n uv_1xfx9_list[i],\n )\n imtexcoords = imfeat[:, :, :, :2] # (1,H,W,2)\n hardmask = imfeat[:, :, :, 2:3] # (1,H,W,1) mask\n # fragrement shader\n texture_1x3xthxtw = texture_bx3xthxtw[i]\n imrender_i = fragmentshader(imtexcoords, texture_1x3xthxtw, hardmask)\n ren_ims.append(imrender_i) # 1HW3\n ren_probs.append(improb_1xhxwx1_i)\n ren_masks.append(hardmask)\n\n imrender = torch.cat(ren_ims, dim=0) # bHW3\n improb_bxhxwx1 = torch.cat(ren_probs, dim=0)\n mask_bxhxwx1 = torch.cat(ren_masks, dim=0)\n # return imrender, improb_1xhxwx1, normal1_1xfx3_list\n 
return imrender, improb_bxhxwx1, normal1_1xfx3_list, mask_bxhxwx1\n" ]
[ [ "numpy.arctan2", "numpy.transpose", "numpy.zeros", "numpy.arccos", "numpy.stack", "numpy.linalg.norm" ], [ "torch.ones_like", "torch.cat" ] ]
gioramponi/LOGEL
[ "e862324816c57dd5d07691ee8583259a6a62116c" ]
[ "gridworld/lfl/mdp_utils.py" ]
[ "\"\"\"utils for entropy-regularized discrete MDPs.\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\n\n\ndef softmax(x, tau=1.):\n e = np.exp(x * tau)\n z = -np.log(sum(e))\n return np.exp(x * tau + z)\n\n\ndef score_policy(pi, r, p, alpha, gamma):\n \"\"\"Returns expected score J(pi) = v_pi(start) using soft policy evaluation.\"\"\"\n n_states, n_actions, _ = p.shape\n q_pi = np.random.rand(n_states, n_actions)\n v_pi = np.zeros(n_states)\n for _ in range(1000):\n v_pi = np.zeros(n_states)\n for state in range(n_states):\n for action_ in range(n_actions):\n v_pi[state] += pi[state, action_] * \\\n (q_pi[state, action_] - alpha * np.log(pi[state, action_]))\n\n q_pi *= 0\n for state in range(n_states):\n for action in range(n_actions):\n q_pi[state, action] = r[state, action]\n for state_ in range(n_states):\n q_pi[state, action] += gamma * p[state, action, state_] * v_pi[state_]\n\n j_pi = v_pi[0]\n return j_pi\n\n\ndef solve_entropy_regularized_mdp(r, p, alpha, gamma):\n \"\"\"Returns optimal (soft) policy pi* and score J(pi*).\"\"\"\n n_states, n_actions, _ = p.shape\n q = np.zeros((n_states, n_actions))\n v = np.log(np.sum(np.exp(q), 1))\n # <<<<<<< HEAD\n print(\"r, p: \", r.shape, p.shape)\n # =======\n #\n # >>>>>>> aed0552fe0dea9129b017edf7ec4b9d4c4dcf9f2\n for _ in range(1000):\n q = r + gamma * np.sum(p * np.tile(v, (n_states, n_actions, 1)), 2)\n v = alpha * np.log(np.sum(np.exp(q / alpha), 1))\n\n pi_star = np.zeros((n_states, n_actions))\n for state in range(n_states):\n pi_star[state, :] = softmax(q[state, :] / alpha)\n\n j_pi_star = v[0]\n return pi_star, j_pi_star\n\n\ndef sample_sa_trajectory(p, pi, length):\n \"\"\"Returns a trajectory sampled from the learner's policy pi.\"\"\"\n n_states, n_actions, _ = p.shape\n trajectory = []\n state = 0\n action = np.random.choice(range(n_actions), p=pi[state, :])\n for _ in range(length):\n new_state = np.random.choice(range(n_states), p=p[state, action, :])\n new_action = np.random.choice(range(n_actions), p=pi[new_state, :])\n trajectory.append((state, action))\n state = new_state\n action = new_action\n return trajectory\n\n\ndef sample_sar_trajectory(p, pi, r, length):\n \"\"\"Returns a trajectory sampled from the learner's policy pi.\"\"\"\n n_states, n_actions, _ = p.shape\n trajectory = []\n state = 0\n action = np.random.choice(range(n_actions), p=pi[state, :])\n for _ in range(length):\n new_state = np.random.choice(range(n_states), p=p[state, action, :])\n new_action = np.random.choice(range(n_actions), p=pi[new_state, :])\n trajectory.append((state, action, r[state, action]))\n state = new_state\n action = new_action\n return trajectory" ]
[ [ "numpy.tile", "numpy.zeros", "numpy.exp", "numpy.log", "numpy.random.rand" ] ]
b3ttin4/network_simulation_and_analysis
[ "56ec3fd497ad95eee6eec00042d332133495288e" ]
[ "network_model/tools/bn_tools_t.py" ]
[ "import numpy as np\n\n\n# Nonlinearity functions (Numpy implementation)\nnl_linear = lambda x: x\nnl_tanh = lambda x: np.tanh(x)\nnl_sigmoid = lambda x: 1./(1+np.exp(-x)) \nnl_rect = lambda x: np.clip(x, 0, np.inf)\n#nl_rect = lambda x: np.clip(x, -np.inf, np.inf)\nnl_shallow_rect = lambda x: np.clip(0.1*x, 0, np.inf)\nnl_clip = lambda x: np.clip(x, 0, 1)\nnl_softplus = lambda x: np.log(1. + np.exp(x)) #\n#'''\n# Nonlinearity functions (Theano implementation)\nimport numpy, theano\nimport numpy.distutils\nimport numpy.distutils.__config__\nimport theano.tensor as T\nnl_linear_t = lambda x: x\nnl_tanh_t = lambda x: T.tanh(x) \nnl_sigmoid_t = lambda x: T.nnet.sigmoid(x) \nnl_fermi_t = lambda x: T.nnet.sigmoid(x*50)\nnl_clip_t = lambda x: T.clip(x, 0., 1.)\nnl_rect_t = lambda x: T.maximum(x, 0.)\nnl_rect_squared_t = lambda x: T.maximum(x**2, 0.)\nnl_shallow_rect_t = lambda x: T.maximum(0.1*x, 0.)\n#'''\ndef convert_input_const_to_time(inp, num_frames):\n if inp.shape[0] != 1:\n raise Exception(\"First axis of inp has to be 1-dim.\")\n if inp.shape[1] != 1:\n inp = inp[:, 0:1, :]\n print('WARNING (bn_tools): Input has more than one frame. Only first frame will be broadcast.')\n \n inp = np.tile(inp, (1, num_frames, 1))\n return inp\n \ndef check_nonlinearities():\n import matplotlib.pyplot as plt\n x_np=np.arange(-5,5,0.1).astype('float32')\n x=theano.shared(x_np) \n# for fkt in [nl_linear_t,nl_rect_t,nl_clip_t,nl_sigmoid_t, nl_tanh_t]:\n for fkt in [nl_clip_t,nl_sigmoid_t]:\n\n y= fkt(x)\n tf = theano.function([],y)\n plt.plot(x_np, tf())\n plt.show()\n \nif __name__=='__main__':\n check_nonlinearities()\n" ]
[ [ "numpy.tile", "numpy.exp", "numpy.arange", "matplotlib.pyplot.show", "numpy.clip", "numpy.tanh" ] ]
project-k-0-1/project-k
[ "fa5be043a3c82daee992d28db25519e2b1b53289" ]
[ "sa_numeric.py" ]
[ "\"\"\" Numerical functions \"\"\"\nimport math\nimport numpy as np\nimport pymysql.cursors\nfrom sa_db import sa_db_access\nACCESS_OBJ = sa_db_access()\nDB_USR = ACCESS_OBJ.username()\nDB_PWD = ACCESS_OBJ.password()\nDB_NAME = ACCESS_OBJ.db_name()\nDB_SRV = ACCESS_OBJ.db_server()\n\ndef get_pct_change(ini_val, new_val):\n \"\"\" xxx \"\"\"\n if not new_val == 0:\n if new_val < ini_val:\n return_data = ((ini_val - new_val) / ini_val) * (-1)\n else:\n return_data = (new_val - ini_val) / new_val\n else:\n return_data = 0\n\n return return_data\n\n\ndef get_stdev(sql):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with just one numerical value to compute standard deviation\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n cursor.execute(sql)\n list_data = list(cursor.fetchall())\n return_data = np.std(list_data)\n cursor.close()\n connection.close()\n\n return return_data\n\ndef get_volatility_risk(sql, is_portf, symbol):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with one numerical column to compute volatility risk\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n\n if is_portf:\n sql_i = \"SELECT account_reference FROM instruments WHERE symbol='\"+ str(symbol) +\"'\"\n cursor.execute(sql_i)\n res = cursor.fetchall()\n for row in res:\n reference = row[0]\n else:\n cursor.execute(sql)\n res = cursor.fetchall()\n for row in res:\n reference = row[0]\n cursor.close()\n connection.close()\n\n stdev = get_stdev(sql)\n ref_price = reference - stdev\n return_data = abs(get_pct_change(reference, ref_price))\n return return_data\n\ndef get_mdd(sql):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with just one numerical value to compute maximum drawdown\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n cursor.execute(sql)\n res = cursor.fetchall()\n top = 0\n breset = math.pow(10, 100)\n bottom = breset\n pct_dd = 0\n cur_dd = 0\n for row in res:\n val = row[0]\n\n if val > top:\n top = val\n bottom = breset\n\n if val < bottom:\n bottom = val\n\n if bottom < top:\n cur_dd = abs(get_pct_change(bottom, top))\n else:\n cur_dd = 0\n\n if cur_dd > pct_dd:\n pct_dd = cur_dd\n cursor.close()\n connection.close()\n\n return_data = pct_dd\n return return_data\n\ndef get_romad(sql):\n \"\"\" xxx \"\"\"\n return_data = 0\n #sql with one column as numerical value to compute return on maximum drawdown\n #ordered by date ASC\n connection = pymysql.connect(host=DB_SRV,\n user=DB_USR,\n password=DB_PWD,\n db=DB_NAME,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cursor = connection.cursor(pymysql.cursors.SSCursor)\n cursor.execute(sql)\n res = cursor.fetchall()\n i = 0\n first = 0\n last = 0\n for row in res:\n if i == 0:\n first = row[0]\n last = row[0]\n i += 1\n cursor.close()\n connection.close()\n\n instrument_returns = get_pct_change(first, last)\n drawdown = get_mdd(sql)\n\n if drawdown >0:\n return_data = instrument_returns / drawdown\n else:\n return_data = 0\n\n return return_data\n" ]
[ [ "numpy.std" ] ]
parachutel/garage
[ "e9d4301278f5dd31e3cbd20df1422befa2d0b6c4" ]
[ "tests/benchmarks/test_benchmark_trpo.py" ]
[ "'''\nThis script creates a regression test over garage-TRPO and baselines-TRPO.\n\nUnlike garage, baselines doesn't set max_path_length. It keeps steps the action\nuntil it's done. So we introduced tests.wrappers.AutoStopEnv wrapper to set\ndone=True when it reaches max_path_length. We also need to change the\ngarage.tf.samplers.BatchSampler to smooth the reward curve.\n'''\nimport datetime\nimport os.path as osp\nimport random\n\nfrom baselines import logger as baselines_logger\nfrom baselines.bench import benchmarks\nfrom baselines.common.tf_util import _PLACEHOLDER_CACHE\nfrom baselines.ppo1.mlp_policy import MlpPolicy\nfrom baselines.trpo_mpi import trpo_mpi\nimport dowel\nfrom dowel import logger as dowel_logger\nimport gym\nimport pytest\nimport tensorflow as tf\n\nfrom garage.envs import normalize\nfrom garage.experiment import deterministic\nfrom garage.tf.algos import TRPO\nfrom garage.tf.baselines import GaussianMLPBaseline\nfrom garage.tf.envs import TfEnv\nfrom garage.tf.experiment import LocalTFRunner\nfrom garage.tf.policies import GaussianMLPPolicy\nimport tests.helpers as Rh\nfrom tests.wrappers import AutoStopEnv\n\n\nclass TestBenchmarkPPO:\n '''Compare benchmarks between garage and baselines.'''\n\n @pytest.mark.huge\n def test_benchmark_trpo(self):\n '''\n Compare benchmarks between garage and baselines.\n\n :return:\n '''\n mujoco1m = benchmarks.get_benchmark('Mujoco1M')\n\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')\n benchmark_dir = './data/local/benchmarks/trpo/%s/' % timestamp\n result_json = {}\n for task in mujoco1m['tasks']:\n env_id = task['env_id']\n env = gym.make(env_id)\n baseline_env = AutoStopEnv(env_name=env_id, max_path_length=100)\n\n seeds = random.sample(range(100), task['trials'])\n\n task_dir = osp.join(benchmark_dir, env_id)\n plt_file = osp.join(benchmark_dir,\n '{}_benchmark.png'.format(env_id))\n baselines_csvs = []\n garage_csvs = []\n\n for trial in range(task['trials']):\n _PLACEHOLDER_CACHE.clear()\n seed = seeds[trial]\n\n trial_dir = task_dir + '/trial_%d_seed_%d' % (trial + 1, seed)\n garage_dir = trial_dir + '/garage'\n baselines_dir = trial_dir + '/baselines'\n\n with tf.Graph().as_default():\n # Run garage algorithms\n env.reset()\n garage_csv = run_garage(env, seed, garage_dir)\n\n # Run baseline algorithms\n baseline_env.reset()\n baselines_csv = run_baselines(baseline_env, seed,\n baselines_dir)\n\n garage_csvs.append(garage_csv)\n baselines_csvs.append(baselines_csv)\n\n Rh.plot(\n b_csvs=baselines_csvs,\n g_csvs=garage_csvs,\n g_x='Iteration',\n g_y='AverageReturn',\n b_x='EpThisIter',\n b_y='EpRewMean',\n trials=task['trials'],\n seeds=seeds,\n plt_file=plt_file,\n env_id=env_id,\n x_label='Iteration',\n y_label='AverageReturn')\n\n result_json[env_id] = Rh.create_json(\n b_csvs=baselines_csvs,\n g_csvs=garage_csvs,\n seeds=seeds,\n trails=task['trials'],\n g_x='Iteration',\n g_y='AverageReturn',\n b_x='TimestepsSoFar',\n b_y='EpRewMean',\n factor_g=1024,\n factor_b=1)\n env.close()\n\n Rh.write_file(result_json, 'TRPO')\n\n\ndef run_garage(env, seed, log_dir):\n '''\n Create garage model and training.\n\n Replace the trpo with the algorithm you want to run.\n\n :param env: Environment of the task.\n :param seed: Random seed for the trial.\n :param log_dir: Log dir path.\n :return:import baselines.common.tf_util as U\n '''\n deterministic.set_seed(seed)\n\n with LocalTFRunner() as runner:\n env = TfEnv(normalize(env))\n\n policy = GaussianMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=(32, 32),\n 
hidden_nonlinearity=tf.nn.tanh,\n output_nonlinearity=None,\n )\n\n baseline = GaussianMLPBaseline(\n env_spec=env.spec,\n regressor_args=dict(\n hidden_sizes=(32, 32),\n use_trust_region=True,\n ),\n )\n\n algo = TRPO(\n env_spec=env.spec,\n policy=policy,\n baseline=baseline,\n max_path_length=100,\n discount=0.99,\n gae_lambda=0.98,\n max_kl_step=0.01,\n policy_ent_coeff=0.0,\n )\n\n # Set up logger since we are not using run_experiment\n tabular_log_file = osp.join(log_dir, 'progress.csv')\n dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))\n dowel_logger.add_output(dowel.StdOutput())\n dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))\n\n runner.setup(algo, env)\n runner.train(n_epochs=976, batch_size=1024)\n\n dowel_logger.remove_all()\n\n return tabular_log_file\n\n\ndef run_baselines(env, seed, log_dir):\n '''\n Create baselines model and training.\n\n Replace the trpo and its training with the algorithm you want to run.\n\n :param env: Environment of the task.\n :param seed: Random seed for the trial.\n :param log_dir: Log dir path.\n :return\n '''\n with tf.compat.v1.Session().as_default():\n baselines_logger.configure(log_dir)\n\n def policy_fn(name, ob_space, ac_space):\n return MlpPolicy(\n name=name,\n ob_space=ob_space,\n ac_space=ac_space,\n hid_size=32,\n num_hid_layers=2)\n\n trpo_mpi.learn(\n env,\n policy_fn,\n timesteps_per_batch=1024,\n max_kl=0.01,\n cg_iters=10,\n cg_damping=0.1,\n max_timesteps=int(1e6),\n gamma=0.99,\n lam=0.98,\n vf_iters=5,\n vf_stepsize=1e-3)\n env.close()\n\n return osp.join(log_dir, 'progress.csv')\n" ]
[ [ "tensorflow.Graph", "tensorflow.compat.v1.Session" ] ]
zahrag/3DHARSOM
[ "f934d0b5786d2edac29a7a18be31fa74aafcb881" ]
[ "codes/SOM.py" ]
[ "\n\"\"\"\n Author: Zahra Gharaee.\n This code is written for the 3D-Human-Action-Recognition Project, started March 14 2014.\n \"\"\"\n\nimport numpy as np\nfrom numpy import linalg as LA\n\n\nclass SOM:\n\n def __init__(self, learning, outputsize_x, outputsize_y, inputsize, sigma, softmax_exponent, max_epoch):\n\n self.name = 'SOM'\n self.learning = learning\n self.outputsize_x = outputsize_x\n self.outputsize_y = outputsize_y\n self.inputsize = inputsize\n self.sigma = sigma\n self.softmax_exponent = softmax_exponent\n self.max_epoch = max_epoch\n self.metric = 'Euclidean'\n self.normalize_input = False\n self.normalize_weights = False\n self.softmax_normalization = True\n self.neighborhood_decay = 0.9999\n self.neighborhood_min = 1\n self.learningRate = 0.1\n self.learningRate_decay = 0.9999\n self.learningRate_min = 0.01\n self.neighborhood_radius = outputsize_x\n self.node_map = np.zeros((outputsize_x, outputsize_y, 2))\n self.weights = np.random.rand(outputsize_x, outputsize_y, inputsize) # Rows, Columns, Depth\n\n for i in range(outputsize_x):\n for j in range(outputsize_y):\n self.node_map[i, j, 0] = i\n self.node_map[i, j, 1] = j\n\n def normalize(self, state):\n\n if self.normalize_input:\n state /= LA.norm(np.expand_dims(state, axis=0))\n\n return state\n\n def soft_max_normalization(self, state):\n\n m = np.max(state)\n if m != 0:\n state /= m\n\n return state\n\n def set_activity(self, state):\n\n if self.metric == 'Euclidean':\n dist = np.sum((state - self.weights) ** 2, axis=2)\n activity = np.exp(-dist / self.sigma)\n\n else:\n # Scalar Product\n mat_mul = state * self.weights\n activity = mat_mul.sum(axis=2)\n\n if self.softmax_exponent != 1:\n activity = activity ** self.softmax_exponent\n\n if self.softmax_normalization:\n activity = self.soft_max_normalization(activity)\n\n return activity\n\n def find_winning_node(self, activity):\n\n winner_x, winner_y = np.unravel_index(np.argmax(activity, axis=None), activity.shape)\n winning_node = np.array([winner_x, winner_y])\n\n return winning_node\n\n def learn(self, state, winner):\n\n dis = np.sum((self.node_map - winner) ** 2, axis=2)\n gus = np.exp(-dis / (2 * self.neighborhood_radius ** 2))\n err = state - self.weights\n self.weights += self.learningRate * (err.T * gus.T).T\n\n def learning_decay(self):\n\n self.learningRate *= self.learningRate_decay\n if self.learningRate < self.learningRate_min:\n self.learningRate = self.learningRate_min\n\n self.neighborhood_radius *= self.neighborhood_decay\n if self.neighborhood_radius < self.neighborhood_min:\n self.neighborhood_radius = self.neighborhood_min\n\n def run_SOM(self, state):\n\n state = self.normalize(state)\n\n activity = self.set_activity(state)\n\n winner = self.find_winning_node(activity)\n\n if self.learning:\n self.learn(state, winner)\n self.learning_decay()\n\n return activity, winner\n\n\n\n\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.exp", "numpy.argmax", "numpy.max", "numpy.expand_dims", "numpy.random.rand", "numpy.array" ] ]
JPLMLIA/libeos
[ "3ad25c22159edf79d407454e32b8f07333cb57c2" ]
[ "pims/els_data.py" ]
[ "# Cassini CAPS ELS data reader\n# Modeled after Gary's MDIS reader\n# Kiri Wagstaff, 11/28/18\n\nimport os\nfrom datetime import datetime\nfrom collections import defaultdict\nimport numpy as np\nfrom pds.core.parser import Parser\nfrom scipy.interpolate import interp1d\n\nGEOMFILE = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'ref',\n 'geometricfactor.npz'\n)\n_EARRAY = None\n_GEOM = None\n\nE_CHARGE_COULOMBS = 1.602176487e-19\nE_MASS_KG = 9.10938188e-31\n\ndef _load_gfactors():\n \"\"\"\n Using global variables here because we only want to read these values from\n file once, then cache them at the module level\n \"\"\"\n global _EARRAY\n global _GEOM\n if _EARRAY is None:\n sav = np.load(GEOMFILE)\n _EARRAY = sav['earray']\n _GEOM = sav['geom']\n\ndef needs_gfactors(f):\n \"\"\"\n Decorator for any function that needs to have the geometric factors loaded\n first (calls `_load_gfactors` prior to calling the function).\n \"\"\"\n def fprime(*args, **kwargs):\n _load_gfactors()\n return f(*args, **kwargs)\n return fprime\n\n@needs_gfactors\ndef compute_def(e, counts):\n \"\"\"\n Computes the Differential Energy Flux (DEF)\n Units: m^-2 sr^-1 s^-1\n\n According to Abi's script and the CAPS User Guide, this is done by dividing\n the counts by the anode- and energy-specific geometric factors.\n \"\"\"\n\n # According to section 9.2 of the CAPS PDS User Guide, the proper thing to\n # do is interpolate the geometric factors: \"If the ELS data record you are\n # working with has energy summing ... then you can use the above table to\n # interpolate the value you need for G.\"\n geom_interp = interp1d(\n _EARRAY, _GEOM, axis=0,\n fill_value='extrapolate',\n bounds_error=False,\n assume_sorted=True,\n )\n G = geom_interp(e)\n\n # newaxis is for the \"phi\" dimension of the data\n return counts / G[..., np.newaxis]\n\ndef compute_dnf(e, def_data):\n \"\"\"\n Computes the Differential Number Flux (DNF)\n Units: m^-2 sr^-1 s^-1 J^-1\n\n Following Abi's script and the CAPS User Guide, this is the DEF divided by\n the product of the energy and the charge of the particle (electron).\n \"\"\"\n # Add the new axes to broadcast across the theta/phi dimensions\n return def_data / (E_CHARGE_COULOMBS*e[..., np.newaxis, np.newaxis])\n\ndef compute_psd(e, def_data):\n \"\"\"\n Computes the Phase Space Density (PSD)\n Units: m^-6 s^-3\n\n Following Abi's script and the CAPS User Guide, this is the DEF times a\n factor of (mass^2 / (2 q^2 E^2)).\n the product of the energy and the charge of the particle (electron).\n \"\"\"\n qE_squared = (E_CHARGE_COULOMBS*e)**2\n # Add the new axes to broadcast across the theta/phi dimensions\n return (\n def_data * (E_MASS_KG**2) /\n (2 * qE_squared[..., np.newaxis, np.newaxis])\n )\n\ndef parse_dates(datearray):\n return np.array([\n datetime.strptime(row.tostring(), '%Y-%jT%H:%M:%S.%f')\n for row in datearray\n ])\n\ndef reshape_data(data):\n # Dimensions taken from ELS_V01.FMT\n # (records, energy, theta, phi)\n return data.reshape((-1, 63, 8, 1))\n\nclass ELS(object):\n\n COLUMNS = (\n # Values obtained from ELS_V01.FMT\n # Name, start byte, dtype, items, missing constant\n ('start_date', 1, np.uint8, 21, None),\n ('dead_time_method', 22, np.uint8, 1, None),\n ('record_dur', 25, np.float32, 1, 65535.0),\n ('acc_time', 29, np.float32, 63, 65535.0),\n ('data', 281, np.float32, 504, 65535.0),\n ('dim1_e', 2297, np.float32, 63, 65535.0),\n ('dim1_e_upper', 2549, np.float32, 63, 65535.0),\n ('dim1_e_lower', 2801, np.float32, 63, 65535.0),\n ('dim2_theta', 
3053, np.float32, 8, 65535.0),\n ('dim2_theta_upper', 3085, np.float32, 8, 65535.0),\n ('dim2_theta_lower', 3117, np.float32, 8, 65535.0),\n ('dim3_phi', 3149, np.float32, 1, 65535.0),\n ('dim3_phi_upper', 3153, np.float32, 1, 65535.0),\n ('dim3_phi_lower', 3157, np.float32, 1, 65535.0),\n )\n\n POSTPROCESS = {\n 'start_date': parse_dates,\n 'data': reshape_data,\n }\n\n def __init__(self, data_path, lbl_path=None, verbose=False):\n \"\"\"\n If the LBL file path is not specified, we'll assume that it is\n sitting right next to the DAT file (and raise an Error if not).\n \"\"\"\n self.data_path = data_path\n if lbl_path is None:\n # Infer the LBL path if not supplied\n data_base, data_ext = os.path.splitext(data_path)\n if data_ext.lower() == data_ext:\n lbl_path = data_base + '.lbl'\n else:\n lbl_path = data_base + '.LBL'\n\n if not os.path.exists(lbl_path):\n raise ValueError('Expected LBL file \"%s\" does not exist' % lbl_path)\n\n self.lbl_path = lbl_path\n self.verbose = verbose\n\n self._load()\n\n def _log(self, msg):\n if self.verbose:\n print(msg)\n\n def _load(self):\n with open(self.lbl_path, 'r') as f:\n parser = Parser()\n labels = parser.parse(f)\n\n record_bytes = int(labels['RECORD_BYTES'])\n nrecords = int(labels['FILE_RECORDS'])\n\n columns = defaultdict(list)\n with open(self.data_path, 'rb') as f:\n for i in range(nrecords):\n for cname, cstart, ctype, citems, _ in ELS.COLUMNS:\n # Subtract 1 because they are indexed from 1 in the .FMT\n f.seek(i*record_bytes + cstart - 1)\n columns[cname].append(f.read(np.dtype(ctype).itemsize*citems))\n\n for cname, _, ctype, citems, missing in ELS.COLUMNS:\n cstr = ''.join(columns[cname])\n col = np.fromstring(cstr, dtype=ctype, count=nrecords*citems)\n col = np.squeeze(col.reshape((nrecords, citems)))\n\n # Replace missing value with NaN\n if missing is not None:\n col[col == missing] = np.nan\n\n # Apply post-processing steps to appropriate columns\n if cname in ELS.POSTPROCESS:\n col = ELS.POSTPROCESS[cname](col)\n\n # Store column as object attribute\n setattr(self, cname, col)\n\n # Add iso_data by summing across theta/phi\n self.iso_data = np.sum(self.data, axis=(-2, -1))\n\n # Compute DEF, DNF, and PSD\n self.def_data = compute_def(self.dim1_e, self.data)\n self.dnf_data = compute_dnf(self.dim1_e, self.def_data)\n self.psd_data = compute_psd(self.dim1_e, self.def_data)\n" ]
[ [ "numpy.load", "numpy.sum", "scipy.interpolate.interp1d", "numpy.dtype", "numpy.fromstring" ] ]
Shuai-Xie/openseg.pytorch
[ "79116a58782ccd2150f9eb9054e70cfd42fc9773" ]
[ "lib/loss/loss_helper.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Donny You, RainbowSecret\n## Microsoft Research\n## [email protected]\n## Copyright (c) 2019\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pdb\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom lib.utils.tools.logger import Logger as Log\n\n\nclass WeightedFSOhemCELoss(nn.Module):\n def __init__(self, configer):\n super().__init__()\n self.configer = configer\n self.thresh = self.configer.get('loss', 'params')['ohem_thresh']\n self.reduction = 'elementwise_mean'\n if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):\n self.reduction = self.configer.get('loss', 'params')['ce_reduction']\n\n def forward(self, predict, target, min_kept=1, weight=None, ignore_index=-1, **kwargs):\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n \"\"\"\n prob_out = F.softmax(predict, dim=1)\n tmp_target = target.clone()\n tmp_target[tmp_target == ignore_index] = 0\n prob = prob_out.gather(1, tmp_target.unsqueeze(1))\n mask = target.contiguous().view(-1,) != ignore_index\n sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()\n min_threshold = sort_prob[min(min_kept, sort_prob.numel() - 1)]\n threshold = max(min_threshold, self.thresh)\n loss_matrix = F.cross_entropy(predict, target, weight=weight, ignore_index=ignore_index, reduction='none').contiguous().view(-1,)\n sort_loss_matrix = loss_matrix[mask][sort_indices]\n select_loss_matrix = sort_loss_matrix[sort_prob < threshold]\n if self.reduction == 'sum':\n return select_loss_matrix.sum()\n elif self.reduction == 'elementwise_mean':\n return select_loss_matrix.mean()\n else:\n raise NotImplementedError('Reduction Error!')\n\n\n# Cross-entropy Loss\nclass FSCELoss(nn.Module):\n def __init__(self, configer=None):\n super(FSCELoss, self).__init__()\n self.configer = configer\n weight = None\n if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):\n weight = self.configer.get('loss', 'params')['ce_weight']\n weight = torch.FloatTensor(weight).cuda()\n\n reduction = 'elementwise_mean'\n if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):\n reduction = self.configer.get('loss', 'params')['ce_reduction']\n\n ignore_index = -1\n if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):\n ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']\n\n self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)\n\n def forward(self, inputs, *targets, weights=None, **kwargs):\n loss = 0.0\n if isinstance(inputs, tuple) or isinstance(inputs, list):\n if weights is None:\n weights = [1.0] * len(inputs)\n\n for i in range(len(inputs)):\n if len(targets) > 1:\n target = self._scale_target(targets[i], (inputs[i].size(2), inputs[i].size(3)))\n loss += weights[i] * self.ce_loss(inputs[i], target)\n else:\n target = self._scale_target(targets[0], (inputs[i].size(2), inputs[i].size(3)))\n loss += weights[i] * self.ce_loss(inputs[i], 
target)\n\n else:\n target = self._scale_target(targets[0], (inputs.size(2), inputs.size(3)))\n loss = self.ce_loss(inputs, target)\n\n return loss\n\n @staticmethod\n def _scale_target(targets_, scaled_size):\n targets = targets_.clone().unsqueeze(1).float()\n targets = F.interpolate(targets, size=scaled_size, mode='nearest')\n return targets.squeeze(1).long()\n\n\nclass FSOhemCELoss(nn.Module):\n def __init__(self, configer):\n super(FSOhemCELoss, self).__init__()\n self.configer = configer\n self.thresh = self.configer.get('loss', 'params')['ohem_thresh']\n self.min_kept = max(1, self.configer.get('loss', 'params')['ohem_minkeep'])\n weight = None\n if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):\n weight = self.configer.get('loss', 'params')['ce_weight']\n weight = torch.FloatTensor(weight).cuda()\n\n self.reduction = 'elementwise_mean'\n if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):\n self.reduction = self.configer.get('loss', 'params')['ce_reduction']\n\n ignore_index = -1\n if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):\n ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']\n\n self.ignore_label = ignore_index\n self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction='none')\n\n def forward(self, predict, target, **kwargs):\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n weight (Tensor, optional): a manual rescaling weight given to each class.\n If given, has to be a Tensor of size \"nclasses\"\n \"\"\"\n prob_out = F.softmax(predict, dim=1)\n tmp_target = target.clone()\n tmp_target[tmp_target == self.ignore_label] = 0\n prob = prob_out.gather(1, tmp_target.unsqueeze(1))\n mask = target.contiguous().view(-1,) != self.ignore_label\n sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()\n min_threshold = sort_prob[min(self.min_kept, sort_prob.numel() - 1)]\n threshold = max(min_threshold, self.thresh)\n loss_matirx = self.ce_loss(predict, target).contiguous().view(-1,)\n sort_loss_matirx = loss_matirx[mask][sort_indices]\n select_loss_matrix = sort_loss_matirx[sort_prob < threshold]\n if self.reduction == 'sum':\n return select_loss_matrix.sum()\n elif self.reduction == 'elementwise_mean':\n return select_loss_matrix.mean()\n else:\n raise NotImplementedError('Reduction Error!')\n\n\nclass FSAuxOhemCELoss(nn.Module):\n def __init__(self, configer=None):\n super(FSAuxOhemCELoss, self).__init__()\n self.configer = configer\n self.ce_loss = FSCELoss(self.configer)\n if self.configer.get('loss', 'loss_type') == 'fs_auxohemce_loss':\n self.ohem_ce_loss = FSOhemCELoss(self.configer)\n else:\n assert self.configer.get('loss', 'loss_type') == 'fs_auxslowohemce_loss'\n self.ohem_ce_loss = FSSlowOhemCELoss(self.configer)\n\n def forward(self, inputs, targets, **kwargs):\n aux_out, seg_out = inputs\n seg_loss = self.ohem_ce_loss(seg_out, targets)\n aux_loss = self.ce_loss(aux_out, targets)\n loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss\n loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss\n return loss\n\n\nclass FSAuxCELoss(nn.Module):\n def __init__(self, configer=None):\n super(FSAuxCELoss, self).__init__()\n self.configer = configer\n self.ce_loss = FSCELoss(self.configer)\n\n def forward(self, inputs, targets, **kwargs):\n aux_out, seg_out = inputs\n seg_loss = self.ce_loss(seg_out, 
targets)\n aux_loss = self.ce_loss(aux_out, targets)\n loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss\n loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss\n return loss\n\n\nclass SegFixLoss(nn.Module):\n \"\"\"\n We predict a binary mask to categorize the boundary pixels as class 1 and otherwise as class 0\n Based on the pixels predicted as 1 within the binary mask, we further predict the direction for these\n pixels.\n \"\"\"\n\n def __init__(self, configer=None):\n super().__init__()\n self.configer = configer\n self.ce_loss = FSCELoss(self.configer)\n\n def calc_weights(self, label_map, num_classes):\n\n weights = []\n for i in range(num_classes):\n weights.append((label_map == i).sum().data)\n weights = torch.FloatTensor(weights)\n weights_sum = weights.sum()\n return (1 - weights / weights_sum).cuda() \n\n def forward(self, inputs, targets, **kwargs):\n\n from lib.utils.helpers.offset_helper import DTOffsetHelper\n\n pred_mask, pred_direction = inputs\n\n seg_label_map, distance_map, angle_map = targets[0], targets[1], targets[2]\n gt_mask = DTOffsetHelper.distance_to_mask_label(distance_map, seg_label_map, return_tensor=True)\n\n gt_size = gt_mask.shape[1:]\n mask_weights = self.calc_weights(gt_mask, 2)\n\n pred_direction = F.interpolate(pred_direction, size=gt_size, mode=\"bilinear\", align_corners=True)\n pred_mask = F.interpolate(pred_mask, size=gt_size, mode=\"bilinear\", align_corners=True)\n mask_loss = F.cross_entropy(pred_mask, gt_mask, weight=mask_weights, ignore_index=-1)\n\n mask_threshold = float(os.environ.get('mask_threshold', 0.5))\n binary_pred_mask = torch.softmax(pred_mask, dim=1)[:, 1, :, :] > mask_threshold\n\n gt_direction = DTOffsetHelper.angle_to_direction_label(\n angle_map,\n seg_label_map=seg_label_map,\n extra_ignore_mask=(binary_pred_mask == 0),\n return_tensor=True\n )\n\n direction_loss_mask = gt_direction != -1\n direction_weights = self.calc_weights(gt_direction[direction_loss_mask], pred_direction.size(1))\n direction_loss = F.cross_entropy(pred_direction, gt_direction, weight=direction_weights, ignore_index=-1)\n\n if self.training \\\n and self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0 \\\n and torch.cuda.current_device() == 0:\n Log.info('mask loss: {} direction loss: {}.'.format(mask_loss, direction_loss))\n\n mask_weight = float(os.environ.get('mask_weight', 1))\n direction_weight = float(os.environ.get('direction_weight', 1))\n\n return mask_weight * mask_loss + direction_weight * direction_loss" ]
[ [ "torch.FloatTensor", "torch.nn.functional.softmax", "torch.nn.CrossEntropyLoss", "torch.cuda.current_device", "torch.nn.functional.cross_entropy", "torch.softmax", "torch.nn.functional.interpolate" ] ]
paulfioravanti/Reinforcement-Learning-In-Motion
[ "e09afd23b82040d76c95875b077ba0a5af517470" ]
[ "Unit-7-The-Cartpole/q_learning.py" ]
[ "import gym\nimport numpy as np\nfrom util import plot_running_average\n\n# pylint: disable-msg=redefined-outer-name\ndef max_action(estimates, state):\n values = np.array([estimates[state, i] for i in range(2)])\n action = np.argmax(values)\n return action\n\ndef get_state(observation):\n cart_x, cart_x_dot, cart_theta, cart_theta_dot = observation\n cart_x = int(np.digitize(cart_x, CART_POS_SPACE))\n cart_x_dot = int(np.digitize(cart_x_dot, CART_VEL_SPACE))\n cart_theta = int(np.digitize(cart_theta, POLE_THETA_SPACE))\n cart_theta_dot = int(np.digitize(cart_theta_dot, POLE_THETA_VEL_SPACE))\n\n return (cart_x, cart_x_dot, cart_theta, cart_theta_dot)\n\n# discretize the spaces\nPOLE_THETA_SPACE = np.linspace(-0.20943951, 0.20943951, 10)\nPOLE_THETA_VEL_SPACE = np.linspace(-4, 4, 10)\nCART_POS_SPACE = np.linspace(-2.4, 2.4, 10)\nCART_VEL_SPACE = np.linspace(-4, 4, 10)\n\nif __name__ == \"__main__\":\n ENV = gym.make(\"CartPole-v0\")\n # model hyperparameters\n STEP_SIZE = 0.1\n DISCOUNT = 1.0\n EPSILON = 1.0\n\n # construct state space\n STATES = []\n for i in range(len(CART_POS_SPACE) + 1):\n for j in range(len(CART_VEL_SPACE) + 1):\n for k in range(len(POLE_THETA_SPACE) + 1):\n for l in range(len(POLE_THETA_VEL_SPACE) + 1):\n STATES.append((i, j, k, l))\n\n ESTIMATES = {}\n for state in STATES:\n for action in range(2):\n ESTIMATES[state, action] = 0\n\n NUM_EPISODES = 50000\n REPORT_INTERVAL = 5000\n TOTAL_REWARDS = np.zeros(NUM_EPISODES)\n for i in range(NUM_EPISODES):\n if i % REPORT_INTERVAL == 0:\n print(\"starting game \", i)\n done = False\n episode_rewards = 0\n observation = ENV.reset()\n while not done:\n state = get_state(observation)\n rand = np.random.random()\n if rand < (1 - EPSILON):\n action = max_action(ESTIMATES, state)\n else:\n action = ENV.action_space.sample()\n observation_, reward, done, info = ENV.step(action)\n episode_rewards += reward\n state_ = get_state(observation_)\n action_ = max_action(ESTIMATES, state_)\n ESTIMATES[state, action] = (\n ESTIMATES[state, action] + STEP_SIZE\n * (\n reward + DISCOUNT\n * ESTIMATES[state_, action_] - ESTIMATES[state, action]\n )\n )\n observation = observation_\n if EPSILON - 2 / NUM_EPISODES > 0:\n EPSILON -= 2 / NUM_EPISODES\n else:\n EPSILON = 0\n TOTAL_REWARDS[i] = episode_rewards\n\n plot_running_average(TOTAL_REWARDS)\n" ]
[ [ "numpy.zeros", "numpy.argmax", "numpy.random.random", "numpy.linspace", "numpy.digitize" ] ]
vardhanaleti/AdversarialQuerying
[ "f2ed5960f345ba448eeb4c9a1f5c819c41d092da" ]
[ "models/R2D2_embedding.py" ]
[ "import torch.nn as nn\nimport torch\nimport math\n\n# Embedding network used in Meta-learning with differentiable closed-form solvers\n# (Bertinetto et al., in submission to NIPS 2018).\n# They call the ridge rigressor version as \"Ridge Regression Differentiable Discriminator (R2D2).\"\n \n# Note that they use a peculiar ordering of functions, namely conv-BN-pooling-lrelu,\n# as opposed to the conventional one (conv-BN-lrelu-pooling).\n \ndef R2D2_conv_block(in_channels, out_channels, retain_activation=True, keep_prob=1.0, activation='LeakyReLU'):\n block = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.MaxPool2d(2)\n )\n if retain_activation:\n if activation == 'LeakyReLU':\n block.add_module(\"LeakyReLU\", nn.LeakyReLU(0.1))\n elif activation == 'ReLU':\n block.add_module(\"ReLU\", nn.ReLU())\n elif activation == 'Softplus':\n block.add_module(\"Softplus\", nn.Softplus())\n\n if keep_prob < 1.0:\n block.add_module(\"Dropout\", nn.Dropout(p=1 - keep_prob, inplace=False))\n\n return block\n\nclass R2D2Embedding(nn.Module):\n def __init__(self, x_dim=3, h1_dim=96, h2_dim=192, h3_dim=384, z_dim=512, \\\n retain_last_activation=False, denoise = False, activation='LeakyReLU'):\n super(R2D2Embedding, self).__init__()\n\n self.block1 = R2D2_conv_block(x_dim, h1_dim, activation=activation)\n self.block2 = R2D2_conv_block(h1_dim, h2_dim, activation=activation)\n self.block3 = R2D2_conv_block(h2_dim, h3_dim, keep_prob=0.9, activation=activation)\n self.denoise = denoise\n # In the last conv block, we disable activation function to boost the classification accuracy.\n # This trick was proposed by Gidaris et al. (CVPR 2018).\n # With this trick, the accuracy goes up from 50% to 51%.\n # Although the authors of R2D2 did not mention this trick in the paper,\n # we were unable to reproduce the result of Bertinetto et al. without resorting to this trick.\n self.block4 = R2D2_conv_block(h3_dim, z_dim, retain_activation=retain_last_activation, keep_prob=0.7)\n \n def forward(self, x):\n b1 = self.block1(x)\n b2 = self.block2(b1)\n if self.denoise:\n #print(\"before denoise\", b2.size())\n _, n_in, H, W = b2.size()\n theta = nn.Conv2d(n_in, int(n_in / 2), 1,\n stride=1, bias=False).to('cuda')\n phi = nn.Conv2d(n_in, int(n_in / 2), 1,\n stride=1, bias=False).to('cuda')\n g = b2\n f = torch.einsum('niab,nicd->nabcd', theta(b2), phi(b2))\n orig_shape = f.size()\n f = torch.reshape(f, (-1, H * W, H * W))\n f = f / math.sqrt(n_in)\n softmax = torch.nn.Softmax(dim = 0)\n f = softmax(f)\n f = torch.reshape(f, orig_shape)\n f = torch.einsum('nabcd,nicd->niab', f, g)\n final_conv = nn.Conv2d(f.size()[1], f.size()[1], 1, stride=1, bias=False).to('cuda')\n f = final_conv(f)\n b2 = b2 + f\n #print(\"after denoise\", b2.size())\n b3 = self.block3(b2)\n b4 = self.block4(b3)\n # Flatten and concatenate the output of the 3rd and 4th conv blocks as proposed in R2D2 paper.\n return torch.cat((b3.view(b3.size(0), -1), b4.view(b4.size(0), -1)), 1)\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.Softmax", "torch.reshape", "torch.nn.Conv2d", "torch.einsum", "torch.nn.Softplus", "torch.nn.ReLU", "torch.nn.Dropout", "torch.nn.LeakyReLU" ] ]
brianjo/pytorch
[ "fd8004b42e2a2348ec8837e3fb524b960c1b4cdb" ]
[ "torch/testing/_internal/distributed/distributed_test.py" ]
[ "import copy\nimport itertools\nimport math\nimport os\nimport random\nimport sys\nimport tempfile\nimport time\nfrom collections import namedtuple\nfrom contextlib import contextmanager, suppress\nfrom datetime import timedelta\nfrom functools import reduce\nfrom typing import Union, NamedTuple, Callable, Any\n\nimport torch\nimport torch.cuda\nimport torch.distributed as dist\nimport torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD\nimport torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD\nimport torch.distributed.algorithms.model_averaging.averagers as averagers\nimport torch.distributed.algorithms.model_averaging.utils as model_averaging_utils\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR\nfrom torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT\nfrom torch.cuda.amp import GradScaler, autocast\nfrom torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default\nfrom torch.distributed.algorithms.ddp_comm_hooks import (\n quantization as quantization_hooks,\n)\nfrom torch.distributed.distributed_c10d import (\n get_world_size,\n _get_default_group,\n AllreduceOptions,\n GroupMember,\n)\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars\nfrom torch.testing._internal.common_distributed import (\n MultiProcessTestCase,\n TEST_SKIPS,\n initialize_temp_directories,\n cleanup_temp_dir,\n simple_sparse_reduce_tests,\n skip_if_rocm,\n skip_if_small_worldsize,\n skip_if_lt_x_gpu,\n nccl_skip_if_lt_x_gpu,\n skip_if_no_gpu,\n require_n_gpus_for_nccl_backend,\n requires_nccl_version,\n captured_output,\n with_nccl_blocking_wait,\n with_dist_debug_levels,\n verify_ddp_error_logged,\n)\nfrom torch.testing._internal.common_utils import (\n IS_MACOS,\n IS_WINDOWS,\n FILE_SCHEMA,\n IS_FBCODE,\n NO_MULTIPROCESSING_SPAWN,\n sandcastle_skip,\n sandcastle_skip_if,\n)\n\nif not IS_WINDOWS:\n import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer\n from torch.distributed.optim.functional_sgd import _FunctionalSGD\n\nfrom torch.utils.data.distributed import DistributedSampler\n\ntry:\n import torchvision\n\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\n\nif sys.platform == \"win32\":\n import msvcrt\nelse:\n import fcntl\n\n\nclass Foo:\n def __init__(self, x):\n # Can be tensor or int\n self.x = x\n\n def __eq__(self, other):\n def eq(value, other):\n if isinstance(value, torch.Tensor):\n return torch.equal(value, other)\n return value == other\n\n for attr, value in self.__dict__.items():\n other_value = other.__dict__[attr]\n if not eq(value, other_value):\n return False\n return True\n\n\nf = Foo(10)\nf.bar = 1\n\nfoo_cpu_tensor = Foo(torch.randn(3, 3))\n\n\nCOLLECTIVES_OBJECT_TEST_LIST = [\n {\"key1\": 3, \"key2\": 4, \"key3\": {\"nested\": True}},\n f,\n foo_cpu_tensor,\n \"foo\",\n [1, 2, True, \"string\", [4, 5, \"nested\"]],\n]\n\n# Allowlist of distributed backends where profiling collectives is supported.\nPROFILING_SUPPORTED_BACKENDS = [\n dist.Backend.NCCL,\n dist.Backend.GLOO,\n dist.Backend.MPI,\n]\n\n# Allowlist of distributed backends where profiling is supported with use_cuda=True\nCUDA_PROFILING_SUPPORTED_BACKENDS = [\n dist.Backend.GLOO,\n dist.Backend.MPI,\n dist.Backend.NCCL,\n]\n\n# Allowlist of distributed backends where profiling is supported for p2p ops\nSEND_RECV_PROFILING_SUPPORTED_BACKENDS = [\n 
dist.Backend.MPI,\n dist.Backend.GLOO,\n dist.Backend.NCCL,\n]\n\n# Dummy NamedTuple data structures to test DDP support for NamedTuple types.\nEXPECTED_FIELDS = (\"a\", \"b\")\nTestNamedTupleInput_0 = namedtuple(\"NamedTuple\", EXPECTED_FIELDS)\n\n\nclass TestNamedTupleInput_1(NamedTuple):\n a: torch.tensor\n b: torch.tensor\n\n\nskipIfNoTorchVision = sandcastle_skip_if(not HAS_TORCHVISION, \"no torchvision\")\n\nBACKEND = os.environ[\"BACKEND\"]\nINIT_METHOD = os.getenv(\"INIT_METHOD\", \"env://\")\n\nDEFAULT_TIMEOUT = 300\nCUSTOMIZED_TIMEOUT = {\"test_DistributedDataParallel\": 500}\n\n\ndef get_profiling_event(postfix, profiler):\n event_list = (\n profiler.events()\n if isinstance(profiler, torch.profiler.profile)\n else profiler.function_events\n )\n return [event for event in event_list if event.name.endswith(postfix)]\n\n\n# Base error message substring on unfinished reductions.\nddp_prev_reduction_unfinished_str = (\n \"Expected to have finished reduction in the prior iteration\"\n)\n# Error message substring when find_unused_parameters=True has not been passed\nddp_recommend_find_unused_params_str = (\n \"passing the keyword argument `find_unused_parameters=True`\"\n)\n# Error message substring when find_unused_parameters=True is enabled\nddp_find_unused_params_enabled_str = \"Since `find_unused_parameters=True` is enabled\"\n# Error message substring for possibility of not all model outputs being used\n# in loss computation\nddp_outputs_not_used_in_loss_str = (\n \"`forward` function outputs participate in calculating loss\"\n)\n# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG\nddp_suggest_debug_mode_str = (\n \"set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL\"\n)\n\n\nclass DDPUnevenTestInput(NamedTuple):\n name: str\n model: nn.Module\n inp: Union[torch.tensor, tuple]\n sync_interval: int\n throw_on_early_termination: bool = False\n hook: Callable = None\n state: Any = None\n\n\nclass _FC2(nn.Module):\n def __init__(self):\n super(_FC2, self).__init__()\n self.fc = nn.Linear(10, 50, bias=True)\n self.fc.bias.requires_grad = False\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = _FC2()\n self.fc3 = nn.Linear(50, 4, bias=False)\n self.relu = nn.ReLU()\n self.no_grad_param = nn.Parameter(\n torch.tensor([2, 2]).long(), requires_grad=False\n )\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n return F.softmax(x, dim=1)\n\n\nclass LargeNet(nn.Module):\n def __init__(self):\n super(LargeNet, self).__init__()\n self.fc1 = nn.Linear(1000, 2000, bias=False)\n self.fc2 = nn.Linear(2000, 500, bias=False)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\nclass Task(nn.Module):\n def __init__(self):\n super().__init__()\n self.p = nn.Parameter(torch.ones(2, 2))\n\n def forward(self, x):\n return self.p + x\n\n\nclass BatchNormNet(nn.Module):\n def __init__(self, affine=True):\n super(BatchNormNet, self).__init__()\n self.fc1 = nn.Linear(2, 40, bias=False)\n self.bn = nn.BatchNorm1d(4, affine=affine)\n self.fc2 = nn.Linear(40, 4, bias=False)\n\n def forward(self, x):\n x = torch.reshape(self.fc1(x), (-1, 4, 10))\n x = self.bn(x)\n x = torch.reshape(x, (-1, 40))\n x = self.fc2(x)\n return F.softmax(x, dim=1)\n\n\nclass TwoLinLayerNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.a = nn.Linear(10, 10, 
bias=False)\n self.b = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n a = self.a(x)\n b = self.b(x)\n return (a, b)\n\n\nclass EmbeddingNet(nn.Module):\n def __init__(self, rank):\n super().__init__()\n embedding_dim = 500 if rank == 0 else 50\n self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim)\n self.lin = nn.Linear(embedding_dim, 1)\n\n def forward(self, x):\n x = self.embedding(x)\n return self.lin(x)\n\n\nclass ControlFlowToyModel(nn.Module):\n def __init__(self):\n super(ControlFlowToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n self.lin2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n # Second layer is used dependent on input x.\n use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device))\n if use_second_layer:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n\nDDP_NET = Net()\nBN_NET = BatchNormNet()\nBN_NET_NO_AFFINE = BatchNormNet(affine=False)\nONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99)\n\n\ndef get_timeout(test_id):\n test_name = test_id.split(\".\")[-1]\n if test_name in CUSTOMIZED_TIMEOUT:\n return CUSTOMIZED_TIMEOUT[test_name]\n else:\n return DEFAULT_TIMEOUT\n\n\ndefault_pg_timeout = 60\n\nCUSTOM_PG_TIMEOUT = {\n # This test runs slowly and needs additional time to complete, otherwise can\n # be taken down by NCCL_ASYNC_ERROR_HANDLING\n \"test_ddp_uneven_inputs\": 300,\n # This test has a short timeout since it tests being taken down by\n # NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly.\n \"test_ddp_model_diff_across_ranks\": 5,\n}\n\n\ndef require_backend(backends):\n if BACKEND not in backends:\n return sandcastle_skip(\"Test requires backend to be one of %s\" % backends)\n return lambda func: func\n\n\ndef require_backends_available(backends):\n def check(backend):\n if backend == dist.Backend.GLOO:\n return dist.is_gloo_available()\n if backend == dist.Backend.NCCL:\n return dist.is_nccl_available()\n if backend == dist.Backend.MPI:\n return dist.is_mpi_available()\n return False\n\n if not all(check(dist.Backend(backend)) for backend in backends):\n return sandcastle_skip(\"Test requires backends to be available %s\" % backends)\n return lambda func: func\n\n\ndef require_world_size(world_size):\n if int(os.environ[\"WORLD_SIZE\"]) < world_size:\n return sandcastle_skip(\"Test requires world size of %d\" % world_size)\n return lambda func: func\n\n\ndef apply_hack_for_nccl():\n # This is a hack for a known NCCL issue using multiprocess\n # in conjunction with multiple threads to manage different GPUs which\n # may cause ncclCommInitRank to fail.\n # http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4\n # It slows down the performance of collective operations.\n # Without this setting NCCL might throw unhandled error.\n os.environ[\"NCCL_MAX_NRINGS\"] = \"1\"\n\n\n@contextmanager\ndef _lock():\n TEMP_DIR = os.environ[\"TEMP_DIR\"]\n lockfile = os.path.join(TEMP_DIR, \"lockfile\")\n with open(lockfile, \"w\") as lf:\n try:\n if sys.platform == \"win32\":\n msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1)\n yield\n else:\n fcntl.flock(lf.fileno(), fcntl.LOCK_EX)\n yield\n finally:\n if sys.platform == \"win32\":\n msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1)\n else:\n fcntl.flock(lf.fileno(), fcntl.LOCK_UN)\n lf.close()\n\n\ndef _build_tensor(size, value=None, dtype=torch.float, device_id=None):\n if value is None:\n value = size\n if device_id is None:\n return torch.empty(size, size, size, 
dtype=dtype).fill_(value)\n else:\n return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id)\n\n\ndef _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float):\n if value is None:\n value = size\n return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value)\n\n\ndef _create_autograd_profiler():\n return torch.autograd.profiler.profile(record_shapes=True)\n\n\ndef _create_torch_profiler():\n return torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n ],\n record_shapes=True,\n )\n\n\nclass Barrier(object):\n barrier_id = 0\n\n @classmethod\n def init(cls):\n cls.barrier_id = 0\n barrier_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"barrier\")\n for f_name in os.listdir(barrier_dir):\n os.unlink(os.path.join(barrier_dir, f_name))\n\n @classmethod\n def sync(cls, wait_for=None, timeout=10):\n if wait_for is None:\n wait_for = dist.get_world_size()\n cls.barrier_id += 1\n barrier_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"barrier\")\n pid = str(os.getpid())\n barrier_file = os.path.join(barrier_dir, pid)\n with _lock():\n with open(barrier_file, \"w\") as f:\n f.write(str(cls.barrier_id))\n\n start_time = time.time()\n while True:\n arrived = 0\n with _lock():\n for f_name in os.listdir(barrier_dir):\n with open(os.path.join(barrier_dir, f_name), \"r\") as f:\n data = f.read()\n if int(data) >= cls.barrier_id:\n arrived += 1\n if arrived == wait_for:\n break\n\n if time.time() - start_time > timeout:\n raise RuntimeError(\"barrier timeout\")\n time.sleep(0.1)\n\n\nclass TestDistBackend(MultiProcessTestCase):\n @classmethod\n def setUpClass(cls):\n os.environ[\"MASTER_ADDR\"] = str(MASTER_ADDR)\n os.environ[\"MASTER_PORT\"] = str(MASTER_PORT)\n # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests\n # such as test_batch_isend_irecv_nccl will test NCCL_BLOCKING_WAIT as\n # expected.\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"1\"\n super().setUpClass()\n\n def setUp(self):\n super().setUp()\n # initialize temp directories\n initialize_temp_directories()\n # initialize Barrier\n Barrier.init()\n # Skip return code checking for following tests as they are expected to\n # crash a process due to NCCL_ASYNC_ERROR_HANDLING.\n self.skip_return_code_checks = []\n\n def tearDown(self):\n cleanup_temp_dir()\n super().tearDown()\n\n @property\n def init_method(self):\n return \"{}{file_name}\".format(FILE_SCHEMA, file_name=self.file_name)\n\n @classmethod\n def _run(cls, rank, test_name, file_name, pipe):\n if BACKEND == \"nccl\" and not torch.cuda.is_available():\n sys.exit(TEST_SKIPS[\"no_cuda\"].exit_code)\n self = cls(test_name)\n self.rank = rank\n self.file_name = file_name\n\n if torch.cuda.is_available() and torch.cuda.device_count() < int(\n self.world_size\n ):\n sys.exit(TEST_SKIPS[f\"multi-gpu-{self.world_size}\"].exit_code)\n try:\n pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout)\n timeout = timedelta(seconds=pg_timeout_seconds)\n dist.init_process_group(\n init_method=self.init_method,\n backend=BACKEND,\n world_size=int(self.world_size),\n rank=self.rank,\n timeout=timeout,\n )\n except RuntimeError as e:\n if \"recompile\" in e.args[0]:\n sys.exit(TEST_SKIPS[\"backend_unavailable\"].exit_code)\n\n raise\n\n # Execute barrier prior to running test to ensure that every process\n # has finished initialization and that the following test\n # immediately exiting due to a skip doesn't cause flakiness.\n self._barrier()\n\n self.run_test(test_name, pipe)\n 
self._barrier()\n dist.destroy_process_group()\n sys.exit(0)\n\n # Needed since MultiProcessTestCase assumes a world_size of 4, but we\n # run these tests under other various world_sizes.\n @property\n def world_size(self):\n return os.environ[\"WORLD_SIZE\"]\n\n\nclass DistributedTest:\n class _DistTestBase:\n def _barrier(self, *args, **kwargs):\n Barrier.sync(*args, **kwargs)\n\n def _init_group_test(self, **kwargs):\n group = [1, 2]\n group_id = dist.new_group(group, **kwargs)\n rank = dist.get_rank()\n if rank not in group:\n return ([], None, rank)\n\n return (group, group_id, rank)\n\n def _init_full_group_test(self, **kwargs):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.new_group(**kwargs)\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n def _init_global_test(self):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.group.WORLD\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n # HELPER FOR MULTIGPU TESTS\n def _init_multigpu_helper(self):\n \"\"\"Multigpu tests are designed to simulate the multi nodes with multi\n GPUs on each node. Nccl backend requires equal #GPUs in each process.\n On a single node, all visible GPUs are evenly\n divided to subsets, each process only uses a subset.\n \"\"\"\n nGPUs = torch.cuda.device_count()\n world_size = dist.get_world_size()\n visible_devices = range(nGPUs)\n\n if BACKEND == \"nccl\":\n apply_hack_for_nccl()\n\n # If rank is lesser than or equal to number of available GPU's\n # then each rank can be mapped to corresponding GPU.\n nGPUs_per_process = 1\n if world_size > nGPUs:\n nGPUs_per_process = nGPUs // world_size\n rank_to_GPU = {\n i: list(\n visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]\n )\n for i in range(world_size)\n }\n return rank_to_GPU\n\n def test_dump_DDP_relevant_env_vars(self):\n with captured_output() as (out, _):\n _dump_DDP_relevant_env_vars()\n lines = out.getvalue().splitlines()\n\n def format_line(var):\n return \"env:%s=%s\" % (\n var,\n os.environ[var] if var in os.environ else \"N/A\",\n )\n\n # Check relevant env vars\n vars = [\n \"MASTER_ADDR\",\n \"MASTER_PORT\",\n \"WORLD_SIZE\",\n \"NCCL_TOPO_DUMP_FILE\", # N/A\n \"NCCL_ASYNC_ERROR_HANDLING\",\n ]\n for var in vars:\n line = format_line(var)\n self.assertIn(line, lines)\n # Check irrelevant env vars\n vars = [\n \"xxx\",\n \"yyy\",\n \"zzz\",\n ]\n for var in vars:\n line = format_line(var)\n self.assertNotIn(line, lines)\n\n # GET RANK\n def test_get_rank(self):\n test_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"test_dir\")\n pid = str(os.getpid())\n num_processes = dist.get_world_size()\n with open(os.path.join(test_dir, pid), \"w\") as f:\n f.write(str(dist.get_rank()))\n\n self._barrier()\n\n all_ranks = set()\n for f_name in os.listdir(test_dir):\n with open(os.path.join(test_dir, f_name), \"r\") as f:\n all_ranks.add(int(f.read()))\n self.assertEqual(len(all_ranks), num_processes)\n\n self._barrier()\n\n if dist.get_rank() == 0:\n for f_name in os.listdir(test_dir):\n os.unlink(os.path.join(test_dir, f_name))\n\n self._barrier()\n\n def test_get_backend(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n backend_str = BACKEND.lower()\n self.assertEqual(dist.get_backend(), backend_str)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_backend(group_id), backend_str)\n else:\n with self.assertRaisesRegex(\n RuntimeError, \"Invalid process group specified\"\n ):\n dist.get_backend(group_id)\n\n 
def test_Backend_enum_class(self):\n # test parsing\n backend = BACKEND.lower()\n self.assertEqual(dist.Backend(BACKEND.upper()), backend)\n self.assertEqual(dist.Backend(BACKEND), backend)\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'undefined'\"):\n dist.Backend(\"undefined\")\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'xYz'\"):\n dist.Backend(\"xYz\")\n with self.assertRaises(ValueError):\n dist.Backend(None)\n with self.assertRaises(ValueError):\n dist.Backend(3)\n with self.assertRaises(ValueError):\n dist.Backend([\"gloo\"])\n\n # Test destroy\n def test_destroy_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of group\n def test_get_rank_size_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_world_size(group_id), 2)\n self.assertTrue(dist.get_rank(group_id) in list(range(2)))\n else:\n self.assertEqual(dist.get_world_size(group_id), -1)\n self.assertEqual(dist.get_rank(group_id), -1)\n\n # Test destroy full groups\n def test_destroy_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of full group\n def test_get_rank_size_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())\n self.assertEqual(dist.get_rank(group_id), dist.get_rank())\n\n def _test_barrier_timeout(self, group_id, timeout):\n local_rank = dist.get_rank(group_id)\n\n # Only execute barrier on rank == 0, causing it to timeout\n if local_rank == 0:\n expected_time = time.time() + timeout.total_seconds()\n # In debug mode, we execute a monitored_barrier before the\n # collective, so assert on that.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL:\n exception_ctx = self.assertRaisesRegex(\n Exception, \"failed to pass monitoredBarrier\"\n )\n else:\n exception_ctx = self.assertRaisesRegex(\n Exception, \" (Timed out|closed|timeout) \"\n )\n with exception_ctx:\n dist.barrier(group_id)\n self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1)\n else:\n pass\n\n @sandcastle_skip_if(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n @sandcastle_skip_if(\n not INIT_METHOD.startswith(\"file://\"),\n \"Requires file:// initialization method. 
\"\n + \"Both tcp:// and env:// rely on the TCP store for which \"\n \"reinitialization has proven racy.\",\n )\n def test_barrier_timeout_global(self):\n dist.destroy_process_group()\n\n # Explicitly pass world size to the barrier because we've\n # just destroyed any state in torch.distributed.\n self._barrier(wait_for=int(os.environ[\"WORLD_SIZE\"]))\n\n # Reinitialize global process group\n timeout = timedelta(seconds=1)\n dist.init_process_group(\n init_method=INIT_METHOD,\n backend=BACKEND,\n world_size=int(os.environ[\"WORLD_SIZE\"]),\n rank=self.rank,\n timeout=timeout,\n )\n self._test_barrier_timeout(dist.group.WORLD, timeout)\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_group(self):\n timeout = timedelta(seconds=5)\n _, group_id, _ = self._init_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n @sandcastle_skip_if(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_full_group(self):\n timeout = timedelta(seconds=1)\n _, group_id, _ = self._init_full_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n # This test helper can only be used when using the Gloo or NCCL backend\n # **and** both the Gloo and NCCL backends are available.\n # See the @skip annotations below.\n def _test_group_override_backend(self, initializer):\n if BACKEND == \"gloo\":\n new_backend = \"nccl\"\n if BACKEND == \"nccl\":\n new_backend = \"gloo\"\n\n group, group_id, rank = initializer(backend=new_backend)\n if group_id is None:\n return\n\n if new_backend == \"gloo\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupGloo))\n if new_backend == \"nccl\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupNCCL))\n\n self.assertEqual(rank, group[dist.get_rank(group_id)])\n self.assertEqual(len(group), dist.get_world_size(group_id))\n\n # Pin device (so we avoid NCCL race conditions/deadlocks).\n group_rank = dist.get_rank(group_id)\n torch.cuda.set_device(group_rank)\n\n # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL).\n tensor = _build_tensor(2, value=group_rank).cuda()\n dist.broadcast(tensor, src=group[0], group=group_id)\n self.assertEqual(_build_tensor(2, value=0), tensor.to(\"cpu\"))\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @require_world_size(3)\n @skip_if_lt_x_gpu(2)\n def test_backend_group(self):\n self._test_group_override_backend(self._init_group_test)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(3)\n def test_backend_full_group(self):\n self._test_group_override_backend(self._init_full_group_test)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(2)\n def test_new_subgroups(self):\n subgroup_size = 2\n cur_subgroup, subgroups = dist.new_subgroups(subgroup_size)\n\n world_size = dist.get_world_size()\n self.assertEqual(cur_subgroup.size(), subgroup_size)\n self.assertEqual(len(subgroups), world_size / subgroup_size)\n self.assertFalse(dist._rank_not_in_group(cur_subgroup))\n\n for subgroup in subgroups:\n dist.destroy_process_group(subgroup)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on 
CUDA devices\",\n )\n @skip_if_no_gpu\n def test_new_subgroups_group_size_exceeds_world_size(self):\n with self.assertRaisesRegex(\n ValueError, \"The arg 'group_size' must not exceed the world size\"\n ):\n dist.new_subgroups(100)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_world_size_not_divisible_by_group_size(self):\n with self.assertRaisesRegex(\n ValueError, \"The world size must be divisible by 'group_size'\"\n ):\n dist.new_subgroups(3)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_by_enumeration(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n cur_subgroup, subgroups = dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0, 2], [1, 3]]\n )\n if device_id >= 4:\n self.assertIsNone(cur_subgroup)\n else:\n self.assertEqual(cur_subgroup.size(), 2)\n self.assertEqual(len(subgroups), 2)\n if device_id == 0 or device_id == 2:\n self.assertEqual(cur_subgroup, subgroups[0])\n else:\n self.assertEqual(cur_subgroup, subgroups[1])\n\n for subgroup in subgroups:\n dist.destroy_process_group(subgroup)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n world_size = get_world_size(group_id)\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"The new group's rank should be within the the world_size set by init_process_group\",\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0, 1], [world_size, 2]]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_no_gpu\n def test_new_subgroups_by_enumeration_negative_input_rank(self):\n group, group_id, rank = self._init_global_test()\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"The new group's rank should be within the the world_size set by init_process_group\",\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[-1, -2], [-3, -4]]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_overlap_not_allowed(self):\n with self.assertRaisesRegex(\n ValueError, \"Rank 1 has appeared in both subgroup\"\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0], [1, 2], [1, 3]]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_lt_x_gpu(2)\n def test_average_parameters(self):\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n\n model = nn.Sequential(\n nn.Conv2d(3, 3, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Linear(1, 5, bias=False),\n ).cuda(device_id)\n # Test global model 
averaging\n for p in model.parameters():\n p.data = torch.ones_like(p.data)\n model_averaging_utils.average_parameters(\n params=model.parameters(), process_group=None\n )\n # Every element will be the same as the input.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data))\n\n # Test partial model averaging\n for p in model.parameters():\n p.data = torch.ones_like(p.data) * rank\n group_nccl = dist.new_group(ranks=[0, 1], backend=\"nccl\")\n model_averaging_utils.average_parameters(\n params=model.parameters(), process_group=group_nccl\n )\n if not dist._rank_not_in_group(group_nccl):\n # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data) * 0.5)\n else:\n # Every element on device not in the subgroup should remain the same.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data) * rank)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_lt_x_gpu(2)\n def test_periodic_model_averager(self):\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n world_size = dist.get_world_size()\n\n model = nn.Linear(1, 5, bias=False).cuda(device_id)\n param = next(model.parameters())\n tensor = torch.ones_like(param.data) * rank\n expected_avg_tensor = (\n torch.ones_like(param.data) * sum(range(world_size)) / world_size\n )\n period = 4\n for warmup_steps in [12, 13, 14, 15]:\n averager = averagers.PeriodicModelAverager(period=period, warmup_steps=warmup_steps)\n for step in range(0, 20):\n # Reset the parameters at every step.\n param.data = copy.deepcopy(tensor)\n averager.average_parameters(model.parameters())\n if step >= warmup_steps and (step - warmup_steps) % period == 0:\n self.assertEqual(param.data, expected_avg_tensor)\n else:\n # No model averaging, so the parameters are not updated.\n self.assertEqual(param.data, tensor)\n\n # NCCL Batch SEND RECV\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n p2p_op_list = []\n\n for val in [\"1\", \"0\"]:\n os.environ[\"NCCL_BLOCKING_WAIT\"] = val\n for src in range(0, dist.get_world_size()):\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(src + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_self_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n p2p_op_list = []\n\n if rank == 0:\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, 0)\n 
p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, 0)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n @skip_if_no_gpu\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_no_rank_zero_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n p2p_op_list = []\n\n if rank == 1:\n peer = 2\n elif rank == 2:\n peer = 1\n\n if rank in [1, 2]:\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, peer)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, peer)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # GLOO Batch SEND RECV CPU\n @sandcastle_skip_if(BACKEND != \"gloo\", \"GLOO Batch Send Recv CPU\")\n def test_batch_isend_irecv_gloo(self):\n self._barrier()\n rank = dist.get_rank()\n p2p_op_list = []\n\n for src in range(0, dist.get_world_size()):\n if src == rank:\n continue\n send_tensor = _build_tensor(rank + 1)\n recv_tensor = _build_tensor(src + 1, value=-1)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # GLOO Batch SEND RECV CPU with provided tags\n @sandcastle_skip_if(BACKEND != \"gloo\", \"GLOO Batch Send Recv CPU\")\n def test_batch_isend_irecv_gloo_tags(self):\n self._barrier()\n rank = dist.get_rank()\n p2p_op_list = []\n\n for src in range(0, dist.get_world_size()):\n if src == rank:\n continue\n send_tensor = _build_tensor(rank + 1)\n recv_tensor = _build_tensor(src + 1, value=-1)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # NCCL Batch SEND RECV Tensor Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_tensor_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(\n RuntimeError, \"Tensors must be CUDA and dense\"\n ):\n send_tensor = _build_tensor(rank + 1)\n send_op = dist.P2POp(dist.isend, send_tensor, 1)\n req = dist.batch_isend_irecv([send_op])\n req.wait()\n\n # NCCL Batch SEND RECV Op Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_op_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(RuntimeError, \"^Invalid ``op``\"):\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n send_op = dist.P2POp(dist.broadcast, 
send_tensor, 1)\n req = dist.batch_isend_irecv([send_op])\n req.wait()\n\n # NCCL Batch SEND RECV p2p_op_list Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_op_list_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(RuntimeError, \"^Invalid ``p2p_op_list``\"):\n send_tensor = _build_tensor(rank + 1)\n req = dist.batch_isend_irecv([1, 2])\n req.wait()\n\n # NCCL Batch SEND RECV Mixed Backend Error\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_mixed_backend_err(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n group_gloo = dist.new_group(ranks=[0, 1], backend=\"gloo\")\n group_nccl = dist.new_group(ranks=[0, 1], backend=\"nccl\")\n if rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, \"All groups need to use the same backend\"\n ):\n send_tensor = _build_tensor(rank + 1)\n send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo)\n send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl)\n req = dist.batch_isend_irecv([send_op_gloo, send_op_nccl])\n req.wait()\n\n # NCCL SEND RECV\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def _test_send_recv_nccl(self, profiler_ctx=None):\n # TODO: now that nccl send/recv is supported, there does not seem to\n # be a need to have nccl send/recv be tested separately.\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n\n tensor = _build_tensor(rank + 1, device_id=device_id)\n profiler_cls = profiler_ctx if profiler_ctx is not None else suppress()\n with profiler_cls as prof:\n for src in range(0, dist.get_world_size()):\n if src == rank:\n # Send mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n dist.send(tensor, dst)\n else:\n # Recv mode\n expected_tensor = _build_tensor(src + 1)\n output_tensor = _build_tensor(\n src + 1, value=-1, device_id=device_id\n )\n dist.recv(output_tensor, src)\n self.assertEqual(output_tensor, expected_tensor)\n\n self._barrier()\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n self.assertTrue(events)\n # Event order is not deterministic, so simply assert their shape\n # is found in the following list.\n expected_shapes = [\n [[rank + 1] * 3] for rank in range(dist.get_world_size())\n ]\n for event in events:\n self.assertTrue(event.input_shapes in expected_shapes)\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_send_recv_nccl(self):\n self._test_send_recv_nccl()\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_send_recv_nccl_autograd_profiler(self):\n profiler_ctx = 
torch.autograd.profiler.profile(record_shapes=True)\n self._test_send_recv_nccl(profiler_ctx)\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_nccl_torch_profiler(self):\n profiler_ctx = torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ],\n record_shapes=True,\n )\n self._test_send_recv_nccl(profiler_ctx)\n\n # SEND RECV\n def _test_send_recv(self, profiler_ctx):\n rank = dist.get_rank()\n send_size = rank + 1\n tensor = _build_tensor(send_size)\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for src in range(0, dist.get_world_size()):\n if src == rank:\n # Send mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n dist.send(tensor, dst)\n else:\n # Recv mode\n recv_size = src + 1\n expected_tensor = _build_tensor(recv_size)\n output_tensor = _build_tensor(recv_size, value=-1)\n dist.recv(output_tensor, src)\n self.assertEqual(output_tensor, expected_tensor)\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from all other ranks.\n event_count = sum(e.count for e in events)\n expected_event_count = dist.get_world_size() - 1\n self.assertEqual(event_count, expected_event_count)\n # Event order is not deterministic, so simply assert their shape\n # is found in the following list.\n expected_shapes = [\n [[rank + 1] * 3] for rank in range(dist.get_world_size())\n ]\n for event in events:\n self.assertTrue(event.is_async)\n self.assertTrue(event.input_shapes in expected_shapes)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv(self):\n self._test_send_recv(profiler_ctx=None)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_send_recv(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv(profiler_ctx=torch_profiler_ctx)\n\n # SEND RECV ANY SOURCE\n def _test_send_recv_any_source(self, profiler_ctx):\n rank = dist.get_rank()\n send_recv_size = 10\n tensor = _build_tensor(send_recv_size, value=rank)\n recv_ranks = list()\n irecv_ranks = list()\n\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n # Recv mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n\n for recv in [\"recv\", \"irecv\"]:\n output_tensor = _build_tensor(send_recv_size, value=-1)\n\n 
if recv == \"recv\":\n sender = dist.recv(output_tensor)\n recv_ranks.append(sender)\n elif recv == \"irecv\":\n work = dist.irecv(output_tensor)\n work.wait()\n sender = work._source_rank()\n irecv_ranks.append(sender)\n\n # Assert the scalar value \"sender\" that should be\n # equal to the rank of the sender is equal to all\n # values in the received tensor.\n self.assertTrue(output_tensor.eq(sender).all())\n else:\n # Send mode\n dist.send(tensor, dst) # recv\n dist.send(tensor, dst) # irecv\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recvAnySource\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from other rank twice.\n self.assertEqual(\n sum(event.count for event in events),\n 2 * (dist.get_world_size() - 1),\n )\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.input_shapes, [[send_recv_size] * 3])\n\n # Each rank would have 2 * (world_size - 1) sends, verify that\n # globally we receive the same amount on the other end.\n recv_ranks_tensor = torch.cat(\n (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0\n )\n global_recv_ranks = [\n torch.empty_like(recv_ranks_tensor)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(global_recv_ranks, recv_ranks_tensor)\n global_recv_ranks_list = []\n for tensor in global_recv_ranks:\n global_recv_ranks_list += tensor.tolist()\n\n from itertools import groupby\n\n global_recv_ranks_list.sort()\n frequency = [\n len(list(group)) for key, group in groupby(global_recv_ranks_list)\n ]\n self.assertEqual(dist.get_world_size(), len(frequency))\n self.assertEqual(\n [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n def test_send_recv_any_source(self):\n self._test_send_recv_any_source(profiler_ctx=None)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n def test_send_recv_any_source_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_any_source_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx)\n\n # SEND RECV WITH TAG\n def _test_send_recv_with_tag(self, profiler_ctx):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n send_recv_size = 10\n tensor = _build_tensor(send_recv_size, value=rank)\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for dst in range(0, world_size):\n if dst == rank:\n # Recv mode\n for src in range(0, world_size):\n if src == rank:\n continue\n output_tensor = _build_tensor(send_recv_size, value=-1)\n dist.recv(output_tensor, src, tag=src)\n self.assertTrue(output_tensor.eq(src).all())\n else:\n # Send mode\n dist.send(tensor, dst, tag=rank)\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in 
SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from all other ranks\n event_count = sum(e.count for e in events)\n expected_event_count = dist.get_world_size() - 1\n self.assertEqual(event_count, expected_event_count)\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, event_name)\n self.assertEqual(event.input_shapes, [[send_recv_size] * 3])\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_with_tag(self):\n self._test_send_recv_with_tag(profiler_ctx=None)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_with_tag_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n return self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_with_tag_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx)\n\n # ISEND\n def _test_isend(self, profiler_ctx):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n if rank == 0:\n requests = [\n dist.isend(_build_tensor(dest, 10), dest)\n for dest in range(1, world_size)\n ]\n for request in requests:\n request.wait()\n self.assertTrue(request.is_completed())\n else:\n tensor = _build_tensor(rank, -1)\n dist.recv(tensor, 0)\n self.assertEqual(tensor, _build_tensor(rank, 10))\n\n self._barrier()\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n expected_event_name = (\n f\"{backend}:send\" if rank == 0 else f\"{backend}:recv\"\n )\n events = get_profiling_event(expected_event_name, prof)\n event_count = sum(e.count for e in events)\n expected_count = dist.get_world_size() - 1 if rank == 0 else 1\n self.assertEqual(expected_count, event_count)\n # Event ordering is not guaranteed, so simply ensure the shapes are\n # found in the following map.\n expected_shapes = {\n r: [[r] * 3] for r in range(1, dist.get_world_size())\n }\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, expected_event_name)\n if rank == 0:\n self.assertTrue(\n event.input_shapes in expected_shapes.values()\n )\n else:\n self.assertEqual(event.input_shapes, expected_shapes[rank])\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support isend\")\n def test_isend(self):\n self._test_isend(profiler_ctx=None)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support isend\")\n def test_isend_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_isend(profiler_ctx=autograd_profiler_ctx)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support isend\")\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: 
https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_isend_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n self._test_isend(profiler_ctx=torch_profiler_ctx)\n\n # IRECV\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support irecv\")\n def test_irecv(self):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n if rank == 0:\n expected_tensors = [\n _build_tensor(src, -1) for src in range(1, world_size)\n ]\n requests = [\n dist.irecv(expected_tensors[src - 1], src)\n for src in range(1, world_size)\n ]\n\n for src in range(1, world_size):\n requests[src - 1].wait()\n self.assertTrue(requests[src - 1].is_completed())\n self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))\n else:\n tensor = _build_tensor(rank, 10)\n dist.send(tensor, 0)\n\n self._barrier()\n\n # BROADCAST\n def _test_broadcast_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n with_options=False,\n ):\n for dtype, value, requires_cuda in [\n (torch.float, -1e-10, False),\n (torch.double, -1e-100, False),\n (torch.half, -0.1, True),\n (torch.int8, -2, False),\n (torch.uint8, 129, False),\n (torch.int, -1e5, False),\n (torch.long, -1e15, False),\n ]:\n if requires_cuda and not cuda:\n continue\n for src in group:\n expected_tensor = _build_tensor(src + 1, value, dtype)\n if cuda:\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n if rank == src:\n if with_options:\n opts = dist.BroadcastOptions()\n opts.rootTensor = 0\n opts.rootRank = src\n self.call_dist_op(\n \":broadcast\",\n True,\n group_id.broadcast,\n [expected_tensor],\n opts,\n )\n else:\n self.call_dist_op(\n \":broadcast\",\n False,\n dist.broadcast,\n expected_tensor,\n src,\n group_id,\n )\n else:\n tensor = _build_tensor(src + 1, -1, dtype)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n if with_options:\n opts = dist.BroadcastOptions()\n opts.rootTensor = 0\n opts.rootRank = src\n self.call_dist_op(\n \":broadcast\", True, group_id.broadcast, [tensor], opts\n )\n else:\n self.call_dist_op(\n \":broadcast\",\n False,\n dist.broadcast,\n tensor,\n src,\n group_id,\n )\n self.assertEqual(tensor.size(), expected_tensor.size())\n self.assertEqual(\n tensor.ne(expected_tensor).max(), torch.tensor(False)\n )\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast(self):\n group, group_id, rank = self._init_global_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and Nccl backend supports CUDA allReduce\",\n )\n @skip_if_no_gpu\n def test_broadcast_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\",\n \"Only NCCL backend supports high priority stream\",\n )\n 
@skip_if_no_gpu\n def test_nccl_high_priority_stream(self):\n group, _, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n\n new_port = str(MASTER_PORT + 1)\n os.environ[\"MASTER_PORT\"] = new_port\n gen_iterator = dist.rendezvous(\"env://\", rank, dist.get_world_size())\n store, rank, size = next(gen_iterator)\n store = dist.PrefixStore(new_port, store)\n\n opts = dist.ProcessGroupNCCL.Options()\n opts.is_high_priority_stream = False\n group_id = dist.ProcessGroupNCCL(store, rank, size, opts)\n\n self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True)\n\n # REDUCE\n def _test_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n tensor = _build_tensor(src + 1).fill_(\n master_value if rank == src else worker_value\n )\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n self.call_dist_op(\n \":reduce\",\n False,\n dist.reduce,\n tensor,\n src,\n op,\n group_id,\n tensor_shapes=[tensor.shape],\n )\n if rank == src:\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA reduce\")\n @skip_if_no_gpu\n def test_reduce_sum_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + 10 * (len(group) - 1),\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not 
support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n # REDUCE TWICE\n def _test_reduce_twice_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n tensors = [\n _build_tensor(src + 1).fill_(\n master_value if rank == src else worker_value\n )\n for i in range(2)\n ]\n if cuda:\n for i in range(2):\n tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0])\n self.call_dist_op(\n \":reduce\",\n False,\n dist.reduce,\n tensors[0],\n src,\n op,\n group_id,\n secondary_op_call=lambda: dist.reduce(\n tensors[1], src, op, group_id\n ),\n tensor_shapes=[tensors[0].shape],\n )\n if rank == src:\n for tensor in tensors:\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_sum_twice(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_twice_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA reduce\")\n @skip_if_no_gpu\n def test_reduce_sum_cuda_twice(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_twice_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + 10 * (len(group) - 1),\n True,\n rank_to_GPU,\n )\n\n @skip_if_no_gpu\n @require_backend({\"gloo\", \"nccl\"})\n def test_all_reduce_result_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n for src in group:\n if rank == src:\n tensor = _build_tensor(src + 1, 2)\n else:\n tensor = _build_tensor(src + 1, 10)\n tensor = 
tensor.cuda(rank_to_GPU[rank][0])\n\n opts = AllreduceOptions()\n opts.reduceOp = dist.ReduceOp.SUM\n\n if group_id == GroupMember.WORLD:\n work = _get_default_group().allreduce([tensor], opts)\n else:\n work = group_id.allreduce([tensor], opts)\n\n if BACKEND == \"gloo\":\n # Calling result right the work is finished should throw exception.\n # Here we have a race condition, we may not assume the work is not\n # finished by the time we run next lines.\n try:\n with self.assertRaisesRegex(\n RuntimeError,\n \"Work needs to be completed before calling result\",\n ):\n work.result()\n except AssertionError:\n # Exception was not raised, ensure is_completed()\n self.assertTrue(work.is_completed())\n\n work.wait()\n result = work.result()\n else:\n # In case of NCCL we should be able to retrieve pointer to the result\n # even before work is finished.\n result = work.result()\n work.wait()\n\n expected_value = 2 + (10 * (len(group) - 1))\n self.assertEqual(result, [_build_tensor(src + 1, expected_value)])\n self._barrier()\n\n def call_dist_op(\n self,\n profiling_title_postfix,\n is_async,\n op,\n *args,\n expect_event=True,\n secondary_op_call=None,\n profile_cuda=False,\n tensor_shapes=None,\n **kwargs,\n ):\n op_calls = [lambda: op(*args, **kwargs)]\n if secondary_op_call is not None:\n op_calls.append(secondary_op_call)\n\n autograd_profiler_ctx = torch.autograd.profiler.profile(\n use_cuda=profile_cuda, record_shapes=True\n )\n\n # TODO: move this test to use torch.profiler once kineto issues are\n # fixed internally.\n with autograd_profiler_ctx as prof:\n works = [op_call() for op_call in op_calls]\n if is_async:\n for work in works:\n work.wait()\n\n if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS:\n events = get_profiling_event(\n profiling_title_postfix, autograd_profiler_ctx\n )\n # DETAIL debug mode can use a pg wrapper that issues more collectives\n # under the hood\n if dist._get_debug_mode() != dist._DistributedDebugLevel.DETAIL:\n self.assertEqual(len(events), len(op_calls))\n for e in events:\n self.assertTrue(e.is_async)\n self.assertEqual(e.count, 1)\n self.assertGreaterEqual(e.cpu_time, 0)\n # Verify tensor shapes if given\n # DETAIL debug mode can use a pg wrapper that issues more collectives\n # under the hood\n if (\n tensor_shapes is not None\n and dist._get_debug_mode() != dist._DistributedDebugLevel.DETAIL\n ):\n self.assertEqual(\n e.input_shapes,\n tensor_shapes,\n f\"event shape: {e.input_shapes} vs tensor {tensor_shapes}\",\n )\n\n # ALL REDUCE\n def _test_all_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float,\n async_op=False,\n ):\n for src in group:\n curr_value = master_value if rank == src else worker_value\n\n tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n if tensor.dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(tensor).shape]\n else:\n tensor_shapes = [tensor.shape]\n self.call_dist_op(\n \":all_reduce\",\n async_op,\n dist.all_reduce,\n tensor,\n op,\n group_id,\n async_op=async_op,\n tensor_shapes=tensor_shapes,\n )\n # Currently, only Gloo backend has profiling tested with CUDA enabled.\n # Only run cuda profiling test for one rank to speed up since\n # running with different src_rank does not affect the correctness.\n if (\n src == 0\n and cuda\n and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS\n ):\n 
self.call_dist_op(\n \":all_reduce\",\n async_op,\n dist.all_reduce,\n tensor,\n op,\n group_id,\n async_op=async_op,\n profile_cuda=True,\n tensor_shapes=tensor_shapes,\n )\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum_async(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n async_op=True,\n )\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda_async(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n True,\n rank_to_GPU,\n async_op=True,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n complex(2, 3) + (complex(10, 11) * (len(group) - 1)),\n dtype=torch.cfloat,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_complex_unsupported_ops(self):\n unsupported_ops = [\n dist.ReduceOp.MAX,\n dist.ReduceOp.MIN,\n dist.ReduceOp.PRODUCT,\n dist.ReduceOp.BAND,\n dist.ReduceOp.BOR,\n dist.ReduceOp.BXOR,\n ]\n group, group_id, rank = self._init_global_test()\n for unsupported_op in unsupported_ops:\n with self.assertRaisesRegex(\n RuntimeError, \"all_reduce does not support\"\n ):\n dist.all_reduce(\n _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id\n )\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda_complex(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n complex(2, 3) + (complex(10, 11) * (len(group) - 1)),\n True,\n rank_to_GPU,\n dtype=torch.cfloat,\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 
10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n # SPARSE ALL REDUCE\n def _test_sparse_all_reduce_sum(self, fn):\n group, group_id, rank = self._init_global_test()\n\n tests = simple_sparse_reduce_tests(\n rank, dist.get_world_size(), num_inputs=1\n )\n for (inputs, outputs) in tests:\n tensors = [fn(input) for input in inputs]\n dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id)\n self.assertEqual(tensors[0], outputs[0])\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\", \"Only Gloo backend support sparse all reduce\"\n 
)\n def test_sparse_all_reduce_sum(self):\n self._test_sparse_all_reduce_sum(lambda t: t)\n\n @sandcastle_skip_if(\n BACKEND != \"gloo\", \"Only Gloo backend support sparse all reduce\"\n )\n @skip_if_no_gpu\n def test_sparse_all_reduce_sum_cuda(self):\n self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda())\n\n # ALL REDUCE - COALESCED\n @staticmethod\n def _all_reduce_coalesced_sum_test_cases(group_size):\n return (\n [2, 3, complex(2, 3)],\n [10, 11, complex(10, 11)],\n [\n 2 + 10 * (group_size - 1),\n 3 + 11 * (group_size - 1),\n complex(2, 3) + complex(10, 11) * (group_size - 1),\n ],\n [torch.float, torch.float, torch.cfloat],\n )\n\n @staticmethod\n def _all_reduce_coalesced_product_test_cases(group_size):\n return (\n [1, 2],\n [3, 4],\n [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)],\n [torch.float, torch.float],\n )\n\n @staticmethod\n def _all_reduce_coalesced_min_test_cases(group_size):\n return (\n [1, 4],\n [2, 3],\n [1, 3],\n [torch.float, torch.float],\n )\n\n @staticmethod\n def _all_reduce_coalesced_max_test_cases(group_size):\n return (\n [1, 4],\n [2, 3],\n [2, 4],\n [torch.float, torch.float],\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_coalesced_max_complex_unsupported(self):\n group, group_id, rank = self._init_global_test()\n with self.assertRaisesRegex(RuntimeError, \"all_reduce does not support\"):\n dist.all_reduce_coalesced(\n [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id\n )\n\n def _test_all_reduce_coalesced_helper(\n self,\n group,\n group_id,\n rank,\n op,\n cuda=False,\n rank_to_GPU=None,\n ):\n test_case_func = {\n dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases,\n dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases,\n dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases,\n dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases,\n }[op]\n\n master_values, worker_values, expected_values, dtypes = test_case_func(\n len(group)\n )\n\n for src in group:\n curr_values = master_values if rank == src else worker_values\n tensors = [\n _build_tensor(src + 1, val, dtype=dtype)\n for dtype, val in zip(dtypes, curr_values)\n ]\n if cuda:\n tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]\n tensor_shapes = []\n for tensor in tensors:\n if tensor.dtype == torch.complex64:\n tensor_shapes.append(torch.view_as_real(tensor).shape)\n else:\n tensor_shapes.append(tensor.shape)\n self.call_dist_op(\n \":all_reduce\",\n False,\n dist.all_reduce_coalesced,\n tensors,\n op,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n expected_tensors = [\n _build_tensor(src + 1, expected_value, dtype=dtype)\n for dtype, expected_value in zip(dtypes, expected_values)\n ]\n self.assertEqual(tensors, expected_tensors)\n\n self._barrier()\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n 
rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n # SCATTER\n def _test_scatter_helper(self, group, group_id, rank, dtype=torch.float):\n for dest in group:\n tensor = _build_tensor(dest + 1, -1, dtype=dtype)\n expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype)\n tensors = (\n [_build_tensor(dest + 1, i, dtype=dtype) for i in group]\n if rank == dest\n else []\n )\n if dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(t).shape for t in tensors]\n else:\n tensor_shapes = [t.shape for t in tensors]\n self.call_dist_op(\n \":scatter\",\n False,\n dist.scatter,\n tensor,\n src=dest,\n scatter_list=tensors,\n group=group_id,\n tensor_shapes=tensor_shapes,\n )\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_scatter_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify scatter_list argument only on source rank.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i 
in group]\n dist.scatter(output, src=0, scatter_list=scatter_list)\n else:\n dist.scatter(output, src=0)\n self.assertEqual(output, one * rank)\n\n # Don't specify src argument.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i in group]\n dist.scatter(output, scatter_list=scatter_list)\n else:\n dist.scatter(output)\n self.assertEqual(output, one * rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter(self):\n group, group_id, rank = self._init_global_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n @skip_if_small_worldsize\n def test_scatter_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n # GATHER\n def _test_gather_helper(self, group, group_id, rank):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank)\n tensors = (\n [_build_tensor(dest + 1, -1) for i in group] if rank == dest else []\n )\n self.call_dist_op(\n \":gather\",\n False,\n dist.gather,\n tensor,\n dst=dest,\n gather_list=tensors,\n group=group_id,\n tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None,\n )\n if rank == dest:\n expected_tensors = [_build_tensor(dest + 1, i) for i in group]\n for t1, t2 in zip(tensors, expected_tensors):\n self.assertEqual(t1, t2)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify gather_list argument only on destination rank.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, dst=0, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank, dst=0)\n\n # Don't specify dst argument.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n # ALL GATHER\n def _test_all_gather_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float\n ):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank, dtype=dtype)\n tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in 
group]\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]\n if tensors[0].dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(tensors[0]).shape]\n else:\n tensor_shapes = [tensors[0].shape]\n self.call_dist_op(\n \":all_gather\",\n False,\n dist.all_gather,\n tensors,\n tensor,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n\n expected_tensors = [\n _build_tensor(dest + 1, i, dtype=dtype) for i in group\n ]\n for t1, t2 in zip(tensors, expected_tensors):\n self.assertEqual(t1, t2)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all gather\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all gather skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_gather_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all gather\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all gather skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_gather_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n def _run_all_gather_coalesced_and_verify(\n self, output_tensor_lists, input_tensors, expected_tensors, group_id\n ):\n \"\"\"\n Helper that runs all_gather_coalesced and returns true if output\n matches expectations.\n \"\"\"\n tensor_shapes = []\n for input_tensor in input_tensors:\n if input_tensor.dtype == torch.complex64:\n tensor_shapes.append(torch.view_as_real(input_tensor).shape)\n else:\n tensor_shapes.append(input_tensor.shape)\n self.call_dist_op(\n \":all_gather\",\n False,\n dist.all_gather_coalesced,\n output_tensor_lists,\n input_tensors,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n\n for l1, l2 in zip(output_tensor_lists, expected_tensors):\n for t1, t2 in zip(l1, l2):\n if not torch.equal(t1, t2):\n return False\n return True\n\n def _test_all_gather_coalesced_helper(\n self, group, group_id, rank, dtype=torch.float\n ):\n # TODO: Instead we should probably go through _rank_not_in_group\n # mechanism to disable sending tensors\n if group_id is not None:\n for test_case_id in range(2, 5):\n # Make sure we create tensors of incompatible sizes, e.g.\n # [1], [2x2], [3x3x3] ... 
to be sent in one batch\n input_tensors = [\n _build_multidim_tensor(\n tensor_id, tensor_id, rank + tensor_id, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n output_tensor_lists = [\n [\n _build_multidim_tensor(\n tensor_id, tensor_id, -1, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n for _ in group\n ]\n expected_tensors = [\n [\n _build_multidim_tensor(\n tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n for rank_iter in group\n ]\n assert self._run_all_gather_coalesced_and_verify(\n output_tensor_lists, input_tensors, expected_tensors, group_id\n ), \"output tensors do not match expected ouputs\"\n\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_simple(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_coalesced_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @sandcastle_skip_if(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @sandcastle_skip_if(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_with_empty(self):\n group, group_id, rank = self._init_global_test()\n input_tensors = [\n rank * torch.ones([2, 2]),\n torch.ones([0]),\n (rank + 1) * torch.ones([3, 3]),\n torch.ones([0]),\n torch.ones([0]),\n ]\n output_tensors_lists = [\n [\n -1 * torch.ones([2, 2]),\n -1 * torch.ones([0]),\n -1 * torch.ones([3, 3]),\n -1 * torch.ones([0]),\n -1 * torch.ones([0]),\n ]\n for _ in group\n ]\n expected_tensors = [\n [\n r * torch.ones([2, 2]),\n torch.ones([0]),\n (r + 1) * torch.ones([3, 3]),\n torch.ones([0]),\n torch.ones([0]),\n ]\n for r in group\n ]\n assert self._run_all_gather_coalesced_and_verify(\n output_tensors_lists, input_tensors, expected_tensors, group_id\n )\n self._barrier()\n\n # AllToAll\n def _test_all_to_all_single_equal_split_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float\n ):\n if group_id is not None:\n size = len(group)\n in_tensor = torch.ones([size, size], dtype=dtype) * rank\n expected_tensor = torch.cat(\n [torch.ones([1, size], dtype=dtype) * i for i in group]\n )\n out_tensor = torch.ones([size, size], dtype=dtype) * -1\n if cuda:\n in_tensor = 
in_tensor.cuda(rank_to_GPU[rank][0])\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n if dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(in_tensor).shape]\n else:\n tensor_shapes = [in_tensor.shape]\n self.call_dist_op(\n \":all_to_all\",\n False,\n dist.all_to_all_single,\n out_tensor,\n in_tensor,\n group=group_id,\n tensor_shapes=tensor_shapes,\n )\n self.assertEqual(out_tensor, expected_tensor)\n self._barrier()\n\n def _test_all_to_all_single_unequal_split_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float\n ):\n if group_id is not None:\n size = len(group)\n in_splits = [i + 1 for i in group]\n out_splits = [rank + 1 for _ in group]\n in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank\n out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)\n expected_tensor = torch.cat(\n [torch.ones([rank + 1, size], dtype=dtype) * i for i in group]\n )\n if cuda:\n in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n dist.all_to_all_single(\n out_tensor, in_tensor, out_splits, in_splits, group=group_id\n )\n self.assertEqual(out_tensor, expected_tensor)\n self._barrier()\n\n def _test_all_to_all_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float,\n ):\n if group_id is not None:\n size = len(group)\n in_splits = [i + 1 for i in group]\n in_tensors = [\n torch.ones([in_splits[i], size], dtype=dtype) * rank\n for i, _ in enumerate(group)\n ]\n out_tensors = [\n torch.ones([(rank + 1), size], dtype=dtype) for _ in group\n ]\n expected_tensors = [\n torch.ones([rank + 1, size], dtype=dtype) * i for i in group\n ]\n if cuda:\n in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]\n expected_tensors = [\n t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors\n ]\n out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]\n dist.all_to_all(out_tensors, in_tensors, group=group_id)\n for t1, t2 in zip(out_tensors, expected_tensors):\n self.assertEqual(t1, t2)\n self._barrier()\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_equal_split_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n 
@sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_unequal_split_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n dtype=torch.cfloat,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n @skip_if_small_worldsize\n def test_all_to_all_single_unequal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n @skip_if_small_worldsize\n def 
test_all_to_all_single_unequal_split_group_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n @skip_if_small_worldsize\n def test_all_to_all_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_small_worldsize\n @skip_if_rocm\n def test_all_to_all_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @sandcastle_skip_if(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n # BARRIER\n def _test_barrier_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None\n ):\n WAIT_TIME = 0.3 # seconds\n\n for dest in group:\n expected_time = torch.DoubleTensor(1).fill_(0.0)\n if cuda:\n expected_time = expected_time.cuda(rank_to_GPU[rank][0])\n if dest == rank:\n expected_time.fill_(time.time() + WAIT_TIME)\n dist.broadcast(expected_time, dest, group_id)\n time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer\n dist.barrier(group_id)\n else:\n dist.broadcast(expected_time, dest, group_id)\n dist.barrier(group_id)\n self.assertGreaterAlmostEqual(\n float(time.time()),\n float(expected_time[0]),\n \"destination rank: %d, my rank: %d\" % (dest, rank)\n + \" (if you see this failure, please report in #14554)\",\n )\n\n # Use higher timeout for the instance where the test runs\n # against a 
subgroup and uses a CUDA tensor for expected time.\n # The CUDA initialization for the participating processes can\n # take long enough for the barrier timeout to trigger on the\n # process that doesn't participate in the group.\n self._barrier(timeout=20)\n\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier(self):\n group, group_id, rank = self._init_global_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @skip_if_small_worldsize\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):\n for src in group:\n expected_tensor = _build_tensor(src + 1)\n tensors = [\n _build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]\n ]\n if rank == src:\n tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])\n\n dist.broadcast_multigpu(tensors, src, group_id)\n for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL broadcast multigpu skipped\")\n @skip_if_no_gpu\n def test_broadcast_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n def _test_all_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n dtype=torch.float,\n ):\n for src in group:\n curr_value = master_value if rank == src else worker_value\n tensors = [\n _build_tensor(src + 1, curr_value, dtype=dtype).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n self.call_dist_op(\n \":all_reduce\",\n False,\n dist.all_reduce_multigpu,\n tensors,\n op,\n group_id,\n )\n expected_tensor = _build_tensor(src + 1, expected_value, dtype=dtype)\n for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all_reduce multigpu 
skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n @sandcastle_skip_if(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @sandcastle_skip_if(BACKEND == \"nccl\", \"CUDA all_reduce multigpu skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_reduce_multigpu_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n (complex(2, 3) + complex(10, 11) * (len(group) - 1))\n * len(rank_to_GPU[0]),\n dtype=torch.cfloat,\n )\n\n def _test_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n ):\n for src in group:\n tensor_value = master_value if rank == src else worker_value\n tensors = [\n _build_tensor(src + 1, tensor_value).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n self.call_dist_op(\n \"reduce\",\n False,\n dist.reduce_multigpu,\n tensors,\n src,\n op,\n group_id,\n expect_event=len(tensors) == 1,\n tensor_shapes=[tensors[0].shape],\n )\n if rank == src:\n expected_tensor = _build_tensor(src + 1, expected_value)\n self.assertEqual(tensors[0], expected_tensor)\n\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\", \"Only Nccl backend supports reduce multigpu\"\n )\n @skip_if_no_gpu\n def test_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n def _test_all_gather_multigpu_helper(\n self, group, group_id, rank, rank_to_GPU, dtype=torch.float\n ):\n for dest in group:\n tensors = [\n _build_tensor(dest + 1, dtype=dtype).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n\n # construct expected output along with\n # a place holder to receive all gather results\n output_tensors = []\n expected_output = []\n output_per_gpu = (\n [_build_tensor(dest + 1, -1, dtype=dtype)]\n * len(rank_to_GPU[0])\n * len(group)\n )\n expected_per_gpu = (\n [_build_tensor(dest + 1, dtype=dtype)]\n * len(rank_to_GPU[0])\n * len(group)\n )\n for gpu in rank_to_GPU[rank]:\n output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])\n expected_output.append(\n [t.cuda(device=gpu) for t in expected_per_gpu]\n )\n self.call_dist_op(\n \"all_gather\",\n False,\n dist.all_gather_multigpu,\n output_tensors,\n tensors,\n group_id,\n expect_event=len(expected_output) == 1,\n )\n self.assertEqual(output_tensors, expected_output)\n\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\", \"Only Nccl backend supports allgather multigpu\"\n )\n @skip_if_no_gpu\n def test_all_gather_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\", \"Only Nccl backend supports 
allgather multigpu\"\n )\n @skip_if_no_gpu\n def test_all_gather_multigpu_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_all_gather_multigpu_helper(\n group, group_id, rank, rank_to_GPU, dtype=torch.cfloat\n )\n\n def _model_step(self, model):\n for param in model.parameters():\n if param.grad is not None:\n with torch.no_grad():\n param += param.grad\n param.grad = None\n\n def _model_step_with_zero_grad(self, model):\n for param in model.parameters():\n if param.grad is not None:\n with torch.no_grad():\n param += param.grad\n param.grad.requires_grad_(False)\n param.grad.zero_()\n\n def _prepare_dummy_data(self, local_bs):\n # global_bs for DDP should be divisible by WORLD_SIZE\n world_size = int(os.environ[\"WORLD_SIZE\"])\n global_bs = world_size * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n return global_bs, input_cpu, target, loss\n\n # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL\n def _test_DDP_helper(\n self, model, input_var, target, loss, scale_factor=1.0, memory_format=None\n ):\n model.train()\n output = model(input_var)\n l = loss(output, target) * scale_factor\n l.backward()\n if memory_format is not None:\n self.assertTrue(output.is_contiguous(memory_format=memory_format))\n\n def _assert_equal_param(self, param_gpu, param_DDP):\n self.assertEqual(len(param_gpu), len(param_DDP))\n for p_gpu, p_DDP in zip(param_gpu, param_DDP):\n self.assertEqual(p_gpu, p_DDP)\n\n def _test_DDP_niter(\n self,\n model_base,\n model_DDP,\n input,\n target,\n loss,\n local_bs,\n rank,\n batch_size,\n test_save,\n offset=None,\n world_size=0,\n zero_grad=False,\n memory_format=None,\n n_iter=5,\n ):\n for idx in range(n_iter):\n # single cpu/gpu training\n self._test_DDP_helper(\n model_base, input, target, loss, memory_format=memory_format\n )\n\n if offset is None:\n offset = rank * local_bs\n\n # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n world_size * local_bs / batch_size if world_size != 0 else 1,\n memory_format=memory_format,\n )\n\n # Update weights and run a second iteration to shake out errors\n if zero_grad:\n self._model_step_with_zero_grad(model_base)\n self._model_step_with_zero_grad(model_DDP)\n else:\n self._model_step(model_base)\n self._model_step(model_DDP)\n self._assert_equal_param(\n list(model_base.parameters()), list(model_DDP.module.parameters())\n )\n\n # Shuffle the input so that DDP input is different\n input = input[torch.randperm(batch_size)]\n\n # save the model in the middle and reload\n if test_save and idx == 2 and INIT_METHOD.startswith(\"file://\"):\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n with tempfile.TemporaryFile() as tmp_file:\n torch.save(model_DDP, tmp_file)\n tmp_file.seek(0)\n saved_model = torch.load(tmp_file)\n for k in model_DDP.state_dict():\n self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])\n\n def _test_DistributedDataParallel(\n self,\n gpu_subset,\n rank,\n output_device=None,\n gradient_as_bucket_view=False,\n static_graph=False,\n ):\n # Run a simple end to end DDP model, use result 
of single node model\n # as baseline\n\n # cpu training setup\n model = DDP_NET\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = copy.deepcopy(model)\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP,\n device_ids=gpu_subset,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n if static_graph:\n model_DDP._set_static_graph()\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # dummy data initialization\n local_bs = len(gpu_subset)\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):\n # Run a simple end to end DDP-CPU model, use result of single node\n # model as baseline\n group, group_id, rank = self._init_global_test()\n\n # cpu training setup\n model_base = DDP_NET\n\n # DDP-CPU training setup\n model_DDP = copy.deepcopy(model_base)\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, gradient_as_bucket_view=gradient_as_bucket_view\n )\n\n # dummy data initialization\n local_bs = 2\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_base,\n model_DDP,\n input_cpu,\n target,\n loss,\n local_bs,\n rank,\n global_bs,\n False,\n zero_grad=True,\n )\n self._barrier()\n\n return model_DDP\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_DistributedDataParallelCPU(self):\n self._test_DistributedDataParallelCPU()\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_DistributedDataParallelCPU_grad_is_view(self):\n self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_DistributedDataParallel_requires_grad(self):\n # a module without gradients shouldn't be accepted\n self.assertRaises(\n RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_DistributedDataParallel_non_default_stream(self):\n stream = torch.cuda.Stream(self.rank)\n rank = self.rank\n with torch.cuda.stream(stream):\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]\n )\n for i in range(1000):\n # Clear gradients manually\n grad = net.module.weight.grad\n if grad is not None:\n grad.requires_grad_(False)\n grad.zero_()\n # Forward + BW\n batch = torch.tensor([rank]).float().cuda(rank)\n loss = net(batch).sum()\n loss.backward()\n # For each worker, the gradient on the weight should be worker_rank.\n grad = net.module.weight.grad\n avg = grad.clone()\n # 
All-reducing the gradient averages should give us the gradient\n # average. If not, then one of the workers has not correctly\n # written back the averaged gradient before this all-reduce call.\n dist.all_reduce(avg)\n world_size = int(os.environ[\"WORLD_SIZE\"])\n avg.div_(world_size)\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(\n avg[0, 0],\n expected_grad,\n msg=f\"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}\",\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_comm_hook_logging(self):\n hooks = [\n default.allreduce_hook,\n default.fp16_compress_hook,\n powerSGD.powerSGD_hook,\n powerSGD.batched_powerSGD_hook,\n quantization_hooks.quantization_pertensor_hook,\n quantization_hooks.quantization_perchannel_hook,\n ]\n\n cpp_builtin_hooks = [\n dist.BuiltinCommHookType.ALLREDUCE,\n dist.BuiltinCommHookType.FP16_COMPRESS,\n ]\n\n for hook in hooks:\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n ddp_model.register_comm_hook(None, hook)\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), hook.__qualname__)\n\n for hook in cpp_builtin_hooks:\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n ddp_model._register_builtin_comm_hook(hook)\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), str(hook))\n\n # No hook registered\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n # After second forward pass, hook should still be empty string\n for i in range(2):\n inp = torch.ones(1, 1, device=self.rank)\n loss = ddp_model(inp).sum()\n loss.backward()\n\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Note: DETAIL debug mode logs DDP logging data to stdout and\n # thus accesses std::map, which fills in a default value for the\n # type if it didn't exist.\n self.assertEqual(ddp_logging_data.get(\"comm_hook\", \"\"), \"\")\n\n def _test_ddp_hook_with_optimizer_parity(\n self, grad_as_bucket_view, static_graph\n ):\n rank = self.rank\n torch.cuda.set_device(rank)\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n models_to_test = [\n (LargeNet(), torch.randn(1, 1000).cuda()),\n ]\n if HAS_TORCHVISION:\n models_to_test.append(\n (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda())\n )\n # Enable determinism in cudnn operators\n for (model, inp) in models_to_test:\n with torch.backends.cudnn.flags(\n enabled=True, deterministic=True, benchmark=False\n ):\n sgd_lr = 1e-2\n sgd_momentum = 0.9\n sgd_weight_decay = 0.01\n ddp_model_with_optimizer_hook = (\n 
torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_as_bucket_view,\n )\n )\n if static_graph:\n ddp_model_with_optimizer_hook._set_static_graph()\n\n # Register hook that runs allreduce + functional SGD step.\n allreduce_hook = default.allreduce_hook\n opt_hook_state = default._OptimizerHookState(\n _FunctionalSGD,\n sgd_lr,\n momentum=sgd_momentum,\n weight_decay=sgd_weight_decay,\n )\n ddp_model_with_optimizer_hook.register_comm_hook(\n None,\n default._hook_then_optimizer(allreduce_hook, opt_hook_state),\n )\n # Create DDP model with no hook that does optimizer after\n # backward.\n ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_as_bucket_view,\n )\n if static_graph:\n ddp_model_with_no_hook._set_static_graph()\n\n sgd_no_hook = torch.optim.SGD(\n ddp_model_with_no_hook.parameters(),\n lr=sgd_lr,\n momentum=sgd_momentum,\n weight_decay=sgd_weight_decay,\n )\n\n # Verify parameters are equal initially.\n for hook_param, allreduce_param in zip(\n ddp_model_with_optimizer_hook.parameters(),\n ddp_model_with_no_hook.parameters(),\n ):\n self.assertEqual(hook_param, allreduce_param)\n\n # Save old parameters to later verify optimizer modified them.\n opt_hook_init_params = copy.deepcopy(\n list(ddp_model_with_optimizer_hook.parameters())\n )\n\n # Run optimizer with hook model.\n for i in range(6):\n ddp_model_with_optimizer_hook.zero_grad()\n out = ddp_model_with_optimizer_hook(inp)\n loss = out.sum()\n loss.backward()\n\n dist.barrier()\n\n # Run regular model.\n for i in range(6):\n ddp_model_with_no_hook.zero_grad()\n out = ddp_model_with_no_hook(inp)\n loss = out.sum()\n loss.backward()\n sgd_no_hook.step()\n\n dist.barrier()\n\n # Now verify parameters are equal.\n for hook_param, allreduce_param in zip(\n ddp_model_with_optimizer_hook.parameters(),\n ddp_model_with_no_hook.parameters(),\n ):\n self.assertEqual(hook_param, allreduce_param)\n\n # Verify optimizer modified parameters, otherwise they would be\n # trivially equal above.\n self.assertNotEqual(\n opt_hook_init_params,\n list(ddp_model_with_optimizer_hook.parameters()),\n )\n dist.barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @sandcastle_skip_if(IS_WINDOWS, \"FunctionalSGD not yet supported with Windows.\")\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_hook_with_optimizer_parity(self):\n for grad_as_bucket_view, static_graph in itertools.product(\n [True, False], [True, False]\n ):\n self._test_ddp_hook_with_optimizer_parity(\n grad_as_bucket_view=grad_as_bucket_view, static_graph=static_graph\n )\n\n def _test_ddp_hook_parity(self, state, hook):\n rank = self.rank\n m = torch.nn.Linear(1, 5)\n try:\n process_group = state.process_group\n except AttributeError:\n process_group = state\n\n net_with_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(m).to(rank),\n device_ids=[rank],\n process_group=process_group,\n )\n net_with_hook.register_comm_hook(state=state, hook=hook)\n net_without_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(m).to(rank),\n device_ids=[rank],\n process_group=process_group,\n )\n for i in range(100):\n # Clear gradients manually.\n for g in [\n net_without_hook.module.weight.grad,\n net_with_hook.module.weight.grad,\n ]:\n if g is not None:\n g.requires_grad_(False)\n 
g.zero_()\n # Forward + BW\n batch = torch.tensor([rank]).float().cuda(rank)\n loss = net_without_hook(batch).sum()\n loss.backward()\n # For each worker, the gradient on the weight should be worker_rank.\n grad = net_without_hook.module.weight.grad\n avg = grad.clone()\n expected_grad = (\n sum(i for i in range(dist.get_world_size())) / dist.get_world_size()\n )\n loss_hook = net_with_hook(batch).sum()\n loss_hook.backward()\n grad_hook = net_with_hook.module.weight.grad\n avg_hook = grad_hook.clone()\n # Verify hook grad with expected.\n # Cannot use exact match here due to a very small accuracy loss,\n # e.g. 1e-05, for powerSGD hook case.\n assert_func = (\n self.assertEqual\n if hook == default.allreduce_hook\n else torch.testing.assert_allclose\n )\n assert_func(\n avg_hook[0, 0],\n expected_grad,\n msg=f\"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}\",\n )\n # Verify hook grad with vanilla allreduce\n assert_func(\n avg_hook[0, 0],\n avg[0, 0],\n msg=f\"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}\",\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_allreduce(self):\n self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_allreduce_process_group(self):\n # process_group is passed in to both DDP and comm. hook\n rank_to_GPU = self._init_multigpu_helper()\n gpus = [rank_to_GPU[int(r)][0] for r in range(dist.get_world_size())]\n process_group = torch.distributed.new_group(gpus)\n self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_powerSGD(self):\n for warm_start in [True, False]:\n powersgd_state = powerSGD.PowerSGDState(\n process_group=None,\n matrix_approximation_rank=1,\n start_powerSGD_iter=2,\n warm_start=warm_start,\n )\n self._test_ddp_hook_parity(\n state=powersgd_state, hook=powerSGD.powerSGD_hook\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @sandcastle_skip_if(\n NO_MULTIPROCESSING_SPAWN,\n \"Disabled for environments that \\\n don't support multiprocessing with spawn start method\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_post_localSGD(self):\n # Although we start run local SGD at iteration 10, since we still use the global process group to run it,\n # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations.\n state = post_localSGD.PostLocalSGDState(\n process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10\n )\n self._test_ddp_hook_parity(\n state=state, hook=post_localSGD.post_localSGD_hook\n )\n\n # Since we start local SGD later than the total number of 100 iterations,\n # no local SGD actually is executed, and we don't even need to provide a subgroup for this case.\n state = 
post_localSGD.PostLocalSGDState(\n process_group=None, subgroup=None, start_localSGD_iter=1000\n )\n self._test_ddp_hook_parity(\n state=state, hook=post_localSGD.post_localSGD_hook\n )\n\n def _prepare_single_device_module(\n self,\n rank,\n process_group,\n devices,\n device_ids,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n model = Net()\n device = devices[0] if devices else torch.device(\"cuda:%d\" % rank)\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model).to(device),\n device_ids=device_ids,\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n model.to(device)\n\n input = torch.randn(global_batch_size, 2).to(device)\n target = torch.randn(global_batch_size, 4).to(device)\n\n return model, ddp_model, input, target\n\n def _prepare_cpu_module(\n self,\n process_group,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n model = Net()\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model),\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n input = torch.randn(global_batch_size, 2)\n target = torch.randn(global_batch_size, 4)\n return model, ddp_model, input, target\n\n def _test_accumulate_gradients_no_sync(\n self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False\n ):\n \"\"\"\n This is the recommended way to implement accumulate grads.\n If ``ddp_comm_hook`` input was specified, it will also register that hook\n to the ``ddp_model``. The hook fed into this function should not change\n the resulting gradients.\n \"\"\"\n group, group_id, rank = self._init_global_test()\n world_size = get_world_size()\n\n # FIXME: Add testing for gloo/CUDA\n if BACKEND == \"mpi\" or BACKEND == \"gloo\":\n global_batch_size = world_size\n local_batch_size = 1\n model, ddp_model, input, target = self._prepare_cpu_module(\n group_id, global_batch_size, gradient_as_bucket_view\n )\n\n if BACKEND == \"nccl\":\n rank_to_GPU = self._init_multigpu_helper()\n int_devices = rank_to_GPU[rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n global_batch_size = world_size\n local_batch_size = len(devices)\n model, ddp_model, input, target = self._prepare_single_device_module(\n rank,\n group_id,\n devices,\n devices,\n global_batch_size,\n gradient_as_bucket_view,\n )\n\n if ddp_comm_hook is not None:\n ddp_model.register_comm_hook(group_id, ddp_comm_hook)\n\n def step_model(model, input, target):\n model.train()\n output = model(input)\n loss = F.mse_loss(output, target.to(output.device))\n loss.backward()\n\n # ensure accumulate grads works with no_grad => no grads are accumulated.\n with torch.no_grad():\n with ddp_model.no_sync():\n ddp_model.train()\n ddp_model(input)\n\n # check two model parameters over num_iters iterations\n for iteration in range(num_iters):\n step_model(model, input, target)\n\n ddp_input = input[\n rank * local_batch_size : (rank + 1) * local_batch_size\n ]\n ddp_target = target[\n rank * local_batch_size : (rank + 1) * local_batch_size\n ]\n\n if iteration % num_iters == 0:\n # accumulate grads locally\n with ddp_model.no_sync():\n step_model(ddp_model, ddp_input, ddp_target)\n else:\n # sync grads\n step_model(ddp_model, ddp_input, ddp_target)\n\n for i, j in zip(model.parameters(), ddp_model.parameters()):\n if not i.requires_grad:\n continue\n if iteration % num_iters == 0:\n self.assertNotEqual(i.grad, j.grad)\n else:\n self.assertEqual(i.grad, j.grad)\n\n # Shuffle the input so that DDP 
input is different\n torch.manual_seed(1337 + iteration)\n input = input[torch.randperm(global_batch_size)]\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync(self):\n \"\"\"\n Runs _test_accumulate_gradients_no_sync using default inputs\n \"\"\"\n self._test_accumulate_gradients_no_sync()\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_grad_is_view(self):\n \"\"\"\n Runs _test_accumulate_gradients_no_sync using default inputs\n \"\"\"\n self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True)\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_allreduce_hook(self):\n \"\"\"\n Runs multiple iterations on _test_accumulate_gradients_no_sync\n using allreduce hook and validates whether future result was properly\n passed as gradients in reducer.\n \"\"\"\n\n world_size = get_world_size()\n\n def allreduce_hook(\n group_id: object, bucket: dist.GradBucket\n ) -> torch.futures.Future[torch.Tensor]:\n tensors = [bucket.get_tensor() / world_size]\n return (\n group_id.allreduce(tensors)\n .get_future()\n .then(lambda fut: fut.value()[0])\n )\n\n self._test_accumulate_gradients_no_sync(\n num_iters=4, ddp_comm_hook=allreduce_hook\n )\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self):\n \"\"\"\n Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce\n hook that also uses then callbacks. In first then callback result is multiplied\n by 2, and the second callback divides the result by 2 * world_size. 
It validates\n whether final result was properly passed as gradients in reducer.\n \"\"\"\n\n world_size = get_world_size()\n\n def allreduce_with_then_hook(\n group_id: object, bucket: dist.GradBucket\n ) -> torch.futures.Future[torch.Tensor]:\n fut = group_id.allreduce([bucket.get_tensor()]).get_future()\n\n def mult(fut):\n # Multiply the result by 2.\n return 2 * fut.wait()[0]\n\n def div(fut):\n # Divide the result by 2 * world_size.\n return fut.wait() / (2 * world_size)\n\n return fut.then(mult).then(div)\n\n self._test_accumulate_gradients_no_sync(\n num_iters=4, ddp_comm_hook=allreduce_with_then_hook\n )\n\n @sandcastle_skip_if(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_get_future(self):\n def mult(fut):\n return [t * 3 for t in fut.wait()]\n\n def add(fut):\n return [t + 1 for t in fut.wait()]\n\n group, group_id, rank = self._init_global_test()\n input = _build_tensor(3, 2)\n if BACKEND == \"nccl\":\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n input = input.to(device_id)\n fut = group_id.allreduce([input]).get_future()\n res = fut.then(mult).then(add).wait()\n expected = _build_tensor(3, 2 * len(group) * 3 + 1)\n\n self.assertEqual(res[0], expected)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n gpus = list(rank_to_GPU[rank])\n\n for use_bucket_view, static_graph in itertools.product(\n (False, True), (False, True)\n ):\n self._test_DistributedDataParallel(\n gpu_subset=gpus,\n rank=rank,\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n # test output_device\n self._test_DistributedDataParallel(\n gpu_subset=gpus,\n rank=rank,\n output_device=torch.device(\"cuda\"),\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n # test device_ids\n gpus_list = [torch.device(\"cuda:\" + str(i)) for i in gpus]\n self._test_DistributedDataParallel(\n gpu_subset=gpus_list,\n rank=rank,\n output_device=torch.device(\"cuda\"),\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n def _test_DistributedDataParallel_with_amp(self, grad_is_view=False):\n torch.manual_seed(31415)\n # Creates model and optimizer in default precision\n model = copy.deepcopy(DDP_NET).cuda()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.03)\n\n # Creates a GradScaler once at the beginning of training.\n scaler = GradScaler()\n\n ddp_model = nn.parallel.DistributedDataParallel(\n model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view\n )\n\n input = torch.randn(dist.get_world_size() * 2, 2).cuda()\n target = torch.randn(dist.get_world_size() * 2, 4).cuda()\n loss_fn = nn.MSELoss()\n\n # verify grads are none before training\n for p in ddp_model.parameters():\n self.assertTrue(p is not None)\n self.assertTrue(p.grad is None)\n\n for idx in range(20):\n optimizer.zero_grad()\n # Runs the forward pass with autocasting.\n with autocast():\n output = ddp_model(input)\n loss = loss_fn(output, target)\n\n # Scales loss. 
Calls backward() on scaled loss to create scaled gradients.\n # Backward passes under autocast are not recommended.\n # Backward ops run in the same dtype autocast chose for corresponding forward ops.\n scaler.scale(loss).backward()\n\n # verify grads are not none and are valid during training\n for p in ddp_model.parameters():\n if p.requires_grad:\n self.assertTrue(p.grad is not None)\n self.assertFalse(p.grad.isnan().any())\n self.assertFalse(p.grad.isinf().any())\n\n # scaler.step() first unscales the gradients of the optimizer's assigned params.\n # If these gradients do not contain infs or NaNs, optimizer.step() is then called,\n # otherwise, optimizer.step() is skipped.\n scaler.step(optimizer)\n\n # Updates the scale for next iteration.\n scaler.update()\n\n # Shuffle the input so that DDP input is different\n torch.manual_seed(1337 + idx)\n input = input[torch.randperm(dist.get_world_size() * 2)]\n\n return ddp_model\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_with_amp_and_grad_is_view(self):\n torch.cuda.set_device(self.rank)\n ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp(\n grad_is_view=False\n )\n ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp(\n grad_is_view=True\n )\n for i, j in zip(\n ddp_model_grad_not_view.parameters(),\n ddp_model_grad_is_view.parameters(),\n ):\n self.assertEqual(i, j)\n\n def _test_DistributedDataParallel_SyncBatchNorm(\n self,\n gpu_subset,\n rank,\n local_bs,\n global_bs,\n offset,\n output_device=None,\n affine=True,\n ):\n # Run a simple end to end DDP model, use result of single node model\n # as baseline\n\n # cpu training setup\n model = BN_NET if affine else BN_NET_NO_AFFINE\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, device_ids=gpu_subset\n )\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # data initialization\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n offset,\n dist.get_world_size(),\n 5 if affine else 2,\n )\n self._barrier()\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @sandcastle_skip_if(\n IS_WINDOWS, \"PostLocalSGDOptimizer not yet supported with Windows.\"\n )\n def test_post_localSGD_optimizer_parity(self, grad_is_view=False):\n learning_rate = 0.03\n period = 4\n warmup_steps = 10\n torch.cuda.set_device(self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(DDP_NET).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_is_view,\n )\n opt = torch.optim.SGD(net.parameters(), lr=learning_rate)\n averager = averagers.PeriodicModelAverager(\n period=period, 
warmup_steps=warmup_steps\n )\n\n post_localSGD_net = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(DDP_NET).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_is_view,\n )\n post_localSGD_opt = post_localSGD_optimizer.PostLocalSGDOptimizer(\n params=post_localSGD_net.parameters(),\n optimizer_class=torch.optim.SGD,\n averager=averagers.PeriodicModelAverager(\n period=period, warmup_steps=warmup_steps\n ),\n lr=learning_rate,\n )\n\n input = torch.randn(dist.get_world_size() * 2, 2).cuda()\n target = torch.randn(dist.get_world_size() * 2, 4).cuda()\n loss_fn = nn.MSELoss()\n\n for _ in range(20):\n opt.zero_grad()\n output = net(input)\n loss = loss_fn(output, target)\n loss.backward()\n opt.step()\n averager.average_parameters(net.parameters())\n\n post_localSGD_opt.zero_grad()\n post_localSGD_output = post_localSGD_net(input)\n post_localSGD_loss = loss_fn(post_localSGD_output, target)\n post_localSGD_loss.backward()\n post_localSGD_opt.step()\n\n for p1, p2 in zip(net.parameters(), post_localSGD_net.parameters()):\n self.assertEqual(p1.data, p2.data)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self):\n group, group_id, rank = self._init_global_test()\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n model = ONLY_SBN_NET\n model_gpu = copy.deepcopy(model).cuda(rank)\n model_DDP = nn.parallel.DistributedDataParallel(\n model_gpu, device_ids=[rank]\n )\n\n memory_format = torch.channels_last\n input_gpu = (\n torch.randn(global_bs, 2, 4, 4, dtype=torch.float)\n .cuda(rank)\n .to(memory_format=memory_format)\n )\n target_gpu = (\n torch.randn(global_bs, 2, 4, 4, dtype=torch.float)\n .cuda(rank)\n .to(memory_format=memory_format)\n )\n loss = nn.MSELoss()\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_gpu,\n target_gpu,\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n bs_offset,\n dist.get_world_size(),\n memory_format=memory_format,\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n )\n\n # test output_device\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device(\"cuda\"),\n )\n\n # test device_ids\n gpus = [torch.device(\"cuda:\" + str(i)) for i in gpus]\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device(\"cuda\"),\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support 
DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n affine=False,\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)\n\n local_bs = len(gpus) * 2\n global_bs = dist.get_world_size() * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n @require_world_size(2)\n def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)\n\n local_bs = 1\n global_bs = dist.get_world_size()\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n 
@skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value(\n self,\n ):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n model = nn.parallel.DistributedDataParallel(\n ONLY_SBN_NET.cuda(rank), device_ids=[rank]\n )\n\n input_var = []\n for i in range(dist.get_world_size()):\n input_var_rank = torch.cat(\n [\n torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)),\n torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)),\n ],\n dim=1,\n )\n input_var.append(input_var_rank)\n\n all_input_var = torch.cat(\n [\n x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1)\n for x in input_var\n ],\n dim=1,\n ).cuda(rank)\n\n for i in range(100):\n y = model(input_var[rank].cuda(rank))\n y.mean().backward()\n\n running_mean, running_var = (\n model.module.running_mean,\n model.module.running_var,\n )\n torch.testing.assert_allclose(running_mean, all_input_var.mean(1))\n torch.testing.assert_allclose(running_var, all_input_var.var(1))\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self):\n group, group_id, rank = self._init_global_test()\n # only do single GPU per process\n gpus = [rank]\n\n # cpu training setup\n model = BN_NET\n\n num_processes = dist.get_world_size()\n local_bs = rank + 2\n bs_offset = int((rank + 3) * rank / 2)\n global_bs = int((num_processes + 3) * num_processes / 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n )\n\n def _test_ddp_logging_data(self, is_gpu):\n rank = dist.get_rank()\n model_DDP = copy.deepcopy(DDP_NET)\n if is_gpu:\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP.cuda(rank), device_ids=[rank]\n )\n else:\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP)\n\n # dummy data initialization\n local_bs = 2\n batch_size, input, target, loss = self._prepare_dummy_data(local_bs)\n if is_gpu:\n input = input.cuda(rank)\n target = target.cuda(rank)\n\n model_DDP._set_ddp_runtime_logging_sample_rate(2)\n\n for idx in range(20):\n offset = rank * local_bs\n\n # DDP training, DDP scatters subsets of input to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n 1,\n )\n\n self._model_step_with_zero_grad(model_DDP)\n\n # Verify DDP logging data is sampled as expected\n # If it has ran more than 10 iteratons and this is\n # the sampled iteration for measuring run time stats,\n # the run time stats for this idx-th iteration will not\n # be zeros.\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n if idx > 0 and (idx < 10 or idx % 2 == 0):\n self.assertGreaterEqual(\n ddp_logging_data.get(\"forward_compute_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_compute_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_comm_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_compute_time\"),\n ddp_logging_data.get(\"backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_comm_time\"),\n ddp_logging_data.get(\"backward_compute_comm_overlap_time\"),\n )\n self.assertEqual(ddp_logging_data.get(\"iteration\"), idx)\n elif idx > 0:\n # if the idx-th iteration is not sampled to set 
runtime stats,\n # ddp_logging_data.iteration will not be updated to current\n # iteration.\n self.assertNotEqual(ddp_logging_data.get(\"iteration\"), idx)\n\n # Shuffle the input so that DDP input is different\n input = input[torch.randperm(batch_size)]\n\n return model_DDP\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_ddp_logging_data_cpu(self):\n def parse_env(var):\n return os.environ[var] if var in os.environ else \"N/A\"\n\n os.environ[\"TORCH_DISTRIBUTED_DEBUG\"] = \"INFO\"\n group, group_id, rank = self._init_global_test()\n model_DDP = self._test_ddp_logging_data(is_gpu=False)\n\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"world_size\"), dist.get_world_size())\n self.assertEqual(ddp_logging_data.get(\"rank\"), dist.get_rank())\n self.assertEqual(ddp_logging_data.get(\"module_name\"), \"Net\")\n self.assertEqual(ddp_logging_data.get(\"device_ids\"), \"\")\n # output_device is -1 in default if it is not set, e.g.\n # output_device of CPU training is -1.\n self.assertEqual(ddp_logging_data.get(\"output_device\"), -1)\n self.assertEqual(ddp_logging_data.get(\"broadcast_buffers\"), 1)\n self.assertEqual(ddp_logging_data.get(\"bucket_cap_bytes\"), 25 * 1024 * 1024)\n self.assertEqual(ddp_logging_data.get(\"find_unused_parameters\"), 0)\n self.assertEqual(ddp_logging_data.get(\"gradient_as_bucket_view\"), 0)\n self.assertEqual(\n ddp_logging_data.get(\"backend_name\"), dist.get_backend(group_id)\n )\n self.assertEqual(ddp_logging_data.get(\"iteration\"), 18)\n params = list(model_DDP.parameters())\n num_params = 0\n param_size = 0\n params = list(\n parameter\n for parameter in filter(\n lambda parameter: parameter.requires_grad, params\n )\n )\n for p in params:\n num_params += 1\n param_size += p.numel() * p.element_size()\n self.assertEqual(ddp_logging_data.get(\"dtypes\"), \"float\")\n self.assertEqual(\n ddp_logging_data.get(\"total_parameter_size_bytes\"), param_size\n )\n self.assertEqual(ddp_logging_data.get(\"num_parameter_tensors\"), num_params)\n self.assertEqual(ddp_logging_data.get(\"bucket_sizes\"), str(param_size))\n self.assertEqual(\n ddp_logging_data.get(\"master_port\"), parse_env(\"MASTER_PORT\")\n )\n self.assertEqual(\n ddp_logging_data.get(\"master_addr\"), parse_env(\"MASTER_ADDR\")\n )\n self.assertEqual(\n ddp_logging_data.get(\"torch_distributed_debug\"),\n parse_env(\"TORCH_DISTRIBUTED_DEBUG\"),\n )\n self.assertEqual(\n ddp_logging_data.get(\"cuda_visible_devices\"),\n parse_env(\"CUDA_VISIBLE_DEVICES\"),\n )\n if ddp_logging_data.get(\"backend_name\") == \"gloo\":\n self.assertEqual(\n ddp_logging_data.get(\"gloo_socket_ifname\"),\n parse_env(\"GLOO_SOCKET_IFNAME\"),\n )\n self.assertEqual(\n ddp_logging_data.get(\"gloo_device_transport\"),\n parse_env(\"GLOO_DEVICE_TRANSPORT\"),\n )\n self.assertEqual(ddp_logging_data.get(\"nccl_socket_ifname\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_blocking_wait\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_async_error_handling\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_debug\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_nthreads\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_ib_timeout\"), None)\n # test runtime logging fields\n # Note: DETAIL debug mode logs DDP logging data to stdout and\n # thus accesses std::map, which fills in a default value for the\n # type if it didn't exist.\n self.assertEqual(ddp_logging_data.get(\"unused_parameter_size\", 0), 
0)\n self.assertEqual(ddp_logging_data.get(\"has_rebuilt_buckets\"), 1)\n self.assertEqual(\n ddp_logging_data.get(\"rebuilt_bucket_sizes\"), str(param_size)\n )\n # It is hard to test accurate latency, but it can test whether the latency is\n # a valid value and in the expected range.\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_forward_compute_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"), 1\n )\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_backward_comm_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_comm_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n # test larger net with mixed data types, verify multiple bucket sizes\n model = LargeNet()\n model.float()\n model.fc1.double()\n model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5)\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n params = list(model_DDP.parameters())\n self.assertEqual(\n ddp_logging_data.get(\"bucket_cap_bytes\"), int(1.5 * 1024 * 1024)\n )\n bucket_sizes = [\n params[1].numel() * params[1].element_size(),\n params[0].numel() * params[0].element_size(),\n ]\n self.assertEqual(\n ddp_logging_data.get(\"bucket_sizes\"),\n \", \".join(str(x) for x in bucket_sizes),\n )\n self.assertEqual(ddp_logging_data.get(\"dtypes\"), \"double, float\")\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_ddp_logging_data_gpu(self):\n group, group_id, rank = self._init_global_test()\n model_DDP = self._test_ddp_logging_data(is_gpu=True)\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"device_ids\"), str(rank))\n self.assertEqual(ddp_logging_data.get(\"output_device\"), rank)\n # test runtime logging fields\n # It is hard to test accurate latency, but it can test whether the latency is\n # a valid value and in the expected range.\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_forward_compute_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_comm_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n\n @sandcastle_skip_if(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_static_graph_api_cpu(self):\n model_DDP = nn.parallel.DistributedDataParallel(DDP_NET)\n model_DDP._set_static_graph()\n self.assertEqual(\n model_DDP._get_ddp_logging_data().get(\"static_graph\"), True\n )\n expected_err = \"should be called before training loop starts\"\n with self.assertRaisesRegex(RuntimeError, expected_err):\n local_bs = 2\n batch_size, input, target, loss = self._prepare_dummy_data(local_bs)\n offset = dist.get_rank() * local_bs\n\n # DDP training, DDP scatters subsets of input to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n 1,\n )\n model_DDP._set_static_graph()\n\n # Verify error was logged in ddp_logging_data.\n verify_ddp_error_logged(model_DDP, 
expected_err)\n\n @skipIfNoTorchVision\n def test_SyncBatchNorm_process_group(self):\n # When adopting `convert_sync_batchnorm` to convert a `nn.modules`,\n # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm`\n # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models).\n\n process_ids = 0\n process_group = torch.distributed.new_group([process_ids])\n res50_model = torchvision.models.resnet50()\n res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm(\n copy.deepcopy(res50_model), process_group\n )\n process_group_sync = res50_model_sync.layer1[0].bn1.process_group\n self.assertEqual(process_group_sync, process_group)\n\n def _run_reduction_test(\n self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None\n ):\n if reduction_fn != dist.all_reduce and dst is None:\n raise ValueError(f\"Reduction fn {reduction_fn} must specify dst!\")\n if dst is not None:\n reduction_fn(tensor, dst, op)\n # Only destination rank tensor is expected to have final result.\n if dist.get_rank() == dst:\n self.assertEqual(tensor, expected_tensor)\n else:\n reduction_fn(tensor, op)\n self.assertEqual(tensor, expected_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_allreduce(self):\n torch.cuda.set_device(self.rank)\n # Run all_reduce with PRODUCT\n element = self.rank % 2 == 0\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([False, False]).to(self.rank), op\n )\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(input_tensor, expected_tensor, op)\n\n # Run all_reduce with SUM\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([True, True]).to(self.rank), op\n )\n # TODO: NCCL backend does not work correctly for bitwise reduction ops\n # (see https://github.com/pytorch/pytorch/issues/41362). 
Add tests for\n # these once it is supported.\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_allgather(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, True]}\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n # Preserve a copy of the tensor to compare against after allgather.\n input_tensor_copy = input_tensor.clone()\n tensor_list = [\n torch.tensor([False, False]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, input_tensor)\n\n self.assertEqual(len(tensor_list), dist.get_world_size())\n for i, t in enumerate(tensor_list):\n expected = torch.tensor(inp[i % 2]).to(self.rank)\n self.assertEqual(t, expected)\n # Ensure that the input tensor is not modified, since this collective\n # does not modify its input.\n self.assertEqual(input_tensor_copy, input_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_nccl_backend_bool_reduce(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, False]}\n # Run reduce() with product op\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = torch.tensor([False, False]).to(self.rank)\n self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(\n input_tensor, expected_tensor, op, dist.reduce, dst=0\n )\n\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = (\n torch.tensor([True, True]).to(self.rank)\n if self.rank == 0\n else input_tensor.clone()\n )\n self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_broadcast(self):\n tensor_size = 10\n bcast_tensor = torch.tensor(\n [\n (random.random() < 0.5 if self.rank == 0 else False)\n for _ in range(tensor_size)\n ]\n ).to(self.rank)\n dist.broadcast(bcast_tensor, src=0)\n # Now allgather and ensure the tensors are equal.\n tensor_list = [\n torch.tensor([False for _ in range(tensor_size)]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, bcast_tensor)\n expected = tensor_list[0]\n for tensor in tensor_list[1:]:\n self.assertEqual(tensor, expected)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_DistributedSampler_padding(self):\n # Tests padding of distributed sampler.\n world_size = dist.get_world_size()\n\n # Simulates the 'casual' dataset size\n dataset_size = 100 + world_size + 1\n dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)]\n\n # Simulates the 'tiny' dataset size\n dataset_tiny_size = max(world_size // 2 - 1, 1)\n dataset_tiny = [\n torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size)\n ]\n\n # Specifying drop_last=True will cause the tail of the data to be dropped.\n dist_sampler = DistributedSampler(dataset=dataset, drop_last=True)\n local_num_samples, 
local_dataset_size = (\n dist_sampler.num_samples,\n dist_sampler.total_size,\n )\n # The effective dataset size should be the greatest integer that is <=\n # dataset_size that is divisible by the world_size. This is to ensure each\n # rank processes the same number of samples.\n effective_dataset_size = (\n math.ceil((dataset_size - world_size) / world_size)\n if dataset_size % world_size != 0\n else dataset_size / world_size\n )\n self.assertEqual(local_num_samples, effective_dataset_size)\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler))\n self.assertEqual(len(indices_list), local_num_samples)\n\n def validate_global_samples(local_num_samples):\n # Ensure that each rank processes the same number of samples.\n world_samples = [\n torch.LongTensor([0]).to(self.rank) for _ in range(world_size)\n ]\n dist.all_gather(\n world_samples, torch.tensor([local_num_samples]).to(self.rank)\n )\n world_samples = [sample.item() for sample in world_samples]\n self.assertEqual(len(set(world_samples)), 1)\n\n validate_global_samples(local_num_samples)\n\n # drop_last=False is the default and will add additional indices to be sampled,\n # increasing the effective dataset size.\n dist_sampler_added_samples = DistributedSampler(dataset=dataset)\n local_num_samples, local_dataset_size = (\n dist_sampler_added_samples.num_samples,\n dist_sampler_added_samples.total_size,\n )\n # The effective dataset size is the smallest integer that is >= dataset_size\n # and divisible by the world size.\n self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size))\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler_added_samples))\n self.assertEqual(len(indices_list), local_num_samples)\n\n # Ensure that each rank processes the same number of samples.\n validate_global_samples(local_num_samples)\n\n # Ensure additional samples are padded even when\n # the extremely small dataset is given.\n dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny)\n local_num_samples, local_dataset_size = (\n dist_sampler_added_samples_tiny.num_samples,\n dist_sampler_added_samples_tiny.total_size,\n )\n self.assertEqual(\n local_num_samples, math.ceil(dataset_tiny_size / world_size)\n )\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler_added_samples_tiny))\n self.assertEqual(len(indices_list), local_num_samples)\n validate_global_samples(local_num_samples)\n\n @require_backend({\"nccl\", \"gloo\"})\n @require_n_gpus_for_nccl_backend(\n int(os.environ[\"WORLD_SIZE\"]), os.environ[\"BACKEND\"]\n )\n def test_allgather_object(self):\n # Only set device for NCCL backend since it must use GPUs.\n backend = os.environ[\"BACKEND\"]\n if backend == \"nccl\":\n # Case where rank != GPU device.\n next_rank = (self.rank + 1) % int(self.world_size)\n torch.cuda.set_device(next_rank)\n\n # If GPU test, add object with GPU tensor\n if backend == \"nccl\":\n COLLECTIVES_OBJECT_TEST_LIST.append(Foo(torch.randn(3, 3, device=0)))\n\n gather_objects = COLLECTIVES_OBJECT_TEST_LIST\n\n output_gathered = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(\n output_gathered, gather_objects[self.rank % len(gather_objects)]\n )\n\n for i, val in enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n output_gathered = [None for _ in range(dist.get_world_size())]\n 
dist.all_gather_object(\n output_gathered, gather_objects[self.rank % len(gather_objects)]\n )\n\n @require_backend({\"gloo\"})\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support gather\")\n def test_gather_object(self):\n # Ensure stateful objects can be gathered\n gather_objects = COLLECTIVES_OBJECT_TEST_LIST\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n my_rank = dist.get_rank()\n dist.gather_object(\n gather_objects[self.rank % len(gather_objects)],\n object_gather_list=output_gathered\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n if my_rank != gather_on_rank:\n self.assertEqual(\n output_gathered, [None for _ in range(dist.get_world_size())]\n )\n else:\n for i, val in enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n # Validate errors when objects can't be pickled.\n class Bar:\n pass\n\n b = Bar()\n gather_objects = [b for _ in range(dist.get_world_size())]\n with self.assertRaisesRegex(AttributeError, \"Can't pickle local object\"):\n dist.all_gather_object(\n [None for _ in range(dist.get_world_size())],\n gather_objects[self.rank],\n )\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_gather_object_err(self):\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n # Case where rank != GPU device.\n my_rank = dist.get_rank()\n next_rank = (my_rank + 1) % dist.get_world_size()\n torch.cuda.set_device(next_rank)\n with self.assertRaisesRegex(\n RuntimeError, \"ProcessGroupNCCL does not support gather\"\n ):\n dist.gather_object(\n \"foo\",\n object_gather_list=output_gathered\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n\n def validate_net_equivalence(self, net):\n # Helper to validate synchronization of nets across ranks.\n net_module_states = list(net.module.state_dict().values())\n # Check that all tensors in module's state_dict() are equal.\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for tensor in tensor_list:\n self.assertEqual(tensor, t)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_sync_params_and_buffers(self):\n # Test that after calling _sync_params_and_buffers, models across ranks\n # are the same and are equal to the model on the input rank.\n dim = 2\n rank = self.rank\n rank_to_broadcast = 1\n # Seed to ensure that ranks are initialized with different initial models.\n torch.manual_seed(rank)\n model = nn.Linear(dim, dim, bias=False)\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n new_model = nn.Linear(dim, dim, bias=False).cuda(rank)\n net.module = copy.deepcopy(new_model)\n # Assert params are different\n net_module_states = list(net.module.state_dict().values())\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for i, tensor in enumerate(tensor_list):\n if i == rank:\n self.assertEqual(t, tensor)\n else:\n # tensor from another rank should be different.\n self.assertNotEqual(t, tensor)\n\n net._sync_params_and_buffers(authoritative_rank=rank_to_broadcast)\n # Now all model params should be the 
same.\n self.validate_net_equivalence(net)\n # Since the network params were broadcast from rank_to_broadcast, validate that\n # they are the same as new_model on rank_to_broadcast.\n if rank == rank_to_broadcast:\n expected_states = new_model.state_dict().values()\n for t, expected in zip(net_module_states, expected_states):\n self.assertEqual(t, expected)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_grad_div_uneven_inputs(self):\n # Test gradient division during training with join() API. If\n # divide_by_initial_world_size=False, we scale by the effective world\n # size when allreducing grads.\n dim = 5\n batch = 1\n grad_scale = 50\n rank = self.rank\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.ones(batch, dim, device=self.rank) * grad_scale\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n n_iters = 3\n if self.rank > 0:\n n_iters += 2\n\n with net.join(divide_by_initial_world_size=False):\n for _ in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n # The grad is always expected_grad, since we divide by the number\n # of currently active processes and inactive processes contribute\n # zero gradient. If we kept dividing by static initial world\n # size as processes leave, the grad would be smaller.\n expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grads so that it's the same every iteration\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n # If divide_by_initial_world_size=True (default), we always scale grads\n # by the initial world_size.\n with net.join(divide_by_initial_world_size=True):\n for i in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n effective_ws = dist.get_world_size()\n if i >= 3:\n effective_ws -= 1\n expected_grad = (\n torch.ones(dim, dim, device=self.rank)\n * grad_scale\n * effective_ws\n ) / dist.get_world_size()\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grad so that it's the same every iteration.\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n def _test_ddp_profiling(self, profiler_ctx):\n batch = 3\n dim = 10\n num_iters = 6\n torch.cuda.set_device(self.rank)\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.rand(batch, dim, device=self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n profiler_ctx_copy = copy.deepcopy(profiler_ctx)\n\n with profiler_ctx as prof:\n for i in range(num_iters):\n loss = net(inp).sum()\n loss.backward()\n\n all_reduce_event_name = f\"{dist.get_backend()}:all_reduce\"\n events = get_profiling_event(all_reduce_event_name, prof)\n event_count = sum(e.count for e in events)\n self.assertEqual(event_count, num_iters)\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, all_reduce_event_name)\n\n broadcast_event_name = f\"{dist.get_backend()}:broadcast\"\n broadcast_events = get_profiling_event(broadcast_event_name, prof)\n event_count = sum(e.count for e in broadcast_events)\n # Broadcast is called during rebuild_buckets\n self.assertGreaterEqual(event_count, 1)\n for event in broadcast_events:\n self.assertEqual(event.name, broadcast_event_name)\n\n # Run DDP with profiling for a few 
iterations, then enable profiling\n # for a single pass, and ensure it is recorded. This tests that the\n # thread local state is correctly updated.\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n for i in range(3):\n loss = net(inp).sum()\n loss.backward()\n # Now enable the profiler.\n with profiler_ctx_copy as prof:\n loss = net(inp).sum()\n loss.backward()\n\n events = get_profiling_event(all_reduce_event_name, prof)\n self.assertGreaterEqual(len(events), 1)\n self.assertGreaterEqual(events[0].count, 1)\n self.assertEqual(events[0].name, all_reduce_event_name)\n for event in events:\n self.assertTrue(event.is_async)\n # Ensure searching unused parameters was profiled\n events = get_profiling_event(\"search_unused_parameters\", prof)\n self.assertEqual(len(events), 1)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_profiling_autograd_profiler(self):\n autograd_profiler_ctx = torch.autograd.profiler.profile()\n return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_ddp_profiling_torch_profiler(self):\n cpu_act = torch.profiler.ProfilerActivity.CPU\n cuda_act = torch.profiler.ProfilerActivity.CUDA\n torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act])\n self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_join_model_equivalence(self):\n # Verifies equivalence with model training locally and with DDP under\n # the join context manager.\n batch = 3\n dim = 10\n learning_rate = 0.03\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.rand(batch, dim, device=self.rank)\n local_model = copy.deepcopy(model)\n local_model = local_model.cuda(self.rank)\n rank_to_iter_mapping = {\n rank: 2 * (rank + 1) for rank in range(dist.get_world_size())\n }\n # run local model\n local_iters = sum(rank_to_iter_mapping.values())\n local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate)\n for _ in range(local_iters):\n local_optim.zero_grad()\n out = local_model(inp)\n loss = out.sum()\n loss.backward()\n local_optim.step()\n\n # run DDP model with join API\n num_iters = rank_to_iter_mapping[self.rank]\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank), device_ids=[self.rank]\n )\n ddp_optim = torch.optim.SGD(\n model.parameters(), lr=learning_rate * dist.get_world_size()\n )\n with net.join():\n for i in range(num_iters):\n ddp_optim.zero_grad()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n torch.cuda.synchronize(device=self.rank)\n ddp_optim.step()\n\n # Validate model state dicts are equal\n for (_, local_tensor), (_, dist_tensor) in zip(\n local_model.state_dict().items(), net.module.state_dict().items()\n ):\n self.assertEqual(local_tensor, dist_tensor)\n\n def _run_uneven_inputs_test(\n self,\n test_case,\n iteration_mapping,\n find_unused_params,\n ):\n model = test_case.model\n inp = test_case.inp\n rank = 
self.rank\n sync_interval = test_case.sync_interval\n torch.cuda.set_device(rank)\n # Ensure all outstanding GPU work is complete so this test runs independently.\n dist.barrier()\n # Bucket_cap_mb is intentionally low to test allreduce scheduling when\n # there are many buckets.\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank),\n device_ids=[rank],\n bucket_cap_mb=1,\n find_unused_parameters=find_unused_params,\n )\n # Register hook if specified\n if test_case.hook is not None:\n net.register_comm_hook(test_case.state, test_case.hook)\n print(f\"registered hook {test_case.hook}\")\n\n # Determine num iters for this rank via the passed in mapping.\n num_iters = iteration_mapping[rank]\n # If we throw when earliest rank terminates, we should ensure\n # that we iterate for that minimum number of times.\n num_iters_tensor = torch.tensor(\n [num_iters], device=torch.cuda.current_device()\n )\n dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN)\n min_num_iters = num_iters_tensor.item()\n total_iters = 0\n if test_case.throw_on_early_termination:\n if min_num_iters == num_iters:\n # Early termination rank(s)\n exception_ctx = self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} exhausted all inputs\"\n )\n else:\n # Non early termination rank\n exception_ctx = self.assertRaisesRegex(\n RuntimeError,\n \"Detected at least one rank that exhausted inputs.\",\n )\n else:\n exception_ctx = suppress()\n with exception_ctx:\n with net.join(\n throw_on_early_termination=test_case.throw_on_early_termination\n ):\n for i in range(num_iters):\n # Use model.no_sync() to disable grad synchronization every\n # sync_interval.\n if i % sync_interval != 0:\n context = net.no_sync()\n else:\n context = suppress()\n with context:\n if isinstance(inp, tuple):\n loss = net(*inp).sum()\n else:\n loss = net(inp).sum()\n loss.backward()\n self._model_step(net)\n # Ensure completion of GPU kernels (including allreduce). If the\n # join API is not properly implemented, then this should hang\n # since the allreduce will hang.\n torch.cuda.synchronize(device=rank)\n total_iters += 1\n if test_case.throw_on_early_termination:\n # Ensure we iterated min_num_iters times.\n self.assertEqual(total_iters, min_num_iters)\n else:\n # Ensure we iterated at least min_num_iters times.\n self.assertGreaterEqual(total_iters, min_num_iters)\n\n # Ensure completion of all GPU kernels.\n torch.cuda.synchronize(device=rank)\n # When throwing on early rank termination, we do not\n # broadcast model state from an authoritative rank. 
All models\n # should already be in sync.\n if not test_case.throw_on_early_termination:\n self.assertTrue(net._authoritative_rank)\n # All ranks should have agreed on the same authoritative_rank!\n final_rank_tensor = torch.tensor(\n [net._authoritative_rank], device=self.rank\n )\n tensor_list = [\n torch.zeros_like(final_rank_tensor)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, final_rank_tensor)\n max_rank = dist.get_world_size() - 1\n self.assertSetEqual(\n {max_rank}, set(tensor.item() for tensor in tensor_list)\n )\n # Ensure that all models are the same across ranks after all have joined.\n self.validate_net_equivalence(net)\n # Ensure that running with DDP uneven inputs was logged.\n ddp_logging_data = net._get_ddp_logging_data()\n self.assertTrue(ddp_logging_data.get(\"join_uneven_inputs\"))\n dist.barrier()\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_inputs_stop_iteration_sync_bn(self):\n # Tests that uneven inputs join handler correctly throws StopIteration\n # for models with SyncBN or general collective comm when\n # throw_on_early_termination=True.\n class ModelWithComm(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(2, 40, bias=False)\n\n def forward(self, x):\n x = self.lin(x)\n dist.all_reduce(x)\n return x\n\n torch.cuda.set_device(self.rank)\n model_bn = BN_NET\n model_bn = nn.SyncBatchNorm.convert_sync_batchnorm(\n copy.deepcopy(model_bn)\n ).cuda(self.rank)\n comm_model = ModelWithComm().cuda(self.rank)\n model_input = torch.randn(10, 2).cuda(torch.cuda.current_device())\n\n for model in [model_bn, comm_model]:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n )\n min_num_iters = 5\n if self.rank != 0:\n # Early termination rank(s)\n num_iters = min_num_iters\n exception_ctx = self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} exhausted all inputs\"\n )\n else:\n # Non early termination rank\n num_iters = min_num_iters * 2\n exception_ctx = self.assertRaisesRegex(\n RuntimeError,\n \"Detected at least one rank that exhausted inputs.\",\n )\n n = 0\n with exception_ctx:\n with model.join(throw_on_early_termination=True):\n for i in range(num_iters):\n loss = model(model_input).sum()\n loss.backward()\n self._model_step(model)\n n += 1\n\n self.assertEqual(n, min_num_iters)\n # Verify model equivalence\n self.validate_net_equivalence(model)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_inputs(self):\n dim = 1000\n batch = 1\n # Create a variety of models to run uneven input tests on.\n large_model = nn.Sequential(\n nn.Conv2d(1, 20, 5),\n nn.ReLU(),\n nn.Conv2d(20, 32, 5),\n nn.ReLU(),\n nn.Conv2d(32, 256, 5),\n nn.ReLU(),\n )\n small_model = nn.Linear(dim, dim, bias=False)\n bn_net = BatchNormNet()\n\n class UnusedParamModule(nn.Module):\n def __init__(self, unused_params_rank):\n super().__init__()\n self.t0 = Task()\n self.t1 = Task()\n self.unused_params_rank = unused_params_rank\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p)\n\n def forward(self, x, rank):\n return (\n self.t1(self.t0(x))\n if rank != self.unused_params_rank\n else self.t1(x)\n )\n\n unjoined_rank_with_unused_params_model = UnusedParamModule(1)\n joined_rank_with_unused_params_model = 
UnusedParamModule(0)\n\n rank = self.rank\n models_to_test = [\n # Network with batchnorm\n DDPUnevenTestInput(\n name=\"batch_norm_net\",\n model=bn_net,\n inp=torch.ones(batch, 2, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"large_conv_model\",\n model=large_model,\n inp=torch.ones(batch, batch, dim, dim, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"small_model\",\n model=small_model,\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n # Unused parameter test where rank that does not join early has unused params\n DDPUnevenTestInput(\n name=\"unjoined_rank_with_unused_params_model\",\n model=unjoined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n sync_interval=1,\n ),\n # Unused parameter test where rank that does join early has unused params\n DDPUnevenTestInput(\n name=\"joined_rank_with_unused_params_model\",\n model=joined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n sync_interval=1,\n ),\n ]\n\n # Test models that have hook installed.\n models_with_hook = [\n DDPUnevenTestInput(\n name=\"small_model_allreduce_hook\",\n model=small_model,\n hook=default.allreduce_hook,\n state=None,\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"small_model_power_sgd_hook\",\n model=small_model,\n hook=powerSGD.powerSGD_hook,\n state=powerSGD.PowerSGDState(\n process_group=None,\n matrix_approximation_rank=1,\n # Config so that powerSGD runs immediately instead of\n # allreduce.\n start_powerSGD_iter=1,\n warm_start=False,\n use_error_feedback=False,\n ),\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n ]\n models_to_test.extend(models_with_hook)\n\n # Add resnet model if we have torchvision installed.\n if HAS_TORCHVISION:\n resnet_model = torchvision.models.resnet50()\n models_to_test.append(\n DDPUnevenTestInput(\n name=\"resnet_model\",\n model=resnet_model,\n inp=torch.ones(1, 3, 1000, 1000),\n sync_interval=1,\n )\n )\n\n # Test with no_sync every 2, 3, 4, ... 
iterations.\n models_with_sync = []\n for i, test_input in enumerate(models_to_test):\n models_with_sync.append(\n DDPUnevenTestInput(\n name=test_input.name,\n model=test_input.model,\n inp=test_input.inp,\n sync_interval=i + 2,\n )\n )\n\n throw_on_early_term_tests = []\n for test_input in models_to_test:\n throw_on_early_term_tests.append(\n DDPUnevenTestInput(\n name=test_input.name,\n model=test_input.model,\n inp=test_input.inp,\n sync_interval=test_input.sync_interval,\n throw_on_early_termination=True,\n )\n )\n\n models_to_test.extend(models_with_sync)\n models_to_test.extend(throw_on_early_term_tests)\n\n # 0 iteration tests for when one process does not train model at all, so\n # we must shadow the broadcast calls made when rebuilding buckets.\n baseline_num_iters = [0, 5]\n iteration_offsets = [2, 3, 10]\n num_uneven_ranks = [1]\n if dist.get_world_size() > 2:\n num_uneven_ranks.append(2)\n iteration_mappings = []\n # Generate rank : num_iters mappings for various uneven input scenarios.\n # This includes cases where rank 0 joins early and all other ranks join\n # later, and scenarios where multiple ranks join early, but at different\n # iterations, and later ranks join later.\n for num_early_join_ranks in num_uneven_ranks:\n for baseline_iter in baseline_num_iters:\n for offset in iteration_offsets:\n mapping = {\n rank: baseline_iter\n for rank in range(0, num_early_join_ranks)\n }\n # if num_early_join_ranks > 1, ranks > 0 that will join early\n # iterate offset//2 more times than rank 0, to test nodes\n # depleting inputs at different times.\n if num_early_join_ranks > 1:\n for rank in mapping.keys():\n if rank > 0:\n mapping[rank] += offset // 2\n mapping.update(\n {\n rank: baseline_iter + offset\n for rank in range(\n num_early_join_ranks, dist.get_world_size()\n )\n }\n )\n iteration_mappings.append(mapping)\n\n for (test_case, iteration_mapping) in itertools.product(\n models_to_test, iteration_mappings\n ):\n if self.rank == 0:\n print(\n f\"\"\"Running test: {test_case.name} sync interval\n {test_case.sync_interval} with iteration mapping\n {iteration_mapping}\"\"\"\n )\n self._run_uneven_inputs_test(\n test_case,\n iteration_mapping,\n find_unused_params=(\"unused_params_model\" in test_case.name),\n )\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_input_join_disable(self):\n # tests that if net.join() with enable=False is specified, DDP works as\n # expected with even inputs.\n torch.manual_seed(self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1) * self.rank\n n_iters = 5\n world_size = dist.get_world_size()\n with net.join(enable=False):\n for _ in range(n_iters):\n # Clear grads\n grad = net.module.weight.grad\n if grad is not None:\n grad.requires_grad_(False)\n grad.zero_()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n # Validate gradients to ensure that we divide by the correct\n # world_size when join mode is disabled.\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(net.module.weight.grad.item(), expected_grad)\n\n join_config = net._join_config\n self.assertFalse(join_config.enable)\n self.validate_net_equivalence(net)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support 
DistributedDataParallel\",\n )\n def test_ddp_uneven_input_exception(self):\n # Tests that exceptions during training are correctly propagated by the\n # context manager.\n error_str = \"Intentional error\"\n\n class ExceptionModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.param = nn.Parameter(torch.ones(1, requires_grad=True))\n\n def forward(self, _):\n raise ValueError(error_str)\n\n exception_module = ExceptionModule()\n net = torch.nn.parallel.DistributedDataParallel(\n exception_module.cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1)\n with self.assertRaisesRegex(ValueError, error_str):\n with net.join():\n out = net(inp)\n loss = out.sum()\n loss.backward()\n\n @require_backend({\"nccl\", \"gloo\"})\n @require_n_gpus_for_nccl_backend(\n int(os.environ[\"WORLD_SIZE\"]), os.environ[\"BACKEND\"]\n )\n def test_broadcast_object_list(self):\n # Only set device for NCCL backend since it must use GPUs.\n # Case where rank != GPU device.\n next_rank = (self.rank + 1) % int(self.world_size)\n backend = os.environ[\"BACKEND\"]\n if backend == \"nccl\":\n torch.cuda.set_device(next_rank)\n\n src_rank = 0\n # If GPU test, add object with GPU tensor\n if backend == \"nccl\":\n COLLECTIVES_OBJECT_TEST_LIST.append(Foo(torch.randn(3, 3, device=0)))\n\n objects = (\n COLLECTIVES_OBJECT_TEST_LIST\n if self.rank == src_rank\n else [None for _ in COLLECTIVES_OBJECT_TEST_LIST]\n )\n\n # Single object test with device specified. Backend=\"gloo\", device=cpu\n if backend != \"nccl\":\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(\n single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0]\n )\n dist.broadcast_object_list(\n single_obj_list, src=0, group=None, device=torch.device(\"cpu\")\n )\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test with device specified. Backend=\"gloo\", device=current_device+1\n # The test is gated by the fact GPU count is the same as world size to avoid the case\n # when backend is gloo but there is no multiple GPU devices.\n if backend != \"nccl\" and torch.cuda.device_count() == int(self.world_size):\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(\n single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0]\n )\n dist.broadcast_object_list(\n single_obj_list, src=0, group=None, device=torch.device(next_rank)\n )\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test with device specified. 
Backend=\"nccl\", device=current_device+1\n if backend == \"nccl\" and torch.cuda.device_count() == int(self.world_size):\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(\n single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0]\n )\n dist.broadcast_object_list(\n single_obj_list, src=0, group=None, device=torch.device(next_rank)\n )\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test: backward compatibility with device unspecified\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n dist.broadcast_object_list(single_obj_list, src=0)\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Multiple input objects test\n if self.rank != src_rank:\n self.assertNotEqual(objects, COLLECTIVES_OBJECT_TEST_LIST)\n dist.broadcast_object_list(objects, src=0)\n self.assertEqual(objects, COLLECTIVES_OBJECT_TEST_LIST)\n\n def _test_ddp_ignore_params_arg(self, static_graph=False):\n class TestModel(nn.Module):\n def __init__(self, rank):\n self.rank = rank\n super(TestModel, self).__init__()\n self.fc1 = nn.Linear(1, 1, bias=False)\n # Proxy that will be materialized to another architecture later.\n # (after wrapping model with DDP)\n if self.rank == 0:\n self.fc2 = nn.Linear(1, 10, bias=False)\n else:\n self.fc2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n device_id = self.rank\n # Ensure the test works for both find_unused_parameter and broadcast_buffer settings.\n for (find_unused, broadcast_buffers) in itertools.product(\n [False, True], [False, True]\n ):\n model = TestModel(self.rank).float().to(device_id)\n # Note that the model can have different shape buffers if we pass\n # them in to be ignored as well.\n model.fc2.register_buffer(\n \"ignore_buffer\", torch.zeros(5 + self.rank, device=self.rank)\n )\n proxy_params = list(model.fc2.parameters())\n proxy_buffers = list(model.fc2.buffers())\n model_fc2_name = [\n module_name\n for module_name, module in model.named_modules()\n if module is model.fc2\n ][0]\n proxy_param_names = [\n f\"{model_fc2_name}.{param_name}\"\n for param_name, _ in model.fc2.named_parameters()\n ]\n proxy_buffer_names = [\n f\"{model_fc2_name}.{buf_name}\"\n for buf_name, _ in model.fc2.named_buffers()\n ]\n # Specify that we should ignore proxy_params since it will be\n # materialized later.\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, proxy_param_names + proxy_buffer_names\n )\n ddp = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[device_id],\n find_unused_parameters=find_unused,\n broadcast_buffers=broadcast_buffers,\n )\n if static_graph:\n ddp._set_static_graph()\n # Materialize new params. 
These are not registered in DDP and thus\n # don't have autograd hooks installed on them.\n ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id)\n # local model with the new materialized parameters.\n local_model = copy.deepcopy(ddp.module).cuda(self.rank)\n\n inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1)\n for i in range(6):\n ddp(inp).sum().backward()\n local_model(inp).sum().backward()\n # materialized param grad is not touched by DDP, so its grad should\n # be the same as if running locally.\n for materialized_param, local_param in zip(\n ddp.module.fc2.parameters(), local_model.fc2.parameters()\n ):\n self.assertEqual(materialized_param.grad, local_param.grad)\n\n # fc1 parameter grad should still be different, due to allreduce.\n for synced_param, local_param in zip(\n ddp.module.fc1.parameters(), local_model.fc1.parameters()\n ):\n self.assertFalse(synced_param.grad == local_param.grad)\n\n # Proxy module grad should not be touched\n for proxy_param in proxy_params:\n self.assertTrue(proxy_param.grad is None)\n\n # Synchronize since we run multiple iterations of this test, to\n # isolate failure hangs.\n torch.cuda.synchronize(device=self.rank)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_ignore_params_arg(self):\n self._test_ddp_ignore_params_arg(static_graph=False)\n self._test_ddp_ignore_params_arg(static_graph=True)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_unused_params_rebuild_buckets_exception(self):\n class ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10, bias=False)\n self.net2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n return self.net1(x)\n\n ddp = torch.nn.parallel.DistributedDataParallel(\n ToyModel().cuda(self.rank), device_ids=[self.rank]\n )\n for i in range(2):\n inp = torch.rand(1, 10)\n if i > 0:\n # On 2nd iteration, this will fail during rebuild_buckets,\n # but we should report an error regarding unused parameters\n # since that is the underlying root cause.\n try:\n ddp(inp).sum().backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(ddp, msg)\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"net2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(\n True, \"DDP unused parameters error not raised.\"\n )\n else:\n ddp(inp).sum().backward()\n\n dist.barrier()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_shared_grad_acc_unused_params(self):\n # When find_unused_parameters=True, ensure we mark unused parameters\n # even if they share gradient accumulators.\n class ToyModel(nn.Module):\n def __init__(self):\n 
super(ToyModel, self).__init__()\n # net1, bias, and net1.bias are all unused params.\n self.net1 = nn.Linear(10, 5, bias=False)\n self.bias = nn.Parameter(torch.zeros(5))\n # net1.bias and self.bias are names for the same underlying\n # parameter, so they share the same grad acc. This caused\n # the bug reported in https://github.com/pytorch/pytorch/issues/41324.\n self.net1.bias = self.bias\n self.net2 = nn.Linear(10, 5)\n\n def forward(self, x):\n return self.net2(x)\n\n torch.cuda.set_device(self.rank)\n model = ToyModel().to(torch.cuda.current_device())\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.rank], find_unused_parameters=True\n )\n inp = torch.randn(20, 10, device=self.rank)\n for i in range(6):\n out = ddp_model(inp)\n loss = out.sum()\n loss.backward()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_device(self):\n m = nn.Linear(10, 10).to(self.rank)\n expected_len = 2\n\n class TensorWrapper:\n __slots__ = [\"t\", \"moved_to_gpu\"]\n\n def __init__(self, t):\n self.t = t\n self.moved_to_gpu = False\n\n # Handlers for specific types of validation we want to do based on\n # the input type.\n\n def tuple_and_list_validator(x):\n self.assertTrue(len(x), expected_len)\n self.assertEqual(1, len(set(t.device for t in x)))\n self.assertEqual(x[0].device.index, self.rank)\n return x[0] + x[1]\n\n def namedtuple_validator(x):\n self.assertEqual(x._fields, EXPECTED_FIELDS)\n self.assertEqual(x.a.device.index, x.b.device.index)\n self.assertEqual(x.a.device.index, self.rank)\n return x.a + x.b\n\n def custom_type_validator(x):\n self.assertTrue(x.moved_to_gpu or (str(x.t.device) == \"cpu\"))\n x.t = x.t.to(self.rank)\n x.moved_to_gpu = True\n return x.t\n\n def dict_validator(x):\n self.assertTrue(EXPECTED_FIELDS[0] in x.keys())\n self.assertTrue(EXPECTED_FIELDS[1] in x.keys())\n self.assertEqual(1, len(set(t.device for t in x.values())))\n self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank)\n return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]]\n\n validators = {\n TensorWrapper: custom_type_validator,\n tuple: tuple_and_list_validator,\n list: tuple_and_list_validator,\n TestNamedTupleInput_0: namedtuple_validator,\n TestNamedTupleInput_1: namedtuple_validator,\n dict: dict_validator,\n }\n\n class ToyModel(torch.nn.Module):\n def __init__(_self): # noqa: B902\n super().__init__()\n _self.lin = nn.Linear(10, 10, bias=False)\n\n def forward(_self, x, expected_type): # noqa: B902\n # Similar to scatter, the recursive to in the single-device\n # case does not move tensors if they are in a custom type.\n self.assertTrue(isinstance(x, expected_type))\n fwd_tensor = validators[expected_type](x)\n return _self.lin(fwd_tensor)\n\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel().to(self.rank), device_ids=[self.rank]\n )\n\n def train_iter(inp, input_type):\n for _ in range(4):\n out = model(inp, input_type)\n out.sum().backward()\n\n # CPU tuple input, should be moved to the proper device before call\n # to forward.\n inp = tuple(torch.randn(10, 10) for _ in range(expected_len))\n train_iter(inp, tuple)\n\n # List CPU input, should be moved to proper device before call to\n # forward.\n inp = [torch.randn(10, 10) for _ in range(expected_len)]\n train_iter(inp, list)\n # Custom type containing tensor. 
The type is maintained, but the\n # device is not propagated (which is what happens with scatter too)\n inp = TensorWrapper(torch.randn(10, 10))\n train_iter(inp, TensorWrapper)\n # NamedTuple input. The type should be maintained and tensor inputs\n # should be moved to the correct device as in scatter.\n batch = 5\n dim = 10\n a = torch.rand(batch, dim)\n b = torch.rand(batch, dim)\n\n inp = TestNamedTupleInput_0(a, b)\n train_iter(inp, type(inp))\n\n inp = TestNamedTupleInput_1(a, b)\n train_iter(inp, type(inp))\n\n # dictionary input.\n inp = {\n EXPECTED_FIELDS[0]: a,\n EXPECTED_FIELDS[1]: b,\n }\n train_iter(inp, type(inp))\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_namedtuple(self):\n batch = 5\n dim = 10\n\n a = torch.rand(batch, dim, device=self.rank)\n b = torch.rand(batch, dim, device=self.rank)\n\n class NamedTupleModule(torch.nn.Module):\n def __init__(_self): # noqa: B902\n super().__init__()\n _self.lin = nn.Linear(10, 1)\n\n def forward(_self, input, expected_type): # noqa: B902\n # Without NamedTuple support, this would be of type tuple.\n self.assertTrue(\n isinstance(input, expected_type),\n f\"Expected type {expected_type} but got {type(input)}\",\n )\n self.assertEqual(input._fields, EXPECTED_FIELDS)\n self.assertEqual(a, input.a)\n self.assertEqual(b, input.b)\n return _self.lin(torch.mul(input.a, input.b))\n\n model = torch.nn.parallel.DistributedDataParallel(\n NamedTupleModule().cuda(self.rank), device_ids=[self.rank]\n )\n inp = TestNamedTupleInput_0(a, b)\n # The following would fail if DDP does not propagate NamedTuples correctly.\n model(inp, type(inp))\n\n inp = TestNamedTupleInput_1(a, b)\n model(inp, type(inp))\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_control_flow_same_across_ranks(self):\n # Control flow that is the same across ranks.\n batch = 20\n dim = 10\n\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n random_input = torch.randn(batch, dim, device=self.rank)\n ones_input = torch.ones(batch, dim, device=self.rank)\n for i in range(6):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n # On even iterations, 2nd param goes unused, on odd iterations,\n # it is used.\n local_used_maps = model.reducer._get_local_used_maps()\n if i % 2 == 0:\n expected = torch.tensor(\n [world_size, 0], device=self.rank, dtype=torch.int32\n )\n else:\n expected = torch.tensor(\n [world_size, world_size], device=self.rank, dtype=torch.int32\n )\n\n # Validate parameter usage.\n variable_usage_tensor = local_used_maps[0]\n self.assertEqual(variable_usage_tensor, expected)\n\n # Validate appropriate error message when DDP is used with\n # find_unused_parameters=False.\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=False,\n )\n for i in range(2):\n if i == 0:\n loss = model(random_input).sum()\n loss.backward()\n else:\n try:\n loss = model(random_input).sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(model, msg)\n # 2nd linear layer is unused\n 
unused_param_index = 1\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}\",\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"lin2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_invalid_static_graph(self):\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n )\n model._set_static_graph()\n random_input = torch.randn(20, 10, device=self.rank)\n ones_input = torch.ones(20, 10, device=self.rank)\n # unused parameter in the first iteration got used\n # in second iteration.\n expected_err = \"Your training graph has changed in this iteration\"\n with self.assertRaisesRegex(RuntimeError, expected_err):\n for i in range(2):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n\n verify_ddp_error_logged(model, expected_err)\n\n # used parameter in the first iteration got unused\n # in second iteration.\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected to have finished reduction in the prior iteration \"\n \"before starting a new one. 
This error indicates that your \"\n \"training graph has changed in this iteration\",\n ):\n for i in range(2):\n if i % 2 != 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n\n verify_ddp_error_logged(model, \"Expected to have finished reduction\")\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_control_flow_different_across_ranks(self):\n # Control flow that is different across ranks.\n batch = 20\n dim = 10\n\n class ToyModel(nn.Module):\n def __init__(self, rank):\n super(ToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n self.lin2 = nn.Linear(10, 10, bias=False)\n self.rank = rank\n\n def forward(self, x):\n # Control-flow that is rank and input dependent for the\n # model.\n use_second_layer = (\n torch.equal(x, torch.ones(batch, dim, device=x.device))\n and self.rank == 1\n )\n\n if use_second_layer:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel(self.rank).cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n random_input = torch.randn(batch, dim, device=self.rank)\n ones_input = torch.ones(batch, dim, device=self.rank)\n for i in range(6):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n # On even iterations, 2nd param goes unused, on odd iterations,\n # it is used only on rank 1.\n local_used_maps = model.reducer._get_local_used_maps()\n\n if i % 2 == 0:\n expected = torch.tensor(\n [world_size, 0], device=self.rank, dtype=torch.int32\n )\n else:\n expected = torch.tensor(\n [world_size, 1], device=self.rank, dtype=torch.int32\n )\n\n variable_usage_tensor = local_used_maps[0]\n # Validate parameter usage. 
On odd iterations, 2nd param is only\n # used on rank 1.\n self.assertEqual(variable_usage_tensor, expected)\n\n # Validate appropriate error message when DDP is used with\n # find_unused_parameters=False.\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel(self.rank).cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=False,\n )\n for i in range(2):\n if i == 0:\n loss = model(random_input).sum()\n loss.backward()\n else:\n try:\n loss = model(random_input).sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(model, msg)\n unused_param_index = 1\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}\",\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"lin2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n @require_backend({\"gloo\"})\n @sandcastle_skip_if(BACKEND == \"nccl\", \"NCCL does not support scatter\")\n def test_scatter_object_list(self):\n src_rank = 0\n scatter_list = (\n COLLECTIVES_OBJECT_TEST_LIST\n if self.rank == src_rank\n else [None for _ in COLLECTIVES_OBJECT_TEST_LIST]\n )\n world_size = dist.get_world_size()\n scatter_list = scatter_list[:world_size]\n i = 0\n while len(scatter_list) < world_size:\n scatter_list.append(scatter_list[i])\n i += 1\n\n output_obj_list = [None]\n dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank)\n self.assertEqual(\n output_obj_list[0],\n COLLECTIVES_OBJECT_TEST_LIST[\n self.rank % len(COLLECTIVES_OBJECT_TEST_LIST)\n ],\n )\n # Ensure errors are raised upon incorrect arguments.\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected argument scatter_object_output_list to be a list of size at least 1.\",\n ):\n dist.scatter_object_list([], scatter_list, src=src_rank)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_model_diff_across_ranks(self):\n group_gloo = dist.new_group(\n timeout=timedelta(seconds=60), backend=dist.Backend.GLOO\n )\n # Set NCCL_BLOCKING_WAIT and use a new NCCL group to improve test\n # determinism.\n os.environ[\"NCCL_BLOCKING_WAIT\"] = \"1\"\n group_to_use = dist.new_group(\n backend=dist.get_backend(), timeout=timedelta(seconds=5)\n )\n torch.cuda.set_device(self.rank)\n # Creates network with different sized embedding table on different\n # ranks. This should throw an error during DDP init.\n net = EmbeddingNet(self.rank)\n # When running with NCCL backend, we don't expect an error on rank 0,\n # rather, it will be taken down by NCCL_ASYNC_ERROR_HANDLING. 
When\n # running with Gloo or with debug mode wrapper, we expect the error\n # to be caught inline.\n is_detail_dbg_mode = (\n dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL\n )\n rank_0_ctx = (\n self.assertRaisesRegex(\n RuntimeError, \"Caught collective operation timeout\"\n )\n if dist.get_backend(group_to_use) == dist.Backend.NCCL\n and not is_detail_dbg_mode\n # Gloo can raise various exception messages, so just assert\n # Runtime error here.\n else self.assertRaises(RuntimeError)\n )\n ctx = (\n rank_0_ctx\n if self.rank == 0\n else self.assertRaisesRegex(RuntimeError, \"appears not to match\")\n )\n with ctx:\n net = torch.nn.parallel.DistributedDataParallel(\n net.to(self.rank),\n device_ids=[self.rank],\n process_group=group_to_use,\n )\n # Should only be run by rank 0, and blocking_wait catches and\n # reports exception.\n dist.barrier(group_to_use)\n\n # Perform gloo-based barrier to ensure one rank doesn't exit test\n # early which causes failure with Barrier.sync.\n dist.barrier(group_gloo)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_output_unused_in_loss(self):\n model = TwoLinLayerNet()\n # Need copy of model to pass into 2nd DDP ctor otherwise autograd hooks\n # on first DDP reducer will execute!\n model_copy = copy.deepcopy(model)\n net = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(self.rank),\n device_ids=[self.rank],\n )\n net_with_find_unused = torch.nn.parallel.DistributedDataParallel(\n model_copy.cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n\n inp = torch.randn(10, 10)\n\n for ddp in [net, net_with_find_unused]:\n for i in range(2):\n if i == 0:\n a, b = ddp(inp)\n loss = b.sum()\n loss.backward()\n else:\n try:\n a, b = ddp(inp)\n loss = b.sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n unused_index = 0\n unused_index_substr = (\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_index}\"\n )\n if ddp == net:\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n unused_index_substr,\n ]\n unexpected_strs = [\n ddp_find_unused_params_enabled_str,\n ]\n elif ddp == net_with_find_unused:\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_outputs_not_used_in_loss_str,\n ddp_find_unused_params_enabled_str,\n unused_index_substr,\n ]\n unexpected_strs = [\n ddp_recommend_find_unused_params_str,\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if (\n dist._get_debug_mode()\n == dist._DistributedDebugLevel.OFF\n ):\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"a.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(\n s in msg, f\"Expected {s} to be in {msg}\"\n )\n for s in unexpected_strs:\n self.assertFalse(\n s in msg, f\"Expected {s} not to be in {msg}\"\n )\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n def _test_different_graph_across_ranks(\n self, find_unused_parameters=False, static_graph=False\n ):\n class ToyModel(nn.Module):\n def __init__(self, rank):\n super(ToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n 
self.lin2 = nn.Linear(10, 10, bias=False)\n self.rank = rank\n\n def forward(self, x):\n if self.rank == 0:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n torch.manual_seed(31415)\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = ToyModel(self.rank).cuda(self.rank)\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n find_unused_parameters=find_unused_parameters,\n gradient_as_bucket_view=True,\n )\n if static_graph:\n ddp_model._set_static_graph()\n random_input = torch.randn(20, 10, device=self.rank)\n for i in range(10):\n out = ddp_model(random_input)\n loss = out.sum()\n loss.backward()\n return ddp_model\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_different_graph_across_ranks(self):\n base_model = self._test_different_graph_across_ranks(\n find_unused_parameters=True\n )\n self.assertFalse(\n base_model._get_ddp_logging_data().get(\"has_rebuilt_buckets\", 0)\n )\n static_model = self._test_different_graph_across_ranks(static_graph=True)\n self.assertTrue(\n static_model._get_ddp_logging_data().get(\"has_rebuilt_buckets\", 0)\n )\n for i, j in zip(base_model.parameters(), static_model.parameters()):\n self.assertEqual(i, j)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"MacOS uses uv transport which does not have as robust error handling as tcp transport\",\n )\n def test_monitored_barrier_gloo(self):\n tensors = [torch.ones(10) * self.rank]\n # Kick off some allreduce work on all ranks\n for _ in range(10):\n dist.all_reduce(torch.cat(tensors))\n # Run monitored barrier and ensure it passes\n timeout = timedelta(seconds=2)\n dist.monitored_barrier(timeout=timeout)\n # Check monitored_barrier success with wait_all_ranks=True\n for _ in range(10):\n dist.all_reduce(torch.cat(tensors))\n dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)\n # All ranks besides 1 call into barrier, rank 0 should report failure\n # while others report gloo error.\n failed_rank = 1\n src_rank = 0\n if self.rank == src_rank:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {failed_rank} failed to pass monitoredBarrier\"\n ):\n dist.monitored_barrier(timeout=timeout)\n elif self.rank != failed_rank:\n # Other ranks should not pass barrier since rank 0 failed.\n err_regex = (\n f\"Rank {self.rank} successfully reached monitoredBarrier,\"\n f\" but received errors while waiting to be unblocked by rank\"\n f\" {src_rank}\"\n )\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout)\n\n # We need a barrier since otherwise failed_rank exits too early\n # and causes a timeout.\n self._barrier(timeout=30)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n def test_monitored_barrier_gloo_subgroup(self):\n # Tests that monitored_barrier works as expected on non-default\n # process groups.\n failed_rank = 1\n timeout = 0.1\n subgroup = dist.new_group(ranks=[0, 1])\n\n if self.rank == failed_rank:\n return\n\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {failed_rank} failed to pass monitoredBarrier\"\n ):\n dist.monitored_barrier(subgroup, timeout)\n else:\n # Other ranks call into monitored_barrier, but this should be a\n # noop because they are not part of the subgroup. 
Verify that\n # there are no errors here.\n dist.monitored_barrier(subgroup, timeout)\n\n def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks):\n # tests expected behavior when nonzero rank hangs.\n nccl_pg = dist.new_group(\n ranks=list(i for i in range(int(self.world_size))),\n timeout=timedelta(seconds=2),\n backend=dist.Backend.NCCL,\n )\n gloo_pg = dist.new_group(\n ranks=list(i for i in range(int(self.world_size))),\n backend=dist.Backend.GLOO,\n )\n tensors = [torch.ones(10, device=self.rank) * self.rank]\n # Let all ranks call allreduce first to set up communicators etc.\n # Directly simulating error here will run into store issue described\n # in https://github.com/pytorch/pytorch/issues/54524.\n nccl_pg.allreduce(tensors).wait()\n # All ranks besides 0 call into allreduce. This is to simulate a\n # desync across the world, where some ranks call into\n # monitored_barrier() and others are stuck in collective comm. In\n # practice, we don't need NCCL_BLOCKING_WAIT, but we use it in this\n # test to ensure it exits cleanly.\n if self.rank != 0:\n # Can get different errors here depending on whether gloo-based\n # wrapper PG is enabled or not, since with wrapper pg, it will\n # fail in a collective synchronization check and not actually\n # call into the nccl pg.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL:\n err_regex = \"Timed out waiting\"\n else:\n err_regex = \"Caught collective operation timeout\"\n with self.assertRaisesRegex(RuntimeError, err_regex):\n nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1))\n else:\n # Rank 0 should report first (in order) timed out rank or all ranks\n # depending on wait_all_ranks flag passed into monitored_barrier.\n if wait_all_ranks:\n rank_str = \", \".join(\n [str(i) for i in range(1, int(self.world_size))]\n )\n err_regex = f\"Ranks {rank_str} failed to pass monitoredBarrier\"\n else:\n expected_first_fail_rank = 1\n err_regex = f\"Rank {expected_first_fail_rank} failed to pass monitoredBarrier\"\n monitored_barrier_timeout_seconds = timedelta(seconds=0.1)\n with self.assertRaisesRegex(RuntimeError, err_regex):\n gloo_pg.monitored_barrier(\n monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks\n )\n\n @with_nccl_blocking_wait\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_rocm\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_monitored_barrier_allreduce_hang(self):\n # tests expected behavior when nonzero rank hangs and we want to\n # report first timed out rank.\n self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False)\n\n @with_nccl_blocking_wait\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_rocm\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_monitored_barrier_allreduce_hang_wait_all_ranks(self):\n # tests expected behavior when nonzero rank hangs and we want to\n # report all timed out ranks.\n self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n def test_monitored_barrier_gloo_rank_0_timeout(self):\n # tests error when rank 0 exhausts its given timeout.\n process_group = dist.new_group(\n ranks=list(i for i in range(int(self.world_size)))\n )\n timeout = timedelta(seconds=0)\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} timed out in monitoredBarrier\"\n ):\n process_group.monitored_barrier(timeout)\n\n 
@require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @skip_if_small_worldsize\n @sandcastle_skip_if(\n IS_MACOS or IS_WINDOWS,\n \"MacOS uses uv transport which does not have as robust error handling as tcp transport\",\n )\n def test_monitored_barrier_failure_order(self):\n # Ensure that the first (in sorted order) rank is reported when\n # multiple ranks fail to pass the monitored_barrier.\n # TODO(#54879): Provide ability to wait and report all failed ranks\n expected_first_failed_rank = 2\n timeout = timedelta(seconds=2)\n src_rank = 0\n if self.rank == src_rank:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {expected_first_failed_rank}\"\n ):\n dist.monitored_barrier(timeout=timeout)\n elif self.rank == 1:\n err_regex = (\n f\"Rank {self.rank} successfully reached monitoredBarrier,\"\n f\" but received errors while waiting to be unblocked by rank\"\n f\" {src_rank}\"\n )\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @skip_if_small_worldsize\n def test_monitored_barrier_wait_all_ranks(self):\n # Tests simple case where > 1 rank does not call into monitored\n # barrier and verifies all ranks are reported by rank 0.\n if self.rank == 0:\n timeout = timedelta(seconds=0.1)\n rank_str = \", \".join([str(i) for i in range(1, int(self.world_size))])\n err_regex = f\"Ranks {rank_str} failed to pass monitoredBarrier\"\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_build_param_to_name_mapping(self):\n model = TwoLinLayerNet()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n expected_mapping = {0: \"a.weight\", 1: \"b.weight\"}\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertDictEqual(expected_mapping, param_to_name_mapping)\n\n # Test when DDP is used with ignored parameters.\n model = TwoLinLayerNet()\n # Parameters to ignore are in the format {module_name}.{param_name}\n params_to_ignore = [\"a.weight\"]\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, params_to_ignore\n )\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n expected_mapping = {0: \"b.weight\"}\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertDictEqual(expected_mapping, param_to_name_mapping)\n\n # Test errors are raised when DDP and module parameters mismatch.\n # This generally indicates a bug with DDP and is not expected to\n # happen in user applications.\n model = TwoLinLayerNet()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n net_params, _ = net._build_params_for_reducer()\n if self.rank == 0:\n print(type(net_params[0][0]))\n\n net_params[0].extend(\n [\n torch.nn.Parameter(torch.ones(1)),\n torch.nn.Parameter(torch.ones(1)),\n ]\n )\n\n with self.assertRaisesRegex(ValueError, \"Expected param to name mapping\"):\n net._build_param_to_name_mapping(net_params)\n\n net_params[0] = net_params[0][:-3]\n with self.assertRaisesRegex(ValueError, \"Param with name\"):\n 
net._build_param_to_name_mapping(net_params)\n\n net_params[0].extend(\n [\n torch.nn.Parameter(torch.ones(1)),\n torch.nn.Parameter(torch.ones(1)),\n ]\n )\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_ddp_build_param_to_name_mapping_requires_grad(self):\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(10, 10)\n # Is not tracked by DDP and should not show up in param to\n # name mapping.\n self.lin.bias.requires_grad_(False)\n\n def forward(self, x):\n return self.lin(x)\n\n model = Net()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank), device_ids=[self.rank]\n )\n expected_mapping = {\n 0: \"lin.weight\",\n }\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertEqual(param_to_name_mapping, expected_mapping)\n\n def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse):\n debug_mode_off = dist._get_debug_mode() == dist._DistributedDebugLevel.OFF\n\n class SubModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.embedding_net = EmbeddingNet(0)\n self.lin = TwoLinLayerNet()\n self.bn = BatchNormNet()\n self.lin_layer = nn.Linear(4, 10, bias=False)\n\n def forward(self, x):\n x = self.bn(x)\n x = self.lin_layer(x)\n x = self.lin.a(x) # self.lin.b param unused\n # EmbeddingNet entirely unused: self.embedding_net.embedding and\n # self.embedding_net.lin unused.\n return x\n\n class MyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.sub_module = SubModule()\n\n def forward(self, x):\n return self.sub_module(x)\n\n model = MyModel()\n sparse_embedding_fqns = []\n if ignore_sparse:\n for module_name, module in model.named_modules():\n if module == model.sub_module.embedding_net.embedding:\n for parameter_name, param in module.named_parameters(\n recurse=False\n ):\n fqn = f\"{module_name}.{parameter_name}\"\n sparse_embedding_fqns.append(fqn)\n\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, sparse_embedding_fqns\n )\n unused_modules = [\n model.sub_module.embedding_net.lin,\n model.sub_module.lin.b,\n ]\n else:\n unused_modules = list(model.sub_module.embedding_net.modules()) + [\n model.sub_module.lin.b,\n ]\n\n expected_unused_param_fqns = []\n used_param_fqns = [] # Validate that these don't mistakenly show up.\n fqn_to_param_index = {}\n index = 0\n for module_name, module in model.named_modules():\n for parameter_name, param in module.named_parameters(recurse=False):\n fqn = f\"{module_name}.{parameter_name}\"\n fqn_to_param_index[fqn] = index\n if fqn not in sparse_embedding_fqns:\n index += 1\n if module in unused_modules:\n expected_unused_param_fqns.append(fqn)\n else:\n if (\n not ignore_sparse\n or module != model.sub_module.embedding_net.embedding\n ):\n used_param_fqns.append(fqn)\n\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n batch, dim = 10, 2\n inp = torch.ones(batch, dim)\n for i in range(2):\n if i == 0:\n out = net(inp)\n loss = out.sum()\n loss.backward()\n else:\n try:\n out = net(inp)\n loss = out.sum()\n loss.backward()\n except RuntimeError as e:\n e = str(e)\n\n unused_param_substr = e[e.find(\"did not receive grad\") :]\n # Validate that each unused param fully qualified name\n # shows up in error logs. 
We do this instead of\n # constructing a joined string since order of parameters\n # can be different in Reducer. In addition, validate\n # param indices show up as well.\n for unused_param_fqn in expected_unused_param_fqns:\n self.assertTrue(\n unused_param_fqn in unused_param_substr\n or debug_mode_off\n )\n self.assertTrue(\n str(fqn_to_param_index[unused_param_fqn])\n in unused_param_substr,\n f\"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}\",\n )\n\n # Validate that used param fqns don't show up in error\n # logs.\n for used_param_fqn in used_param_fqns:\n self.assertFalse(used_param_fqn in unused_param_substr)\n # Validate that ignored param fqns don't show up as unused\n # (since DDP does not track them)\n for sparse_param_fqn in sparse_embedding_fqns:\n self.assertFalse(sparse_param_fqn in unused_param_substr)\n else:\n self.assertTrue(False, \"Expected error was not raised!\")\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_multiple_nested_unused_params_error(self):\n self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_multiple_nested_unused_params_err_ignore_params(self):\n # Tests unused parameter reporting when DDP is configured to ignore\n # certain parameters.\n self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_ddp_inference(self):\n # tests that DDP module can be run on a single node with no_grad\n # or eval setting and there is no hang.\n rank = self.rank\n torch.cuda.set_device(rank)\n model = Net().cuda()\n local_model = copy.deepcopy(model)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n syncbn_model = nn.SyncBatchNorm(\n 2, momentum=0.99, track_running_stats=False\n ).cuda()\n local_syncbn_model = copy.deepcopy(syncbn_model)\n syncbn_model = torch.nn.parallel.DistributedDataParallel(\n syncbn_model, device_ids=[rank]\n )\n inp = torch.randn(10, 2, device=rank)\n inp_syncbn = torch.randn(10, 2, 4, 4, device=rank)\n tests = [\n (model, local_model, inp),\n (syncbn_model, local_syncbn_model, inp_syncbn),\n ]\n for test in tests:\n test_model, test_local_model, test_inp = test\n if self.rank == 0:\n test_model.eval()\n test_local_model.eval()\n for _ in range(6):\n self.assertEqual(\n test_model(test_inp), test_local_model(test_inp)\n )\n\n # Barrier since only rank 0 runs inference. 
Test should be\n # much faster than 30s, but this is to avoid flakiness.\n self._barrier(timeout=30)\n\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_ddp_sync_bn_training_vs_eval(self):\n rank = self.rank\n torch.cuda.set_device(rank)\n # Need to set track_running_stats=False, when track_running_stats=True,\n # bn_training is False and sync could not occur in eval model.\n model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda(\n rank\n )\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])\n # Test sync occurs in training mode.\n with torch.autograd.profiler.profile() as prof:\n for i in range(6):\n inp = torch.randn(10, 2, 4, 4).cuda(rank)\n out = model(inp)\n loss = out.sum()\n loss.backward()\n\n # SyncBN allgathers stats across all ranks, so verify call to\n # all_gather in profiler.\n if BACKEND == \"nccl\":\n all_gather_calls = get_profiling_event(\"_all_gather_base\", prof)\n else:\n all_gather_calls = get_profiling_event(\"all_gather\", prof)\n self.assertNotEqual([], all_gather_calls)\n\n # Only do inference on one rank. If SyncBN did collective stats sync,\n # this would hang/error.\n model_inference = model.module\n if self.rank == 0:\n model_inference.eval()\n with torch.autograd.profiler.profile() as prof:\n for i in range(6):\n inp = torch.randn(10, 2, 4, 4).cuda(rank)\n out = model_inference(inp)\n loss = out.sum()\n loss.backward()\n\n # Ensure sync does not occur in eval() mode.\n if BACKEND == \"nccl\":\n all_gather_calls = get_profiling_event(\"_all_gather_base\", prof)\n else:\n all_gather_calls = get_profiling_event(\"all_gather\", prof)\n self.assertEqual([], all_gather_calls)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_python_error_logged(self):\n # Most python exceptions in DDP are raised during init before\n # reducer is constructed, so we don't have a logger in those cases.\n # However, the below is one example where a python error is thrown\n # after reducer is constructed.\n model = TwoLinLayerNet().cuda(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n )\n expected_err = \"must be callable\"\n with self.assertRaisesRegex(TypeError, expected_err):\n model.register_comm_hook({}, {})\n\n verify_ddp_error_logged(model, expected_err)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_static_graph_nested_types(self):\n # Tests for static graph training when outputs are not just tensors\n # but can be (nested) tuple, list, dict, etc.\n rank = self.rank\n torch.cuda.set_device(rank)\n\n class NestedOutputModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(100, 1, bias=False)\n\n def forward(self, inp, output_type):\n if output_type == \"tuple\":\n return (\n self.lin(inp),\n (\n self.lin(inp),\n self.lin(inp),\n ),\n )\n elif output_type == \"list\":\n return [\n self.lin(inp),\n [\n self.lin(inp),\n self.lin(inp),\n ],\n ]\n elif output_type == \"dict\":\n return {\n \"a\": self.lin(inp),\n \"b\": {\n \"c\": self.lin(inp),\n },\n }\n\n def get_loss(model_output):\n loss = 0.0\n if isinstance(model_output, torch.Tensor):\n return model_output.sum()\n elif 
isinstance(model_output, dict):\n for value in model_output.values():\n loss += get_loss(value)\n elif isinstance(model_output, tuple) or isinstance(model_output, list):\n for x in model_output:\n loss += get_loss(x)\n else:\n raise ValueError(f\"Unknown model output type {type(model_output)}\")\n return loss\n\n model = NestedOutputModule().cuda(rank)\n model_static_graph = copy.deepcopy(model)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n model_static_graph = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n model_static_graph._set_static_graph()\n inp = torch.randn(10, 100)\n type_mapping = {\n \"list\": list,\n \"tuple\": tuple,\n \"dict\": dict,\n }\n for output_type in type_mapping.keys():\n for i in range(6):\n out = model(inp, output_type=output_type)\n loss = get_loss(out)\n loss.backward()\n self._model_step(model)\n out_static = model_static_graph(inp, output_type=output_type)\n self.assertTrue(isinstance(out_static, type_mapping[output_type]))\n loss_static = get_loss(out_static)\n loss_static.backward()\n self._model_step(model_static_graph)\n for (p, p_static) in zip(\n model.parameters(), model_static_graph.parameters()\n ):\n self.assertEqual(p, p_static)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_detect_ddp_is_actually_static(self):\n class ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10, bias=False)\n self.net2 = nn.Linear(10, 10)\n\n def forward(self, x, find_unused, dynamic):\n if find_unused:\n if dynamic:\n return self.net2(self.net1(x))\n else:\n return self.net2(x)\n else:\n return self.net2(self.net1(x))\n\n # Set of unused parameters don't change across iterations\n torch.cuda.set_device(self.rank)\n model = ToyModel().cuda()\n for find_unused in [True, False]:\n ddp = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n find_unused_parameters=find_unused,\n )\n inp = torch.randn(1, 10, device=\"cuda\")\n for _ in range(6):\n out = ddp(inp, find_unused=find_unused, dynamic=False)\n loss = out.sum()\n loss.backward()\n self.assertTrue(ddp.reducer._ddp_graph_static())\n\n # Set of unused parameters dynamically change\n ddp = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n inp = torch.randn(1, 10, device=\"cuda\")\n for i in range(6):\n out = ddp(inp, find_unused=True, dynamic=i % 2 == 0)\n loss = out.sum()\n loss.backward()\n self.assertFalse(ddp.reducer._ddp_graph_static())\n\n def _test_ddp_new_tensor_in_fwd(self, static_graph):\n # Test from https://github.com/pytorch/pytorch/issues/60733\n class MyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(10, 10, bias=False)\n self.fc2 = nn.Linear(10, 10, bias=False)\n\n def __init_opt(self):\n param = next(self.parameters())\n opt = torch.randn(1, 10, device=param.device)\n return opt\n\n def forward(self, x, opt_1, opt_2, opt_nested):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n if opt_1 is None:\n opt_1 = self.__init_opt()\n if opt_2 is None:\n opt_2 = self.__init_opt()\n if opt_nested is None or not torch.is_tensor(opt_nested):\n opt_nested = self.__init_opt()\n # Test multiple tensors as well as newly created tensors\n # within a struct.\n return x, opt_1, opt_2, {\"tensor\": opt_nested}\n\n model = MyModel().to(self.rank)\n for 
find_unused in [True, False]:\n ddp = DistributedDataParallel(\n model,\n device_ids=[self.rank],\n output_device=self.rank,\n broadcast_buffers=False,\n find_unused_parameters=find_unused,\n )\n\n if static_graph:\n ddp._set_static_graph()\n\n opt = [None for _ in range(3)]\n for i in range(2):\n ddp.zero_grad()\n x = torch.randn(1, 10, device=self.rank)\n out, opt[0], opt[1], opt[2] = ddp(\n x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2]\n )\n for i in range(len(opt)):\n if torch.is_tensor(opt[i]):\n self.assertEqual(opt[i].grad_fn, None)\n else:\n self.assertEqual(opt[i][\"tensor\"].grad_fn, None)\n out.mean().backward()\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_new_tensor_in_fwd(self):\n return self._test_ddp_new_tensor_in_fwd(static_graph=False)\n\n @skip_if_lt_x_gpu(2)\n @sandcastle_skip_if(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_new_tensor_in_fwd_static_graph(self):\n return self._test_ddp_new_tensor_in_fwd(static_graph=True)\n" ]
[ [ "torch.distributed.distributed_c10d._get_default_group", "torch.distributed.distributed_c10d.get_world_size", "torch.cuda.manual_seed", "torch.nn.functional.softmax", "torch.rand", "torch.distributed.is_gloo_available", "torch.nn.Conv2d", "torch.cat", "torch.cuda.amp.GradScaler", "torch.nn.BatchNorm1d", "torch.randn", "torch.distributed._get_debug_mode", "torch.save", "torch.distributed.gather_object", "torch.distributed.broadcast_object_list", "torch.device", "torch.testing._internal.common_utils.sandcastle_skip", "torch.distributed.get_world_size", "torch.nn.parallel.distributed._dump_DDP_relevant_env_vars", "torch.profiler.profile", "torch.testing._internal.common_distributed.captured_output", "torch.manual_seed", "torch.reshape", "torch.distributed.all_reduce", "torch.distributed.algorithms.ddp_comm_hooks.default_hooks._hook_then_optimizer", "torch.empty_like", "torch.zeros_like", "torch.testing._internal.common_distributed.initialize_temp_directories", "torch.zeros", "torch.distributed.ProcessGroupNCCL.Options", "torch.empty", "torch.distributed.get_backend", "torch.distributed.irecv", "torch.view_as_real", "torch.no_grad", "torch.cuda.synchronize", "torch.distributed.new_group", "torch.distributed.algorithms.ddp_comm_hooks.default_hooks._OptimizerHookState", "torch.cuda.is_available", "torch.autograd.profiler.profile", "torch.distributed.is_mpi_available", "torch.distributed._rank_not_in_group", "torch.distributed.distributed_c10d.AllreduceOptions", "torch.distributed.broadcast", "torch.backends.cudnn.flags", "torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook.PowerSGDState", "torch.distributed.scatter", "torch.cuda.set_device", "torch.distributed.get_rank", "torch.cuda.current_device", "torch.distributed.barrier", "torch.distributed.BroadcastOptions", "torch.distributed.is_nccl_available", "torch.distributed.recv", "torch.utils.data.distributed.DistributedSampler", "torch.mul", "torch.cuda.amp.autocast", "torch.cuda.Stream", "torch.testing._internal.common_distributed.requires_nccl_version", "torch.distributed.reduce", "torch.distributed.gather", "torch.nn.Module", "torch.distributed.PrefixStore", "torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook.PostLocalSGDState", "torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model", "torch.testing._internal.common_distributed.cleanup_temp_dir", "torch.cuda.device_count", "torch.distributed.all_to_all_single", "torch.distributed.scatter_object_list", "torch.ones_like", "torch.distributed.all_gather", "torch.load", "torch.DoubleTensor", "torch.distributed.broadcast_multigpu", "torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager", "torch.distributed.new_subgroups_by_enumeration", "torch.nn.Linear", "torch.nn.Embedding", "torch.testing._internal.common_utils.sandcastle_skip_if", "torch.equal", "torch.testing._internal.common_distributed.verify_ddp_error_logged", "torch.distributed.destroy_process_group", "torch.distributed.P2POp", "torch.distributed.batch_isend_irecv", "torch.nn.SyncBatchNorm", "torch.testing._internal.common_distributed.with_dist_debug_levels", "torch.distributed.all_to_all", "torch.ones", "torch.distributed.ProcessGroupNCCL", "torch.distributed.Backend", "torch.tensor", "torch.distributed.new_subgroups", "torch.distributed.send", "torch.nn.parallel.DistributedDataParallel", "torch.distributed.monitored_barrier", "torch.testing._internal.common_distributed.nccl_skip_if_lt_x_gpu", "torch.nn.MSELoss", "torch.is_tensor", "torch.randperm", 
"torch.cuda.stream", "torch.LongTensor", "torch.nn.ReLU", "torch.testing._internal.common_distributed.skip_if_lt_x_gpu" ] ]
lacrosse91/scikit-learn
[ "2a67d88258264eb2b6dfad221be8f8d61684dcba" ]
[ "sklearn/datasets/_twenty_newsgroups.py" ]
[ "\"\"\"Caching loader for the 20 newsgroups text classification dataset.\n\n\nThe description of the dataset is available on the official website at:\n\n http://people.csail.mit.edu/jrennie/20Newsgroups/\n\nQuoting the introduction:\n\n The 20 Newsgroups data set is a collection of approximately 20,000\n newsgroup documents, partitioned (nearly) evenly across 20 different\n newsgroups. To the best of my knowledge, it was originally collected\n by Ken Lang, probably for his Newsweeder: Learning to filter netnews\n paper, though he does not explicitly mention this collection. The 20\n newsgroups collection has become a popular data set for experiments\n in text applications of machine learning techniques, such as text\n classification and text clustering.\n\nThis dataset loader will download the recommended \"by date\" variant of the\ndataset and which features a point in time split between the train and\ntest sets. The compressed dataset size is around 14 Mb compressed. Once\nuncompressed the train set is 52 MB and the test set is 34 MB.\n\"\"\"\n# Copyright (c) 2011 Olivier Grisel <[email protected]>\n# License: BSD 3 clause\n\nimport os\nfrom os.path import dirname, join\nimport logging\nimport tarfile\nimport pickle\nimport shutil\nimport re\nimport codecs\n\nimport numpy as np\nimport scipy.sparse as sp\nimport joblib\n\nfrom . import get_data_home\nfrom . import load_files\nfrom ._base import _convert_data_dataframe\nfrom ._base import _pkl_filepath\nfrom ._base import _fetch_remote\nfrom ._base import RemoteFileMetadata\nfrom ..feature_extraction.text import CountVectorizer\nfrom .. import preprocessing\nfrom ..utils import check_random_state, Bunch\n\nlogger = logging.getLogger(__name__)\n\n# The original data can be found at:\n# https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz\nARCHIVE = RemoteFileMetadata(\n filename=\"20news-bydate.tar.gz\",\n url=\"https://ndownloader.figshare.com/files/5975967\",\n checksum=(\"8f1b2514ca22a5ade8fbb9cfa5727df9\" \"5fa587f4c87b786e15c759fa66d95610\"),\n)\n\nCACHE_NAME = \"20news-bydate.pkz\"\nTRAIN_FOLDER = \"20news-bydate-train\"\nTEST_FOLDER = \"20news-bydate-test\"\n\n\ndef _download_20newsgroups(target_dir, cache_path):\n \"\"\"Download the 20 newsgroups data and stored it as a zipped pickle.\"\"\"\n train_path = os.path.join(target_dir, TRAIN_FOLDER)\n test_path = os.path.join(target_dir, TEST_FOLDER)\n\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n logger.info(\"Downloading dataset from %s (14 MB)\", ARCHIVE.url)\n archive_path = _fetch_remote(ARCHIVE, dirname=target_dir)\n\n logger.debug(\"Decompressing %s\", archive_path)\n tarfile.open(archive_path, \"r:gz\").extractall(path=target_dir)\n os.remove(archive_path)\n\n # Store a zipped pickle\n cache = dict(\n train=load_files(train_path, encoding=\"latin1\"),\n test=load_files(test_path, encoding=\"latin1\"),\n )\n compressed_content = codecs.encode(pickle.dumps(cache), \"zlib_codec\")\n with open(cache_path, \"wb\") as f:\n f.write(compressed_content)\n\n shutil.rmtree(target_dir)\n return cache\n\n\ndef strip_newsgroup_header(text):\n \"\"\"\n Given text in \"news\" format, strip the headers, by removing everything\n before the first blank line.\n\n Parameters\n ----------\n text : str\n The text from which to remove the signature block.\n \"\"\"\n _before, _blankline, after = text.partition(\"\\n\\n\")\n return after\n\n\n_QUOTE_RE = re.compile(\n r\"(writes in|writes:|wrote:|says:|said:\" r\"|^In article|^Quoted from|^\\||^>)\"\n)\n\n\ndef 
strip_newsgroup_quoting(text):\n \"\"\"\n Given text in \"news\" format, strip lines beginning with the quote\n characters > or |, plus lines that often introduce a quoted section\n (for example, because they contain the string 'writes:'.)\n\n Parameters\n ----------\n text : str\n The text from which to remove the signature block.\n \"\"\"\n good_lines = [line for line in text.split(\"\\n\") if not _QUOTE_RE.search(line)]\n return \"\\n\".join(good_lines)\n\n\ndef strip_newsgroup_footer(text):\n \"\"\"\n Given text in \"news\" format, attempt to remove a signature block.\n\n As a rough heuristic, we assume that signatures are set apart by either\n a blank line or a line made of hyphens, and that it is the last such line\n in the file (disregarding blank lines at the end).\n\n Parameters\n ----------\n text : str\n The text from which to remove the signature block.\n \"\"\"\n lines = text.strip().split(\"\\n\")\n for line_num in range(len(lines) - 1, -1, -1):\n line = lines[line_num]\n if line.strip().strip(\"-\") == \"\":\n break\n\n if line_num > 0:\n return \"\\n\".join(lines[:line_num])\n else:\n return text\n\n\ndef fetch_20newsgroups(\n *,\n data_home=None,\n subset=\"train\",\n categories=None,\n shuffle=True,\n random_state=42,\n remove=(),\n download_if_missing=True,\n return_X_y=False,\n):\n \"\"\"Load the filenames and data from the 20 newsgroups dataset \\\n(classification).\n\n Download it if necessary.\n\n ================= ==========\n Classes 20\n Samples total 18846\n Dimensionality 1\n Features text\n ================= ==========\n\n Read more in the :ref:`User Guide <20newsgroups_dataset>`.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify a download and cache folder for the datasets. If None,\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n subset : {'train', 'test', 'all'}, default='train'\n Select the dataset to load: 'train' for the training set, 'test'\n for the test set, 'all' for both, with shuffled ordering.\n\n categories : array-like, dtype=str or unicode, default=None\n If None (default), load all the categories.\n If not None, list of category names to load (other categories\n ignored).\n\n shuffle : bool, default=True\n Whether or not to shuffle the data: might be important for models that\n make the assumption that the samples are independent and identically\n distributed (i.i.d.), such as stochastic gradient descent.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for dataset shuffling. Pass an int\n for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n remove : tuple, default=()\n May contain any subset of ('headers', 'footers', 'quotes'). Each of\n these are kinds of text that will be detected and removed from the\n newsgroup posts, preventing classifiers from overfitting on\n metadata.\n\n 'headers' removes newsgroup headers, 'footers' removes blocks at the\n ends of posts that look like signatures, and 'quotes' removes lines\n that appear to be quoting another post.\n\n 'headers' follows an exact standard; the other filters are not always\n correct.\n\n download_if_missing : bool, default=True\n If False, raise an IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n return_X_y : bool, default=False\n If True, returns `(data.data, data.target)` instead of a Bunch\n object.\n\n .. 
versionadded:: 0.22\n\n Returns\n -------\n bunch : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : list of shape (n_samples,)\n The data list to learn.\n target: ndarray of shape (n_samples,)\n The target labels.\n filenames: list of shape (n_samples,)\n The path to the location of the data.\n DESCR: str\n The full description of the dataset.\n target_names: list of shape (n_classes,)\n The names of target classes.\n\n (data, target) : tuple if `return_X_y=True`\n .. versionadded:: 0.22\n \"\"\"\n\n data_home = get_data_home(data_home=data_home)\n cache_path = _pkl_filepath(data_home, CACHE_NAME)\n twenty_home = os.path.join(data_home, \"20news_home\")\n cache = None\n if os.path.exists(cache_path):\n try:\n with open(cache_path, \"rb\") as f:\n compressed_content = f.read()\n uncompressed_content = codecs.decode(compressed_content, \"zlib_codec\")\n cache = pickle.loads(uncompressed_content)\n except Exception as e:\n print(80 * \"_\")\n print(\"Cache loading failed\")\n print(80 * \"_\")\n print(e)\n\n if cache is None:\n if download_if_missing:\n logger.info(\"Downloading 20news dataset. \" \"This may take a few minutes.\")\n cache = _download_20newsgroups(\n target_dir=twenty_home, cache_path=cache_path\n )\n else:\n raise IOError(\"20Newsgroups dataset not found\")\n\n if subset in (\"train\", \"test\"):\n data = cache[subset]\n elif subset == \"all\":\n data_lst = list()\n target = list()\n filenames = list()\n for subset in (\"train\", \"test\"):\n data = cache[subset]\n data_lst.extend(data.data)\n target.extend(data.target)\n filenames.extend(data.filenames)\n\n data.data = data_lst\n data.target = np.array(target)\n data.filenames = np.array(filenames)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset\n )\n\n module_path = dirname(__file__)\n with open(join(module_path, \"descr\", \"twenty_newsgroups.rst\")) as rst_file:\n fdescr = rst_file.read()\n\n data.DESCR = fdescr\n\n if \"headers\" in remove:\n data.data = [strip_newsgroup_header(text) for text in data.data]\n if \"footers\" in remove:\n data.data = [strip_newsgroup_footer(text) for text in data.data]\n if \"quotes\" in remove:\n data.data = [strip_newsgroup_quoting(text) for text in data.data]\n\n if categories is not None:\n labels = [(data.target_names.index(cat), cat) for cat in categories]\n # Sort the categories to have the ordering of the labels\n labels.sort()\n labels, categories = zip(*labels)\n mask = np.in1d(data.target, labels)\n data.filenames = data.filenames[mask]\n data.target = data.target[mask]\n # searchsorted to have continuous labels\n data.target = np.searchsorted(labels, data.target)\n data.target_names = list(categories)\n # Use an object array to shuffle: avoids memory copy\n data_lst = np.array(data.data, dtype=object)\n data_lst = data_lst[mask]\n data.data = data_lst.tolist()\n\n if shuffle:\n random_state = check_random_state(random_state)\n indices = np.arange(data.target.shape[0])\n random_state.shuffle(indices)\n data.filenames = data.filenames[indices]\n data.target = data.target[indices]\n # Use an object array to shuffle: avoids memory copy\n data_lst = np.array(data.data, dtype=object)\n data_lst = data_lst[indices]\n data.data = data_lst.tolist()\n\n if return_X_y:\n return data.data, data.target\n\n return data\n\n\ndef fetch_20newsgroups_vectorized(\n *,\n subset=\"train\",\n remove=(),\n data_home=None,\n download_if_missing=True,\n return_X_y=False,\n normalize=True,\n 
as_frame=False,\n):\n \"\"\"Load and vectorize the 20 newsgroups dataset (classification).\n\n Download it if necessary.\n\n This is a convenience function; the transformation is done using the\n default settings for\n :class:`~sklearn.feature_extraction.text.CountVectorizer`. For more\n advanced usage (stopword filtering, n-gram extraction, etc.), combine\n fetch_20newsgroups with a custom\n :class:`~sklearn.feature_extraction.text.CountVectorizer`,\n :class:`~sklearn.feature_extraction.text.HashingVectorizer`,\n :class:`~sklearn.feature_extraction.text.TfidfTransformer` or\n :class:`~sklearn.feature_extraction.text.TfidfVectorizer`.\n\n The resulting counts are normalized using\n :func:`sklearn.preprocessing.normalize` unless normalize is set to False.\n\n ================= ==========\n Classes 20\n Samples total 18846\n Dimensionality 130107\n Features real\n ================= ==========\n\n Read more in the :ref:`User Guide <20newsgroups_dataset>`.\n\n Parameters\n ----------\n subset : {'train', 'test', 'all'}, default='train'\n Select the dataset to load: 'train' for the training set, 'test'\n for the test set, 'all' for both, with shuffled ordering.\n\n remove : tuple, default=()\n May contain any subset of ('headers', 'footers', 'quotes'). Each of\n these are kinds of text that will be detected and removed from the\n newsgroup posts, preventing classifiers from overfitting on\n metadata.\n\n 'headers' removes newsgroup headers, 'footers' removes blocks at the\n ends of posts that look like signatures, and 'quotes' removes lines\n that appear to be quoting another post.\n\n data_home : str, default=None\n Specify an download and cache folder for the datasets. If None,\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n download_if_missing : bool, default=True\n If False, raise an IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n return_X_y : bool, default=False\n If True, returns ``(data.data, data.target)`` instead of a Bunch\n object.\n\n .. versionadded:: 0.20\n\n normalize : bool, default=True\n If True, normalizes each document's feature vector to unit norm using\n :func:`sklearn.preprocessing.normalize`.\n\n .. versionadded:: 0.22\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric, string, or categorical). The target is\n a pandas DataFrame or Series depending on the number of\n `target_columns`.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n bunch : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data: {sparse matrix, dataframe} of shape (n_samples, n_features)\n The input data matrix. If ``as_frame`` is `True`, ``data`` is\n a pandas DataFrame with sparse columns.\n target: {ndarray, series} of shape (n_samples,)\n The target labels. If ``as_frame`` is `True`, ``target`` is a\n pandas Series.\n target_names: list of shape (n_classes,)\n The names of target classes.\n DESCR: str\n The full description of the dataset.\n frame: dataframe of shape (n_samples, n_features + 1)\n Only present when `as_frame=True`. Pandas DataFrame with ``data``\n and ``target``.\n\n .. versionadded:: 0.24\n\n (data, target) : tuple if ``return_X_y`` is True\n `data` and `target` would be of the format defined in the `Bunch`\n description above.\n\n .. 
versionadded:: 0.20\n \"\"\"\n data_home = get_data_home(data_home=data_home)\n filebase = \"20newsgroup_vectorized\"\n if remove:\n filebase += \"remove-\" + (\"-\".join(remove))\n target_file = _pkl_filepath(data_home, filebase + \".pkl\")\n\n # we shuffle but use a fixed seed for the memoization\n data_train = fetch_20newsgroups(\n data_home=data_home,\n subset=\"train\",\n categories=None,\n shuffle=True,\n random_state=12,\n remove=remove,\n download_if_missing=download_if_missing,\n )\n\n data_test = fetch_20newsgroups(\n data_home=data_home,\n subset=\"test\",\n categories=None,\n shuffle=True,\n random_state=12,\n remove=remove,\n download_if_missing=download_if_missing,\n )\n\n if os.path.exists(target_file):\n try:\n X_train, X_test, feature_names = joblib.load(target_file)\n except ValueError as e:\n raise ValueError(\n f\"The cached dataset located in {target_file} was fetched \"\n f\"with an older scikit-learn version and it is not compatible \"\n f\"with the scikit-learn version imported. You need to \"\n f\"manually delete the file: {target_file}.\"\n ) from e\n else:\n vectorizer = CountVectorizer(dtype=np.int16)\n X_train = vectorizer.fit_transform(data_train.data).tocsr()\n X_test = vectorizer.transform(data_test.data).tocsr()\n feature_names = vectorizer.get_feature_names()\n\n joblib.dump((X_train, X_test, feature_names), target_file, compress=9)\n\n # the data is stored as int16 for compactness\n # but normalize needs floats\n if normalize:\n X_train = X_train.astype(np.float64)\n X_test = X_test.astype(np.float64)\n preprocessing.normalize(X_train, copy=False)\n preprocessing.normalize(X_test, copy=False)\n\n target_names = data_train.target_names\n\n if subset == \"train\":\n data = X_train\n target = data_train.target\n elif subset == \"test\":\n data = X_test\n target = data_test.target\n elif subset == \"all\":\n data = sp.vstack((X_train, X_test)).tocsr()\n target = np.concatenate((data_train.target, data_test.target))\n else:\n raise ValueError(\n \"%r is not a valid subset: should be one of \"\n \"['train', 'test', 'all']\" % subset\n )\n\n module_path = dirname(__file__)\n with open(join(module_path, \"descr\", \"twenty_newsgroups.rst\")) as rst_file:\n fdescr = rst_file.read()\n\n frame = None\n target_name = [\"category_class\"]\n\n if as_frame:\n frame, data, target = _convert_data_dataframe(\n \"fetch_20newsgroups_vectorized\",\n data,\n target,\n feature_names,\n target_names=target_name,\n sparse_data=True,\n )\n\n if return_X_y:\n return data, target\n\n return Bunch(\n data=data,\n target=target,\n frame=frame,\n target_names=target_names,\n feature_names=feature_names,\n DESCR=fdescr,\n )\n" ]
[ [ "numpy.searchsorted", "numpy.in1d", "numpy.arange", "numpy.array", "numpy.concatenate", "scipy.sparse.vstack" ] ]
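A short usage sketch for the loaders above; the first call downloads and caches the archive under `~/scikit_learn_data`, so network access is assumed:

```python
from sklearn.datasets import fetch_20newsgroups, fetch_20newsgroups_vectorized

# Raw text, with headers/footers/quotes stripped to avoid overfitting on metadata.
bunch = fetch_20newsgroups(subset="train", remove=("headers", "footers", "quotes"))
print(len(bunch.data), bunch.target_names[:3])

# Pre-vectorized bag-of-words counts, L2-normalized by default.
X, y = fetch_20newsgroups_vectorized(subset="train", return_X_y=True)
print(X.shape, y.shape)
```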
ZiningWang/Sparse_Pooling
[ "a160ddf9a03ef53bad630b4ac186a8437bd0475c" ]
[ "MV3D_TF_release/lib/datasets/voc_eval.py" ]
[ "# --------------------------------------------------------\n# Fast/er R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Bharath Hariharan\n# --------------------------------------------------------\n\nimport xml.etree.ElementTree as ET\nimport os\nimport pickle\nimport numpy as np\nimport pdb\ndef parse_rec(filename):\n \"\"\" Parse a PASCAL VOC xml file \"\"\"\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\n int(bbox.find('ymin').text),\n int(bbox.find('xmax').text),\n int(bbox.find('ymax').text)]\n objects.append(obj_struct)\n\n return objects\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\ndef voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5,\n use_07_metric=False):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\n\n Top level function that does the PASCAL VOC evaluation.\n\n detpath: Path to detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the annotations in a pickle file\n\n # first load gt\n if not os.path.isdir(cachedir):\n os.mkdir(cachedir)\n cachefile = os.path.join(cachedir, 'annots.pkl')\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n if not os.path.isfile(cachefile):\n # load annots\n recs = {}\n for i, imagename in enumerate(imagenames):\n recs[imagename] = parse_rec(annopath.format(imagename))\n if i % 100 == 0:\n print ('Reading annotation for {:d}/{:d}'.format(\n i + 1, len(imagenames)))\n # save\n print ('Saving cached annotations to {:s}'.format(cachefile))\n with open(cachefile, 'w') as f:\n cPickle.dump(recs, f)\n 
else:\n # load\n with open(cachefile, 'r') as f:\n recs = cPickle.load(f)\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read dets\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n if any(lines) == 1:\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n else:\n rec = -1\n prec = -1\n ap = -1\n\n return rec, prec, ap\n" ]
[ [ "numpy.sum", "numpy.sort", "numpy.cumsum", "numpy.zeros", "numpy.maximum", "numpy.finfo", "numpy.argsort", "numpy.argmax", "numpy.arange", "numpy.where", "numpy.max", "numpy.array", "numpy.concatenate", "numpy.minimum" ] ]
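An illustrative check of the AP computation above on a tiny synthetic precision/recall curve; the snippet restates the "correct AP" (area-under-curve) branch of `voc_ap` rather than importing the module:

```python
import numpy as np

rec = np.array([0.1, 0.4, 0.7, 0.9])
prec = np.array([1.0, 0.8, 0.6, 0.5])

# Append sentinels, take the precision envelope, then sum delta-recall * precision.
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
idx = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
print(round(float(ap), 4))  # 0.62 for this synthetic curve
```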
sadams2013/pvtools
[ "12bd9334a1335972519c81d0c01c6308aa597c39" ]
[ "pvtools.py" ]
[ "# Import standard libraries.\nimport json\n\n# Import external libraries.\nimport numpy as np\nimport pandas as pd\n\nclass dbSNP:\n \"\"\"Store dbSNP data for a gene.\n\n Parameters\n ----------\n dbsnp_file : str\n Path to a dbSNP file containing variant information.\n\n Attributes\n ----------\n df : pandas.DataFrame\n Dataframe containing dbSNP data.\n \"\"\"\n def __init__(self, dbsnp_file):\n self.df = pd.read_table(dbsnp_file)\n\n def get_ref(self, start, end):\n \"\"\"Return reference allele.\"\"\"\n try:\n i = (self.df['chromStart'] == start) & (self.df['chromEnd'] == end)\n result = self.df[i]['name'].values[0]\n except IndexError:\n result = None\n return result\n\nclass LookupTable:\n \"\"\"Store liftover data for a gene.\n\n Parameters\n ----------\n ng : Sequence\n Sequence object for RefSeqGene.\n g7 : Sequence\n Sequence object for GRCh37.\n g8 : Sequence\n Sequence object for GRCh38.\n\n Attributes\n ----------\n ng : Sequence\n Sequence object for RefSeqGene.\n g7 : Sequence\n Sequence object for GRCh37.\n g8 : Sequence\n Sequence object for GRCh38.\n df : pandas.DataFrame\n Dataframe containing liftover data.\n \"\"\"\n def __init__(self, ng, g7, g8):\n self.ng = ng\n self.g7 = g7\n self.g8 = g8\n self.df = self._build_lookup_table(ng, g7, g8)\n\n def _build_lookup_table(self, ng, g7, g8):\n ng_pos1 = np.arange(1, len(ng.seq)+1)\n ng_pos2 = ng_pos1 - ng.data['CDSStarts'][0]\n ng_pos3 = ng.liftover()\n g7_pos = list(range(g7.data['Start'], g7.data['End']+1))\n g8_pos = list(range(g8.data['Start'], g8.data['End']+1))\n allele = np.array(list(ng.seq))\n annot1 = ng.annotate(cds=False)\n annot2 = ng.annotate(cds=True)\n d = {'Start_Position': ng_pos1, 'ATG_Position': ng_pos2,\n 'Transcript_Position': ng_pos3, 'GRCh37_Position': g7_pos,\n 'GRCh38_Position': g8_pos, 'Allele': allele,\n 'Exon_Annotation': annot1, 'CDS_Annotation': annot2}\n return pd.DataFrame(d)\n\n def to_tsv(self, f):\n self.df.to_csv(f, sep='\\t', index=False)\n\n def find(self, system1, system2, value):\n try:\n result = self.df[self.df[system1] == value][system2].values[0]\n except IndexError:\n result = None\n return result\n\nclass Sequence:\n \"\"\"Store sequence data for a gene.\n\n Parameters\n ----------\n fasta_file : str\n Path to a FASTA file containing the DNA sequence.\n json_file : str\n Path to a JSON file containing metadata for the DNA sequence.\n\n Attributes\n ----------\n name : str\n Sequence identifier with the leading character '>' removed.\n seq : str\n DNA sequence.\n len : int\n Length of the DNA sequence.\n data : dict\n Metadata of the DNA sequence.\n \"\"\"\n def __init__(self, fasta_file, json_file=None):\n self.name, self.seq = self._read_fasta_file(fasta_file)\n self.len = len(self.seq)\n self.data = self._read_json_file(json_file)\n\n def _read_fasta_file(self, fasta_file):\n name = ''\n seq = ''\n with open(fasta_file) as f:\n name = next(f).strip().replace('>', '')\n for line in f:\n seq += line.strip()\n return name, seq\n\n def _read_json_file(self, json_file):\n if json_file is None:\n return None\n with open(json_file) as f:\n return json.load(f)\n\n def transcribe(self):\n \"\"\"Transcribe the DNA sequence.\n\n Returns\n -------\n str\n mRNA sequence.\n \"\"\"\n rna = ''\n for i in range(self.data['ExonCount']):\n start = self.data['ExonStarts'][i]\n end = self.data['ExonEnds'][i]\n rna += self.seq[start-1:end]\n return rna\n\n def get_exon_dataframe(self):\n \"\"\"Tabulate Exon data.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe containing Exon data.\n 
\"\"\"\n exon_starts = self.data['ExonStarts']\n exon_ends = self.data['ExonEnds']\n exon_names = [f'Exon {x+1}' for x in range(len(exon_starts))]\n intron_starts = [x+1 for x in exon_ends[:-1]]\n intron_ends = [x-1 for x in exon_starts[1:]]\n intron_names = [f'Intron {x+1}' for x in range(len(intron_starts))]\n upstream_start = 1\n upstream_end = exon_starts[0] - 1\n upstream_name = 'Upstream'\n downstream_start = exon_ends[-1] + 1\n downstream_end = len(self.seq)\n downstream_name = 'Downstream'\n starts = exon_starts + intron_starts + [upstream_start, downstream_start]\n ends = exon_ends + intron_ends + [upstream_end, downstream_end]\n names = exon_names + intron_names + [upstream_name, downstream_name]\n df = pd.DataFrame({'Name': names, 'Start': starts, 'End': ends})\n df = df.sort_values('Start')\n df = df.reset_index(drop=True)\n return df\n\n def get_cds_dataframe(self):\n \"\"\"Tabulate CDS data.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe containing CDS data.\n \"\"\"\n cds_starts = self.data['CDSStarts']\n cds_ends = self.data['CDSEnds']\n cds_names = [f'CDS {x+1}' for x in range(len(cds_starts))]\n\n intron_starts = [x+1 for x in cds_ends[:-1]]\n intron_ends = [x-1 for x in cds_starts[1:]]\n intron_names = [f'Intron {x+1}' for x in range(len(intron_starts))]\n\n exon_df = self.get_exon_dataframe()\n\n upstream_start = 1\n upstream_end = exon_df[exon_df.Name == 'Upstream'].End.values[0]\n upstream_name = 'Upstream'\n\n utr5_starts = []\n utr5_ends = []\n atg_pos = self.get_atg_pos()\n i = self.get_atg_exon_index()\n for x in range(self.data['ExonCount']):\n start = self.data['ExonStarts'][x]\n end = self.data['ExonEnds'][x]\n if x < i:\n utr5_starts.append(start)\n utr5_ends.append(end)\n elif x == i:\n utr5_starts.append(start)\n utr5_ends.append(atg_pos-1)\n else:\n break\n utr5_names = [f\"5' UTR Exon {x+1}\" for x in range(len(utr5_starts))]\n\n utr5_intron_starts = []\n utr5_intron_ends = []\n for utr5_end in utr5_ends[:-1]:\n utr5_intron_starts.append(utr5_end+1)\n for utr5_start in utr5_starts[1:]:\n utr5_intron_ends.append(utr5_start-1)\n utr5_intron_names = [f\"5' UTR Intron {x+1}\" for x in range(len(utr5_intron_starts))]\n\n utr3_starts = []\n utr3_ends = []\n stop_pos = self.get_stop_pos()\n i = self.get_stop_exon_index()\n for x in range(self.data['ExonCount']):\n start = self.data['ExonStarts'][x]\n end = self.data['ExonEnds'][x]\n if x < i:\n pass\n elif x == i:\n utr3_starts.append(stop_pos+1)\n utr3_ends.append(end)\n else:\n utr3_starts.append(start)\n utr3_ends.append(end)\n utr3_names = [f\"3' UTR Exon {x+1}\" for x in range(len(utr3_starts))]\n\n utr3_intron_starts = []\n utr3_intron_ends = []\n for utr3_end in utr3_ends[:-1]:\n utr3_intron_starts.append(utr3_end+1)\n for utr3_start in utr3_starts[1:]:\n utr3_intron_ends.append(utr3_start-1)\n utr3_intron_names = [f\"3' UTR Intron {x+1}\" for x in range(len(utr3_intron_starts))]\n\n downstream_start = exon_df[exon_df.Name == 'Downstream'].Start.values[0]\n downstream_end = len(self.seq)\n downstream_name = 'Downstream'\n\n starts = cds_starts + intron_starts + utr5_starts + utr5_intron_starts + utr3_starts + utr3_intron_starts + [upstream_start, downstream_start]\n ends = cds_ends + intron_ends + utr5_ends + utr5_intron_ends + utr3_ends + utr3_intron_ends + [upstream_end, downstream_end]\n names = cds_names + intron_names + utr5_names + utr5_intron_names + utr3_names + utr3_intron_names + [upstream_name, downstream_name]\n df = pd.DataFrame({'Name': names, 'Start': starts, 'End': ends})\n df = 
df.sort_values('Start')\n df = df.reset_index(drop=True)\n return df\n\n def annotate(self, cds=False):\n if cds:\n df = self.get_cds_dataframe()\n else:\n df = self.get_exon_dataframe()\n annotations = []\n for i, r in df.iterrows():\n n = r.End - r.Start + 1\n annotations += [r.Name] * n\n return annotations\n\n def liftover(self):\n cds_df = self.get_cds_dataframe()\n cds_pos = []\n cds_sum = 1\n atg_start = self.data['CDSStarts'][0]\n utr5_exon_offset = -1 * self.get_utr5_exon_len()\n utr3_exon_sum = 1\n for i, r in cds_df.iterrows():\n cds_len = r.End - r.Start + 1\n if r.Name.startswith('CDS'):\n cds_pos += list(range(cds_sum, cds_sum + cds_len))\n cds_sum += cds_len\n elif r.Name.startswith('Intron'):\n cds_pos += [f'{cds_sum-1}+{x}' for x in range(1, cds_len+1)]\n elif r.Name == 'Upstream':\n a = self.get_atg_pos() - self.get_utr5_intron_len()\n cds_pos += [x-a for x in range(1, r.End+1)]\n elif r.Name.startswith(\"5' UTR Exon\"):\n a = r.End - r.Start + 1\n cds_pos += [x for x in range(utr5_exon_offset, utr5_exon_offset+a)]\n utr5_exon_offset += a\n elif r.Name.startswith(\"5' UTR Intron\"):\n cds_pos += [f'{utr5_exon_offset-1}+{x}' for x in range(1, cds_len+1)]\n elif r.Name == 'Downstream':\n a = self.get_utr3_exon_len() + 1\n b = r.End - r.Start + 1\n cds_pos += [f'*{x+a}' for x in range(b)]\n elif r.Name.startswith(\"3' UTR Exon\"):\n a = r.End - r.Start + 1\n cds_pos += [f'*{x}' for x in list(range(utr3_exon_sum, utr3_exon_sum+a))]\n utr3_exon_sum += a\n elif r.Name.startswith(\"3' UTR Intron\"):\n cds_pos += [f'*{utr3_exon_sum-1}+{x}' for x in range(1, cds_len+1)]\n else:\n cds_pos += ['.' for x in range(cds_len)]\n if len(cds_pos) != self.len:\n raise ValueError(f\"LiftOver length error: expected {self.len} bp, \"\n f\"but generated: {len(cds_pos)} bp\")\n return [f'c.{x}' for x in cds_pos]\n\n def get_atg_pos(self):\n return self.data['CDSStarts'][0]\n\n def get_atg_exon_index(self):\n exon_starts = self.data['ExonStarts']\n exon_ends = self.data['ExonEnds']\n atg_pos = self.get_atg_pos()\n for i in range(self.data['ExonCount']):\n if exon_starts[i] <= atg_pos <= exon_ends[i]:\n return i\n\n def get_stop_pos(self):\n return self.data['CDSEnds'][-1]\n\n def get_stop_exon_index(self):\n exon_starts = self.data['ExonStarts']\n exon_ends = self.data['ExonEnds']\n stop_pos = self.get_stop_pos()\n for i in range(self.data['ExonCount']):\n if exon_starts[i] <= stop_pos <= exon_ends[i]:\n return i\n\n def get_utr5_intron_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"5' UTR Intron\")]\n return sum(df.End - df.Start + 1)\n\n def get_utr5_exon_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"5' UTR Exon\")]\n return sum(df.End - df.Start + 1)\n\n def get_utr3_intron_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"3' UTR Intron\")]\n return sum(df.End - df.Start + 1)\n\n def get_utr3_exon_len(self):\n df = self.get_cds_dataframe()\n df = df[df.Name.str.contains(\"3' UTR Exon\")]\n return sum(df.End - df.Start + 1)\n" ]
[ [ "pandas.read_table", "pandas.DataFrame" ] ]
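A hypothetical usage sketch for the classes above, assuming the file is importable as `pvtools` and that matching FASTA/JSON pairs exist locally (all file names and the coordinate below are placeholders):

```python
from pvtools import Sequence, LookupTable

ng = Sequence("NG_gene.fasta", "NG_gene.json")              # RefSeqGene sequence + metadata
g7 = Sequence("grch37_region.fasta", "grch37_region.json")  # GRCh37 slice of the same gene
g8 = Sequence("grch38_region.fasta", "grch38_region.json")  # GRCh38 slice of the same gene

table = LookupTable(ng, g7, g8)
table.to_tsv("lookup_table.tsv")

# Map a GRCh37 coordinate to transcript (c.) notation, if the position is in the table.
print(table.find("GRCh37_Position", "Transcript_Position", 42126611))
```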
vishalbelsare/tsa
[ "203e602fe5fc95b89afb454156fc7e4faee90f2a" ]
[ "src/main/python/thalesians/tsa/optimization/visual.py" ]
[ "import itertools\nimport time\nimport warnings\n\nimport numpy as np\nimport matplotlib.colors\nimport matplotlib.pyplot as plt\n\nimport thalesians.tsa.checks as checks\nimport thalesians.tsa.numpyutils as npu\nimport thalesians.tsa.utils as utils\n\ndef _aggregate(aggregate_func, data, empty_aggregate):\n if empty_aggregate != 'none':\n return npu.apply(lambda x: empty_aggregate if len(x) == 0 else aggregate_func(x), data)\n else:\n return npu.apply(aggregate_func, data)\n\ndef visualize_grid_search(grid_search_result,\n aggregate_func=np.nanmean, empty_aggregate='none',\n fig=None, title=None,\n refresh_until_ready=False):\n if fig is None: fig = plt.figure()\n\n if title is None: title = grid_search_result.optimization_id\n fig.suptitle(title)\n\n param_names = list(grid_search_result.param_ranges.keys())\n\n subplots = {}\n heatmaps = {}\n datas = {}\n\n for i1 in range(len(param_names)):\n param_name1 = param_names[i1]\n param_values1 = grid_search_result.param_ranges[param_name1]\n for i2 in range(i1):\n param_name2 = param_names[i2]\n param_values2 = grid_search_result.param_ranges[param_name2]\n data = np.empty((len(param_values1), len(param_values2)), dtype=object)\n for i in range(np.size(data)): data.flat[i] = []\n datas[(i1, i2)] = data\n\n ax = fig.add_subplot(len(param_names) - 1, len(param_names) - 1, (i1 - 1) * (len(param_names) - 1) + i2 + 1)\n subplots[(i1, i2)] = ax\n \n initial_data = _aggregate(aggregate_func, datas[(i1, i2)], empty_aggregate)\n\n heatmaps[(i1, i2)] = ax.matshow(npu.apply(aggregate_func, initial_data), cmap='coolwarm')\n\n if i2 == i1 - 1:\n ax.set_xticklabels([np.nan] + [0. if x == 1e-06 else x for x in param_values2], fontsize=6, rotation='vertical', verticalalignment='bottom')\n ax.xaxis.set_ticks_position('top')\n ax.set_yticklabels([np.nan] + [0. if x == 1e-06 else x for x in param_values1], fontsize=6)\n ax.yaxis.set_ticks_position('right')\n else:\n ax.set_xticks([])\n ax.set_yticks([])\n if i1 == len(param_names) - 1: ax.set_xlabel(param_name2)\n if i2 == 0: ax.set_ylabel(param_name1)\n\n while True:\n all_ready = True\n for status in grid_search_result.evaluation_statuses:\n if not status.ready: all_ready = False\n else:\n checks.check(utils.sequence_eq(param_names, status.work.info['param_names']))\n param_value_index_combinations = itertools.combinations(range(len(param_names)), 2)\n param_value_index_combinations = [(i2, i1) for (i1, i2) in param_value_index_combinations if i1 != i2]\n for i1, i2 in param_value_index_combinations:\n param_value_index1 = status.work.info['param_value_indices'][i1]\n param_value_index2 = status.work.info['param_value_indices'][i2]\n if status.result.exception is not None:\n result = np.nan\n elif status.result.result is None:\n result = np.nan\n else:\n result = status.result.result\n datas[(i1, i2)][param_value_index1, param_value_index2].append(result)\n for i1 in range(len(param_names)):\n for i2 in range(i1):\n new_data = _aggregate(aggregate_func, datas[(i1, i2)], empty_aggregate)\n heatmaps[(i1, i2)].set_data(new_data)\n heatmaps[(i1, i2)].autoscale()\n if (not refresh_until_ready) or all_ready: break\n else:\n fig.canvas.draw()\n time.sleep(1)\n\n return fig\n" ]
[ [ "matplotlib.pyplot.figure", "numpy.size" ] ]
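A standalone sketch of the plotting idea above: aggregate repeated objective evaluations per parameter-pair cell with `np.nanmean` and render the grid with `matshow`, using synthetic scores in place of `grid_search_result`:

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
scores = rng.random((5, 4, 3))        # 5 x 4 parameter grid, 3 repeated evaluations per cell
agg = np.nanmean(scores, axis=-1)     # aggregate repeats, ignoring NaNs

fig, ax = plt.subplots()
im = ax.matshow(agg, cmap="coolwarm")
ax.set_xlabel("param 2 index")
ax.set_ylabel("param 1 index")
fig.colorbar(im)
plt.show()
```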
MUYANGGUO/HPC
[ "ab95d18d4054b892269dd439470548abd06f5512" ]
[ "projects/1-molecular-dynamics/check.py" ]
[ "\nif __name__ == \"__main__\":\n import sys\n import json\n import numpy as np\n\n firstline = sys.stdin.readline()\n obj = json.loads(firstline)\n\n Np = obj['num_points']\n dt = obj['dt']\n L = obj['L']\n Nt = obj['num_steps']\n Nint = obj['step_chunk']\n k = obj['k']\n d = obj['d']\n gifname = obj['gifname']\n\n numframes = int(Nt) // int(Nint) + 1\n maxinterv = 100\n maxinterv = min(maxinterv,numframes -1)\n accum = np.zeros((maxinterv,1))\n denom = np.zeros((maxinterv,1))\n for i in range(numframes):\n try:\n line = sys.stdin.readline()\n obj = json.loads(line)\n X = np.array(obj['X'])\n except:\n break\n center = np.mean(X,axis=1)\n X = X - center.reshape((3,1)) * np.ones((1,X.shape[1]))\n if not i:\n X0 = np.ndarray((maxinterv,X.shape[0],X.shape[1]))\n for j in range(maxinterv):\n X0[j,:,:] = X[:,:]\n continue\n for interv in range(1,maxinterv+1):\n if i % interv:\n continue\n r = X[:,:] - X0[interv-1,:,:]\n s_pro = r[0,:]*r[0,:] + r[1,:]*r[1,:] + r[2,:]*r[2,:]\n accum[interv-1] = accum[interv-1] + np.mean(s_pro)\n denom[interv-1] = denom[interv-1] + 1\n X0[interv-1,:,:] = X[:,:]\n\n out = accum / denom\n x = np.linspace(dt*Nint,dt*Nint*maxinterv,maxinterv)\n p = np.polyfit(x,out,1)\n print(f'Diffusion constant: {p[0] / 6.}')\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.ndarray", "numpy.array", "numpy.polyfit", "numpy.linspace", "numpy.mean" ] ]
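A quick synthetic check of the estimator above: for 3-D Brownian motion the mean-squared displacement grows as 6·D·t, so fitting a line to MSD versus time and dividing the slope by 6 should recover D (the parameter values below are arbitrary):

```python
import numpy as np

D_true, dt, nsteps, npart = 0.5, 0.01, 2000, 256
rng = np.random.default_rng(1)

# Independent Gaussian increments with per-dimension variance 2*D*dt.
steps = np.sqrt(2.0 * D_true * dt) * rng.standard_normal((nsteps, 3, npart))
X = np.cumsum(steps, axis=0)                   # positions relative to the start

t = np.arange(1, nsteps + 1) * dt
msd = np.mean(np.sum(X**2, axis=1), axis=1)    # average squared displacement over particles
slope = np.polyfit(t, msd, 1)[0]
print("estimated D:", slope / 6.0)             # should be close to 0.5
```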
hiroyasuakada/ros_start
[ "10221ad2bcaefa4aaadc6c90424a3751126ac256" ]
[ "scripts/gan/cycle_gan/train.py" ]
[ "import os\nimport random\nimport itertools\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom torchvision.utils import make_grid\nfrom torch.autograd import Variable\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom tensorboardX import SummaryWriter\nimport time\nimport cv2\n\n##################################################################\nfrom dataset import UnalignedDataset\nfrom model_base import ResNetBlock, Generator, Discriminator\nfrom model_cyclegan import CycleGAN\n##################################################################\n\n\ndef train(log_dir, device, lr, beta1, lambda_idt, lambda_A, lambda_B, lambda_mask,\n num_epoch, num_epoch_resume, save_epoch_freq):\n model = CycleGAN(log_dir=log_dir, device=device, lr=lr, beta1=beta1,\n lambda_idt=lambda_idt, lambda_A=lambda_A, lambda_B=lambda_B, lambda_mask=lambda_mask)\n\n if num_epoch_resume != 0:\n model.log_dir = 'logs'\n print('load model {}'.format(num_epoch_resume))\n model.load('epoch' + str(num_epoch_resume))\n\n writer = SummaryWriter(log_dir)\n\n for epoch in range(num_epoch):\n print('epoch {} started'.format(epoch + 1 + num_epoch_resume))\n t1 = time.perf_counter()\n\n losses = model.train(train_loader)\n\n t2 = time.perf_counter()\n get_processing_time = t2 - t1\n\n print('epoch: {}, elapsed_time: {} sec losses: {}'\n .format(epoch + 1 + num_epoch_resume, get_processing_time, losses))\n\n writer.add_scalar('loss_G_A', losses[0], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_D_A', losses[1], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_G_B', losses[2], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_D_B', losses[3], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_cycle_A', losses[4], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_cycle_B', losses[5], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_idt_A', losses[6], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_idt_B', losses[7], epoch + 1 + num_epoch_resume)\n writer.add_scalar('loss_mask', losses[8], epoch + 1 + num_epoch_resume)\n\n if (epoch + 1 + num_epoch_resume) % save_epoch_freq == 0:\n model.save('epoch%d' % (epoch + 1 + num_epoch_resume))\n\n\nif __name__ == '__main__':\n\n # random seeds\n torch.manual_seed(1234)\n np.random.seed(1234)\n random.seed(1234)\n\n # image\n height = 128\n width = 256\n\n # training details\n batch_size = 1\n lr = 0.0002 # initial learning rate for adam\n beta1 = 0.5 # momentum term of adam\n\n num_epoch = 100\n num_epoch_resume = 0\n save_epoch_freq = 1\n\n # weights of loss function\n # lambda_idt = 5\n # lambda_A = 10.0\n # lambda_B = 10.0\n # lambda_mask = 10.0\n lambda_idt = 5.0\n lambda_A = 10.0\n lambda_B = 10.0\n lambda_mask = 0\n\n # files, dirs\n log_dir = 'logs'\n\n # gpu\n device = torch.device(\"cuda:0\" if torch.cuda.is_available else \"cpu\")\n print('device {}'.format(device))\n\n # dataset\n train_dataset = UnalignedDataset(is_train=True)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n\n # train\n train(log_dir, device, lr, beta1, lambda_idt, lambda_A, lambda_B, lambda_mask,\n num_epoch, num_epoch_resume, save_epoch_freq)\n\n\n\n" ]
[ [ "torch.utils.data.DataLoader", "torch.manual_seed", "numpy.random.seed", "torch.device" ] ]
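A minimal sketch of the logging pattern the training script above relies on (one `add_scalar` call per tracked loss per epoch); the loss values here are placeholders rather than real CycleGAN losses:

```python
from tensorboardX import SummaryWriter

writer = SummaryWriter("logs")
for epoch in range(1, 4):
    fake_losses = {"loss_G_A": 1.0 / epoch,
                   "loss_D_A": 0.5 / epoch,
                   "loss_cycle_A": 2.0 / epoch}
    for name, value in fake_losses.items():
        writer.add_scalar(name, value, epoch)
writer.close()
# Inspect with: tensorboard --logdir logs
```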
ziyedy/category-priornet
[ "5aa080eeff936ce3939f0d5458a2936677c15726" ]
[ "lib/prior/priorNet.py" ]
[ "import sys\n\nsys.path.append(\"../../\")\nimport lib.gcn3d as gcn3d\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass PriorEncoder(nn.Module):\n def __init__(self, support_num: int, neighbor_num: int):\n super(PriorEncoder, self).__init__()\n\n self.neighbor_num = neighbor_num\n\n self.conv_0 = gcn3d.Conv_surface(kernel_num=32, support_num=support_num)\n self.conv_1 = gcn3d.Conv_layer(32, 64, support_num=support_num)\n self.pool_1 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)\n self.conv_2 = gcn3d.Conv_layer(64, 128, support_num=support_num)\n self.conv_3 = gcn3d.Conv_layer(128, 256, support_num=support_num)\n self.pool_2 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)\n self.conv_4 = gcn3d.Conv_layer(256, 512, support_num=support_num)\n self.pool_3 = gcn3d.Pool_layer(pooling_rate=4, neighbor_num=4)\n\n def forward(self, vertices: \"(bs, vertice_num, 3)\"):\n bs, vertice_num, _ = vertices.size()\n\n neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)\n\n fm_0 = self.conv_0(neighbor_index, vertices)\n fm_0 = F.relu(fm_0, inplace=True)\n fm_1 = self.conv_1(neighbor_index, vertices, fm_0)\n fm_1 = F.relu(fm_1, inplace=True)\n vertices, fm_1 = self.pool_1(vertices, fm_1)\n\n neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)\n fm_2 = self.conv_2(neighbor_index, vertices, fm_1)\n fm_2 = F.relu(fm_2, inplace=True)\n fm_3 = self.conv_3(neighbor_index, vertices, fm_2)\n fm_3 = F.relu(fm_3, inplace=True)\n vertices, fm_3 = self.pool_2(vertices, fm_3)\n neighbor_index = gcn3d.get_neighbor_index(vertices, self.neighbor_num)\n\n fm_4 = self.conv_4(neighbor_index, vertices, fm_3)\n feature_global = fm_4.max(1)[0]\n # fm_4 = F.relu(fm_4, inplace=True)\n # vertices, fm_4 = self.pool_3(vertices, fm_4)\n\n return feature_global\n\n\nclass PriorDecoder(nn.Module):\n def __init__(self, emb_dim, n_pts):\n super(PriorDecoder, self).__init__()\n self.fc1 = nn.Linear(emb_dim, 512)\n self.fc2 = nn.Linear(512, 1024)\n self.fc3 = nn.Linear(1024, 3 * n_pts)\n\n def forward(self, embedding):\n \"\"\"\n Args:\n embedding: (B, 512)\n\n \"\"\"\n bs = embedding.size()[0]\n out1 = F.relu(self.fc1(embedding))\n out2 = F.relu(self.fc2(out1))\n out3 = self.fc3(out2)\n out_pc = out3.view(bs, -1, 3)\n return out_pc\n\n\nclass PriorNet(nn.Module):\n def __init__(self, emb_dim=512, n_pts=1024):\n super(PriorNet, self).__init__()\n self.encoder = PriorEncoder(1, 20)\n self.decoder = PriorDecoder(emb_dim, n_pts)\n\n def forward(self, in_pc):\n emb = self.encoder(in_pc)\n out_pc = self.decoder(emb)\n return emb, out_pc\n\n\nif __name__ == '__main__':\n estimator = PriorEncoder(1, 1)\n xyz = torch.randn(32, 2048, 3)\n\n gg = estimator(xyz)" ]
[ [ "torch.randn", "torch.nn.Linear", "torch.nn.functional.relu" ] ]
yulong314/mmpose
[ "cdfce789d0e48dd868c70a405a7d7f3da2b4ebe3" ]
[ "mmpose/datasets/datasets/hand/freihand_dataset.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .hand_base_dataset import HandBaseDataset\n\n\[email protected]_module()\nclass FreiHandDataset(HandBaseDataset):\n \"\"\"FreiHand dataset for top-down hand pose estimation.\n\n `FreiHAND: A Dataset for Markerless Capture of Hand Pose\n and Shape from Single RGB Images' ICCV'2019\n More details can be found in the `paper\n <https://arxiv.org/pdf/1909.04349.pdf>`__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n FreiHand keypoint indexes::\n\n 0: 'wrist',\n 1: 'thumb1',\n 2: 'thumb2',\n 3: 'thumb3',\n 4: 'thumb4',\n 5: 'forefinger1',\n 6: 'forefinger2',\n 7: 'forefinger3',\n 8: 'forefinger4',\n 9: 'middle_finger1',\n 10: 'middle_finger2',\n 11: 'middle_finger3',\n 12: 'middle_finger4',\n 13: 'ring_finger1',\n 14: 'ring_finger2',\n 15: 'ring_finger3',\n 16: 'ring_finger4',\n 17: 'pinky_finger1',\n 18: 'pinky_finger2',\n 19: 'pinky_finger3',\n 20: 'pinky_finger4'\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n self.ann_info['use_different_joint_weights'] = False\n assert self.ann_info['num_joints'] == 21\n self.ann_info['joint_weights'] = \\\n np.ones((self.ann_info['num_joints'], 1), dtype=np.float32)\n\n self.dataset_name = 'freihand'\n self.db = self._get_db()\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self):\n \"\"\"Load dataset.\"\"\"\n gt_db = []\n bbox_id = 0\n num_joints = self.ann_info['num_joints']\n for img_id in self.img_ids:\n\n ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)\n objs = self.coco.loadAnns(ann_ids)\n\n for obj in objs:\n if max(obj['keypoints']) == 0:\n continue\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n keypoints = np.array(obj['keypoints']).reshape(-1, 3)\n joints_3d[:, :2] = keypoints[:, :2]\n joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])\n\n # the ori image is 224x224\n center, scale = self._xywh2cs(0, 0, 224, 224, 0.8)\n\n image_file = os.path.join(self.img_prefix,\n self.id2name[img_id])\n gt_db.append({\n 'image_file': image_file,\n 'center': center,\n 'scale': scale,\n 'rotation': 0,\n 'joints_3d': joints_3d,\n 'joints_3d_visible': joints_3d_visible,\n 'dataset': self.dataset_name,\n 'bbox': obj['bbox'],\n 'bbox_score': 1,\n 'bbox_id': bbox_id\n })\n bbox_id = bbox_id + 1\n gt_db = sorted(gt_db, key=lambda x: x['bbox_id'])\n\n return gt_db\n\n def evaluate(self, outputs, res_folder, metric='PCK', **kwargs):\n \"\"\"Evaluate freihand keypoint results. 
The pose prediction results will\n be saved in `${res_folder}/result_keypoints.json`.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n outputs (list(preds, boxes, image_path, output_heatmap))\n :preds (np.ndarray[N,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n :boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n :image_paths (list[str]): For example, ['training/rgb/\n 00031426.jpg']\n :output_heatmap (np.ndarray[N, K, H, W]): model outpus.\n\n res_folder (str): Path of directory to save the results.\n metric (str | list[str]): Metric to be performed.\n Options: 'PCK', 'AUC', 'EPE'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCK', 'AUC', 'EPE']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n for output in outputs:\n preds = output['preds']\n boxes = output['boxes']\n image_paths = output['image_paths']\n bbox_ids = output['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n\n kpts.append({\n 'keypoints': preds[i].tolist(),\n 'center': boxes[i][0:2].tolist(),\n 'scale': boxes[i][2:4].tolist(),\n 'area': float(boxes[i][4]),\n 'score': float(boxes[i][5]),\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file, metrics)\n name_value = OrderedDict(info_str)\n\n return name_value\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.zeros", "numpy.minimum" ] ]
gonsoomoon/tensorflow-workshop-for-sagemaker
[ "985ab3853c16f4833caeae6382ccfc4474ac8e98" ]
[ "training_script/cifar10_keras_sm.py" ]
[ "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this\n# software and associated documentation files (the \"Software\"), to deal in the Software\n# without restriction, including without limitation the rights to use, copy, modify,\n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport os\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D, BatchNormalization\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam, SGD, RMSprop\nimport tensorflow as tf\nfrom keras import backend as K\n\nsess = tf.Session()\nK.set_session(sess)\n\nlogging.getLogger().setLevel(logging.INFO)\ntf.logging.set_verbosity(tf.logging.INFO)\nHEIGHT = 32\nWIDTH = 32\nDEPTH = 3\nNUM_CLASSES = 10\nNUM_DATA_BATCHES = 5\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES\nINPUT_TENSOR_NAME = 'inputs_input' # needs to match the name of the first layer + \"_input\"\n\ndef keras_model_fn(learning_rate, weight_decay, optimizer, momentum):\n \"\"\"keras_model_fn receives hyperparameters from the training job and returns a compiled keras model.\n The model will be transformed into a TensorFlow Estimator before training and it will be saved in a \n TensorFlow Serving SavedModel at the end of training.\n\n Args:\n hyperparameters: The hyperparameters passed to the SageMaker TrainingJob that runs your TensorFlow \n training script.\n Returns: A compiled Keras model\n \"\"\"\n model = Sequential()\n model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Conv2D(32, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.2))\n\n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Conv2D(64, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.3))\n\n model.add(Conv2D(128, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Conv2D(128, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(NUM_CLASSES))\n model.add(Activation('softmax'))\n\n size = 1\n\n if optimizer.lower() == 'sgd':\n opt = SGD(lr=learning_rate * size, decay=weight_decay, momentum=momentum)\n elif 
optimizer.lower() == 'rmsprop':\n opt = RMSprop(lr=learning_rate * size, decay=weight_decay)\n else:\n opt = Adam(lr=learning_rate * size, decay=weight_decay)\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n return model\n\n\ndef get_filenames(channel_name, channel):\n if channel_name in ['train', 'validation', 'eval']:\n return [os.path.join(channel, channel_name + '.tfrecords')]\n else:\n raise ValueError('Invalid data subset \"%s\"' % channel_name)\n\n\ndef train_input_fn():\n return _input(args.epochs, args.batch_size, args.train, 'train')\n\n\ndef eval_input_fn():\n return _input(args.epochs, args.batch_size, args.eval, 'eval')\n\n\ndef validation_input_fn():\n return _input(args.epochs, args.batch_size, args.validation, 'validation')\n\n\ndef _input(epochs, batch_size, channel, channel_name):\n\n filenames = get_filenames(channel_name, channel)\n dataset = tf.data.TFRecordDataset(filenames)\n\n dataset = dataset.repeat(epochs)\n dataset = dataset.prefetch(10)\n\n # Parse records.\n dataset = dataset.map(\n _dataset_parser, num_parallel_calls=10)\n\n # Potentially shuffle records.\n if channel_name == 'train':\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size\n dataset = dataset.shuffle(buffer_size=buffer_size)\n\n # Batch it up.\n dataset = dataset.batch(batch_size, drop_remainder=True)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return {INPUT_TENSOR_NAME: image_batch}, label_batch\n\n\ndef _train_preprocess_fn(image):\n \"\"\"Preprocess a single training image of layout [height, width, depth].\"\"\"\n # Resize the image to add four extra pixels on each side.\n image = tf.image.resize_image_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)\n\n # Randomly crop a [HEIGHT, WIDTH] section of the image.\n image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])\n\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n return image\n\n\ndef _dataset_parser(value):\n \"\"\"Parse a CIFAR-10 record from value.\"\"\"\n featdef = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n }\n\n example = tf.parse_single_example(value, featdef)\n image = tf.decode_raw(example['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(example['label'], tf.int32)\n image = _train_preprocess_fn(image)\n return image, tf.one_hot(label, NUM_CLASSES)\n\ndef save_model(model, output):\n signature = tf.saved_model.signature_def_utils.predict_signature_def(\n inputs={'inputs': model.input}, outputs={'scores': model.output})\n\n builder = tf.saved_model.builder.SavedModelBuilder(output+'/1/')\n builder.add_meta_graph_and_variables(\n sess=K.get_session(),\n tags=[tf.saved_model.tag_constants.SERVING],\n signature_def_map={\"serving_default\": signature})\n builder.save()\n\n logging.info(\"Model successfully saved at: {}\".format(output))\n return\n\ndef main(args):\n logging.info(\"getting data\")\n train_dataset = train_input_fn()\n eval_dataset = eval_input_fn()\n validation_dataset = validation_input_fn()\n\n logging.info(\"configuring model\")\n model = keras_model_fn(args.learning_rate, args.weight_decay, args.optimizer, 
args.momentum)\n callbacks = []\n\n # -----------수정 부분\n# callbacks.append(ModelCheckpoint(args.model_dir + '/checkpoint-{epoch}.h5'))\n callbacks.append(ModelCheckpoint(args.model_output_dir + '/checkpoint-{epoch}.h5'))\n\n logging.info(\"Starting training\")\n model.fit(x=train_dataset[0], y=train_dataset[1],\n steps_per_epoch=(num_examples_per_epoch('train') // args.batch_size),\n epochs=args.epochs, validation_data=validation_dataset,\n validation_steps=(num_examples_per_epoch('validation') // args.batch_size), callbacks=callbacks)\n\n score = model.evaluate(eval_dataset[0], eval_dataset[1], steps=num_examples_per_epoch('eval') // args.batch_size,\n verbose=0)\n\n logging.info('Test loss:{}'.format(score[0]))\n logging.info('Test accuracy:{}'.format(score[1]))\n\n # -------------수정 부분\n# return save_model(model, args.model_dir)\n return save_model(model, args.model_output_dir)\n\ndef num_examples_per_epoch(subset='train'):\n if subset == 'train':\n return 40000\n elif subset == 'validation':\n return 10000\n elif subset == 'eval':\n return 10000\n else:\n raise ValueError('Invalid data subset \"%s\"' % subset)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--train',\n type=str,\n required=False,\n default=os.environ.get('SM_CHANNEL_TRAIN'), # ----수정 부분\n help='The directory where the CIFAR-10 input data is stored.')\n parser.add_argument(\n '--validation',\n type=str,\n required=False,\n default=os.environ.get('SM_CHANNEL_VALIDATION'), # ----수정 부분 \n help='The directory where the CIFAR-10 input data is stored.')\n parser.add_argument(\n '--eval',\n type=str,\n required=False,\n default=os.environ.get('SM_CHANNEL_EVAL'), # ----수정 부분 \n help='The directory where the CIFAR-10 input data is stored.')\n parser.add_argument(\n '--model_dir',\n type=str,\n required=True,\n help='The directory where the model will be stored.')\n parser.add_argument(\n '--weight-decay',\n type=float,\n default=2e-4,\n help='Weight decay for convolutions.')\n parser.add_argument(\n '--learning-rate',\n type=float,\n default=0.001,\n help=\"\"\"\\\n This is the inital learning rate value. The learning rate will decrease\n during training. For more details check the model_fn implementation in\n this file.\\\n \"\"\")\n parser.add_argument(\n '--epochs',\n type=int,\n default=10,\n help='The number of steps to use for training.')\n parser.add_argument(\n '--batch-size',\n type=int,\n default=128,\n help='Batch size for training.')\n parser.add_argument(\n '--optimizer',\n type=str,\n default='adam')\n parser.add_argument(\n '--momentum',\n type=float,\n default='0.9')\n # ----------추가 부분\n parser.add_argument(\n '--model_output_dir',\n type=str,\n default=os.environ.get('SM_MODEL_DIR'))\n \n args = parser.parse_args()\n main(args)" ]
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.random_crop", "tensorflow.image.random_flip_left_right", "tensorflow.reshape", "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.logging.set_verbosity", "tensorflow.parse_single_example", "tensorflow.decode_raw", "tensorflow.cast", "tensorflow.saved_model.signature_def_utils.predict_signature_def", "tensorflow.one_hot", "tensorflow.FixedLenFeature", "tensorflow.Session", "tensorflow.saved_model.builder.SavedModelBuilder" ] ]
slimnsour/datman
[ "6ac4827e2ae20401eb4b048d42bdfca5db5d3de9" ]
[ "bin/dm_link.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nRenames (links) exam zip archives by consulting a lookup table.\n\nThis program looks up the proper name in a table that lists the original exam\narchive name, and the target name.\n\nUsage:\n dm_link.py [options] <study>\n dm_link.py [options] <study> <zipfile>\n\nArguments:\n <study> Name of the study to process\n <zipfile> Single Zipfile to process\n\nOptions:\n --lookup FILE Path to scan id lookup table,\n overrides metadata/scans.csv\n --scanid-field STR Dicom field to match target_name with\n [default: PatientName]\n -v --verbose Verbose logging\n -d --debug Debug logging\n -q --quiet Less debuggering\n --dry-run Dry run\n\n\nDETAILS\n This program is used to rename an exam archive with their properly\n formatted scan names (see datman.scanid). Two approaches are used to find\n this name:\n\n ### Scan ID in a lookup table (--lookup)\n\n The lookup table should have atleast two columns: source_name, and\n target_name. For example:\n\n source_name target_name\n 2014_0126_FB001 ASDD_CMH_FB001_01_01\n\n The source_name column is matched against the archive filename (so the\n entry above applies to 2014_0126_FB001.zip). The target_name column\n specifies the proper name for the exam.\n\n If the archive is not found in the lookup table, the dicom header is\n consulted:\n\n ### Scan ID in the dicom header (--scanid-field)\n\n Some scans may have the scan ID embedded in a dicom header field.\n\n The --scanid-field specifies a dicom header field to check for a\n well-formatted exam name.\n\n\nADDITIONAL MATCH CONDITIONS\n Additional columns in the lookup table can be specified to ensure that the\n DICOM headers of the file match what is expected. These column names should\n start with dicom_. For example,\n\n source_name target_name dicom_StudyID\n 2014_0126_FB001 ASDD_CMH_FB001_01_01 512\n\n In the example above, this script would check that the StudyID field of an\n arbitrary dicom file in the archive contains the value \"512\". 
If not, an\n error is thrown.\n\nIGNORING EXAM ARCHIVES\n Exam archives can be ignored by placing an entry into the lookup table with\n the target_name of '<ignore>', for example:\n source_name target_name dicom_StudyID\n 2014_0126_FB001 <ignore>\n\"\"\"\n\nimport glob\nimport logging\nimport os\nimport sys\n\nfrom docopt import docopt\nimport pandas as pd\n\nimport datman.config\nimport datman.scanid\nimport datman.utils\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\nalready_linked = {}\nlookup = None\nDRYRUN = None\n\n\ndef main():\n # make the already_linked dict global as we are going to use it a lot\n global already_linked\n global lookup\n global DRYRUN\n\n arguments = docopt(__doc__)\n verbose = arguments[\"--verbose\"]\n debug = arguments[\"--debug\"]\n DRYRUN = arguments[\"--dry-run\"]\n quiet = arguments[\"--quiet\"]\n study = arguments[\"<study>\"]\n lookup_path = arguments[\"--lookup\"]\n scanid_field = arguments[\"--scanid-field\"]\n zipfile = arguments[\"<zipfile>\"]\n\n # setup logging\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.WARN)\n logger.setLevel(logging.WARN)\n if quiet:\n logger.setLevel(logging.ERROR)\n ch.setLevel(logging.ERROR)\n if verbose:\n logger.setLevel(logging.INFO)\n ch.setLevel(logging.INFO)\n if debug:\n logger.setLevel(logging.DEBUG)\n ch.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - {study} - \"\n \"%(levelname)s - %(message)s\".format(\n study=study))\n ch.setFormatter(formatter)\n\n logger.addHandler(ch)\n\n # setup the config object\n cfg = datman.config.config(study=study)\n if not lookup_path:\n lookup_path = os.path.join(cfg.get_path(\"meta\"), \"scans.csv\")\n\n dicom_path = cfg.get_path(\"dicom\")\n zips_path = cfg.get_path(\"zips\")\n\n if not os.path.isdir(dicom_path):\n logger.warning(\"Dicom folder {} doesnt exist, creating it.\".format(\n dicom_path))\n try:\n os.makedirs(dicom_path)\n except IOError:\n logger.error(\"Failed to create dicom path {}\".format(dicom_path))\n return\n\n if not os.path.isdir(zips_path):\n logger.error(\"Zips path {} doesnt exist\".format(zips_path))\n return\n\n try:\n lookup = pd.read_csv(lookup_path, sep=\"\\s+\", dtype=str) # noqa: W605\n except IOError:\n logger.error(\"Lookup file {} not found\".format(lookup_path))\n return\n\n # identify which zip files have already been linked\n already_linked = {os.path.realpath(f): f\n for f\n in glob.glob(os.path.join(dicom_path, \"*\"))\n if os.path.islink(f)}\n\n if zipfile:\n if isinstance(zipfile, str):\n zipfile = [zipfile]\n archives = [os.path.join(zips_path, zip) for zip in zipfile]\n else:\n archives = [os.path.join(zips_path, archive)\n for archive\n in os.listdir(zips_path)\n if os.path.splitext(archive)[1] == \".zip\"]\n\n logger.info(\"Found {} archives\".format(len(archives)))\n for archive in archives:\n link_archive(archive, dicom_path, scanid_field, cfg)\n\n\ndef link_archive(archive_path, dicom_path, scanid_field, config):\n if not os.path.isfile(archive_path):\n logger.error(\"Archive {} not found\".format(archive_path))\n return\n\n try:\n linked_path = already_linked[os.path.realpath(archive_path)]\n except KeyError:\n linked_path = \"\"\n\n if linked_path:\n logger.info(\"{} already linked at {}\".format(archive_path,\n linked_path))\n return\n\n scanid = get_scanid_from_lookup_table(archive_path)\n\n # if scanid has been returned from the lookup table its a tuplet\n # otherwise None\n if scanid:\n scanid, lookupinfo = scanid\n\n if scanid == \"<ignore>\":\n 
logger.info(\"Ignoring {}\".format(archive_path))\n return\n\n if not scanid:\n scanid = get_scanid_from_header(archive_path, scanid_field)\n\n if not scanid:\n logger.error(\"Scanid not found for archive: {}\".format(archive_path))\n return\n\n try:\n ident = datman.utils.validate_subject_id(scanid, config)\n except datman.scanid.ParseException as e:\n logger.error(\"Can't make link for {}. Reason: {}\".format(\n archive_path, e))\n return\n\n scanid = str(ident)\n\n # do the linking\n target = os.path.join(dicom_path, scanid)\n target = target + datman.utils.get_extension(archive_path)\n if os.path.exists(target):\n logger.error(\"Target: {} already exists for archive: {}\"\n .format(target, archive_path))\n return\n\n relpath = os.path.relpath(archive_path, dicom_path)\n logger.info(\"Linking {} to {}\".format(relpath, target))\n if not DRYRUN:\n os.symlink(relpath, target)\n\n\ndef get_scanid_from_lookup_table(archive_path):\n \"\"\"\n Gets the scanid from the lookup table (pandas dataframe)\n\n Returns the scanid and the rest of the lookup table information (e.g.\n expected dicom header matches). If no match is found, both the scan id and\n lookup table info is None.\n \"\"\"\n global lookup\n basename = os.path.basename(os.path.normpath(archive_path))\n source_name = basename[:-len(datman.utils.get_extension(basename))]\n lookupinfo = lookup[lookup[\"source_name\"] == source_name]\n\n if len(lookupinfo) == 0:\n logger.debug(\"{} not found in source_name column.\"\n .format(source_name))\n return\n else:\n scanid = lookupinfo[\"target_name\"].tolist()[0]\n return (scanid, lookupinfo)\n\n\ndef get_archive_headers(archive_path):\n # get some DICOM headers from the archive\n header = None\n try:\n header = datman.utils.get_archive_headers(archive_path,\n stop_after_first=True)\n header = list(header.values())[0]\n except Exception:\n logger.warning(\"Archive: {} contains no DICOMs\".format(archive_path))\n return header\n\n\ndef get_scanid_from_header(archive_path, scanid_field):\n \"\"\"\n Gets the scanid from the dicom header object.\n\n Returns None if the header field isn't present or the value isn't a proper\n scan ID.\n \"\"\"\n header = get_archive_headers(archive_path)\n if not header:\n return False\n if scanid_field not in header:\n logger.error(\"{} field is not in {} dicom headers\"\n .format(scanid_field, archive_path))\n return\n\n scanid = str(header.get(scanid_field))\n\n if datman.scanid.is_scanid(scanid):\n logger.debug(\"{}: Using scan ID from dicom field {} = {}.\"\n .format(archive_path, scanid_field, scanid))\n return scanid\n else:\n logger.warning(\"{}: {} (header {}) not valid scan ID\"\n .format(archive_path, scanid, scanid_field))\n return None\n\n\ndef validate_headers(archive_path, lookupinfo, scanid_field):\n \"\"\"\n Validates an exam archive against the lookup table\n\n Checks that all dicom_* dicom header fields match the lookup table\n \"\"\"\n header = get_archive_headers(archive_path)\n if not header:\n return False\n\n columns = lookupinfo.columns.values.tolist()\n dicom_cols = [c for c in columns if c.startswith(\"dicom_\")]\n\n for c in dicom_cols:\n f = c.split(\"_\")[1]\n\n if f not in header:\n logger.error(\"{} field is not in {} dicom headers\"\n .format(scanid_field, archive_path))\n return False\n\n actual = str(header.get(f))\n expected = str(lookupinfo[c].tolist()[0])\n\n if actual != expected:\n logger.error(\"{}: dicom field '{}' = '{}', expected '{}'\"\n .format(archive_path, f, actual, expected))\n return False\n return True\n\n\nif 
__name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
haggaila/qiskit-dynamics
[ "fd20314e2b591c35323782bc429d9f928fdb9a12" ]
[ "test/dynamics/solvers/test_solver_classes.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# pylint: disable=invalid-name\n\n\"\"\"\nTests for solver classes module.\n\"\"\"\n\nimport numpy as np\n\nfrom qiskit import QiskitError\nfrom qiskit.quantum_info import Operator, Statevector, SuperOp, DensityMatrix\n\nfrom qiskit_dynamics import Solver\nfrom qiskit_dynamics.signals import Signal\n\nfrom ..common import QiskitDynamicsTestCase, TestJaxBase\n\n\nclass TestSolverExceptions(QiskitDynamicsTestCase):\n \"\"\"Tests for Solver exception raising based on input types.\"\"\"\n\n def setUp(self):\n X = Operator.from_label(\"X\")\n self.ham_solver = Solver(hamiltonian_operators=[X], hamiltonian_signals=[1.0])\n\n self.lindblad_solver = Solver(\n hamiltonian_operators=[X], hamiltonian_signals=[1.0], dissipator_operators=[X]\n )\n\n self.vec_lindblad_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[1.0],\n dissipator_operators=[X],\n evaluation_mode=\"dense_vectorized\",\n )\n\n def test_hamiltonian_shape_error(self):\n \"\"\"Test error raising if invalid shape for Hamiltonian model.\"\"\"\n\n with self.assertRaises(QiskitError) as qe:\n self.ham_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.ham_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.ham_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n def test_lindblad_shape_error(self):\n \"\"\"Test error raising if invalid shape for Lindblad model.\"\"\"\n\n with self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n def test_vectorized_lindblad_shape_error(self):\n \"\"\"Test error raising if invalid shape for vectorized Lindblad model.\"\"\"\n\n with self.assertRaises(QiskitError) as qe:\n self.vec_lindblad_solver.solve([0.0, 1.0], np.array([[1.0, 0.0], [0.0, 1.0]]))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.vec_lindblad_solver.solve([0.0, 1.0], DensityMatrix(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n with self.assertRaises(QiskitError) as qe:\n self.vec_lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))\n self.assertTrue(\"Shape mismatch\" in str(qe.exception))\n\n def test_non_vectorized_SuperOp_error(self):\n \"\"\"Test SuperOp simulation attempt for non-vectorized Lindblad model.\"\"\"\n\n with 
self.assertRaises(QiskitError) as qe:\n self.lindblad_solver.solve([0.0, 1.0], SuperOp(np.eye(4)))\n self.assertTrue(\"Simulating SuperOp\" in str(qe.exception))\n\n\nclass TestSolver(QiskitDynamicsTestCase):\n \"\"\"Tests for Solver class.\"\"\"\n\n def setUp(self):\n \"\"\"Set up some simple models.\"\"\"\n X = 2 * np.pi * Operator.from_label(\"X\") / 2\n Z = 2 * np.pi * Operator.from_label(\"Z\") / 2\n self.ham_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n drift=5 * Z,\n rotating_frame=5 * Z,\n )\n\n self.rwa_ham_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n drift=5 * Z,\n rotating_frame=5 * Z,\n rwa_cutoff_freq=2 * 5.0,\n )\n\n self.lindblad_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n dissipator_operators=[0.01 * X],\n drift=5 * Z,\n rotating_frame=5 * Z,\n )\n\n self.vec_lindblad_solver = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n dissipator_operators=[0.01 * X],\n drift=5 * Z,\n rotating_frame=5 * Z,\n evaluation_mode=\"dense_vectorized\",\n )\n\n # lindblad solver with no dissipation for testing\n self.vec_lindblad_solver_no_diss = Solver(\n hamiltonian_operators=[X],\n hamiltonian_signals=[Signal(1.0, 5.0)],\n dissipator_operators=[0.0 * X],\n drift=5 * Z,\n rotating_frame=5 * Z,\n evaluation_mode=\"dense_vectorized\",\n )\n self.method = \"DOP853\"\n\n def test_lindblad_solve_statevector(self):\n \"\"\"Test correct conversion of Statevector to DensityMatrix.\"\"\"\n\n results = self.lindblad_solver.solve(\n [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method\n )\n self.assertTrue(isinstance(results.y[-1], DensityMatrix))\n self.assertTrue(results.y[-1].data[0, 0] > 0.99 and results.y[-1].data[0, 0] < 0.999)\n\n def test_vec_lindblad_statevector(self):\n \"\"\"Test correct conversion of Statevector to DensityMatrix and vectorized solving.\"\"\"\n\n results = self.vec_lindblad_solver.solve(\n [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method\n )\n results2 = self.lindblad_solver.solve(\n [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method\n )\n self.assertTrue(isinstance(results.y[-1], DensityMatrix))\n self.assertAllClose(results.y[-1].data, results2.y[-1].data)\n\n def test_array_vectorized_lindblad(self):\n \"\"\"Test Lindblad solver is array-vectorized.\"\"\"\n results = self.lindblad_solver.solve(\n [0.0, 1.0],\n y0=np.array([[[0.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]]]),\n method=self.method,\n )\n self.assertTrue(results.y[-1][0, 0, 0] > 0.99 and results.y[-1][0, 0, 0] < 0.999)\n self.assertTrue(results.y[-1][1, 1, 1] > 0.99 and results.y[-1][1, 1, 1] < 0.999)\n\n def test_rwa_hamiltonian(self):\n \"\"\"Test perfect inversion for pi pulse with RWA.\"\"\"\n results = self.rwa_ham_solver.solve(\n [0.0, 1.0], y0=np.array([0.0, 1.0]), atol=1e-10, rtol=1e-10, method=self.method\n )\n self.assertTrue(np.abs(results.y[-1][0]) > (1 - 1e-8))\n\n def test_hamiltonian_DensityMatrix(self):\n \"\"\"Test correct conjugation of Hamiltonian-based density matrix simulation.\"\"\"\n results = self.ham_solver.solve(\n [0.0, 1.0],\n y0=DensityMatrix(np.array([0.0, 1.0])),\n atol=1e-10,\n rtol=1e-10,\n method=self.method,\n )\n self.assertTrue(isinstance(results.y[-1], DensityMatrix))\n self.assertTrue(np.abs(results.y[-1].data[0, 0]) > 0.999)\n\n def test_hamiltonian_SuperOp(self):\n \"\"\"Test Hamiltonian-based SuperOp simulation.\"\"\"\n results = self.rwa_ham_solver.solve(\n [0.0, 1.0], 
y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method\n )\n self.assertTrue(isinstance(results.y[-1], SuperOp))\n X = np.array([[0.0, 1.0], [1.0, 0.0]])\n self.assertAllClose(results.y[-1].data, np.kron(X, X))\n\n def test_hamiltonian_lindblad_SuperOp_consistency(self):\n \"\"\"Test Hamiltonian-based SuperOp simulation.\"\"\"\n results = self.ham_solver.solve(\n [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method\n )\n results2 = self.vec_lindblad_solver_no_diss.solve(\n [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10\n )\n self.assertAllClose(results.y[-1].data, results2.y[-1].data)\n\n\nclass TestSolverJax(TestSolver, TestJaxBase):\n \"\"\"JAX version of TestSolver.\"\"\"\n\n def setUp(self):\n \"\"\"Set method to 'jax_odeint' to speed up running of jax version of tests.\"\"\"\n super().setUp()\n self.method = \"jax_odeint\"\n\n def test_jit_solve(self):\n \"\"\"Test jitting setting signals and solving.\"\"\"\n\n def func(a):\n ham_solver = self.ham_solver.copy()\n ham_solver.signals = [Signal(lambda t: a, 5.0)]\n yf = ham_solver.solve(\n np.array([0.0, 1.0]), y0=np.array([0.0, 1.0]), method=self.method\n ).y[-1]\n return yf\n\n jit_func = self.jit_wrap(func)\n self.assertAllClose(jit_func(2.0), func(2.0))\n\n def test_jit_grad_solve(self):\n \"\"\"Test jitting setting signals and solving.\"\"\"\n\n def func(a):\n lindblad_solver = self.lindblad_solver.copy()\n lindblad_solver.signals = [[Signal(lambda t: a, 5.0)], [1.0]]\n yf = lindblad_solver.solve(\n [0.0, 1.0], y0=np.array([[0.0, 1.0], [0.0, 1.0]]), method=self.method\n ).y[-1]\n return yf\n\n jit_grad_func = self.jit_grad_wrap(func)\n jit_grad_func(1.0)\n" ]
[ [ "numpy.array", "numpy.eye", "numpy.kron", "numpy.abs" ] ]
Sourodip-ghosh123/Fruits-360
[ "f15ce919757f0a0ce057f4ba4b49ce3d5aba53e2" ]
[ "ResNet50 V2/resnet50_v2_model.py" ]
[ "from keras.applications.resnet_v2 import ResNet50V2\nmodel=ResNet50V2(include_top=True, weights=None, input_tensor=None, input_shape=(100,100,3),classes=41)\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nprint('Compiled!')\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D,MaxPooling2D\nfrom keras.layers import Activation, Dense, Flatten, Dropout\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import backend as K\nbatch_size = 50\n\ncheckpointer = ModelCheckpoint(filepath = 'cnn_from_scratch_fruits.hdf5', save_best_only = True)\n\nhistory = model.fit(x_train,y_train,\n batch_size = 50,\n epochs=15,\n validation_data=(x_valid, y_vaild),\n callbacks = [checkpointer],\n shuffle=True\n )\n \nmodel.load_weights('cnn_from_scratch_fruits.hdf5')\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('\\n', 'Test accuracy:', score[1])\n\nimport matplotlib.pyplot as plt\n\n# Plot training & validation accuracy values\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
gandreassi/ImmoKaa
[ "904115e5a6f91ca78b41aebdaf4ffe3934a4c318" ]
[ "ImmoKaa/scraper.py" ]
[ "from bs4 import BeautifulSoup\nimport urllib.request as urllib2\nimport random\nfrom random import choice\nimport pandas as pd\nimport copy, time, sys, shutil, os, yaml, json\nimport datetime as dt\nfrom glob import glob\nimport regex\n\nclass scraper():\n \n criteria = None\n df = None\n df_pre = None\n __verbose = False\n __parameter_names = { #this dict translate the parameters into thei corresponding url bit\n 'min_price' : 'pf',\n 'max_price' : 'pt',\n 'min_rooms' : 'nrf',\n 'max_rooms' : 'nrt',\n 'radius' : 'r',\n 'days_old' : 'pa',\n }\n __instance_name = None\n __root_dir = \"./ImmoKaa_data/\"\n __base_dir = None\n \n \n \n def __init__(self, instance_name, criteria_file):\n self.__instance_name = instance_name\n self.__base_dir = self.__root_dir+instance_name\n os.makedirs(self.__base_dir, exist_ok=True)\n with open(criteria_file) as file:\n self.criteria = yaml.load(file, Loader=yaml.FullLoader) \n self.get_preexisting_data()\n \n\n\n def _urlquery(self, url, verbose=False):\n # function cycles randomly through different user agents and time intervals to simulate more natural queries\n try:\n sleeptime = float(random.randint(1,6))/5\n time.sleep(sleeptime)\n\n agents = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17',\n 'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0',\n 'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',\n 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',\n 'Mozilla/3.0',\n 'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',\n 'Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522+ (KHTML, like Gecko) Safari/419.3',\n 'Opera/9.00 (Windows NT 5.1; U; en)']\n\n agent = choice(agents)\n opener = urllib2.build_opener()\n opener.addheaders = [('User-agent', agent)]\n\n html = opener.open(url).read()\n time.sleep(sleeptime)\n\n return html\n\n except Exception as e:\n if verbose: print('Something went wrong with Crawling:\\n%s' % e)\n return None\n \n \n \n def _immoscout24parser(self, url, verbose=False):\n '''\n Read search results from Immoscout24.ch, given a specific url indicating the search criteria and the page number.\n '''\n if verbose: print (\"Scanning the following url:\", url)\n\n try:\n soup = BeautifulSoup(self._urlquery(url, verbose), 'html.parser')\n scripts = soup.findAll('script')\n scripts = filter(None, [script.string for script in scripts])\n sr = next(script for script in scripts if 'searchResult' in script)\n #Come cleaning... with not-so-clean code. 
Because ImmoScout keeps changing stuff and I can't be bothered to fix this properly every time.\n s = sr.replace(\":undefined\", ':\"undefined\"').lstrip(\"__INITIAL_STATE__=\")\n s = regex.sub('\\{\"render\".*?(?:\\{(?:(?R)|[^{}])*})\\}', '\"\"', s)\n poss = [m.start() for m in regex.finditer('e=>', s)]\n res = s[:poss[0]]\n for i in range(len(poss)):\n end = len(s)\n if i+1 < len(poss):\n end = poss[i+1]\n dd = regex.sub('(?:\\{(?:(?R)|[^{}])*})', '\"\"', s[poss[i]+3:end], 1)\n res += dd\n \n js = json.loads(res)\n return js\n \n except Exception as e:\n if verbose: print(\"Error in immoscout24 parser: %s\" % e)\n return None\n \n \n \n def _make_url(self, criteria, page):\n url = 'https://www.immoscout24.ch/en/real-estate/{mode}/city-{city}?'.format(**criteria)\n for key in [x for x in criteria.keys() if x not in ['city', 'mode']]:\n try:\n url+=self.__parameter_names[key]+'='+str(criteria[key])+\"&\"\n except KeyError:\n raise Exception(\"Error in make_url\", \"Unsupported search parameter!\")\n url = url[:-1]+\"&pn=\"+str(page) #add page number\n\n return url\n \n \n\n def _get_listings(self, criteria, verbose):\n \"\"\"\n Pull a list of listings for given criteria and cities, and put them in a dataframe.\n \"\"\"\n print (\"city:\",criteria['city'])\n page = 0\n data_pages = []\n numberOfPages = 1\n while page<numberOfPages:\n page+=1\n url = self._make_url(criteria, page)\n resultlist_json = None\n N_attempts = 0\n while resultlist_json is None and N_attempts<5:\n try: \n N_attempts+=1\n resultlist_json = self._immoscout24parser(url, verbose)\n numberOfPages = int(resultlist_json[\"pages\"][\"searchResult\"][\"resultData\"][\"pagingData\"][\"totalPages\"])\n print(\"\\tpage: {0}/{1}\".format(page,numberOfPages), end=\" \")\n data = resultlist_json[\"pages\"][\"searchResult\"][\"resultData\"][\"listData\"]\n data = pd.DataFrame.from_dict(data)\n data[\"searched-city\"]=criteria['city'] #store which city we searched, for reference\n data[\"fetch-date\"]=dt.datetime.now().date()\n print(\"({0} results)\".format(data.shape[0]))\n data_pages.append(copy.copy(data))\n except Exception as e:\n print (e)\n pass\n data_all = pd.concat(data_pages)\n\n return data_all\n \n \n \n def scrape(self):\n dfs = []\n for city in self.criteria['cities']:\n criteria_city = copy.copy(self.criteria)\n criteria_city['city'] = city\n del criteria_city['cities']\n dfs.append(self._get_listings(criteria_city, verbose=self.__verbose))\n\n self.df = pd.concat(dfs)\n \n \n \n def set_verbose(self, flag):\n if not isinstance(flag, bool):\n raise Exception(\"ImmoKaa - set_verbose\", \"Argument must be bool.\")\n self.__verbose=flag\n \n \n \n def save_scraped_dataframe(self):\n if self.df is None:\n raise Exception(\"There is no scraped dataset to save.\")\n today = dt.datetime.now().date().strftime(\"%Y-%m-%d\")\n self.df.to_csv(self.__base_dir+\"/serach_results_\"+today+\".csv\", mode=\"w\")\n print (\"History file created/overwritten.\")\n \n \n \n def get_preexisting_data(self):\n pres = []\n try:\n for f in glob(self.__base_dir+\"/serach_results_*.csv\"):\n pres.append(pd.read_csv(f))\n pres[-1][\"fetch-date\"] = pd.to_datetime(pres[-1]['fetch-date'],\\\n format=\"%Y-%m-%d\").dt.date\n self.df_pre = pd.concat(pres)\n print (\"Found {0} pre-existing data file(s). You can access the full dataset using get_full_dataset().\". format(len(pres)))\n except FileNotFoundError:\n pass \n \n \n def get_full_dataset(self):\n return pd.concat([self.df, self.df_pre])" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.concat", "pandas.DataFrame.from_dict" ] ]
yxia-fb/shaDow-GNN
[ "2b867011c7084d4ed1b407e29f3ee09632fcc3dc" ]
[ "shaDow/utils.py" ]
[ "import os\nimport torch\nimport glob\n\nimport numpy as np\nimport scipy.sparse as sp\nimport yaml\nfrom sklearn.preprocessing import StandardScaler\n\nfrom shaDow.globals import git_rev, timestamp, Logger\nfrom torch_scatter import scatter\n\nfrom copy import deepcopy\n\nfrom typing import List, Union\nfrom shaDow import TRAIN, VALID, TEST\n\nfrom shaDow.data_converter import convert2shaDow, to_undirected\n\n\n\ndef load_data(prefix, dataset, config_data, os_='linux'):\n Logger.printf(\"Loading training data..\")\n prefix_l = prefix['local']\n fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']\n if not all(glob.glob(f\"{prefix_l}/{dataset}/{f}\") for f in fs_shadow):\n convert2shaDow(dataset, prefix_l)\n role = np.load(f\"./{prefix_l}/{dataset}/split.npy\", allow_pickle=True)\n if type(role) == np.ndarray:\n role = role[()]\n else:\n assert type(role) == dict\n # role is used as index, which is required to be int64 (node_set won't take much mem anyways)\n node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64), \n VALID: np.asarray(role[VALID], dtype=np.int64), \n TEST : np.asarray(role[TEST], dtype=np.int64)}\n # load adj. If we want to convert to_undirected, and the undirected adj has been stored as external file,\n # then we skip the conversion in the program and directly load the undirected adj. \n bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},\n VALID: {'indptr': None, 'indices': None, 'data': None},\n TEST: {'indptr': None, 'indices': None, 'data': None}}\n def fill_bin_adj_dict(mode_, split_, type_):\n for d in ['indptr', 'indices', 'data']:\n bin_adj_files[mode_][d] = f\"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin\"\n if config_data['to_undirected']:\n if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:\n adj_full = load_adj(prefix_l, dataset, 'raw', 'full')\n adj_full = to_undirected(adj_full)\n fill_bin_adj_dict(VALID, 'full', 'undirected')\n fill_bin_adj_dict(TEST, 'full', 'undirected')\n if config_data['transductive']:\n adj_train = adj_full\n fill_bin_adj_dict(TRAIN, 'full', 'undirected')\n elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:\n adj_train = load_adj(prefix_l, dataset, 'raw', 'train')\n adj_train = to_undirected(adj_train)\n fill_bin_adj_dict(TRAIN, 'train', 'undirected')\n assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))\n else:\n adj_full = load_adj(prefix_l, dataset, 'raw', 'full')\n fill_bin_adj_dict(VALID, 'full', 'raw')\n fill_bin_adj_dict(TEST, 'full', 'raw')\n if config_data['transductive']:\n adj_train = adj_full\n fill_bin_adj_dict(TRAIN, 'full', 'raw')\n else:\n adj_train = load_adj(prefix, dataset, 'raw', 'train')\n assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))\n fill_bin_adj_dict(TRAIN, 'train', 'raw')\n\n bin_adj_files = validate_bin_file(bin_adj_files)\n\n Logger.printf(f\"SETTING TO {'TRANS' if config_data['transductive'] else 'IN'}DUCTIVE LEARNING\", style=\"red\")\n label_full = np.load(f\"./{prefix_l}/{dataset}/label_full.npy\")\n label_full = torch.from_numpy(label_full)\n \n # ======= deal with feats =======\n mode_norm = 'all' if config_data['transductive'] else 'train'\n if config_data['norm_feat'] and os.path.isfile(f\"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy\"):\n feats = np.load(f\"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy\")\n Logger.printf(f\"Loading '{mode_norm}'-normalized features\", style='yellow')\n else:\n feats = 
np.load(f\"./{prefix_l}/{dataset}/feat_full.npy\")\n if config_data['norm_feat']:\n feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]\n scaler = StandardScaler()\n scaler.fit(feats_fit)\n feats = scaler.transform(feats)\n Logger.printf(f\"Normalizing node features (mode = {mode_norm})\", style=\"yellow\")\n else:\n Logger.printf(\"Not normalizing node features\", style=\"yellow\")\n feats = torch.from_numpy(feats.astype(np.float32, copy=False))\n Logger.printf(\"Done loading training data..\")\n return {'adj_full' : adj_full, \n 'adj_train' : adj_train, \n 'feat_full' : feats, \n 'label_full': label_full, \n 'node_set' : node_set,\n 'bin_adj_files': bin_adj_files}\n\n\ndef parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):\n # [config]\n if args.configs is not None:\n config_train = args.configs\n else:\n assert task in ['inference', 'postproc']\n if task == 'inference':\n if args.inference_configs is None:\n assert not args.compute_complexity_only\n dir_candy = args.inference_dir\n else:\n assert args.inference_dir is None and args.compute_complexity_only\n dir_candy = None\n config_train = args.inference_configs\n else: \n if args.postproc_dir is not None:\n dir_candy = args.postproc_dir\n else:\n with open(args.postproc_configs) as f:\n config_temp = yaml.load(f, Loader=yaml.FullLoader)\n if 'dir_pred_mat' in config_temp: # all such dirs MUST contain the same yaml\n dir_candy = config_temp['dir_pred_mat'][0] \n elif 'dir_emb_mat' in config_temp: # all ens models should have the same arch (only differs in sampler)\n dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]\n else:\n raise NotImplementedError\n if dir_candy is not None:\n assert os.path.isdir(dir_candy)\n f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]\n assert len(f_yml) == 1\n config_train = f\"{dir_candy}/{f_yml[0]}\"\n with open(config_train) as f_config_train:\n config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)\n config_train_copy = deepcopy(config_train)\n # [data]\n config_data = {\"to_undirected\" : False,\n \"transductive\" : False,\n \"norm_feat\" : True}\n config_data.update(config_train['data'])\n # [arch]\n arch_gnn = { # default values\n \"dim\" : -1,\n \"aggr\" : \"sage\",\n \"residue\" : \"none\",\n \"pooling\" : \"center\",\n \"loss\" : \"softmax\",\n \"num_layers\" : -1,\n \"act\" : \"I\",\n \"heads\" : -1,\n \"feature_augment\" : \"hops\",\n \"feature_smoothen\" : \"none\",\n \"label_smoothen\" : \"none\", # label_smoothen is only considered if use_label != none\n \"ensemble_act\" : \"leakyrelu\",\n \"branch_sharing\" : False,\n \"use_label\" : \"none\"\n }\n arch_gnn.update(config_train[\"architecture\"])\n assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']\n assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']\n assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']\n assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']\n assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']\n if arch_gnn[\"feature_augment\"] and arch_gnn[\"feature_augment\"].lower() != \"none\":\n arch_gnn[\"feature_augment\"] = set(k for k in arch_gnn[\"feature_augment\"].split(\"-\"))\n else:\n arch_gnn['feature_augment'] = set()\n # [params]\n params_train = {\n \"lr\" : 0.01,\n \"dropedge\" : 0.0,\n \"ensemble_dropout\" : \"none\"\n }\n params_train.update(config_train[\"hyperparameter\"])\n params_train[\"lr\"] = 
float(params_train[\"lr\"])\n # [sampler]\n sampler_preproc, sampler_train = [], []\n for s in config_train['sampler']:\n phase = s.pop('phase')\n if phase == 'preprocess':\n sampler_preproc.append(s)\n elif phase == 'train':\n sampler_train.append(s)\n else:\n raise NotImplementedError\n batch_size = config_train[\"hyperparameter\"][\"batch_size\"]\n config_sampler_preproc = {\"batch_size\": batch_size, \"configs\": sampler_preproc}\n config_sampler_train = {\"batch_size\": batch_size, \"configs\": sampler_train}\n # add self-edges for certain arch. e.g., for GAT, will be divide-by-0 error in grad without self-edges\n if arch_gnn[\"aggr\"] in [\"gcn\", \"gat\", \"gatscat\"]:\n for sc in config_sampler_train[\"configs\"]:\n num_ens = [len(v) for k, v in sc.items() if k != 'method']\n assert max(num_ens) == min(num_ens)\n sc[\"add_self_edge\"] = [True] * num_ens[0]\n # [copy yml]\n name_key = f\"{arch_gnn['aggr']}_{arch_gnn['num_layers']}\"\n dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)\n return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full\n\n\ndef parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):\n if f_config is not None:\n with open(f_config) as f:\n config_postproc = yaml.load(f, Loader=yaml.FullLoader)\n name_key = f\"postproc-{arch_gnn['aggr']}_{arch_gnn['num_layers']}\"\n log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)\n skip_instantiate = []\n if 'check_record' in config_postproc:\n load_acc_record = config_postproc['check_record']\n else:\n load_acc_record = True\n if config_postproc['method'] == 'cs': # C&S\n acc_record = [] if load_acc_record else None\n if dir_load is not None:\n if 'dir_pred_mat' not in config_postproc:\n config_postproc['dir_pred_mat'] = [dir_load]\n elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:\n config_postproc['dir_pred_mat'].append(dir_load)\n config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])\n for i, di in enumerate(config_postproc['dir_pred_mat']):\n if load_acc_record:\n acc_record.append(logger.decode_csv('final', di))\n for f in os.listdir(di):\n if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):\n config_postproc['pred_mat'][i] = torch.load(f\"{di}/{f}\")\n break\n if all(m is not None for m in config_postproc['pred_mat']):\n skip_instantiate = ['data', 'model']\n elif config_postproc['method'] == 'ensemble': # Variant of subgraph ensemble as postproc\n acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None\n assert dir_load is None\n config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}\n for sname, dirs_l in config_postproc['dir_emb_mat'].items():\n for i, di in enumerate(dirs_l):\n if load_acc_record:\n acc_record[sname].append(logger.decode_csv('final', di))\n for f in os.listdir(di):\n if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):\n config_postproc['emb_mat'][sname][i] = torch.load(f\"{di}/{f}\")\n break\n if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):\n skip_instantiate = ['model'] # you have to load data (role, labels) anyways\n return config_postproc, acc_record, skip_instantiate\n\n\ndef log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):\n if task == 'train':\n prefix = 'running'\n elif task == 'inference':\n 
prefix = 'INF'\n elif task == 'postproc':\n prefix = 'POST'\n else:\n raise NotImplementedError\n log_dir = f\"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/\"\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n yml_file = f\"{log_dir}/{yml_name_key}.yml\"\n with open(yml_file, 'w') as f:\n yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)\n return log_dir\n\n\n# =============== #\n# ADJ UTILS #\n# =============== #\n\ndef get_deg_torch_sparse(adj):\n return scatter(adj._values(), adj._indices()[0], reduce=\"sum\")\n\n\ndef adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):\n \"\"\"\n Normalize adj according to the method of rw normalization.\n Note that sym norm is used in the original GCN paper (kipf),\n while rw norm is used in GraphSAGE and some other variants.\n \n # Procedure:\n # 1. adj add self-connection --> adj'\n # 2. D' deg matrix from adj'\n # 3. norm by D^{-1} x adj'\n if sort_indices is True, we re-sort the indices of the returned adj\n Note that after 'dot' the indices of a node would be in descending order\n rather than ascending order\n \"\"\"\n if type(adj) == torch.Tensor:\n assert deg is None\n assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]\n _deg_orig = get_deg_torch_sparse(adj)\n if dropedge > 0:\n masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()\n adj._values()[masked_indices] = 0\n _deg_dropped = get_deg_torch_sparse(adj)\n else:\n _deg_dropped = _deg_orig\n _deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())\n _deg = torch.clamp(_deg, min=1)\n _val = adj._values()\n _val /= _deg\n adj_norm = adj\n else:\n assert dropedge == 0., \"not supporting dropedge for scipy csr matrices\"\n assert adj.shape[0] == adj.shape[1]\n diag_shape = (adj.shape[0], adj.shape[1])\n D = adj.sum(1).flatten() if deg is None else deg\n D = np.clip(D, 1, None) # if deg_v == 0, it doesn't matter what value we clip it to. 
\n norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)\n adj_norm = norm_diag.dot(adj)\n if sort_indices:\n adj_norm.sort_indices()\n return adj_norm\n\n\ndef adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):\n assert adj.shape[0] == adj.shape[1]\n assert adj.data.sum() == adj.size, \"symmetric normalization only supports binary input adj\"\n N = adj.shape[0]\n # drop edges symmetrically\n if dropedge > 0:\n masked_indices = np.random.choice(adj.size, int(adj.size * dropedge))\n adj.data[masked_indices] = 0\n adjT = adj.tocsc()\n data_add = adj.data + adjT.data\n survived_indices = np.where(data_add == 2)[0]\n adj.data *= 0\n adj.data[survived_indices] = 1\n # augment adj with self-connection\n if add_self_edge:\n indptr_new = np.zeros(N + 1)\n neigh_list = [set(adj.indices[adj.indptr[v] : adj.indptr[v+1]]) for v in range(N)]\n for i in range(len(neigh_list)):\n neigh_list[i].add(i)\n neigh_list[i] = np.sort(np.fromiter(neigh_list[i], int, len(neigh_list[i])))\n indptr_new[i + 1] = neigh_list[i].size\n indptr_new = indptr_new.cumsum()\n indices_new = np.concatenate(neigh_list)\n data_new = np.broadcast_to(np.ones(1), indices_new.size)\n adj_aug = sp.csr_matrix((data_new, indices_new, indptr_new), shape=adj.shape)\n # NOTE: no need to explicitly convert dtype, since adj_norm_sym is used for subg only\n else:\n adj_aug = adj\n # normalize\n D = np.clip(adj_aug.sum(1).flatten(), 1, None)\n norm_diag = sp.dia_matrix((np.power(D, -0.5), 0), shape=adj_aug.shape)\n adj_norm = norm_diag.dot(adj_aug).dot(norm_diag)\n if sort_indices:\n adj_norm.sort_indices()\n return adj_norm\n\n\ndef coo_scipy2torch(adj):\n \"\"\"\n convert a scipy sparse COO matrix to torch\n \"\"\"\n values = adj.data\n indices = np.vstack((adj.row, adj.col))\n i = torch.LongTensor(indices)\n v = torch.FloatTensor(values)\n return torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))\n\n\n# ================= #\n# ADJ FILE IO UTILS #\n# ================= #\n\ndef load_adj(prefix, dataset, type_, split_):\n \"\"\"\n Try to load the prestored undirected adj. If the file does not exist, then you MUST return a None\n \"\"\"\n assert split_ in ['full', 'train'], \"UNKNOWN ADJ SPLIT. ONLY ACCEPT [full] or [train]\"\n assert type_ in ['raw', 'undirected'], \"UNKNOWN ADJ TYPE. 
ONLY ACCEPT [raw] or [undirected]\"\n file_adj = f\"{prefix}/{dataset}/adj_{split_}_{type_}.\" + \"{}\"\n if os.path.isfile(file_adj.format('npz')):\n adj = sp.load_npz(file_adj.format('npz'))\n elif os.path.isfile(file_adj.format('npy')):\n adj_d = np.load(file_adj.format('npy'), allow_pickle=True)\n if type(adj_d) == np.ndarray:\n adj_d = adj_d[()]\n else:\n assert type(adj_d) == dict\n indptr = adj_d['indptr']\n indices = adj_d['indices']\n if 'data' in adj_d:\n data = adj_d['data']\n else:\n data = np.broadcast_to(np.ones(1, dtype=np.bool), indices.size)\n num_nodes = indptr.size - 1\n adj = sp.csr_matrix((data, indices, indptr), shape=(num_nodes, num_nodes))\n else:\n adj = None\n return adj\n\n\ndef validate_bin_file(bin_adj_files):\n for md, df in bin_adj_files.items():\n assert set(df.keys()) == set(['indptr', 'indices', 'data'])\n if not os.path.isfile(df['indptr']) or not os.path.isfile(df['indices']):\n return {mmd: None for mmd in bin_adj_files}\n if not os.path.isfile(df['data']):\n df['data'] = ''\n return bin_adj_files\n\n\ndef merge_stat_record(dict_l : List[dict]):\n key_l = [set(d.keys()) for d in dict_l]\n assert all(k == key_l[0] == set([TRAIN, VALID, TEST]) for k in key_l)\n names_stat = set(dict_l[0][TRAIN].keys())\n ret = {n: {TRAIN: [], VALID: [], TEST: []} for n in names_stat}\n for d in dict_l:\n for m in [TRAIN, VALID, TEST]:\n assert set(d[m].keys()) == names_stat\n for k, v in d[m].items():\n ret[k][m].append(v)\n return ret" ]
[ [ "numpy.vstack", "numpy.load", "torch.FloatTensor", "scipy.sparse.dia_matrix", "torch.Size", "numpy.ones", "numpy.zeros", "torch.load", "scipy.sparse.csr_matrix", "numpy.asarray", "numpy.where", "torch.from_numpy", "numpy.clip", "numpy.power", "sklearn.preprocessing.StandardScaler", "numpy.concatenate", "torch.LongTensor", "torch.clamp" ] ]
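The adj_norm_rw routine recorded above, in its scipy branch, amounts to dividing each row of the adjacency matrix by its clipped degree (D^{-1} A). A minimal sketch of that normalization on an invented 4-node toy graph, independent of the repository's own data loaders:

    import numpy as np
    import scipy.sparse as sp

    # toy undirected 4-node graph (assumed here purely for illustration)
    adj = sp.csr_matrix(np.array([[0, 1, 1, 0],
                                  [1, 0, 1, 0],
                                  [1, 1, 0, 1],
                                  [0, 0, 1, 0]], dtype=float))

    # rw normalization: divide each row by its degree, clipping degrees to >= 1
    deg = np.clip(np.asarray(adj.sum(1)).flatten(), 1, None)
    adj_rw = sp.dia_matrix((1.0 / deg, 0), shape=adj.shape).dot(adj)
    print(adj_rw.toarray())   # every row now sums to 1
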
dmuehlemann/RPGV
[ "18b4216e6cedce40a020a57e1822a363a8a6b60c" ]
[ "3_ gph-low-pass-filter.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sam Aug 7 11:50:05 2020\n\n@author: Dirk\n\nThis scripts applies a 10day low pass filter to the ERA5 gph daily means\n\n\"\"\"\n\nimport scipy.signal as signal\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nimport xarray as xr\n\n\n#Define input and output data\ndata_folder = Path(\"../data/\")\nfilename = data_folder / 'gph-daily-mean.nc'\n\ndata_out = data_folder / 'gph-daily-mean-lowpass_2_0-1.nc'\nfig_out = data_folder / 'fig/gph-daily-mean-lowpass_2_0-1.png'\n\n\n#Load data\nz_all = xr.open_dataset(filename)\n\n\n# First, design the Buterworth filter\nN = 2 # Filter order\nWn = 0.1 # Cutoff frequency\nB, A = signal.butter(N, Wn, output='ba')\n\n\n# temp = z_all.isel(latitude=10, longitude=10).z.loc[\"2000-01-01\":\"2005-01-01\"]\n# Second, apply the filter\nz_allf = xr.apply_ufunc(\n signal.filtfilt, B, A, z_all,\n kwargs=dict(\n axis=0,\n )\n)\n\n\n# Make plots\nd = 10000\na=10150\nb=100\nc=150\nfor i in range(0,10):\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n plt.plot(z_all.z[d:a, b, c], 'b-')\n plt.plot(z_allf.z[d:a, b, c], 'r-',)\n plt.ylabel(\"Geopotential height\")\n plt.legend(['Original','Filtered'])\n plt.title(\"4-day lowpass filtered geopotential height\")\n ax1.axes.get_xaxis().set_visible(False)\n \n ax1 = fig.add_subplot(212)\n plt.plot(z_all.z[d:a, b, c]-z_allf.z[d:a, b, c], 'b-')\n plt.ylabel(\"Geopotential height\")\n plt.xlabel(\"Days\")\n plt.legend(['Residuals'])\n name= 'fig/filter/gph-daily-mean-lowpass_2_0-25_150d'+str(i)+'.png'\n a = a +5\n b = b +5\n c = c+5\n d = d +5\n fig.savefig(data_folder / name)\n\n\n#save results and plot\n# z_allf.to_netcdf(data_out)\n# fig.savefig(fig_out)\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "scipy.signal.butter", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
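The script above wraps an order-2 Butterworth low-pass (cutoff 0.1 of Nyquist) in xarray's apply_ufunc; the underlying scipy call, shown here on a synthetic toy series rather than the ERA5 geopotential heights used in the repository, is simply butter followed by the zero-phase filtfilt:

    import numpy as np
    from scipy import signal

    # synthetic daily series: a slow cycle plus high-frequency noise (toy data)
    t = np.arange(365)
    x = np.sin(2 * np.pi * t / 90.0) + 0.3 * np.random.randn(t.size)

    # order-2 Butterworth low-pass, cutoff 0.1 in units of the Nyquist frequency
    B, A = signal.butter(2, 0.1, output='ba')

    # filtfilt runs the filter forward and backward, so the output has no phase lag
    x_low = signal.filtfilt(B, A, x)
    residual = x - x_low
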
endremborza/data-bevy
[ "25398124595ffddc201de6a748e84bb24d5885b2" ]
[ "staging/stop_detection/stops.py" ]
[ "from dataclasses import dataclass\nfrom datetime import datetime\n\nimport datazimmer as dz\nimport pandas as pd\nfrom colassigner import ColAssigner, get_all_cols\n\n\nclass NoStops(Exception):\n pass\n\n\n@dataclass\nclass DaySetup:\n work_start: int\n work_end: int\n home_arrive: int\n home_depart: int\n\n\nclass Coordinates(dz.CompositeTypeBase):\n lat = float\n lon = float\n\n\nclass Interval(dz.CompositeTypeBase):\n start = datetime\n end = datetime\n\n\nclass PingFeatures(dz.TableFeaturesBase):\n loc = Coordinates\n datetime = datetime\n device_id = str\n\n\nclass StopFeatures(dz.TableFeaturesBase):\n device_id = str\n destination_label = str\n stay_number = int\n n_events = int\n interval = Interval\n center = Coordinates\n is_home = bool\n is_work = bool\n info = str\n\n\nclass Labeler(ColAssigner):\n def __init__(self, model, day: DaySetup) -> None:\n self.model = model\n self.day = day\n\n def ts(self, df):\n return df[PingFeatures.datetime].view(int) / 10**9\n\n def hour(self, df):\n return df[PingFeatures.datetime].dt.hour\n\n def destination_label(self, df):\n arr = df.loc[:, [PingFeatures.loc.lat, PingFeatures.loc.lon, Labeler.ts]].values\n try:\n return self.model.fit_predict(arr).astype(str)\n except Exception as e:\n assert \"No stop events found\" in str(e)\n raise NoStops(\"hopefully\")\n\n def stay_number(self, df):\n return (\n df[Labeler.destination_label] != df[Labeler.destination_label].shift(1)\n ).cumsum()\n\n def is_worktime(self, df):\n return (df[Labeler.hour] >= self.day.work_start) & (\n df[Labeler.hour] <= self.day.work_end\n )\n\n def is_hometime(self, df):\n return (df[Labeler.hour] >= self.day.home_arrive) | (\n df[Labeler.hour] <= self.day.home_depart\n )\n\n\ndef proc_device_pings(ping_df, model, day: DaySetup):\n return (\n ping_df.sort_values(PingFeatures.datetime)\n .pipe(Labeler(model, day))\n .pipe(_gb_stop)\n )\n\n\ndef _gb_stop(labeled_df):\n dt_col = PingFeatures.datetime\n return (\n labeled_df.groupby([Labeler.stay_number, Labeler.destination_label])\n .agg(\n **{\n StopFeatures.n_events: pd.NamedAgg(dt_col, \"count\"),\n StopFeatures.interval.start: pd.NamedAgg(dt_col, \"first\"),\n StopFeatures.interval.end: pd.NamedAgg(dt_col, \"last\"),\n StopFeatures.center.lon: pd.NamedAgg(PingFeatures.loc.lon, \"mean\"),\n StopFeatures.center.lat: pd.NamedAgg(PingFeatures.loc.lat, \"mean\"),\n \"home_rate\": pd.NamedAgg(Labeler.is_hometime, \"mean\"),\n \"work_rate\": pd.NamedAgg(Labeler.is_worktime, \"mean\"),\n }\n )\n .reset_index()\n .assign(\n **{\n \"dur\": lambda df: (\n df[StopFeatures.interval.end] - df[StopFeatures.interval.start]\n ).dt.total_seconds()\n * (df[StopFeatures.destination_label] != \"-1\"),\n StopFeatures.is_work: lambda df: _is_maxw(df, \"work_rate\"),\n StopFeatures.is_home: lambda df: _is_maxw(df, \"home_rate\"),\n StopFeatures.info: \"N/A\",\n StopFeatures.device_id: \"0\",\n }\n )\n .loc[:, get_all_cols(StopFeatures)]\n )\n\n\ndef _is_maxw(df, rate_col):\n gb_cols = [\"_week\", StopFeatures.destination_label]\n wdf = df.assign(\n _week=df[StopFeatures.interval.start].dt.isocalendar().week,\n target=df[\"dur\"] * df[rate_col],\n )\n wsums = wdf.groupby(gb_cols)[\"target\"].sum()\n wmaxs = wsums.groupby(\"_week\").transform(\"max\")\n return (wsums == wmaxs).reindex(wdf[gb_cols]).values\n" ]
[ [ "pandas.NamedAgg" ] ]
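The _gb_stop aggregation above hinges on one pandas idiom: a stay counter that increments whenever the destination label changes between consecutive pings, followed by a groupby with pd.NamedAgg. A stripped-down sketch on invented toy pings (the column names here are illustrative, not the package's feature classes):

    import pandas as pd

    pings = pd.DataFrame({
        "datetime": pd.date_range("2022-01-03 08:00", periods=6, freq="15min"),
        "label": ["2", "2", "-1", "5", "5", "5"],   # "-1" = moving / no stop
        "lat": [47.50, 47.50, 47.51, 47.52, 47.52, 47.52],
        "lon": [19.04, 19.04, 19.05, 19.06, 19.06, 19.06],
    })

    # a new stay starts whenever the label differs from the previous ping's label
    pings["stay"] = (pings["label"] != pings["label"].shift(1)).cumsum()

    stops = (
        pings.groupby(["stay", "label"])
        .agg(n_events=pd.NamedAgg("datetime", "count"),
             start=pd.NamedAgg("datetime", "first"),
             end=pd.NamedAgg("datetime", "last"),
             lat=pd.NamedAgg("lat", "mean"),
             lon=pd.NamedAgg("lon", "mean"))
        .reset_index()
    )
    print(stops)
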
zhubonan/castepxbin
[ "24b875cf44b83d5eac75b52cf45e378a3361e90e" ]
[ "castepxbin/pdos.py" ]
[ "\"\"\"\nReader module for CASTEP pdos_bin\n\nWritten based on the example `pdos_bin.f90` file in open-source OptaDos code\n\"\"\"\nfrom enum import Enum, unique\nimport numpy as np\nfrom scipy.io import FortranFile\n\n\n@unique\nclass SpinEnum(Enum):\n \"\"\"\n Enum type for Spin. Only up and down.\n Usage: Spin.up, Spin.down.\n \"\"\"\n up, down = (1, -1)\n\n def __int__(self):\n return self.value\n\n def __float__(self):\n return float(self.value)\n\n def __str__(self):\n return str(self.value)\n\n\n@unique\nclass OrbitalType(Enum):\n \"\"\"\n Enum type for orbital type. Indices are basically the azimuthal quantum\n number, l.\n \"\"\"\n\n s = 0\n p = 1\n d = 2\n f = 3\n\n def __str__(self):\n return str(self.name)\n\n\n\n@unique\nclass OrbitalEnum(Enum):\n \"\"\"\n Enum type for specific orbitals. The value are the name reported by CASTEP.\n \"\"\"\n\n s = \"S\"\n px = \"Px\"\n py = \"Py\"\n pz = \"Pz\"\n dxy = \"Dxy\"\n dyz = \"Dzy\"\n dz2 = \"Dzz\"\n dxz = \"Dzx\"\n dx2 = \"Dxx-yy\"\n f_xxx = \"Fxxx\"\n f_yyy = \"Fyyy\"\n f_zzz = \"Fzzz\"\n f_xyz = \"Fxyz\"\n f_z_xx_yy = \"Fz(xx-yy)\"\n f_y_zz_xx = \"Fy(zz-xx)\"\n f_x_yy_zz = \"Fx(yy-zz)\"\n\n def __int__(self):\n return self.value\n\n def __str__(self):\n return str(self.name)\n\n @property\n def orbital_type(self):\n \"\"\"\n Returns OrbitalType of an orbital.\n \"\"\"\n return OrbitalType[self.name[0]]\n\n\ndef read_pdos_bin(filename, endian='big'):\n \"\"\"\n Read the pdos_bin file generated by CASTEP Spectral task.\n\n Args:\n filename (str): name of the file to be read\n\n Returns:\n A dictionary of the data that have been read.\n the weights of each orbital in stored in the 'pdos_weights' array\n with dimension (n_orbital, n_max_eign, n_kpoints, n_spin)\n \"\"\"\n esymbol = '>' if endian.upper() == 'BIG' else '>'\n dint = np.dtype(esymbol + 'i4')\n ddouble = np.dtype(esymbol + 'f8')\n dch80 = np.dtype(esymbol + 'a80')\n diarray = lambda x: '{}({},)i4'.format(esymbol, x)\n ddarray = lambda x: '{}({},)f8'.format(esymbol, x)\n\n with FortranFile(filename, header_dtype=np.dtype('>u4')) as fhandle:\n fversion = fhandle.read_record(ddouble)[0]\n fheader = fhandle.read_record(dch80)[0].decode()\n num_kpoints = fhandle.read_record(dint)[0]\n num_spins = fhandle.read_record(dint)[0]\n num_popn_orb = fhandle.read_record(dint)[0]\n max_eignenv = fhandle.read_record(dint)[0]\n\n # Now we start to read more data\n species = fhandle.read_record(diarray(num_popn_orb))\n ion = fhandle.read_record(diarray(num_popn_orb))\n am_channel = fhandle.read_record(diarray(num_popn_orb))\n\n # Now we initialize the storage space for the weights\n pdos_weights = np.zeros(\n (num_popn_orb, max_eignenv, num_kpoints, num_spins),\n dtype=float)\n\n kpoint_positions = np.zeros((num_kpoints, 3), dtype=float)\n num_eigenvalues = np.zeros(num_spins, dtype=int)\n # Now we start to read the actual data\n for nk in range(num_kpoints):\n _, kpoint_positions[nk, :] = fhandle.read_record('>i4', '>(3,)f8')\n for ns in range(num_spins):\n _ = fhandle.read_record(dint)\n num_eigenvalues[ns] = fhandle.read_record(dint)\n for nb in range(num_eigenvalues[ns]):\n pdos_weights[:, nb, nk, ns] = fhandle.read_record(\n '>({},)f8'.format(num_popn_orb))\n\n output = {\n 'fversion': fversion,\n 'fheader': fheader,\n 'num_kpoints': num_kpoints,\n 'num_spins': num_spins,\n 'num_popn_orb': num_popn_orb,\n 'max_eigenenv': max_eignenv,\n 'species': species,\n 'ion': ion,\n 'am_channel': am_channel,\n 'pdos_weights': pdos_weights,\n 'kpoints_positions': kpoint_positions,\n 
'num_eigenvalues': num_eigenvalues,\n 'pdos_weights': pdos_weights,\n }\n return output\n\ndef reorder_pdos_data(input_items, pymatgen_labels=True, use_string_as_keys=False):\n \"\"\"\n Arrange the PDOS weights so it is more meaningful\n\n The result can be used to compute PDOS for creating CompleteDos object\n that can be used for Pymatgen\n\n Args:\n input_items (dict): A dictionary of the pdos information, use the\n output of `read_pdos` function. \n pymatgen_labels (bool): Use pymatgen Enum as the keys of the result dictionary. \n \n\n Returns:\n A dictionary of {Site_index: {Orbital: {Spin: weight}}}\n \"\"\"\n if pymatgen_labels is True:\n try:\n from pymatgen.electronic_structure.core import Orbital as POrbital\n from pymatgen.electronic_structure.core import Spin as PSpin\n except ImportError:\n pymatgen_labels = False\n\n if pymatgen_labels:\n # Note that s-p labels are inferreed from dot castep output\n # f labels - I know the first three is among the first three.\n # There is no way to tell if they are correct, f_1 is not very informative from VASP....\n orbital_mapping = [[POrbital.s], [POrbital.px, POrbital.py, POrbital.pz],\n [\n POrbital.dz2, POrbital.dyz, POrbital.dxz, POrbital.dx2,\n POrbital.dxy\n ],\n [\n POrbital.f_1, POrbital.f_2, POrbital.f_3, POrbital.f0,\n POrbital.f1, POrbital.f2, POrbital.f3\n ]]\n Spin = PSpin\n else:\n # These are the orders inferred from CASTEP output\n orbital_mapping = [[OrbitalEnum.s], [OrbitalEnum.px, OrbitalEnum.py, OrbitalEnum.pz],\n [\n OrbitalEnum.dz2, OrbitalEnum.dyz, OrbitalEnum.dxz, OrbitalEnum.dx2,\n OrbitalEnum.dxy\n ],\n [\n OrbitalEnum.f_xxx, OrbitalEnum.f_yyy, OrbitalEnum.f_zzz, OrbitalEnum.f_xyz,\n OrbitalEnum.f_z_xx_yy, OrbitalEnum.f_y_zz_xx, OrbitalEnum.f_x_yy_zz\n ]]\n Spin = SpinEnum\n\n # We take average of each kpoints from here\n # One might task why not take account the kpoints weight?\n # because it should be taken account of in the TDOS\n weights = input_items['pdos_weights']\n # Specie index for all orbitals\n species = input_items['species']\n # Index of each ion for all orbitals\n ion = input_items['ion']\n num_spins = input_items['num_spins']\n # Angular momentum channel all orbitals\n am_channel = input_items['am_channel']\n\n unique_speices = np.unique(species)\n unique_speices.sort()\n site_index = 0\n output_data = {}\n # Initialise storage space\n for specie in unique_speices:\n specie_mask = specie == species\n # Total number of ions for this specie\n total_ions = ion[specie_mask].max()\n # Note that indice are from one, not zero\n for nion in range(1, total_ions + 1):\n # Iterate through each ion\n ion_mask = (ion == nion) & specie_mask\n max_am = am_channel[ion_mask].max()\n site_dict = {} # {Orbital: {Spin: weight}...}\n for am in range(max_am + 1):\n # Collect the angular momentum channels\n ion_am_mask = (am_channel == am) & ion_mask\n # Indices of each matched channels\n ion_am_idx = np.where(ion_am_mask)[0]\n for iam, iloc in enumerate(ion_am_idx):\n # iloc - index of the oribtal\n # You can have 4 orbitals for p channel - they have difference n numbers\n this_orb = orbital_mapping[am][iam % (2 * am + 1)]\n orb_dict = {} # {Spin: weight...}\n if num_spins == 2:\n for ispin, espin in enumerate((Spin.up, Spin.down)):\n # Sumup\n wtmp = weights[iloc, :, :, ispin]\n orb_dict[espin] = wtmp\n else:\n orb_dict[Spin.up] = weights[iloc, :, :, 0]\n\n # Now we have the orb_dict populated\n # Combined the weights if this orbital has been seen...\n if this_orb in site_dict:\n site_dict[this_orb] = 
_merge_weights(\n site_dict[this_orb], orb_dict)\n else:\n site_dict[this_orb] = orb_dict\n # Now we populated site_dict add it to output_data\n output_data[site_index] = site_dict\n site_index += 1\n\n return output_data\n\n\ndef compute_pdos(pdos_bin, eigenvalues, kpoints_weights, bins):\n \"\"\"\n Compute the PDOS from eigenvalue and kpoint weights\n \n Args:\n pdos_bin (str): Path to the binary pdos_bin file\n eigenvealues (str): Eigenvalue as {Spin: array_)}.\n kpoints_weights (np.ndarray): Weights of each kpoints.\n bins: The bins for computing the density of states.\n \"\"\"\n\n # Walk through the ordred_weights dictionary and compute PDOS for each weight\n ordered_weights = reorder_pdos_data(read_pdos_bin(pdos_bin))\n pdos_data = {}\n for site, porbs_dict in ordered_weights.items():\n porbs_outdict = {}\n for orb, pspin_dict in porbs_dict.items():\n pdos_orbit = {\n spin: np.histogram(\n eigenvalue_set,\n bins=bins,\n weights=kpoints_weights * pspin_dict[\n spin] # weight (nk, ); pspin_dict[spin] (nk, nb)\n )[0]\n for spin, eigenvalue_set in eigenvalues.items()\n }\n porbs_outdict[orb] = pdos_orbit\n pdos_data[site] = porbs_outdict\n return pdos_data\n\n\ndef _merge_weights(spin_d1, spin_d2):\n \"\"\"Sum the weights stored in two dictionaries with keys being the spins\"\"\"\n if len(spin_d1) != len(spin_d2):\n raise RuntimeError(\"Critical - mismatch spin-dict length\")\n out = {}\n for spin in spin_d1:\n out[spin] = spin_d1[spin] + spin_d2[spin]\n return out\n" ]
[ [ "numpy.zeros", "numpy.histogram", "numpy.dtype", "numpy.where", "numpy.unique" ] ]
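read_pdos_bin above is driven by scipy.io.FortranFile: each Fortran unformatted record is pulled with read_record and an explicit big-endian dtype. A minimal sketch of just that access pattern, mirroring the first three records read by the function (the "pdos_bin" path is a placeholder for a CASTEP-generated file; everything after these records is omitted):

    import numpy as np
    from scipy.io import FortranFile

    # 'pdos_bin' is a placeholder path; record headers and payloads are big-endian
    with FortranFile("pdos_bin", header_dtype=np.dtype(">u4")) as fh:
        fversion = fh.read_record(np.dtype(">f8"))[0]            # file version, one float64
        fheader = fh.read_record(np.dtype(">a80"))[0].decode()   # 80-character header
        num_kpoints = fh.read_record(np.dtype(">i4"))[0]         # number of k-points
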
TwinMooon/transformers-plus-performers
[ "c17d6473deb5316363f60bb2ddd1007d4364abe4" ]
[ "src/transformers/modeling_tf_performer_attention.py" ]
[ "from typing import Optional, Union\nimport logging\nimport numpy as np\nimport tensorflow as tf\n\n\nfrom .configuration_performer_attention import PerformerAttentionConfig\nfrom .modeling_utils import (\n find_pruneable_heads_and_indices,\n prune_linear_layer\n)\n\nKERNEL_CALLABLES = {\n 'cosh': lambda x, h: tf.concat((tf.exp(h + x), tf.exp(h - x)), dim=-1),\n 'exp': lambda x, h: tf.exp(h + x), # Default\n 'elu': lambda x: tf.nn.elu(x) + 1,\n 'relu': tf.nn.relu\n}\n\nSHORT_SEQUENCE_BEHAVIOR_CALLABLES = {\n 'use_softmax_eval_only': lambda L, M, training: False if training else L < 2.0 * M,\n 'use_softmax_eval_and_train': lambda L, M, training: L < 2.0 * M, \n 'never_use_softmax': lambda L, M, training: False\n}\n\n\nclass TFPerformerAttention(tf.keras.layers.Layer):\n def __init__(self, config: Optional[Union[dict, PerformerAttentionConfig]] = None, **kwargs):\n super().__init__()\n \n if config is not None:\n # config can either be a dictionary or a PerformerAttentionConfig object\n if not isinstance(config, dict):\n config = config.__dict__\n \n # Just copy over all the parameters\n self.__dict__.update(config)\n else:\n # Make sure we have all the default values filled in\n config = PerformerAttentionConfig(**kwargs)\n kwargs = config.__dict__\n \n # kwargs take precedence over the default values that might be stored in the config object\n self.__dict__.update(kwargs)\n \n if self.num_heads is None or self.d_model is None:\n raise ValueError(\"PerformerAttention: num_heads and d_model must be non-None\")\n \n self.dropout = tf.keras.layers.Dropout(rate=self.attention_dropout)\n self.calls_since_last_redraw = 0\n self.random_features = None\n \n behavior = self.short_sequence_behavior\n if not behavior:\n behavior = 'never_use_softmax' if self.kernel_type == 'relu' else 'use_softmax_eval_only'\n self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]\n \n elif self.kernel_type == 'relu' and behavior != 'never_use_softmax':\n raise ValueError(f\"PerformerAttention: short_sequence_behavior = {behavior} cannot be combined with the relu \"\n \"kernel type\")\n \n elif isinstance(behavior, str):\n self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]\n elif callable(behavior):\n self.should_fallback_to_softmax = behavior\n else:\n raise ValueError(\"PerformerAttention: short_sequence_behavior must be either str or Callable\")\n \n self.kernel_fn = KERNEL_CALLABLES[self.kernel_type]\n\n assert self.d_model % self.num_heads == 0\n \n if self.use_qkv_linear_layers:\n self.q_lin = tf.keras.layers.Dense(units=self.d_model)\n self.k_lin = tf.keras.layers.Dense(units=self.d_model)\n self.v_lin = tf.keras.layers.Dense(units=self.d_model)\n \n self.out_lin = tf.keras.layers.Dense(units=self.d_model)\n\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n attention_head_size = self.d_model // self.num_heads\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)\n # Prune linear layers\n if self.use_qkv_linear_layers:\n self.q_lin = prune_linear_layer(self.q_lin, index)\n self.k_lin = prune_linear_layer(self.k_lin, index)\n self.v_lin = prune_linear_layer(self.v_lin, index)\n \n self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)\n # Update hyper params\n self.num_heads = self.num_heads - len(heads)\n self.d_model = attention_head_size * self.num_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n \n def redraw_features_now(self):\n 
self._generate_feature_matrix()\n \n if self.training and self.redraw_verbose:\n logging.info(\"PerformerAttention: Just redrew random features.\")\n \n self.calls_since_last_redraw = 0\n\n def call(self, query, key, value, mask=None, head_mask=None, output_attentions=False):\n \"\"\"\n Parameters:\n query: torch.tensor(bs, seq_length, dim)\n key: torch.tensor(bs, seq_length, dim)\n value: torch.tensor(bs, seq_length, dim)\n mask: torch.tensor(bs, seq_length)\n\n Returns:\n weights: tf.tensor(bs, num_heads, seq_length, seq_length) Attention weights context: tf.tensor(bs,\n seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`\n \"\"\"\n bs, q_length, dim = query.size()\n k_length = key.size(1)\n # assert dim == self.d_model, 'Dimensions do not match: %s input vs %s configured' % (dim, self.d_model)\n # assert key.size() == value.size()\n\n dim_per_head = self.d_model // self.num_heads\n mask_reshp = (bs, 1, 1, k_length)\n\n def shape(x):\n \"\"\" separate heads \"\"\"\n new_shape = tf.concat((x.shape[:-1], tf.constant([self.num_heads, dim_per_head])), axis=0)\n return tf.transpose(tf.reshape(x, new_shape), perm=[0, 2, 1, 3])\n \n if self.use_qkv_linear_layers:\n q = self.q_lin(query)\n k = self.k_lin(key)\n v = self.v_lin(value)\n else:\n q, k, v = query, key, value\n \n # (bs, num_heads, q_length, dim_per_head)\n q, k, v = (shape(x) for x in (q, k, v))\n \n # If the sequence length is short enough that FAVOR+ would use considerably more time and/or memory than just\n # using softmax attention, use softmax. This works because FAVOR+ is an unbiased estimator of softmax attention.\n m = round(dim_per_head * np.log(dim_per_head)) # m is the number of random features\n if self.should_fallback_to_softmax(q_length, m, self.training):\n scores = q @ tf.linalg.matrix_transpose(k) / (dim ** 0.5)\n \n if mask is not None:\n mask = tf.reshape((mask == 0), mask_reshp) # .expand_as(scores) # (bs, num_heads, q_length, k_length)\n scores -= 1e9 * tf.cast(mask, q.dtype) # (bs, num_heads, q_length, k_length)\n \n attn_map = tf.nn.softmax(scores, dim=-1)\n attn_map = self.dropout(attn_map) # (bs, num_heads, q_length, k_length)\n return self._finalize_attention_output(attn_map @ v, head_mask, attn_map)\n \n # When we're using FAVOR+ we can't output the attention matrix\n if output_attentions:\n raise ValueError(\"TFPerformerAttention: Can't output attention maps when using FAVOR+ linear attention.\")\n \n self._redraw_features_if_needed()\n \n # Get the transformed values of Q and K\n q_prime, k_prime = self.get_projected_queries_and_keys(q, k)\n return self.compute_attention_with_projected_queries_and_keys(q_prime, k_prime, v, mask, head_mask)\n \n # Turns Q into Q', K into K'\n def get_projected_queries_and_keys(self, q, k):\n # Broadcast the feature matrix across the batch dimension\n # new_shape = list(q.shape)\n # new_shape[-2] = self.random_features.shape[-2]\n W_t = tf.linalg.matrix_transpose(self.random_features) # .expand(new_shape)\n \n # Instead of dividing the product QK^T by sqrt(d), we divide Q and K by the 4th root of d.\n q = q / (self.d_model ** 0.25)\n k = k / (self.d_model ** 0.25)\n \n projected_q = q @ W_t\n projected_k = k @ W_t\n \n # Special logic for kernels that attempt to approximate softmax\n if self.kernel_type in ('cosh', 'exp'):\n # The h(x) function is defined in Lemma 1 in Choromanski et al. pg. 4 as exp(-||x||**2 / 2). 
For numerical\n # stability we leverage the fact that exp(x)*exp(y) = exp(x + y) here and delay computing the exp().\n h_of_q = -tf.reduce_sum(q ** 2, dim=-1, keepdim=True) / 2\n h_of_k = -tf.reduce_sum(k ** 2, dim=-1, keepdim=True) / 2\n \n # Compute the numerical stabilizer that we subtract from the input to exp(). For some reason the original\n # Jax implementation uses different types of stabilizers for queries vs. keys, and we follow that here.\n q_stabilizer = tf.math.reduce_max(h_of_q, axis=-1, keepdims=True)\n \n # This is just a scalar\n k_stabilizer = tf.math.reduce_max(h_of_k)\n \n q_kernel_output = self.kernel_fn(projected_q - q_stabilizer, h_of_q)\n k_kernel_output = self.kernel_fn(projected_k - k_stabilizer, h_of_k)\n \n # By multiplying by 1/sqrt(m), we ensure the final matrix product will contain a factor of 1/m. This means\n # each row of Q'K'^T can be interpreted as an average over the exp(omega^T * q) * exp(omega^T * k) terms.\n normalizing_constant = (q_kernel_output.shape[-1] ** -0.5)\n \n q_prime = normalizing_constant * (q_kernel_output + self.kernel_epsilon)\n k_prime = normalizing_constant * (k_kernel_output + self.kernel_epsilon)\n return q_prime, k_prime\n \n # Generalized attention (ReLU, ELU...)\n else:\n return (self.kernel_fn(x) + self.kernel_epsilon for x in (projected_q, projected_k))\n \n def compute_attention_with_projected_queries_and_keys(self, q_prime, k_prime, v, mask = None, head_mask = None):\n # Apply the padding mask to K'. Also applying it to Q' would be redundant.\n if mask is not None:\n k_prime *= tf.expand_dims(tf.expand_dims(mask, 1), -1)#.expand_as(k_prime)\n \n k_prime_t = tf.linalg.matrix_transpose(k_prime)\n output = q_prime @ (k_prime_t @ v)\n \n # Ensure that the output vectors are convex combinations of input vectors; that is,\n # the implied attention scores sum to 1\n if self.normalize_output: \n # Equivalent to multiplying K'^T by a ones vector\n d = q_prime @ tf.expand_dims(tf.math.reduce_sum(k_prime), -1)\n \n # Avoid dividing by very small numbers\n d += 2 * self.normalization_stabilizer * (tf.abs(d) <= self.normalization_stabilizer)\n output /= d\n \n return self._finalize_attention_output(output, head_mask)\n \n def _finalize_attention_output(self, context, head_mask=None, att_map_to_output=None):\n def unshape(x):\n \"\"\" group heads \"\"\"\n x = tf.transpose(context, perm=[0, 2, 1, 3]) # [...seq_len, num_heads, dim_per_head]\n new_last_dim = tf.constant(x.shape[-2] * x.shape[-1]) # Multiply num_heads * dim_per_head\n return tf.reshape(x, tf.concat((x.shape[:-2], new_last_dim), axis=0))\n \n # Mask heads if we want to\n if head_mask is not None:\n context = context * head_mask\n \n context = unshape(context) # (bs, q_length, dim)\n context = self.out_lin(context) # (bs, q_length, dim)\n\n if att_map_to_output:\n return context, att_map_to_output\n else:\n return context,\n\n def _generate_feature_matrix(self):\n dim_per_head = self.d_model // self.num_heads\n num_rows = round(dim_per_head * np.log(dim_per_head))\n \n if not self.use_orthogonal_features:\n return tf.random.normal((num_rows, dim_per_head))\n \n def get_square_block(size):\n with tf.device('/CPU:0'):\n unstructured_block = tf.random.normal((size, size))\n orthog, r = tf.linalg.qr(unstructured_block)\n\n return orthog.t()\n\n num_full_blocks = num_rows // dim_per_head\n block_list = [get_square_block(dim_per_head) for _ in range(num_full_blocks)]\n \n remaining_rows = num_rows - num_full_blocks * dim_per_head\n if remaining_rows > 0:\n q = 
get_square_block(dim_per_head)\n block_list.append(q[:remaining_rows])\n \n final_matrix = tf.concat(block_list)\n \n # This option yields SMREG\n if self.regularize_feature_norms:\n final_matrix *= dim_per_head ** 0.5\n else:\n # Hack to make the matrix columns have the norm we would expect them to have if they were sampled straight\n # from a Gaussian, instead of being all norm 1 since they went through QR decomposition\n multiplier = tf.random.normal((num_rows, dim_per_head)).norm(dim = 1)\n final_matrix = tf.linalg.diag(multiplier) @ final_matrix\n\n self.random_features = final_matrix\n \n def _redraw_features_if_needed(self):\n # We haven't created the projection matrix yet, let's create it\n if self.random_features is None:\n self._generate_feature_matrix()\n \n elif self.feature_redraw_interval is not None:\n if self.redraw_stochastically:\n # Flip a (very biased) coin\n if np.random.default_rng().binomial(1, 1. / self.feature_redraw_interval):\n self.redraw_features_now()\n \n # It's time to redraw the projection matrix\n elif self.calls_since_last_redraw >= self.feature_redraw_interval:\n self.redraw_features_now()\n \n # Keep track of how many forward passes we do before we redraw again\n else:\n self.calls_since_last_redraw += 1\n" ]
[ [ "tensorflow.linalg.diag", "tensorflow.reshape", "tensorflow.abs", "numpy.log", "tensorflow.concat", "tensorflow.nn.softmax", "tensorflow.reduce_sum", "tensorflow.device", "tensorflow.keras.layers.Dense", "tensorflow.constant", "tensorflow.transpose", "tensorflow.keras.layers.Dropout", "tensorflow.math.reduce_max", "tensorflow.expand_dims", "tensorflow.nn.elu", "tensorflow.cast", "tensorflow.linalg.matrix_transpose", "tensorflow.math.reduce_sum", "numpy.random.default_rng", "tensorflow.exp", "tensorflow.random.normal", "tensorflow.linalg.qr" ] ]
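The FAVOR+ path in the layer above never materializes the L x L attention matrix: queries and keys are pushed through a random feature map and the product is re-associated as Q'(K'^T V). A rough numpy sketch of that estimator with toy sizes and a plain exp-kernel feature map (the class's extra numerical stabilizers, masking, and orthogonal-feature logic are dropped):

    import numpy as np

    L, d, m = 6, 4, 64                       # toy sequence length, head dim, feature count
    rng = np.random.default_rng(0)
    q, k, v = rng.normal(size=(3, L, d))

    # positive random features: phi(x) = exp(W x - ||x||^2 / 2) / sqrt(m), W ~ N(0, I)
    W = rng.normal(size=(m, d))
    def phi(x):
        return np.exp(x @ W.T - 0.5 * (x ** 2).sum(-1, keepdims=True)) / np.sqrt(m)

    qp, kp = phi(q / d ** 0.25), phi(k / d ** 0.25)   # divide by 4th root of d, as above

    # associate to the right: (L, m) @ ((m, L) @ (L, d)) -- cost linear in L
    out = (qp @ (kp.T @ v)) / (qp @ kp.sum(axis=0, keepdims=True).T)

    # reference: exact softmax attention, quadratic in L
    scores = np.exp(q @ k.T / np.sqrt(d))
    ref = (scores / scores.sum(axis=-1, keepdims=True)) @ v
    print(np.abs(out - ref).max())   # roughly close; FAVOR+ is an unbiased estimator
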
ChaplinMarchais/cortana-intelligence-product-detection-from-images
[ "2e5370098f9f83cd27cdaba2eab675f3c30ae157" ]
[ "technical_deployment/train_model/imdb_data.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nfrom __future__ import print_function\nfrom builtins import range\nimport sys, os\nfrom helpers import *\nimport scipy.sparse\nimport scipy.io as sio\nimport pickle as cp\nimport numpy as np\nimport fastRCNN\n\n\nclass imdb_data(fastRCNN.imdb):\n def __init__(self, image_set, classes, maxNrRois, imgDir, roiDir, cacheDir, boAddGroundTruthRois):\n fastRCNN.imdb.__init__(self, image_set + \".cache\") #'data_' + image_set)\n self._image_set = image_set\n self._maxNrRois = maxNrRois\n self._imgDir = imgDir\n self._roiDir = roiDir\n self._cacheDir = cacheDir #cache_path\n self._imgSubdirs ={'train': ['positive', 'negative'], 'test': ['testImages']}\n self._classes = classes\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._image_ext = '.jpg'\n self._image_index, self._image_subdirs = self._load_image_set_index()\n self._roidb_handler = self.selective_search_roidb\n self._boAddGroundTruthRois = boAddGroundTruthRois\n\n\n #overwrite parent definition\n @property\n def cache_path(self):\n return self._cacheDir\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_subdirs[i], self._image_index[i])\n\n def image_path_from_index(self, subdir, fname):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._imgDir, subdir, fname)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Compile list of image indices and the subdirectories they are in.\n \"\"\"\n image_index = []\n image_subdirs = []\n for subdir in self._imgSubdirs[self._image_set]:\n imgFilenames = getFilesInDirectory(os.path.join(self._imgDir,subdir), self._image_ext)\n image_index += imgFilenames\n image_subdirs += [subdir] * len(imgFilenames)\n return image_index, image_subdirs\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cp.load(fid)\n print ('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_annotation(i) for i in range(self.num_images)]\n with open(cache_file, 'wb') as fid:\n cp.dump(gt_roidb, fid, cp.HIGHEST_PROTOCOL)\n print ('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n if sys.version_info[0] < 3: \n roidb = cp.load(fid)\n else: \n roidb = cp.load(fid, encoding='latin1')\n print ('{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n\n 
#add ground truth ROIs\n if self._boAddGroundTruthRois:\n roidb = self.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = ss_roidb\n\n #Keep max of e.g. 2000 rois\n if self._maxNrRois and self._maxNrRois > 0:\n print (\"Only keeping the first %d ROIs..\" % self._maxNrRois)\n for i in range(self.num_images):\n gt_overlaps = roidb[i]['gt_overlaps']\n gt_overlaps = gt_overlaps.todense()[:self._maxNrRois]\n gt_overlaps = scipy.sparse.csr_matrix(gt_overlaps)\n roidb[i]['gt_overlaps'] = gt_overlaps\n roidb[i]['boxes'] = roidb[i]['boxes'][:self._maxNrRois,:]\n roidb[i]['gt_classes'] = roidb[i]['gt_classes'][:self._maxNrRois]\n\n with open(cache_file, 'wb') as fid:\n cp.dump(roidb, fid, cp.HIGHEST_PROTOCOL)\n print ('wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def _load_selective_search_roidb(self, gt_roidb):\n # box_list = nrImages x nrBoxes x 4\n box_list = []\n for imgFilename, subdir in zip(self._image_index, self._image_subdirs):\n roiPath = \"{}/{}/{}.roi.txt\".format(self._roiDir, subdir, imgFilename[:-4])\n assert os.path.exists(roiPath), \"Error: rois file not found: \" + roiPath\n rois = np.loadtxt(roiPath, np.int32)\n box_list.append(rois)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_annotation(self, imgIndex):\n \"\"\"\n Load image and bounding boxes info from human annotations.\n\t\t\"\"\"\n #negative images do not have any ground truth annotations\n if self._image_subdirs[imgIndex].lower() == \"negative\":\n return None\n\n imgPath = self.image_path_at(imgIndex)\n bboxesPaths = imgPath[:-4] + \".bboxes.tsv\"\n labelsPaths = imgPath[:-4] + \".bboxes.labels.tsv\"\n assert os.path.exists(bboxesPaths), \"Error: ground truth bounding boxes file not found: \" + bboxesPaths\n assert os.path.exists(labelsPaths), \"Error: ground truth labels file not found: \" + bboxesPaths\n bboxes = np.loadtxt(bboxesPaths, np.float32)\n labels = readFile(labelsPaths)\n\n # in case there's only one annotation and numpy read the array as single array,\n # we need to make sure the input is treated as a multi dimensional array instead of a list/ 1D array\n #if len(bboxes.shape) == 1:\n if len(bboxes)>0 and type(bboxes[0]) == np.float32:\n bboxes = np.array([bboxes])\n\n #remove boxes marked as 'undecided' or 'exclude'\n indicesToKeep = find(labels, lambda x: x!='EXCLUDE' and x!='UNDECIDED')\n bboxes = [bboxes[i] for i in indicesToKeep]\n labels = [labels[i] for i in indicesToKeep]\n\n # Load object bounding boxes into a data frame.\n num_objs = len(bboxes)\n boxes = np.zeros((num_objs,4), dtype=np.uint16)\n gt_classes = np.zeros(num_objs, dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n for bboxIndex,(bbox,label) in enumerate(zip(bboxes,labels)):\n cls = self._class_to_ind[label] #.decode('utf-8')]\n boxes[bboxIndex, :] = bbox\n gt_classes[bboxIndex] = cls\n overlaps[bboxIndex, cls] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes' : boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps' : overlaps,\n 'flipped' : False}\n\n # main call to compute per-calass average precision\n # shape of all_boxes: e.g. 
21 classes x 4952 images x 58 rois x 5 coords+score\n # (see also test_net() in fastRCNN\\test.py)\n def evaluate_detections(self, all_boxes, output_dir, use_07_metric=False, overlapThreshold = 0.5):\n aps = []\n for classIndex, className in enumerate(self._classes):\n if className != '__background__':\n rec, prec, ap = self._evaluate_detections(classIndex, all_boxes, use_07_metric, overlapThreshold)\n aps += [[className,ap]]\n print('AP for {:>15} = {:.4f}'.format(className, ap))\n print('Mean AP = {:.4f}'.format(np.nanmean(getColumn(aps,1))))\n return aps\n\n def _evaluate_detections(self, classIndex, all_boxes, use_07_metric = False, overlapThreshold = 0.5):\n \"\"\"\n Top level function that does the PASCAL VOC evaluation.\n\n [overlapThreshold]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False)\n \"\"\"\n assert (len(all_boxes) == self.num_classes)\n assert (len(all_boxes[0]) == self.num_images)\n\n # load ground truth annotations for this class\n gtInfos = []\n for imgIndex in range(self.num_images):\n imgPath = self.image_path_at(imgIndex)\n imgSubir = os.path.normpath(imgPath).split(os.path.sep)[-2]\n if imgSubir != 'negative':\n gtBoxes, gtLabels = readGtAnnotation(imgPath)\n gtBoxes = [box for box, label in zip(gtBoxes, gtLabels) if label == self.classes[classIndex]] #.decode('utf-8')\n else:\n gtBoxes = []\n gtInfos.append({'bbox': np.array(gtBoxes),\n 'difficult': [False] * len(gtBoxes),\n 'det': [False] * len(gtBoxes)})\n\n # parse detections for this class\n # shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score\n detBboxes = []\n detImgIndices = []\n detConfidences = []\n for imgIndex in range(self.num_images):\n dets = all_boxes[classIndex][imgIndex]\n if dets != []:\n for k in range(dets.shape[0]):\n detImgIndices.append(imgIndex)\n detConfidences.append(dets[k, -1])\n # the VOCdevkit expects 1-based indices\n detBboxes.append([dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1])\n detBboxes = np.array(detBboxes)\n detConfidences = np.array(detConfidences)\n\n # debug: visualize GT and detections\n # if classIndex == 15: # and imgPath.endswith(\"WIN_20160803_11_42_36_Pro.jpg\"):\n # imgIndex = 6\n # imgPath = self.image_path_at(imgIndex)\n # img = imread(imgPath)\n # tmp_gtBoxes = gtInfos[imgIndex]['bbox']\n # inds = np.where(np.array(detImgIndices) == 1)[0]\n # tmp_detBoxes = detBboxes[inds]\n # print(detConfidences[inds])\n # drawRectangles(img, tmp_gtBoxes, color = (255, 0, 0)) #thickness=thickness)\n # drawRectangles(img, tmp_detBoxes, color= (0, 255, 0)) # thickness=thickness)\n # imshow(img, maxDim=800)\n\n # compute precision / recall / ap\n rec, prec, ap = self._voc_computePrecisionRecallAp(\n class_recs=gtInfos,\n confidence=detConfidences,\n image_ids=detImgIndices,\n BB=detBboxes,\n ovthresh=overlapThreshold,\n use_07_metric=use_07_metric)\n\n return rec, prec, ap\n\n\n #########################################################################\n # Python evaluation functions (copied/refactored from faster-RCNN)\n ##########################################################################\n def _voc_computePrecisionRecallAp(self, class_recs, confidence, image_ids, BB, ovthresh=0.5, use_07_metric=False):\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = 
class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n npos = sum([len(cr['bbox']) for cr in class_recs])\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = computeAveragePrecision(rec, prec, use_07_metric)\n return rec, prec, ap" ]
[ [ "numpy.cumsum", "numpy.zeros", "numpy.maximum", "numpy.argsort", "numpy.argmax", "numpy.max", "numpy.array", "numpy.finfo", "numpy.loadtxt", "numpy.minimum" ] ]
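Inside _voc_computePrecisionRecallAp above, each detection is matched to ground truth through a vectorized IoU against all ground-truth boxes of that image. The overlap arithmetic, isolated on made-up boxes (the +1 terms follow the PASCAL VOC convention of inclusive pixel coordinates):

    import numpy as np

    BBGT = np.array([[10., 10., 50., 50.],      # ground-truth boxes, [x1, y1, x2, y2]
                     [60., 60., 90., 90.]])
    bb = np.array([12., 8., 48., 52.])          # one detection (toy values)

    ixmin = np.maximum(BBGT[:, 0], bb[0]); iymin = np.maximum(BBGT[:, 1], bb[1])
    ixmax = np.minimum(BBGT[:, 2], bb[2]); iymax = np.minimum(BBGT[:, 3], bb[3])
    iw = np.maximum(ixmax - ixmin + 1., 0.); ih = np.maximum(iymax - iymin + 1., 0.)
    inters = iw * ih
    uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.)
           + (BBGT[:, 2] - BBGT[:, 0] + 1.) * (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
    overlaps = inters / uni
    # the detection counts as a true positive if overlaps.max() exceeds the 0.5 threshold
    print(overlaps.max(), overlaps.argmax())
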
disktnk/chainer-compiler
[ "5cfd027b40ea6e4abf73eb42be70b4fba74d1cde" ]
[ "ch2o/tests/syntax/MultiFunction.py" ]
[ "# coding: utf-8\n\nimport chainer\nimport chainer.links as L\n\n# Network definition\n\n\nclass A(chainer.Chain):\n\n def __init__(self):\n super(A, self).__init__()\n with self.init_scope():\n self.l0 = L.Linear(7)\n self.l1 = L.Linear(5)\n\n def g(self, y):\n return self.l1(y)\n\n def forward(sl, x):\n x1 = sl.l0(x)\n x2 = sl.g(x1)\n return x2\n\n\n# ======================================\n\nimport ch2o\n\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(314)\n\n model = A()\n\n v = np.random.rand(10, 20).astype(np.float32)\n ch2o.generate_testcase(model, [v])\n" ]
[ [ "numpy.random.seed", "numpy.random.rand" ] ]
888dahong888/open3dTest
[ "cf28df9f9f5d24b1ca614414804a1c18d349467c" ]
[ "test01.py" ]
[ "# Read and write point cloud, mesh and image files\n\nimport numpy as np\nimport open3d as o3d\npcd=o3d.io.read_point_cloud(\"data/rs1.pcd\")\n\nprint(pcd) # print the number of points\n\n# visualize the cloud\no3d.visualization.draw_geometries([pcd])\n \n# voxel downsampling\ndownpcd = pcd.voxel_down_sample(voxel_size=0.05)\no3d.visualization.draw_geometries([downpcd])\n \n# estimate normal vectors\ndownpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\no3d.visualization.draw_geometries([downpcd])\n \n# normals are accessed like this\nprint(\"Print a normal vector of the 0th point\")\nprint(downpcd.normals[0])\nprint(\"Print the normal vectors of the first 10 points\")\nprint(np.asarray(downpcd.normals)[:10, :])\n\no3d.io.write_point_cloud(\"data/copy_rs1.pcd\",pcd)\n\n# read, show and write a mesh\nmesh=o3d.io.read_triangle_mesh(\"data/Box.stl\")\no3d.visualization.draw_geometries([mesh])\nprint(mesh)\no3d.io.write_triangle_mesh(\"data/copy_box.stl\",mesh)\n\n# read and write an image\nimg=o3d.io.read_image('data/image.jpg')\nprint(img)\no3d.io.write_image(\"data/copy_img.jpg\",img)\n" ]

[ [ "numpy.asarray" ] ]
INK-USC/shifted-label-distribution
[ "3cf2b7ced3b2e18234db405f6014f049c4830d71" ]
[ "NeuralATT/train.py" ]
[ "'''\nTraining script with ramdom splitting dev set\n'''\n__author__ = 'Maosen'\nimport torch\nfrom model import Model, Wrapper\nimport utils\nfrom utils import Dataset\nimport argparse\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nimport os\nimport random\n\ntorch.backends.cudnn.deterministic = True\n\n\ndef train(args):\n\t# Training\n\tlogging.info(str(args))\n\n\tmodel = Model(args, device, rel2id, emb_matrix)\n\twrapper = Wrapper(model, args, device, train_dset.rel2id)\n\n\tmax_dev_f1 = 0.0\n\ttest_result_on_max_dev_f1 = (0.0, 0.0, 0.0)\n\n\tfor iter in range(niter):\n\t\t# print('Iteration %d:' % iter)\n\t\tloss = 0.0\n\t\tfor idx, batch in enumerate(tqdm(train_dset.batched_data)):\n\t\t\tscope = train_dset.batched_scope[idx]\n\t\t\tloss_batch = wrapper.update(batch, scope)\n\t\t\tloss += loss_batch\n\t\tloss /= len(train_dset.batched_data)\n\n\t\tvalid_loss, (dev_prec, dev_recall, dev_f1), _, _, _ = wrapper.eval(dev_dset)\n\t\tlogging.info('Iteration %d, Train loss %f' % (iter, loss))\n\t\tlogging.info(\n\t\t\t'Dev loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(valid_loss, dev_prec, dev_recall,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdev_f1))\n\t\ttest_loss, (test_prec, test_recall, test_f1), _, _, _ = wrapper.eval(test_dset)\n\t\tlogging.info(\n\t\t\t'Test loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(test_loss, test_prec, test_recall,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t test_f1))\n\t\tif dev_f1 > max_dev_f1:\n\t\t\tmax_dev_f1 = dev_f1\n\t\t\ttest_result_on_max_dev_f1 = (test_prec, test_recall, test_f1)\n\t\t\tsave_filename = os.path.join(args.save_dir, '%s_%d.pkl' % (args.info, runid))\n\t\t\twrapper.save(save_filename, iter)\n\n\t\twrapper.update_lr(valid_loss)\n\n\tlogging.info('Max dev F1: %f' % max_dev_f1)\n\ttest_p, test_r, test_f1 = test_result_on_max_dev_f1\n\tlogging.info('Test P, R, F1 on best epoch: {:.4f}, {:.4f}, {:.4f}'.format(test_p, test_r, test_f1))\n\tlogging.info('\\n')\n\n\treturn max_dev_f1, test_result_on_max_dev_f1\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--data_dir', type=str, default='data/neural_att/KBP')\n\tparser.add_argument('--vocab_dir', type=str, default='data/neural/vocab')\n\n\tparser.add_argument('--encoder', type=str, default='pcnn', help='Model')\n\n\tparser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')\n\tparser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')\n\tparser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')\n\tparser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')\n\tparser.add_argument('--position_dim', type=int, default=30, help='Position encoding dimension.')\n\n\tparser.add_argument('--hidden', type=int, default=230, help='RNN hidden state size.')\n\tparser.add_argument('--window_size', type=int, default=3, help='Convolution window size')\n\tparser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')\n\n\tparser.add_argument('--bidirectional', dest='bidirectional', action='store_true', help='Bidirectional RNN.')\n\tparser.set_defaults(bidirectional=True)\n\n\t# Data Loading & Pre-processing\n\tparser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')\n\tparser.add_argument('--no-lower', dest='lower', action='store_false')\n\tparser.set_defaults(lower=True)\n\tparser.add_argument('--batch_size', type=int, default=64)\n\n\t# 
Optimization\n\tparser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')\n\tparser.add_argument('--lr_decay', type=float, default=0.9)\n\tparser.add_argument('--num_epoch', type=int, default=30)\n\tparser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')\n\n\t# Optimization - Dropout\n\tparser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')\n\tparser.add_argument('--in_drop', type=float, default=0.5, help='Input dropout rate.')\n\tparser.add_argument('--intra_drop', type=float, default=0.3, help='Intra-layer dropout rate.')\n\tparser.add_argument('--out_drop', type=float, default=0.7, help='Output dropout rate.')\n\n\t# Other options\n\tparser.add_argument('--seed', type=int, default=7698)\n\tparser.add_argument('--repeat', type=int, default=5)\n\tparser.add_argument('--save_dir', type=str, default='./dumped_models', help='Root dir for saving models.')\n\tparser.add_argument('--info', type=str, default='KBP_default_ATT', help='Optional info for the experiment.')\n\n\targs = parser.parse_args()\n\n\trandom.seed(args.seed)\n\tnp.random.seed(args.seed)\n\ttorch.manual_seed(args.seed)\n\n\tlogger = logging.getLogger()\n\tlogger.setLevel(logging.INFO)\n\n\t# Load vocab file (id2word)\n\twith open(args.vocab_dir + '/vocab.pkl', 'rb') as f:\n\t\tvocab = pickle.load(f)\n\tword2id = {}\n\tfor idx, word in enumerate(vocab):\n\t\tword2id[word] = idx\n\n\t# Load word embedding\n\temb_file = args.vocab_dir + '/embedding.npy'\n\temb_matrix = np.load(emb_file)\n\tassert emb_matrix.shape[0] == len(vocab)\n\tassert emb_matrix.shape[1] == args.emb_dim\n\targs.vocab_size = len(vocab)\n\tniter = args.num_epoch\n\n\tdevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\tprint('Using device: %s' % device.type)\n\n\tprint('Reading data......')\n\trel2id = utils.load_rel2id('%s/relation2id.json' % args.data_dir)\n\ttrain_filename = '%s/train.json' % args.data_dir\n\ttest_filename = '%s/test.json' % args.data_dir\n\tdev_filename = '%s/dev.json' % args.data_dir\n\ttrain_dset = Dataset(train_filename, args, word2id, device, rel2id=rel2id, shuffle=True, use_bag=True)\n\ttest_dset = Dataset(test_filename, args, word2id, device, rel2id=rel2id, use_bag=False)\n\tdev_dset = Dataset(dev_filename, args, word2id, device, rel2id=rel2id, use_bag=False)\n\n\tif not os.path.isdir(args.save_dir):\n\t\tos.makedirs(args.save_dir)\n\n\tfor runid in range(1, args.repeat + 1):\n\t\tlogging.info('Run model %d times......' % runid)\n\t\tdev_f1, test_result = train(args)\n\t\tlogging.info('')\n" ]
[ [ "numpy.load", "torch.manual_seed", "torch.cuda.is_available", "numpy.random.seed" ] ]
xiaoMrzhang/mmdetection3d
[ "1e7695297e60afe3e09834de1582c3437086ed49" ]
[ "mmdet3d/models/backbones/second_ran.py" ]
[ "from mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner import load_checkpoint, force_fp32\nfrom torch import nn as nn\nimport torch\nimport numpy as np\n\nfrom mmdet.models import BACKBONES\nfrom mmdet3d.utils.soft_mask import SoftMask\n\[email protected]_module()\nclass SECOND_RAN(nn.Module):\n \"\"\"Backbone network for SECOND with residual attention network\n\n Args:\n in_channels (int): Input channels.\n out_channels (list[int]): Output channels for multi-scale feature maps.\n layer_nums (list[int]): Number of layers in each stage.\n layer_strides (list[int]): Strides of each stage.\n norm_cfg (dict): Config dict of normalization layers.\n conv_cfg (dict): Config dict of convolutional layers.\n \"\"\"\n\n def __init__(self,\n in_channels=128,\n out_channels=[128, 128, 256],\n layer_nums=[3, 5, 5],\n layer_strides=[2, 2, 2],\n norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),\n conv_cfg=dict(type='Conv2d', bias=False)):\n super(SECOND_RAN, self).__init__()\n assert len(layer_strides) == len(layer_nums)\n assert len(out_channels) == len(layer_nums)\n\n in_filters = [in_channels, *out_channels[:-1]]\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. we should use pad-conv2d.\n blocks = []\n for i, layer_num in enumerate(layer_nums):\n block = [\n build_conv_layer(\n conv_cfg,\n in_filters[i],\n out_channels[i],\n 3,\n stride=layer_strides[i],\n padding=1),\n build_norm_layer(norm_cfg, out_channels[i])[1],\n nn.ReLU(inplace=True),\n ]\n for j in range(layer_num):\n block.append(\n build_conv_layer(\n conv_cfg,\n out_channels[i],\n out_channels[i],\n 3,\n padding=1))\n block.append(build_norm_layer(norm_cfg, out_channels[i])[1])\n block.append(nn.ReLU(inplace=True))\n\n block = nn.Sequential(*block)\n blocks.append(block)\n\n self.blocks = nn.ModuleList(blocks)\n\n first_layer_conv = build_conv_layer(\n conv_cfg,\n in_filters[0],\n out_channels[0],\n 3,\n stride=2,\n padding=1)\n first_bn = build_norm_layer(norm_cfg, out_channels[0])[1]\n first_relu = nn.ReLU(inplace=True)\n soft_mask = SoftMask(in_channels, [128, 128, 128], out_type=4)\n self.soft_mask_block = nn.Sequential(first_layer_conv, first_bn, first_relu, soft_mask)\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize weights of the 2D backbone.\"\"\"\n # Do not initialize the conv layers\n # to follow the original implementation\n if isinstance(pretrained, str):\n from mmdet3d.utils import get_root_logger\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n\n def forward(self, x):\n \"\"\"Forward function.\n\n Args:\n x (torch.Tensor): Input with shape (N, C, H, W).\n\n Returns:\n tuple[torch.Tensor]: Multi-scale features.\n \"\"\"\n masks = self.soft_mask_block(x)\n outs = []\n for i in range(len(self.blocks)):\n x = self.blocks[i](x)\n # x = torch.mul(x, masks[i]) + x\n outs.append(x)\n return tuple([outs, masks])\n\n @force_fp32(apply_to=('prediction'))\n def focal_loss(self, prediction, target):\n loss_dict = dict()\n self.alpha = 2\n self.beta = 4\n\n positive_index = target.eq(1).float()\n negative_index = target.lt(1).float()\n negative_weights = torch.pow(1 - target, self.beta)\n loss = 0.\n # prediction = torch.clamp(prediction, 1e-3, .999)\n positive_loss = torch.log(prediction + 1e-6) \\\n * torch.pow(1 - prediction, self.alpha) * positive_index\n negative_loss = torch.log(1 - prediction + 1e-6) \\\n * torch.pow(prediction, self.alpha) * negative_weights * negative_index\n\n num_positive = 
positive_index.float().sum()\n positive_loss = positive_loss.sum()\n negative_loss = negative_loss.sum()\n\n if num_positive == 0:\n loss -= negative_loss\n else:\n loss -= (positive_loss + negative_loss) / num_positive\n loss_dict[\"loss_heatmap\"] = loss\n\n # dice loss\n # intersection = (target * prediction).sum(axis=[1,2,3])\n # dice_score = (2 * intersection + 1) / (target.sum(axis=[1,2,3]) + prediction.sum(axis=[1,2,3]) + 1)\n # dice_loss = 1 - torch.mean(dice_score, axis=0)\n # loss_dict[\"loss_dice\"] = dice_loss * 0.2\n # if torch.isnan(loss) or torch.isnan(dice_loss):\n # import pdb;pdb.set_trace()\n\n return loss_dict\n\n @force_fp32(apply_to=('prediction'))\n def loss(self, prediction, target):\n positive_index = target.eq(1).float()\n loss = 0.\n loss_dict = dict()\n\n positive_loss = torch.log(prediction + 1e-6) * positive_index\n negative_loss = torch.log(1 - prediction + 1e-6) * (1 - positive_index)\n num_positive = positive_index.float().sum()\n num_negative = (1 - positive_index).float().sum()\n positive_loss = positive_loss.sum()\n negative_loss = negative_loss.sum()\n\n bec_loss = -(positive_loss / (num_positive+1) + negative_loss / (num_negative+1))\n loss_dict[\"loss_heatmap\"] = bec_loss\n\n # intersection = (target * prediction).sum(axis=[1,2,3])\n # dice_score = (2 * intersection + 1) / (target.sum(axis=[1,2,3]) + prediction.sum(axis=[1,2,3]) + 1)\n # dice_loss = 1 - dice_score.mean()\n # loss_dict[\"loss_dice\"] = dice_loss\n\n return loss_dict\n" ]
[ [ "torch.log", "torch.nn.ModuleList", "torch.nn.Sequential", "torch.nn.ReLU", "torch.pow" ] ]
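The focal_loss method above is the penalty-reduced focal loss used for Gaussian heatmaps (as in CornerNet/CenterNet): positives (target == 1) contribute (1 - p)^alpha * log p, negatives contribute p^alpha * (1 - target)^beta * log(1 - p), and the sum is divided by the number of positives. The same arithmetic on a toy 4x4 heatmap (the zero-positive special case above is simplified here to a clamp):

    import torch

    # toy 1 x 1 x 4 x 4 target heatmap: a Gaussian peaked (== 1) at one cell
    target = torch.tensor([[[[0.1, 0.3, 0.1, 0.0],
                             [0.3, 1.0, 0.3, 0.0],
                             [0.1, 0.3, 0.1, 0.0],
                             [0.0, 0.0, 0.0, 0.0]]]])
    pred = torch.clamp(torch.rand_like(target), 1e-6, 1 - 1e-6)

    alpha, beta = 2, 4
    pos = target.eq(1).float()
    neg = target.lt(1).float()
    pos_loss = (torch.log(pred) * torch.pow(1 - pred, alpha) * pos).sum()
    neg_loss = (torch.log(1 - pred) * torch.pow(pred, alpha)
                * torch.pow(1 - target, beta) * neg).sum()
    loss = -(pos_loss + neg_loss) / pos.sum().clamp(min=1)
    print(loss)
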
ZJU-lishuang/mmaction2
[ "ee34d952e792fd1adea2c2e397b29faff68eaec9" ]
[ "tests/test_models/test_recognizers/test_recognizer2d.py" ]
[ "import torch\n\nfrom mmaction.models import build_recognizer\nfrom ..base import generate_recognizer_demo_inputs, get_recognizer_cfg\n\n\ndef test_tsn():\n config = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 3, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n\n\ndef test_tsm():\n config = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 8, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # test twice sample + 3 crops\n input_shape = (2, 48, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n imgs = demo_inputs['imgs']\n\n config.model.test_cfg = dict(average_clips='prob')\n recognizer = build_recognizer(config.model)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n\n\ndef test_tpn():\n config = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 8, 3, 224, 224)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n assert 'loss_aux' in losses and 'loss_cls' in losses\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n\n # Test forward dummy\n with torch.no_grad():\n _recognizer = build_recognizer(config.model)\n img_list = [img[None, :] for img in imgs]\n if hasattr(_recognizer, 'forward_dummy'):\n _recognizer.forward = _recognizer.forward_dummy\n for one_img in img_list:\n _recognizer(one_img)\n\n\ndef test_tanet():\n config = get_recognizer_cfg(\n 'tanet/tanet_r50_dense_1x1x8_100e_kinetics400_rgb.py')\n config.model['backbone']['pretrained'] = None\n\n recognizer = build_recognizer(config.model)\n\n input_shape = (1, 8, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n\n imgs = demo_inputs['imgs']\n gt_labels = demo_inputs['gt_labels']\n\n losses = recognizer(imgs, gt_labels)\n assert isinstance(losses, dict)\n\n # Test forward test\n with 
torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # test twice sample + 3 crops\n input_shape = (2, 48, 3, 32, 32)\n demo_inputs = generate_recognizer_demo_inputs(input_shape)\n imgs = demo_inputs['imgs']\n\n config.model.test_cfg = dict(average_clips='prob')\n recognizer = build_recognizer(config.model)\n\n # Test forward test\n with torch.no_grad():\n img_list = [img[None, :] for img in imgs]\n for one_img in img_list:\n recognizer(one_img, None, return_loss=False)\n\n # Test forward gradcam\n recognizer(imgs, gradcam=True)\n for one_img in img_list:\n recognizer(one_img, gradcam=True)\n" ]
[ [ "torch.no_grad" ] ]
yikir/mmdetection
[ "dfceb61b0252f81b010f550f2acbe46c7dad6ef6" ]
[ "port.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n# n(net) o(oil) h(hang) r(rust) 检测模块\nimport os\nimport sys\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(current_dir)\n\nfrom mmdet.models import build_detector\nimport mmcv\nimport torch\nimport cv2\nimport time\nimport json\nfrom mmcv.runner import load_checkpoint\nimport PIL.Image as Image\nimport numpy as np\nfrom torchvision.transforms import transforms\nimport pycocotools.mask as maskUtils\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nconfig_file = os.path.join(current_dir, 'configs','config_cascade_rcnn.py')\nweight_file = '/home/kilox/weights/nohr_best.pth'\n# weight_file = '/Weights/verified/oil_detection_v1/oil_best.pth'\n\nclass Object(object):\n def __init__(self):\n self.class_name = \"Unknown\"\n self.trust = 0.0\n self.rank = 0\n \n def to_json(self):\n return json.dumps(self.__dict__)\n\n\nclass Port:\n def __init__(self):\n self.cfg = mmcv.Config.fromfile(config_file)\n # 创建模型 , test_cfg 是rpn rcnn的nms等配置\n self.detector = build_detector(self.cfg.model, train_cfg=None, test_cfg=self.cfg.test_cfg)\n # 加载权重\n load_checkpoint(self.detector, weight_file, map_location='cpu')\n self.detector = self.detector.to('cuda')\n self.detector.eval()\n self.class_names = ('油污','鸟巢','锈蚀','飘挂物')\n \n def process(self, image,save=None):\n \"\"\"\n :param image: PIL.Image 输入图像\n \"\"\"\n np_image = np.asarray(image)\n img, img_meta = self.prepare_single(np_image)\n # forward\n with torch.no_grad():\n # 传入rescale则代表返回的mask是原图的\n result = self.detector.simple_test(img, [img_meta], proposals=None, rescale=True)\n # 将mask 以及bbox画在图上\n img = self.draw_image(np_image, img_meta, result)\n real_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))\n output_file_name = os.path.join(real_time + '.jpg')\n cv2.imwrite(output_file_name, img)\n return False,None,output_file_name\n\n # 将图片添加meta的函数\n def prepare_single(self,img):\n img_info = {'height': img.shape[0], 'width': img.shape[1]}\n img_norm_cfg = self.cfg.img_norm_cfg\n size_divisor = self.cfg.data.test.size_divisor\n \n img, scale_factor = mmcv.imrescale(img, (4014,2400), return_scale=True)\n img_shape = img.shape\n \n img = mmcv.imnormalize(img, img_norm_cfg.mean, img_norm_cfg.std, img_norm_cfg.to_rgb)\n img = mmcv.impad_to_multiple(img, size_divisor)\n pad_shape = img.shape\n _img = transforms.ToTensor()(img).float()\n _img = _img.unsqueeze(0)\n _img_meta = dict(\n ori_shape=(img_info['height'], img_info['width'], 3),\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flip=False)\n _img = _img.to('cuda')\n return _img, _img_meta,\n\n def draw_image(self,img, meta, result, score_thr=0.9):\n def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):\n num_imgs = tensor.size(0)\n mean = np.array(mean, dtype=np.float32)\n std = np.array(std, dtype=np.float32)\n imgs = []\n for img_id in range(num_imgs):\n img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)\n img = mmcv.imdenormalize(\n img, mean, std, to_bgr=to_rgb).astype(np.uint8)\n imgs.append(np.ascontiguousarray(img))\n return imgs\n \n if isinstance(result, tuple):\n bbox_result, segm_result = result\n else:\n bbox_result, segm_result = result, None\n \n h, w, _ = meta['ori_shape']\n img_show = img[:h, :w, :].copy()\n \n bboxes = np.vstack(bbox_result)\n # 画mask\n # # draw segmentation masks\n # if segm_result is not None:\n # segms = mmcv.concat_list(segm_result)\n # inds = np.where(bboxes[:, -1] > score_thr)[0]\n # for i in inds:\n # 
color_mask = np.random.randint(\n # 0, 256, (1, 3), dtype=np.uint8)\n # mask = maskUtils.decode(segms[i]).astype(np.bool)\n # # todo fix dimension not equal\n # img_check_shape = tuple(img_show.shape[0:2])\n # if mask.shape != img_check_shape:\n # width_diff = mask.shape[1] - img_check_shape[1]\n # if mask.shape[1] < img_check_shape[1]:\n # mask = np.pad(mask, (0, width_diff), mode='constant', constant_values=False)\n # np.insert(mask, False, )\n # else:\n # mask = mask[:, :-width_diff]\n # img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5\n # 画bbox\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n assert bboxes.shape[1] == 5\n scores = bboxes[:, -1]\n inds = scores > score_thr\n bboxes = bboxes[inds, :]\n labels = labels[inds]\n \n for bbox, label in zip(bboxes, labels):\n bbox_int = bbox.astype(np.int32)\n left_top = (bbox_int[0], bbox_int[1])\n right_bottom = (bbox_int[2], bbox_int[3])\n cv2.rectangle(\n img_show, left_top, right_bottom, (0, 255, 0), thickness=2)\n label_text = self.class_names[\n label] if self.class_names is not None else 'cls {}'.format(label)\n if len(bbox) > 4:\n label_text += '|{:.02f}'.format(bbox[-1])\n cv2.putText(img_show, label_text, (bbox_int[0], bbox_int[1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0))\n \n return img_show\n\ndef test():\n pass\n\n\nif __name__ == '__main__':\n im = Image.open('/home/kilox/3.jpg')\n port = Port()\n print(port.process(im,True))\n" ]
[ [ "numpy.vstack", "numpy.ascontiguousarray", "torch.no_grad", "numpy.asarray", "numpy.array", "numpy.concatenate", "numpy.full" ] ]
balakrishnan273818/AdvancedLaneDetection
[ "c0993aa9422654258a41fe9616ab4e24b29e6a7a" ]
[ "examples/unwanted/example.py" ]
[ "'''\ndef warper(img, src, dst):\n\n # Compute and apply perpective transform\n img_size = (img.shape[1], img.shape[0])\n M = cv2.getPerspectiveTransform(src, dst)\n warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_NEAREST) # keep same size as input image\n\n return warped\n'''\n\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\n#%matplotlib qt\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((6*9,3), np.float32)\nobjp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('../camera_cal/*.jpg')\n\n# Step through the list and search for chessboard corners\nfor fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9,6),None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (9,6), corners, ret)\n cv2.imshow('img',img)\n cv2.waitKey(30)\n\ncv2.destroyAllWindows()" ]
[ [ "numpy.zeros" ] ]
DanielSun94/kgenlu
[ "bbf377c6740040cb1a8b656785e7c5bfdb8371d5" ]
[ "src/test/rnn_test.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nNLP From Scratch: Translation with a Sequence to Sequence Network and Attention\n*******************************************************************************\n**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_\n\nThis is the third and final tutorial on doing \"NLP From Scratch\", where we\nwrite our own classes and functions to preprocess the data to do our NLP\nmodeling tasks. We hope after you complete this tutorial that you'll proceed to\nlearn how `torchtext` can handle much of this preprocessing for you in the\nthree tutorials immediately following this one.\n\nIn this project we will be teaching a neural network to translate from\nFrench to English.\n\n::\n\n [KEY: > input, = target, < output]\n\n > il est en train de peindre un tableau .\n = he is painting a picture .\n < he is painting a picture .\n\n > pourquoi ne pas essayer ce vin delicieux ?\n = why not try that delicious wine ?\n < why not try that delicious wine ?\n\n > elle n est pas poete mais romanciere .\n = she is not a poet but a novelist .\n < she not not a poet but a novelist .\n\n > vous etes trop maigre .\n = you re too skinny .\n < you re all alone .\n\n... to varying degrees of success.\n\nThis is made possible by the simple but powerful idea of the `sequence\nto sequence network <https://arxiv.org/abs/1409.3215>`__, in which two\nrecurrent neural networks work together to transform one sequence to\nanother. An encoder network condenses an input sequence into a vector,\nand a decoder network unfolds that vector into a new sequence.\n\n.. figure:: /_static/img/seq-seq-images/seq2seq.png\n :alt:\n\nTo improve upon this model we'll use an `attention\nmechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder\nlearn to focus over a specific range of the input sequence.\n\n**Recommended Reading:**\n\nI assume you have at least installed PyTorch, know Python, and\nunderstand Tensors:\n\n- https://pytorch.org/ For installation instructions\n- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general\n- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview\n- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user\n\n\nIt would also be useful to know about Sequence to Sequence networks and\nhow they work:\n\n- `Learning Phrase Representations using RNN Encoder-Decoder for\n Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__\n- `Sequence to Sequence Learning with Neural\n Networks <https://arxiv.org/abs/1409.3215>`__\n- `Neural Machine Translation by Jointly Learning to Align and\n Translate <https://arxiv.org/abs/1409.0473>`__\n- `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__\n\nYou will also find the previous tutorials on\n:doc:`/intermediate/char_rnn_classification_tutorial`\nand :doc:`/intermediate/char_rnn_generation_tutorial`\nhelpful as those concepts are very similar to the Encoder and Decoder\nmodels, respectively.\n\n**Requirements**\n\"\"\"\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n######################################################################\n# Loading data files\n# ==================\n#\n# The data for this project is a set of many thousands of English 
to\n# French translation pairs.\n#\n# `This question on Open Data Stack\n# Exchange <https://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages>`__\n# pointed me to the open translation site https://tatoeba.org/ which has\n# downloads available at https://tatoeba.org/eng/downloads - and better\n# yet, someone did the extra work of splitting language pairs into\n# individual text files here: https://www.manythings.org/anki/\n#\n# The English to French pairs are too big to include in the repo, so\n# download to ``data/eng-fra.txt`` before continuing. The file is a tab\n# separated list of translation pairs:\n#\n# ::\n#\n# I am cold. J'ai froid.\n#\n# .. Note::\n# Download the data from\n# `here <https://download.pytorch.org/tutorial/data.zip>`_\n# and extract it to the current directory.\n\n######################################################################\n# Similar to the character encoding used in the character-level RNN\n# tutorials, we will be representing each word in a language as a one-hot\n# vector, or giant vector of zeros except for a single one (at the index\n# of the word). Compared to the dozens of characters that might exist in a\n# language, there are many many more words, so the encoding vector is much\n# larger. We will however cheat a bit and trim the data to only use a few\n# thousand words per language.\n#\n# .. figure:: /_static/img/seq-seq-images/word-encoding.png\n# :alt:\n#\n#\n\n\n######################################################################\n# We'll need a unique index per word to use as the inputs and targets of\n# the networks later. To keep track of all this we will use a helper class\n# called ``Lang`` which has word → index (``word2index``) and index → word\n# (``index2word``) dictionaries, as well as a count of each word\n# ``word2count`` which will be used to replace rare words later.\n#\n\nSOS_token = 0\nEOS_token = 1\n\n\nclass Lang:\n def __init__(self, name):\n self.name = name\n self.word2index = {}\n self.word2count = {}\n self.index2word = {0: \"SOS\", 1: \"EOS\"}\n self.n_words = 2 # Count SOS and EOS\n\n def addSentence(self, sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\n######################################################################\n# The files are all in Unicode, to simplify we will turn Unicode\n# characters to ASCII, make everything lowercase, and trim most\n# punctuation.\n#\n\n# Turn a Unicode string to plain ASCII, thanks to\n# https://stackoverflow.com/a/518232/2809427\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\n# Lowercase, trim, and remove non-letter characters\n\n\ndef normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\n\n######################################################################\n# To read the data file we will split the file into lines, and then split\n# lines into pairs. 
The files are all English → Other Language, so if we\n# want to translate from Other Language → English I added the ``reverse``\n# flag to reverse the pairs.\n#\n\ndef readLangs(lang1, lang2, reverse=False):\n print(\"Reading lines...\")\n\n # Read the file and split into lines\n lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\\\n read().strip().split('\\n')\n\n # Split every line into pairs and normalize\n pairs = [[normalizeString(s) for s in l.split('\\t')] for l in lines]\n\n # Reverse pairs, make Lang instances\n if reverse:\n pairs = [list(reversed(p)) for p in pairs]\n input_lang = Lang(lang2)\n output_lang = Lang(lang1)\n else:\n input_lang = Lang(lang1)\n output_lang = Lang(lang2)\n\n return input_lang, output_lang, pairs\n\n\n######################################################################\n# Since there are a *lot* of example sentences and we want to train\n# something quickly, we'll trim the data set to only relatively short and\n# simple sentences. Here the maximum length is 10 words (that includes\n# ending punctuation) and we're filtering to sentences that translate to\n# the form \"I am\" or \"He is\" etc. (accounting for apostrophes replaced\n# earlier).\n#\n\nMAX_LENGTH = 10\n\neng_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s \",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n)\n\n\ndef filterPair(p):\n return len(p[0].split(' ')) < MAX_LENGTH and \\\n len(p[1].split(' ')) < MAX_LENGTH and \\\n p[1].startswith(eng_prefixes)\n\n\ndef filterPairs(pairs):\n return [pair for pair in pairs if filterPair(pair)]\n\n\n######################################################################\n# The full process for preparing the data is:\n#\n# - Read text file and split into lines, split lines into pairs\n# - Normalize text, filter by length and content\n# - Make word lists from sentences in pairs\n#\n\ndef prepareData(lang1, lang2, reverse=False):\n input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)\n print(\"Read %s sentence pairs\" % len(pairs))\n pairs = filterPairs(pairs)\n print(\"Trimmed to %s sentence pairs\" % len(pairs))\n print(\"Counting words...\")\n for pair in pairs:\n input_lang.addSentence(pair[0])\n output_lang.addSentence(pair[1])\n print(\"Counted words:\")\n print(input_lang.name, input_lang.n_words)\n print(output_lang.name, output_lang.n_words)\n return input_lang, output_lang, pairs\n\n\ninput_lang, output_lang, pairs = prepareData('eng', 'fra', True)\nprint(random.choice(pairs))\n\n\n######################################################################\n# The Seq2Seq Model\n# =================\n#\n# A Recurrent Neural Network, or RNN, is a network that operates on a\n# sequence and uses its own output as input for subsequent steps.\n#\n# A `Sequence to Sequence network <https://arxiv.org/abs/1409.3215>`__, or\n# seq2seq network, or `Encoder Decoder\n# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model\n# consisting of two RNNs called the encoder and decoder. The encoder reads\n# an input sequence and outputs a single vector, and the decoder reads\n# that vector to produce an output sequence.\n#\n# .. 
figure:: /_static/img/seq-seq-images/seq2seq.png\n# :alt:\n#\n# Unlike sequence prediction with a single RNN, where every input\n# corresponds to an output, the seq2seq model frees us from sequence\n# length and order, which makes it ideal for translation between two\n# languages.\n#\n# Consider the sentence \"Je ne suis pas le chat noir\" → \"I am not the\n# black cat\". Most of the words in the input sentence have a direct\n# translation in the output sentence, but are in slightly different\n# orders, e.g. \"chat noir\" and \"black cat\". Because of the \"ne/pas\"\n# construction there is also one more word in the input sentence. It would\n# be difficult to produce a correct translation directly from the sequence\n# of input words.\n#\n# With a seq2seq model the encoder creates a single vector which, in the\n# ideal case, encodes the \"meaning\" of the input sequence into a single\n# vector — a single point in some N dimensional space of sentences.\n#\n\n\n######################################################################\n# The Encoder\n# -----------\n#\n# The encoder of a seq2seq network is a RNN that outputs some value for\n# every word from the input sentence. For every input word the encoder\n# outputs a vector and a hidden state, and uses the hidden state for the\n# next input word.\n#\n# .. figure:: /_static/img/seq-seq-images/encoder-network.png\n# :alt:\n#\n#\n\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(EncoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n\n def forward(self, input, hidden):\n embedded = self.embedding(input).view(1, 1, -1)\n output = embedded\n output, hidden = self.gru(output, hidden)\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n######################################################################\n# The Decoder\n# -----------\n#\n# The decoder is another RNN that takes the encoder output vector(s) and\n# outputs a sequence of words to create the translation.\n#\n\n\n######################################################################\n# Simple Decoder\n# ^^^^^^^^^^^^^^\n#\n# In the simplest seq2seq decoder we use only last output of the encoder.\n# This last output is sometimes called the *context vector* as it encodes\n# context from the entire sequence. This context vector is used as the\n# initial hidden state of the decoder.\n#\n# At every step of decoding, the decoder is given an input token and\n# hidden state. The initial input token is the start-of-string ``<SOS>``\n# token, and the first hidden state is the context vector (the encoder's\n# last hidden state).\n#\n# .. 
figure:: /_static/img/seq-seq-images/decoder-network.png\n# :alt:\n#\n#\n\nclass DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size):\n super(DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input, hidden):\n output = self.embedding(input).view(1, 1, -1)\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n output = self.softmax(self.out(output[0]))\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n######################################################################\n# I encourage you to train and observe the results of this model, but to\n# save space we'll be going straight for the gold and introducing the\n# Attention Mechanism.\n#\n\n\n######################################################################\n# Attention Decoder\n# ^^^^^^^^^^^^^^^^^\n#\n# If only the context vector is passed between the encoder and decoder,\n# that single vector carries the burden of encoding the entire sentence.\n#\n# Attention allows the decoder network to \"focus\" on a different part of\n# the encoder's outputs for every step of the decoder's own outputs. First\n# we calculate a set of *attention weights*. These will be multiplied by\n# the encoder output vectors to create a weighted combination. The result\n# (called ``attn_applied`` in the code) should contain information about\n# that specific part of the input sequence, and thus help the decoder\n# choose the right output words.\n#\n# .. figure:: https://i.imgur.com/1152PYf.png\n# :alt:\n#\n# Calculating the attention weights is done with another feed-forward\n# layer ``attn``, using the decoder's input and hidden state as inputs.\n# Because there are sentences of all sizes in the training data, to\n# actually create and train this layer we have to choose a maximum\n# sentence length (input length, for encoder outputs) that it can apply\n# to. Sentences of the maximum length will use all the attention weights,\n# while shorter sentences will only use the first few.\n#\n# .. 
figure:: /_static/img/seq-seq-images/attention-decoder-network.png\n# :alt:\n#\n#\n\nclass AttnDecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):\n super(AttnDecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.dropout_p = dropout_p\n self.max_length = max_length\n\n self.embedding = nn.Embedding(self.output_size, self.hidden_size)\n self.attn = nn.Linear(self.hidden_size * 2, self.max_length)\n self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)\n self.dropout = nn.Dropout(self.dropout_p)\n self.gru = nn.GRU(self.hidden_size, self.hidden_size)\n self.out = nn.Linear(self.hidden_size, self.output_size)\n\n def forward(self, input, hidden, encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n attn_weights = F.softmax(\n self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n attn_applied = torch.bmm(attn_weights.unsqueeze(0),\n encoder_outputs.unsqueeze(0))\n\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n\n output = F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights\n\n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n\n######################################################################\n# .. note:: There are other forms of attention that work around the length\n# limitation by using a relative position approach. Read about \"local\n# attention\" in `Effective Approaches to Attention-based Neural Machine\n# Translation <https://arxiv.org/abs/1508.04025>`__.\n#\n# Training\n# ========\n#\n# Preparing Training Data\n# -----------------------\n#\n# To train, for each pair we will need an input tensor (indexes of the\n# words in the input sentence) and target tensor (indexes of the words in\n# the target sentence). While creating these vectors we will append the\n# EOS token to both sequences.\n#\n\ndef indexesFromSentence(lang, sentence):\n return [lang.word2index[word] for word in sentence.split(' ')]\n\n\ndef tensorFromSentence(lang, sentence):\n indexes = indexesFromSentence(lang, sentence)\n indexes.append(EOS_token)\n return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)\n\n\ndef tensorsFromPair(pair):\n input_tensor = tensorFromSentence(input_lang, pair[0])\n target_tensor = tensorFromSentence(output_lang, pair[1])\n return (input_tensor, target_tensor)\n\n\n######################################################################\n# Training the Model\n# ------------------\n#\n# To train we run the input sentence through the encoder, and keep track\n# of every output and the latest hidden state. 
Then the decoder is given\n# the ``<SOS>`` token as its first input, and the last hidden state of the\n# encoder as its first hidden state.\n#\n# \"Teacher forcing\" is the concept of using the real target outputs as\n# each next input, instead of using the decoder's guess as the next input.\n# Using teacher forcing causes it to converge faster but `when the trained\n# network is exploited, it may exhibit\n# instability <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.378.4095&rep=rep1&type=pdf>`__.\n#\n# You can observe outputs of teacher-forced networks that read with\n# coherent grammar but wander far from the correct translation -\n# intuitively it has learned to represent the output grammar and can \"pick\n# up\" the meaning once the teacher tells it the first few words, but it\n# has not properly learned how to create the sentence from the translation\n# in the first place.\n#\n# Because of the freedom PyTorch's autograd gives us, we can randomly\n# choose to use teacher forcing or not with a simple if statement. Turn\n# ``teacher_forcing_ratio`` up to use more of it.\n#\n\nteacher_forcing_ratio = 0.5\n\n\ndef train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):\n encoder_hidden = encoder.initHidden()\n\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n input_length = input_tensor.size(0)\n target_length = target_tensor.size(0)\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n loss = 0\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(\n input_tensor[ei], encoder_hidden)\n encoder_outputs[ei] = encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device)\n\n decoder_hidden = encoder_hidden\n\n use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n if use_teacher_forcing:\n # Teacher forcing: Feed the target as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n loss += criterion(decoder_output, target_tensor[di])\n decoder_input = target_tensor[di] # Teacher forcing\n\n else:\n # Without teacher forcing: use its own predictions as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.squeeze().detach() # detach from history as input\n\n loss += criterion(decoder_output, target_tensor[di])\n if decoder_input.item() == EOS_token:\n break\n\n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item() / target_length\n\n\n######################################################################\n# This is a helper function to print time elapsed and estimated time\n# remaining given the current time and progress %.\n#\n\nimport time\nimport math\n\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\n\n######################################################################\n# The whole training process looks like this:\n#\n# - Start a timer\n# - Initialize optimizers and criterion\n# - Create set of training pairs\n# - Start empty losses array for plotting\n#\n# Then we call 
``train`` many times and occasionally print the progress (%\n# of examples, time so far, estimated time) and average loss.\n#\n\ndef trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n training_pairs = [tensorsFromPair(random.choice(pairs))\n for i in range(n_iters)]\n criterion = nn.NLLLoss()\n\n for iter in range(1, n_iters + 1):\n training_pair = training_pairs[iter - 1]\n input_tensor = training_pair[0]\n target_tensor = training_pair[1]\n\n loss = train(input_tensor, target_tensor, encoder,\n decoder, encoder_optimizer, decoder_optimizer, criterion)\n print_loss_total += loss\n plot_loss_total += loss\n\n if iter % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),\n iter, iter / n_iters * 100, print_loss_avg))\n\n if iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n showPlot(plot_losses)\n\n\n######################################################################\n# Plotting results\n# ----------------\n#\n# Plotting is done with matplotlib, using the array of loss values\n# ``plot_losses`` saved while training.\n#\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nimport matplotlib.ticker as ticker\nimport numpy as np\n\n\ndef showPlot(points):\n plt.figure()\n fig, ax = plt.subplots()\n # this locator puts ticks at regular intervals\n loc = ticker.MultipleLocator(base=0.2)\n ax.yaxis.set_major_locator(loc)\n plt.plot(points)\n\n\n######################################################################\n# Evaluation\n# ==========\n#\n# Evaluation is mostly the same as training, but there are no targets so\n# we simply feed the decoder's predictions back to itself for each step.\n# Every time it predicts a word we add it to the output string, and if it\n# predicts the EOS token we stop there. 
We also store the decoder's\n# attention outputs for display later.\n#\n\ndef evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):\n with torch.no_grad():\n input_tensor = tensorFromSentence(input_lang, sentence)\n input_length = input_tensor.size()[0]\n encoder_hidden = encoder.initHidden()\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei],\n encoder_hidden)\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device) # SOS\n\n decoder_hidden = encoder_hidden\n\n decoded_words = []\n decoder_attentions = torch.zeros(max_length, max_length)\n\n for di in range(max_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n decoder_attentions[di] = decoder_attention.data\n topv, topi = decoder_output.data.topk(1)\n if topi.item() == EOS_token:\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n\n decoder_input = topi.squeeze().detach()\n\n return decoded_words, decoder_attentions[:di + 1]\n\n\n######################################################################\n# We can evaluate random sentences from the training set and print out the\n# input, target, and output to make some subjective quality judgements:\n#\n\ndef evaluateRandomly(encoder, decoder, n=10):\n for i in range(n):\n pair = random.choice(pairs)\n print('>', pair[0])\n print('=', pair[1])\n output_words, attentions = evaluate(encoder, decoder, pair[0])\n output_sentence = ' '.join(output_words)\n print('<', output_sentence)\n print('')\n\n\n######################################################################\n# Training and Evaluating\n# =======================\n#\n# With all these helper functions in place (it looks like extra work, but\n# it makes it easier to run multiple experiments) we can actually\n# initialize a network and start training.\n#\n# Remember that the input sentences were heavily filtered. For this small\n# dataset we can use relatively small networks of 256 hidden nodes and a\n# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some\n# reasonable results.\n#\n# .. Note::\n# If you run this notebook you can train, interrupt the kernel,\n# evaluate, and continue training later. Comment out the lines where the\n# encoder and decoder are initialized and run ``trainIters`` again.\n#\n\nhidden_size = 256\nencoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)\nattn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)\n\ntrainIters(encoder1, attn_decoder1, 75000, print_every=5000)\n\n######################################################################\n#\n\nevaluateRandomly(encoder1, attn_decoder1)\n\n\n######################################################################\n# Visualizing Attention\n# ---------------------\n#\n# A useful property of the attention mechanism is its highly interpretable\n# outputs. 
Because it is used to weight specific encoder outputs of the\n# input sequence, we can imagine looking where the network is focused most\n# at each time step.\n#\n# You could simply run ``plt.matshow(attentions)`` to see attention output\n# displayed as a matrix, with the columns being input steps and rows being\n# output steps:\n#\n\noutput_words, attentions = evaluate(\n encoder1, attn_decoder1, \"je suis trop froid .\")\nplt.matshow(attentions.numpy())\n\n\n######################################################################\n# For a better viewing experience we will do the extra work of adding axes\n# and labels:\n#\n\ndef showAttention(input_sentence, output_words, attentions):\n # Set up figure with colorbar\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(attentions.numpy(), cmap='bone')\n fig.colorbar(cax)\n\n # Set up axes\n ax.set_xticklabels([''] + input_sentence.split(' ') +\n ['<EOS>'], rotation=90)\n ax.set_yticklabels([''] + output_words)\n\n # Show label at every tick\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n plt.show()\n\n\ndef evaluateAndShowAttention(input_sentence):\n output_words, attentions = evaluate(\n encoder1, attn_decoder1, input_sentence)\n print('input =', input_sentence)\n print('output =', ' '.join(output_words))\n showAttention(input_sentence, output_words, attentions)\n\n\nevaluateAndShowAttention(\"elle a cinq ans de moins que moi .\")\n\nevaluateAndShowAttention(\"elle est trop petit .\")\n\nevaluateAndShowAttention(\"je ne crains pas de mourir .\")\n\nevaluateAndShowAttention(\"c est un jeune directeur plein de talent .\")\n\n\n######################################################################\n# Exercises\n# =========\n#\n# - Try with a different dataset\n#\n# - Another language pair\n# - Human → Machine (e.g. IOT commands)\n# - Chat → Response\n# - Question → Answer\n#\n# - Replace the embeddings with pre-trained word embeddings such as word2vec or\n# GloVe\n# - Try with more layers, more hidden units, and more sentences. Compare\n# the training time and results.\n# - If you use a translation file where pairs have two of the same phrase\n# (``I am test \\t I am test``), you can use this as an autoencoder. Try\n# this:\n#\n# - Train as an autoencoder\n# - Save only the Encoder network\n# - Train a new Decoder for translation from there\n#" ]
[ [ "torch.nn.NLLLoss", "torch.nn.Linear", "matplotlib.pyplot.figure", "matplotlib.pyplot.switch_backend", "torch.no_grad", "torch.tensor", "torch.nn.Embedding", "matplotlib.pyplot.subplots", "torch.nn.LogSoftmax", "torch.nn.GRU", "torch.nn.functional.relu", "matplotlib.pyplot.show", "torch.cuda.is_available", "torch.zeros", "matplotlib.pyplot.plot", "matplotlib.ticker.MultipleLocator", "torch.cat", "torch.nn.Dropout" ] ]
theKasra/14-puzzle-problem-bidirectionalsearch
[ "f6fe4e0d8a1db1b1675933d8b2461981ac08686b" ]
[ "Project_2.py" ]
[ "from copy import deepcopy\r\nfrom collections import deque\r\nimport time\r\nimport numpy as np\r\n\r\nclass Node:\r\n def __init__(self, parent, grid):\r\n self.parent = parent\r\n self.grid = grid\r\n\r\ndef print_answer(p1, p2):\r\n initial_to_middle = []\r\n while p1:\r\n initial_to_middle.insert(0, p1.grid)\r\n p1 = p1.parent\r\n print(\"\\nStep by step solution:\\n\")\r\n for i in initial_to_middle:\r\n print(np.matrix(i), \"\\n\")\r\n print(\"-----------middle--------------\", \"\\n\")\r\n while p2:\r\n print(np.matrix(p2.grid), \"\\n\")\r\n p2 = p2.parent\r\n\r\ndef search(node, frontier):\r\n frontier_len = len(frontier)\r\n \r\n for i in range(frontier_len):\r\n if frontier[i].grid == node.grid:\r\n return frontier[i]\r\n return None\r\n\r\ndef check_grid(grid, frontier, explored):\r\n frontier_len = len(frontier)\r\n if frontier_len == 0:\r\n if grid not in explored:\r\n return True\r\n else:\r\n if grid not in explored:\r\n for i in range(frontier_len):\r\n if frontier[i].grid == grid:\r\n return False\r\n else:\r\n return False\r\n return True\r\n\r\ndef expand(node, frontier, explored):\r\n first_0 = [None, None]\r\n second_0 = [None, None]\r\n\r\n found_first_0 = False\r\n found_all_0 = False\r\n for i in range(4):\r\n if not found_all_0:\r\n for j in range(4):\r\n if node.grid[i][j] == 0:\r\n if not found_first_0:\r\n first_0 = [i, j]\r\n found_first_0 = True\r\n else:\r\n second_0 = [i, j]\r\n found_all_0 = True\r\n break\r\n else:\r\n break\r\n \r\n move_left(node, first_0, frontier, explored)\r\n move_left(node, second_0, frontier, explored)\r\n move_right(node, first_0, frontier, explored)\r\n move_right(node, second_0, frontier, explored)\r\n move_up(node, first_0, frontier, explored)\r\n move_up(node, second_0, frontier, explored)\r\n move_down(node, first_0, frontier, explored)\r\n move_down(node, second_0, frontier, explored)\r\n\r\ndef add_to_frontier(node, child_grid, frontier):\r\n child = Node(node, child_grid)\r\n frontier.append(child)\r\n\r\ndef move_left(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if j == 0 or node.grid[i][j-1] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i][j-1] = child_grid[i][j-1], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef move_right(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if j == 3 or node.grid[i][j+1] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i][j+1] = child_grid[i][j+1], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef move_up(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if i == 0 or node.grid[i-1][j] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i-1][j] = child_grid[i-1][j], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef move_down(node, coordinate, frontier, explored):\r\n i, j = coordinate[0], coordinate[1]\r\n if i == 3 or node.grid[i+1][j] == 0:\r\n pass\r\n else:\r\n child_grid = deepcopy(node.grid)\r\n child_grid[i][j], child_grid[i+1][j] = child_grid[i+1][j], child_grid[i][j]\r\n if check_grid(child_grid, frontier, explored):\r\n add_to_frontier(node, child_grid, frontier)\r\n\r\ndef 
bidirectional_search(frontier_initial, explored_initial, frontier_goal, explored_goal):\r\n while frontier_initial and frontier_goal:\r\n node_initial = deque.popleft(frontier_initial)\r\n result_initial = search(node_initial, frontier_goal)\r\n if result_initial:\r\n p1 = node_initial\r\n p2 = result_initial\r\n break\r\n else:\r\n explored_initial.append(node_initial.grid)\r\n expand(node_initial, frontier_initial, explored_initial)\r\n \r\n node_goal = deque.popleft(frontier_goal)\r\n result_goal = search(node_goal, frontier_initial)\r\n if result_goal:\r\n p1 = result_goal\r\n p2 = node_goal\r\n break\r\n else:\r\n explored_goal.append(node_goal.grid)\r\n expand(node_goal, frontier_goal, explored_goal)\r\n print_answer(p1, p2)\r\n\r\ndef read_input_file(filename, grid):\r\n numbers = \"\"\r\n numbers_counter = 0\r\n\r\n f = open(filename, \"r\")\r\n numbers = f.readline().split(\" \")\r\n f.close()\r\n\r\n for i in range(4):\r\n for j in range(4):\r\n grid[i][j] = int(numbers[numbers_counter])\r\n numbers_counter += 1\r\n \r\n return grid\r\n\r\ngrid = [[None for _ in range(4)] for _ in range(4)]\r\ngrid = read_input_file(\"input.txt\", grid)\r\n\r\ninitial = Node(None, grid)\r\nfrontier_initial = deque()\r\nfrontier_initial.append(initial)\r\nexplored_initial = []\r\n\r\ngoal_grid = [[1, 2, 3, 4],\r\n [5, 6, 7, 8],\r\n [9, 10, 11, 12],\r\n [13, 14, 0, 0]]\r\ngoal = Node(None, goal_grid)\r\nfrontier_goal = deque()\r\nfrontier_goal.append(goal)\r\nexplored_goal = []\r\n\r\nstart_time = time.time()\r\n\r\nbidirectional_search(frontier_initial, explored_initial, frontier_goal, explored_goal)\r\n\r\nprint(\"Initial side\")\r\nprint(\"frontier: \", len(frontier_initial))\r\nprint(\"explored: \", len(explored_initial), \"\\n\")\r\nprint(\"Goal side\")\r\nprint(\"frontier: \", len(frontier_goal))\r\nprint(\"explored: \", len(explored_goal))\r\n\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n" ]
[ [ "numpy.matrix" ] ]
cloudcomputinghust/IoT
[ "5db3f9078be427fa23549add1747a067c2add767" ]
[ "test-component/draw_graph_2.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom influxdb import InfluxDBClient\nimport time\nimport datetime\nimport collections\n\n\ntime_min = '2017-04-03 16:35:00'\ntime_max = '2017-04-03 22:35:00'\ntime_min_2 = '2017-04-06 09:30:00'\ntime_max_2 = '2017-04-06 14:30:00'\n# time_min = '2017-03-25 00:00:00'\n# time_max = '2017-03-25 11:28:16'\n\ntime_grouped = '30s'\ntime_step = 5\nonem2m = ['onem2m-1', 'onem2m-2', 'onem2m-3']\nonem2m_naming = {'onem2m-1': '10 messages/m', 'onem2m-2': '20 messages/m', 'onem2m-3': '40 messages/m'}\nopenhab = ['openhab-1', 'openhab-2', 'openhab-3']\nopenhab_naming = {'openhab-1': '10 messages/m', 'openhab-2': '20 messages/m', 'openhab-3': '40 messages/m'}\ncluster = ['128.199.91.17', '139.59.98.138', '139.59.98.157']\nfog_mqtt = ['mqtt']\ncloud_mqtt = ['mqtt']\ncloud_processing = 'measure-data-rate'\ntime_range = 'AND time >\\'' + time_min + '\\' AND time < \\'' + time_max + '\\' '\nfog_namespace = 'kube-system'\ncloud_namespace = 'cloud-kube-system'\n# sensing_topic = ['onem2m_pf_1/temperature', 'onem2m_pf_6/temperature', 'onem2m_pf_11/temperature',\n# 'openhab_pf_1/temperature', 'openhab_pf_6/temperature', 'openhab_pf_11/temperature']\nsensing_topic = ['onem2m_pf_1/temperature','onem2m_pf_6/temperature', 'onem2m_pf_11/temperature', 'openhab_pf_1/temperature', 'openhab_pf_6/temperature', 'openhab_pf_11/temperature']\n\ndef cpu_cluster_query(_cluster_name):\n return 'SELECT sum(\"value\")/20 FROM \"cpu/usage_rate\" WHERE \"type\" = \\'node\\' AND \"nodename\"=\\'' + _cluster_name + '\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time(' + str(\n time_grouped) + '), \"nodename\" fill(null);'\n\ndef memory_cluster_query(_cluster_name):\n return 'SELECT sum(\"value\")*100/(1024*1.95) FROM \"memory/usage\" WHERE \"type\" = \\'node\\' ' +time_range+\\\n ' AND \"nodename\"=\\''+_cluster_name+'\\' ' +\\\n 'GROUP BY time('+time_grouped+'), \"nodename\" fill(null);'\n\ndef net_cluster_query(_cluster_name):\n return 'SELECT sum(\"value\") FROM \"network/tx_rate\" WHERE \"type\" = \\'node\\' '+\\\n time_range + ' AND \"nodename\"=\\''+_cluster_name+'\\' ' + \\\n ' GROUP BY time('+time_grouped+'), \"nodename\" fill(null);'\n\ndef cpu_query(_pod_name, _namespace):\n return 'SELECT sum(\"value\") FROM \"cpu/usage_rate\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"pod_name\" = \\'{pod_name}\\' AND time >\\''.format(\n pod_name=_pod_name) + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef _cpu_query(_namespace):\n return 'SELECT sum(\"value\")/10 FROM \"cpu/usage_rate\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef _mem_query(_namespace):\n return 'SELECT sum(\"value\")/(1024*1024) FROM \"memory/usage\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef _mem_query_2(_namespace):\n return 'SELECT * FROM \"memory/usage\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"container_name\"=\\'onem2m-1\\' AND time 
=\\'' + \\\n time_min + '\\' ;'.format(\n time_grouped=time_grouped)\n\ndef _net_query(_namespace, _group_by):\n return 'SELECT sum(\"value\")/1024 FROM \"network/tx_rate\" WHERE \"type\" = \\'pod\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND time >\\'' + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"{group_by}\" fill(null);'.format(\n time_grouped=time_grouped, group_by=_group_by)\n\ndef mem_query(_pod_name, _namespace):\n return 'SELECT sum(\"value\")/(1024*1024) FROM \"memory/usage\" WHERE \"type\" = \\'pod_container\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"pod_name\" = \\'{pod_name}\\' AND time >\\''.format(\n pod_name=_pod_name) + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}), \"container_name\" fill(null);'.format(\n time_grouped=time_grouped)\n\n\ndef net_query(_pod_name, _namespace):\n return 'SELECT sum(\"value\")/1024 FROM \"network/tx_rate\" WHERE \"type\" = \\'pod\\' AND \"namespace_name\" = \\''+_namespace+'\\' AND \"pod_name\" = \\'{pod_name}\\' AND time >\\''.format(\n pod_name=_pod_name) + \\\n time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped}) fill(null);'.format(\n time_grouped=time_grouped)\n\n\ndef data_rate_query():\n return 'SELECT sum(\"num_of_message\") FROM \"data_collect_rate\" WHERE time >\\'' + time_min + '\\' AND time < \\'' + time_max + '\\' GROUP BY time({time_grouped});'.format(\n time_grouped=time_grouped)\n\ndef data_sensing_query():\n return 'SELECT mean(\"value\") FROM \"data_collect_rate\" WHERE time >\\'' + time_min_2 + '\\' AND time < \\'' + time_max_2 + '\\' GROUP BY time({time_grouped}), \"topic_id\" fill(null);'.format(\n time_grouped=time_grouped)\n\ndef data_deplay_query(select_field):\n return 'SELECT mean(\"'+select_field+'\") FROM \"data_collect_rate\" WHERE time >\\'' + time_min_2 + '\\' AND time < \\'' + time_max_2 + '\\' GROUP BY \"num_of_sensor\" fill(null);'\n\n# def query_metric(_query):\n# result = client.query(_query)\n# x_val = list()\n# y_val = list()\n# for k, v in result.items():\n# _list = list(v)\n# _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n# for item in _list:\n# val = 0\n# if len(y_val) > 0:\n# val = y_val[len(y_val) - 1]\n# if item['sum']:\n# val = item['sum']\n# time_stamp = time.mktime(datetime.datetime.strptime(item['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n# x_val.append((time_stamp - _time_start) / 60)\n# y_val.append(val)\n# break\n# time.sleep(2)\n# return {'x': x_val, 'y': y_val}\n\ndef query_metric(_query, _group_by=None, _aggre_metric=None):\n if (not _group_by) and (not _aggre_metric):\n result = client.query(_query)\n x_val = list()\n y_val = list()\n for k, v in result.items():\n _list = list(v)\n _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n for item in _list:\n # val = 0\n # if len(y_val) > 0:\n # val = y_val[len(y_val) - 1]\n val = None\n if item['sum']:\n val = item['sum']\n time_stamp = time.mktime(datetime.datetime.strptime(item['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n x_val.append((time_stamp - _time_start) / 60)\n y_val.append(val)\n break\n time.sleep(2)\n return {'x': x_val, 'y': y_val}\n result = client.query(_query)\n lines = dict()\n for k, v in result.items():\n _list = list(v)\n _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n for item in _list:\n # val = 0\n val = 
None\n if item[_aggre_metric]:\n val = item[_aggre_metric]\n time_stamp = time.mktime(datetime.datetime.strptime(item['time'], \"%Y-%m-%dT%H:%M:%SZ\").timetuple())\n if not lines.get(k[1][_group_by]):\n lines[k[1][_group_by]] = {'x': list(), 'y': list()}\n lines.get(k[1][_group_by]).get('x').append((time_stamp - _time_start) / 60)\n lines.get(k[1][_group_by]).get('y').append(val)\n time.sleep(2)\n return lines\n\ndef mean_values(values, field_1='x', field_2='y'):\n result = []\n result_2 = []\n min_len = len(values[0][field_2])\n if len(values[0][field_1]) > len(values[1][field_1]):\n min_len = len(values[1][field_2])\n if min_len > len(values[2][field_2]):\n min_len = len(values[2][field_2])\n for index in range(0, min_len):\n if values[0][field_2][index] and values[1][field_2][index] and values[2][field_2][index]:\n result.append((values[0][field_2][index] + values[1][field_2][index] + values[2][field_2][index]) / 3)\n else:\n result.append(None)\n result_2.append(values[0][field_1][index])\n return {field_1: result_2, field_2: result}\n\ndef gen_plot_by_row(plt, data, y_index,num_col, num_row, row_label, titles, line_type, marker=None, scale=False):\n # num_of_col = len(data)\n x_index = 0\n for item in data:\n if x_index == 0:\n gen_plot(plt=plt, data=item, index=(x_index+y_index*num_col+1), line_type=line_type, y_label=row_label,\n title=titles[x_index], num_col=num_col, nul_row=num_row, marker=marker, scale=scale)\n else:\n gen_plot(plt=plt, data=item, index=(x_index + y_index * num_col + 1), line_type=line_type,\n title=titles[x_index], num_col=num_col, nul_row=num_row, marker=marker, scale=scale)\n x_index += 1\n\ndef gen_plot(plt, data, index, line_type, num_col, nul_row,y_label=None, x_label='time(s)', title=None, marker=None, scale=False):\n plt.subplot(int('{}{}{}'.format(nul_row, num_col, index)))\n if isinstance(data, list):\n for line in data:\n plt.plot(line['x'], line['y'])\n elif isinstance(data, dict):\n if data.get('x', 0) == 0:\n count = 0\n temp = dict()\n keys = data.keys()\n sorted(keys)\n for k in keys:\n temp[k] = data[k]\n for _key_group, _values in temp.items():\n series1 = np.array(_values['y']).astype(np.double)\n s1mask = np.isfinite(series1)\n series = np.array(_values['x'])\n if len(data) > 3:\n # plt.plot(series[s1mask], series1[s1mask], marker=marker[count], linewidth=1)\n plt.plot(series[s1mask], series1[s1mask], linewidth=2, linestyle = line_type[count])\n else:\n plt.plot(series[s1mask], series1[s1mask], linewidth=1)\n if scale:\n plt.yscale('log')\n count += 1\n # plt.plot(_values['x'], _values['y'])\n # plt.legend(data.keys(), ncol=int(len(data.keys())/3), loc='upper left')\n plt.legend(data.keys(), ncol=int(len(data.keys())/3), loc='upper right', columnspacing=1.5, labelspacing=0.0,\n handletextpad=0.0, handlelength=1.0, fontsize='small')\n else:\n plt.plot(data['x'], data['y'], line_type[0])\n if y_label:\n plt.ylabel(y_label)\n if x_label:\n plt.xlabel(x_label)\n plt.title(title)\n plt.grid(True)\n plt.xticks(np.arange(0, 360 + 1, 30.0))\n # plt.xticks(np.arange(0, 120 + 1, 10.0))\n\ndef draw_graps(data=dict()):\n line_type = ['-', '-.', '--', ':', '-.', '--']\n marker = ['.', 'o', 'v', 'x', '+', '<', '*']\n # plot with various axes scales\n plt.figure(1)\n # cpu\n # col_1 = {onem2m_naming[k]: data['fog']['cpu'][k] for k in onem2m}\n # # col_1['mean'] = mean_values(list(col_1.values()))\n # col_2 = {openhab_naming[k]: data['fog']['cpu'][k] for k in openhab}\n # # col_2['mean'] = mean_values(list(col_2.values()))\n # col_3 = {k: 
data['fog']['cpu'][k] for k in fog_mqtt}\n # rows = [col_1, col_2, col_3]\n # titles = ['ONEM2M CPU USAGE', 'OPENHAB CPU USAGE', 'MQTT CPU USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=0, row_label='cpu_usage(%)', titles=titles, num_col=len(data['fog']), num_row=3,\n # line_type=line_type)\n #\n # col_1 = {onem2m_naming[k]: data['fog']['memory'][k] for k in onem2m}\n # # col_1['mean'] = mean_values(list(col_1.values()))\n # col_2 = {openhab_naming[k]: data['fog']['memory'][k] for k in openhab}\n # # col_2['mean'] = mean_values(list(col_2.values()))\n # col_3 = {k: data['fog']['memory'][k] for k in fog_mqtt}\n # rows = [col_1, col_2, col_3]\n # titles = ['ONEM2M MEM USAGE', 'OPENHAB MEM USAGE', 'MQTT MEM USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=1, row_label='memory_usage(MB)', titles=titles, num_col=len(data['fog']), num_row=3,\n # line_type=line_type)\n #\n # col_1 = {onem2m_naming[k]: data['fog']['network'].get('app:{}'.format(k)) for k in onem2m}\n # # col_1['mean'] = mean_values(list(col_1.values()))\n # col_2 = {openhab_naming[k]: data['fog']['network'].get('app:{}'.format(k)) for k in openhab}\n # # col_2['mean'] = mean_values(list(col_2.values()))\n # col_3 = {k: data['fog']['network'].get('app:{}'.format(k)) for k in fog_mqtt}\n # rows = [col_1, col_2, col_3]\n # titles = ['ONEM2M NET USAGE', 'OPENHAB NET USAGE', 'MQTT NET USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=2, row_label='network_usage(kBps)', titles=titles, num_col=len(data['fog']), num_row=3,\n # line_type=line_type)\n # plt.subplots_adjust(top=0.93, bottom=0.07, left=0.05, right=0.96, hspace=0.51,\n # wspace=0.19)\n # plt.show()\n # #\n # # ################\n # plt.figure(2)\n # col_1 = {cloud_processing: data['cloud']['cpu'][cloud_processing]}\n # # col_2 = {cloud_mqtt: data['cloud']['cpu'][cloud_mqtt]}\n # col_2 = {k: data['cloud']['cpu'][k] for k in cloud_mqtt}\n # rows = [col_1, col_2]\n # titles = ['DATA_PROCESSING CPU USAGE', 'CLOUD MQTT CPU USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=0, row_label='cpu_usage(%)', titles=titles, num_col=2, num_row=3,\n # line_type=line_type)\n #\n # col_1 = {cloud_processing: data['cloud']['memory'][cloud_processing]}\n # # col_2 = {cloud_mqtt: data['cloud']['memory'][cloud_mqtt]}\n # col_2 = {k: data['cloud']['memory'][k] for k in cloud_mqtt}\n # rows = [col_1, col_2]\n # # rows = [data['cloud']['memory'][cloud_processing], data['cloud']['memory'][cloud_mqtt]]\n # titles = ['DATA_PROCESSING MEM USAGE', 'CLOUD MQTT MEM USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=1, row_label='memory_usage(MB)', titles=titles, num_col=2, num_row=3,\n # line_type=line_type)\n #\n # col_1 = {cloud_processing: data['cloud']['network'][cloud_processing]}\n # # col_2 = {cloud_mqtt: data['cloud']['network'][cloud_mqtt]}\n # col_2 = {k: data['cloud']['network'][k] for k in cloud_mqtt}\n # rows = [col_1, col_2]\n # # rows = [data['cloud']['network'][cloud_processing], data['cloud']['network'][cloud_mqtt]]\n # titles = ['DATA_PROCESSING NET USAGE', 'CLOUD MQTT NET USAGE']\n # gen_plot_by_row(plt=plt, data=rows, y_index=2, row_label='network_usage(kBps)', titles=titles, num_col=2, num_row=3,\n # line_type=line_type)\n # plt.show()\n\n #################\n plt.figure(3)\n\n rows = [{k: data['cloud']['sensing_data'][k] for k in sensing_topic}]\n titles = ['SENSING DATA']\n gen_plot_by_row(plt=plt, data=rows, y_index=0, row_label='Value', titles=titles, num_col=1,\n num_row=1,\n line_type=line_type, marker=marker)\n\n # show\n 
plt.subplots_adjust(top=0.93, bottom=0.07, left=0.05, right=0.99, hspace=0.85,\n wspace=0.19)\n plt.show()\n return\n\n\nclient = InfluxDBClient('188.166.238.158', 32485, 'root', 'root', 'k8s')\ndata = dict()\n\n# get metric\npod_names = {'fog': {'onem2m': onem2m, 'openhab': openhab, 'mqtt': fog_mqtt}, 'cloud': {'mqtt': cloud_mqtt, 'processing': cloud_processing}}\nnamespaces = {'fog': fog_namespace, 'cloud': cloud_namespace}\nresource_metrics = {'cpu', 'memory', 'network'}\nresource_query = {'cpu': _cpu_query, 'memory': _mem_query, 'network': _net_query}\ndata['fog'] = dict()\ndata['cloud'] = dict()\n# data['fog']['cpu'] = query_metric(_cpu_query(namespaces['fog']), 'container_name', 'sum')\n# data['fog']['memory'] = query_metric(_mem_query(namespaces['fog']), 'container_name', 'sum')\n# data['fog']['network'] = query_metric(_net_query(namespaces['fog'], 'labels'), 'labels', 'sum')\n# temp = dict(data['fog']['network'])\n# for key, value in temp.items():\n# for check_key in onem2m:\n# if key.find(check_key) >= 0:\n# data['fog']['network'][check_key] = value\n# continue\n# for check_key in openhab:\n# if key.find(check_key) >= 0:\n# data['fog']['network'][check_key] = value\n# continue\n# for check_key in fog_mqtt:\n# if key.find(check_key) >= 0:\n# data['fog']['network'][check_key] = value\n# continue\n#\n# print('query fog done')\n# data['cloud']['cpu'] = query_metric(_cpu_query(namespaces['cloud']), 'container_name', 'sum')\n# data['cloud']['memory'] = query_metric(_mem_query(namespaces['cloud']), 'container_name', 'sum')\n# data['cloud']['network'] = query_metric(_net_query(namespaces['cloud'], 'pod_name'), 'pod_name', 'sum')\n# temp = dict(data['cloud']['network'])\n# for key, value in temp.items():\n# for check_key in cloud_mqtt:\n# if key.find(check_key) >= 0:\n# data['cloud']['network'][check_key] = value\n# continue\n# if key.find(cloud_processing) >= 0:\n# data['cloud']['network'][cloud_processing] = value\n# continue\n# data['cloud']['sensing_data'] = query_metric(data_sensing_query(), 'topic_id', 'mean')\n# for k,v in data['cloud']['sensing_data'].items():\n# print(k)\n# print(v)\nprint('query cloud done')\n# draw_graps(data)\n\n# _data = client.query(data_deplay_query('round_trip_3'))\n# for k, v in _data.items():\n# print(k[1]['num_of_sensor'])\n# print(list(v)[0]['mean'])\n# print('-----------------------------------------------')\n\n# _data_1 = client.query(data_deplay_query('time_send_cloud'))\n# series_1 = {'x': list(), 'y': list()}\n# for k, v in _data_1.items():\n# # series_1['x'].append(int(k[1]['num_of_sensor']))\n# # series_1['y'].append(float(list(v)[0]['mean']))\n# print(k[1]['num_of_sensor'])\n# print(list(v)[0])\n # print(list(v)[0]['mean'])\n#\n_data_1 = client.query(data_deplay_query('round_trip_1'))\nseries_1 = {'x': list(), 'y': list()}\nfor k, v in _data_1.items():\n series_1['x'].append(int(k[1]['num_of_sensor']))\n series_1['y'].append(float(list(v)[0]['mean']))\n # print(k[1]['num_of_sensor'])\n # print(list(v)[0]['mean'])\nprint('-----------------------------------------------')\nseries_2 = {'x': list(), 'y': list()}\n_data_2 = client.query(data_deplay_query('round_trip_2'))\n\nfor k, v in _data_2.items():\n # print(k[1]['num_of_sensor'])\n # print(list(v)[0]['mean'])\n series_2['x'].append(int(k[1]['num_of_sensor']))\n series_2['y'].append(float(list(v)[0]['mean']+1))\n\nprint(series_1)\nprint(series_2)\n\nwidth = 1 # the width of the bars: can also be len(x) sequence\n\np1 = plt.bar(series_1['x'], series_1['y'], width, color='#d62728')\np2 = 
plt.bar(series_2['x'], series_2['y'], width,\n bottom=series_1['y'])\n\n\nplt.ylabel('Transmission Time (seconds)')\nplt.xlabel('Number of sensors per platform (on 5 platforms)')\nplt.title('Tranmission time by number of sensor')\nplt.xticks(series_1['x'])\n# plt.yticks(np.arange(0, 300, 10))\nplt.legend((p1[0], p2[0]), ('Sensor - Platform Transmission Time', 'Platform - Cloud Transmission Time'))\n\n# def autolabel(rects):\n# \"\"\"\n# Attach a text label above each bar displaying its height\n# \"\"\"\n# for rect in rects:\n# height = rect.get_height()\n# plt.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n# '%d' % int(height),\n# ha='center', va='bottom')\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.xticks", "matplotlib.pyplot.grid", "matplotlib.pyplot.figure", "matplotlib.pyplot.yscale", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.isfinite", "matplotlib.pyplot.bar" ] ]
slipperlobster/flipper
[ "527952a74bc76f76cf3a2d25755386f8db285885" ]
[ "jesse/indicators/gatorosc.py" ]
[ "from collections import namedtuple\n\nimport numpy as np\nimport talib\n\nfrom jesse.helpers import get_candle_source, np_shift\nfrom jesse.helpers import slice_candles\n\nGATOR = namedtuple('GATOR', ['upper', 'lower', 'upper_change', 'lower_change'])\n\n\ndef gatorosc(candles: np.ndarray, source_type: str = \"close\", sequential: bool = False) -> GATOR:\n \"\"\"\n Gator Oscillator by Bill M. Williams\n\n :param candles: np.ndarray\n :param source_type: str - default: \"close\"\n :param sequential: bool - default: False\n\n :return: GATOR(upper, lower, upper_change, lower_change)\n \"\"\"\n\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n jaw = np_shift(numpy_ewma(source, 13), 8, fill_value=np.nan)\n teeth = np_shift(numpy_ewma(source, 8), 5, fill_value=np.nan)\n lips = np_shift(numpy_ewma(source, 5), 3, fill_value=np.nan)\n\n upper = np.abs(jaw - teeth)\n lower = -np.abs(teeth - lips)\n\n upper_change = talib.MOM(upper, timeperiod=1)\n lower_change = -talib.MOM(lower, timeperiod=1)\n\n if sequential:\n return GATOR(upper, lower, upper_change, lower_change)\n else:\n return GATOR(upper[-1], lower[-1], upper_change[-1], lower_change[-1])\n\n\ndef numpy_ewma(data, window):\n \"\"\"\n\n :param data:\n :param window:\n :return:\n \"\"\"\n alpha = 1 / window\n # scale = 1 / (1 - alpha)\n n = data.shape[0]\n scale_arr = (1 - alpha) ** (-1 * np.arange(n))\n weights = (1 - alpha) ** np.arange(n)\n pw0 = (1 - alpha) ** (n - 1)\n mult = data * pw0 * scale_arr\n cumsums = mult.cumsum()\n return cumsums * scale_arr[::-1] / weights.cumsum()\n" ]
[ [ "numpy.arange", "numpy.abs" ] ]
legend-of-zyda/LuxPythonEnvGym
[ "7d818b5943dad1b7fae3c66b612aae93c743bd0e" ]
[ "examples/agent_policy.py" ]
[ "import sys\nimport time\nfrom functools import partial # pip install functools\nimport copy\nimport random\n\nimport numpy as np\nfrom gym import spaces\n\nfrom luxai2021.env.agent import Agent, AgentWithModel\nfrom luxai2021.game.actions import *\nfrom luxai2021.game.game_constants import GAME_CONSTANTS\nfrom luxai2021.game.position import Position\n\n\n# https://codereview.stackexchange.com/questions/28207/finding-the-closest-point-to-a-list-of-points\ndef closest_node(node, nodes):\n dist_2 = np.sum((nodes - node) ** 2, axis=1)\n return np.argmin(dist_2)\ndef furthest_node(node, nodes):\n dist_2 = np.sum((nodes - node) ** 2, axis=1)\n return np.argmax(dist_2)\n\ndef smart_transfer_to_nearby(game, team, unit_id, unit, target_type_restriction=None, **kwarg):\n \"\"\"\n Smart-transfers from the specified unit to a nearby neighbor. Prioritizes any\n nearby carts first, then any worker. Transfers the resource type which the unit\n has most of. Picks which cart/worker based on choosing a target that is most-full\n but able to take the most amount of resources.\n\n Args:\n team ([type]): [description]\n unit_id ([type]): [description]\n\n Returns:\n Action: Returns a TransferAction object, even if the request is an invalid\n transfer. Use TransferAction.is_valid() to check validity.\n \"\"\"\n\n # Calculate how much resources could at-most be transferred\n resource_type = None\n resource_amount = 0\n target_unit = None\n\n if unit != None:\n for type, amount in unit.cargo.items():\n if amount > resource_amount:\n resource_type = type\n resource_amount = amount\n\n # Find the best nearby unit to transfer to\n unit_cell = game.map.get_cell_by_pos(unit.pos)\n adjacent_cells = game.map.get_adjacent_cells(unit_cell)\n\n \n for c in adjacent_cells:\n for id, u in c.units.items():\n # Apply the unit type target restriction\n if target_type_restriction == None or u.type == target_type_restriction:\n if u.team == team:\n # This unit belongs to our team, set it as the winning transfer target\n # if it's the best match.\n if target_unit is None:\n target_unit = u\n else:\n # Compare this unit to the existing target\n if target_unit.type == u.type:\n # Transfer to the target with the least capacity, but can accept\n # all of our resources\n if( u.get_cargo_space_left() >= resource_amount and \n target_unit.get_cargo_space_left() >= resource_amount ):\n # Both units can accept all our resources. Prioritize one that is most-full.\n if u.get_cargo_space_left() < target_unit.get_cargo_space_left():\n # This new target it better, it has less space left and can take all our\n # resources\n target_unit = u\n \n elif( target_unit.get_cargo_space_left() >= resource_amount ):\n # Don't change targets. 
Current one is best since it can take all\n # the resources, but new target can't.\n pass\n \n elif( u.get_cargo_space_left() > target_unit.get_cargo_space_left() ):\n # Change targets, because neither target can accept all our resources and \n # this target can take more resources.\n target_unit = u\n elif u.type == Constants.UNIT_TYPES.CART:\n # Transfer to this cart instead of the current worker target\n target_unit = u\n \n # Build the transfer action request\n target_unit_id = None\n if target_unit is not None:\n target_unit_id = target_unit.id\n\n # Update the transfer amount based on the room of the target\n if target_unit.get_cargo_space_left() < resource_amount:\n resource_amount = target_unit.get_cargo_space_left()\n \n return TransferAction(team, unit_id, target_unit_id, resource_type, resource_amount)\n\n########################################################################################################################\n# This is the Agent that you need to design for the competition\n########################################################################################################################\nclass AgentPolicy(AgentWithModel):\n def __init__(self, mode=\"train\", model=None) -> None:\n \"\"\"\n Arguments:\n mode: \"train\" or \"inference\", which controls if this agent is for training or not.\n model: The pretrained model, or if None it will operate in training mode.\n \"\"\"\n super().__init__(mode, model)\n\n # Define action and observation space\n # They must be gym.spaces objects\n # Example when using discrete actions:\n self.actions_units = [\n partial(MoveAction, direction=Constants.DIRECTIONS.CENTER), # This is the do-nothing action\n partial(MoveAction, direction=Constants.DIRECTIONS.NORTH),\n partial(MoveAction, direction=Constants.DIRECTIONS.WEST),\n partial(MoveAction, direction=Constants.DIRECTIONS.SOUTH),\n partial(MoveAction, direction=Constants.DIRECTIONS.EAST),\n partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.CART), # Transfer to nearby cart\n partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.WORKER), # Transfer to nearby worker\n SpawnCityAction,\n PillageAction,\n ]\n self.actions_cities = [\n SpawnWorkerAction,\n SpawnCartAction,\n ResearchAction,\n ]\n self.action_space = spaces.Discrete(max(len(self.actions_units), len(self.actions_cities)))\n\n # Observation space: (Basic minimum for a miner agent)\n # Object:\n # 1x is worker\n # 1x is cart\n # 1x is citytile\n #\n # 5x direction_nearest_wood\n # 1x distance_nearest_wood\n # 1x amount\n #\n # 5x direction_nearest_coal\n # 1x distance_nearest_coal\n # 1x amount\n #\n # 5x direction_nearest_uranium\n # 1x distance_nearest_uranium\n # 1x amount\n #\n # 5x direction_nearest_city\n # 1x distance_nearest_city\n # 1x amount of fuel\n #\n # 28x (the same as above, but direction, distance, and amount to the furthest of each)\n #\n # 5x direction_nearest_worker\n # 1x distance_nearest_worker\n # 1x amount of cargo\n # Unit:\n # 1x cargo size\n # State:\n # 1x is night\n # 1x percent of game done\n # 2x citytile counts [cur player, opponent]\n # 2x worker counts [cur player, opponent]\n # 2x cart counts [cur player, opponent]\n # 1x research points [cur player]\n # 1x researched coal [cur player]\n # 1x researched uranium [cur player]\n self.observation_shape = (3 + 7 * 5 * 2 + 1 + 1 + 1 + 2 + 2 + 2 + 3,)\n self.observation_space = spaces.Box(low=0, high=1, shape=\n self.observation_shape, dtype=np.float16)\n\n self.object_nodes = {}\n\n 
def get_agent_type(self):\n \"\"\"\n Returns the type of agent. Use AGENT for inference, and LEARNING for training a model.\n \"\"\"\n if self.mode == \"train\":\n return Constants.AGENT_TYPE.LEARNING\n else:\n return Constants.AGENT_TYPE.AGENT\n\n def get_observation(self, game, unit, city_tile, team, is_new_turn):\n \"\"\"\n Implements getting a observation from the current game for this unit or city\n \"\"\"\n observation_index = 0\n if is_new_turn:\n # It's a new turn this event. This flag is set True for only the first observation from each turn.\n # Update any per-turn fixed observation space that doesn't change per unit/city controlled.\n\n # Build a list of object nodes by type for quick distance-searches\n self.object_nodes = {}\n\n # Add resources\n for cell in game.map.resources:\n if cell.resource.type not in self.object_nodes:\n self.object_nodes[cell.resource.type] = np.array([[cell.pos.x, cell.pos.y]])\n else:\n self.object_nodes[cell.resource.type] = np.concatenate(\n (\n self.object_nodes[cell.resource.type],\n [[cell.pos.x, cell.pos.y]]\n ),\n axis=0\n )\n\n # Add your own and opponent units\n for t in [team, (team + 1) % 2]:\n for u in game.state[\"teamStates\"][team][\"units\"].values():\n key = str(u.type)\n if t != team:\n key = str(u.type) + \"_opponent\"\n\n if key not in self.object_nodes:\n self.object_nodes[key] = np.array([[u.pos.x, u.pos.y]])\n else:\n self.object_nodes[key] = np.concatenate(\n (\n self.object_nodes[key],\n [[u.pos.x, u.pos.y]]\n )\n , axis=0\n )\n\n # Add your own and opponent cities\n for city in game.cities.values():\n for cells in city.city_cells:\n key = \"city\"\n if city.team != team:\n key = \"city_opponent\"\n\n if key not in self.object_nodes:\n self.object_nodes[key] = np.array([[cells.pos.x, cells.pos.y]])\n else:\n self.object_nodes[key] = np.concatenate(\n (\n self.object_nodes[key],\n [[cells.pos.x, cells.pos.y]]\n )\n , axis=0\n )\n\n # Observation space: (Basic minimum for a miner agent)\n # Object:\n # 1x is worker\n # 1x is cart\n # 1x is citytile\n # 5x direction_nearest_wood\n # 1x distance_nearest_wood\n # 1x amount\n #\n # 5x direction_nearest_coal\n # 1x distance_nearest_coal\n # 1x amount\n #\n # 5x direction_nearest_uranium\n # 1x distance_nearest_uranium\n # 1x amount\n #\n # 5x direction_nearest_city\n # 1x distance_nearest_city\n # 1x amount of fuel\n #\n # 5x direction_nearest_worker\n # 1x distance_nearest_worker\n # 1x amount of cargo\n #\n # 28x (the same as above, but direction, distance, and amount to the furthest of each)\n #\n # Unit:\n # 1x cargo size\n # State:\n # 1x is night\n # 1x percent of game done\n # 2x citytile counts [cur player, opponent]\n # 2x worker counts [cur player, opponent]\n # 2x cart counts [cur player, opponent]\n # 1x research points [cur player]\n # 1x researched coal [cur player]\n # 1x researched uranium [cur player]\n obs = np.zeros(self.observation_shape)\n \n # Update the type of this object\n # 1x is worker\n # 1x is cart\n # 1x is citytile\n observation_index = 0\n if unit is not None:\n if unit.type == Constants.UNIT_TYPES.WORKER:\n obs[observation_index] = 1.0 # Worker\n else:\n obs[observation_index+1] = 1.0 # Cart\n if city_tile is not None:\n obs[observation_index+2] = 1.0 # CityTile\n observation_index += 3\n \n pos = None\n if unit is not None:\n pos = unit.pos\n else:\n pos = city_tile.pos\n\n if pos is None:\n observation_index += 7 * 5 * 2\n else:\n # Encode the direction to the nearest objects\n # 5x direction_nearest\n # 1x distance\n for distance_function in 
[closest_node, furthest_node]:\n for key in [\n Constants.RESOURCE_TYPES.WOOD,\n Constants.RESOURCE_TYPES.COAL,\n Constants.RESOURCE_TYPES.URANIUM,\n \"city\",\n str(Constants.UNIT_TYPES.WORKER)]:\n # Process the direction to and distance to this object type\n\n # Encode the direction to the nearest object (excluding itself)\n # 5x direction\n # 1x distance\n if key in self.object_nodes:\n if (\n (key == \"city\" and city_tile is not None) or\n (unit is not None and str(unit.type) == key and len(game.map.get_cell_by_pos(unit.pos).units) <= 1 )\n ):\n # Filter out the current unit from the closest-search\n closest_index = closest_node((pos.x, pos.y), self.object_nodes[key])\n filtered_nodes = np.delete(self.object_nodes[key], closest_index, axis=0)\n else:\n filtered_nodes = self.object_nodes[key]\n\n if len(filtered_nodes) == 0:\n # No other object of this type\n obs[observation_index + 5] = 1.0\n else:\n # There is another object of this type\n closest_index = distance_function((pos.x, pos.y), filtered_nodes)\n\n if closest_index is not None and closest_index >= 0:\n closest = filtered_nodes[closest_index]\n closest_position = Position(closest[0], closest[1])\n direction = pos.direction_to(closest_position)\n mapping = {\n Constants.DIRECTIONS.CENTER: 0,\n Constants.DIRECTIONS.NORTH: 1,\n Constants.DIRECTIONS.WEST: 2,\n Constants.DIRECTIONS.SOUTH: 3,\n Constants.DIRECTIONS.EAST: 4,\n }\n obs[observation_index + mapping[direction]] = 1.0 # One-hot encoding direction\n\n # 0 to 1 distance\n distance = pos.distance_to(closest_position)\n obs[observation_index + 5] = min(distance / 20.0, 1.0)\n\n # 0 to 1 value (amount of resource, cargo for unit, or fuel for city)\n if key == \"city\":\n # City fuel as % of upkeep for 200 turns\n c = game.cities[game.map.get_cell_by_pos(closest_position).city_tile.city_id]\n obs[observation_index + 6] = min(\n c.fuel / (c.get_light_upkeep() * 200.0),\n 1.0\n )\n elif key in [Constants.RESOURCE_TYPES.WOOD, Constants.RESOURCE_TYPES.COAL,\n Constants.RESOURCE_TYPES.URANIUM]:\n # Resource amount\n obs[observation_index + 6] = min(\n game.map.get_cell_by_pos(closest_position).resource.amount / 500,\n 1.0\n )\n else:\n # Unit cargo\n obs[observation_index + 6] = min(\n next(iter(game.map.get_cell_by_pos(\n closest_position).units.values())).get_cargo_space_left() / 100,\n 1.0\n )\n\n observation_index += 7\n\n if unit is not None:\n # Encode the cargo space\n # 1x cargo size\n obs[observation_index] = unit.get_cargo_space_left() / GAME_CONSTANTS[\"PARAMETERS\"][\"RESOURCE_CAPACITY\"][\n \"WORKER\"]\n observation_index += 1\n else:\n observation_index += 1\n\n # Game state observations\n\n # 1x is night\n obs[observation_index] = game.is_night()\n observation_index += 1\n\n # 1x percent of game done\n obs[observation_index] = game.state[\"turn\"] / GAME_CONSTANTS[\"PARAMETERS\"][\"MAX_DAYS\"]\n observation_index += 1\n\n # 2x citytile counts [cur player, opponent]\n # 2x worker counts [cur player, opponent]\n # 2x cart counts [cur player, opponent]\n max_count = 30\n for key in [\"city\", str(Constants.UNIT_TYPES.WORKER), str(Constants.UNIT_TYPES.CART)]:\n if key in self.object_nodes:\n obs[observation_index] = len(self.object_nodes[key]) / max_count\n if (key + \"_opponent\") in self.object_nodes:\n obs[observation_index + 1] = len(self.object_nodes[(key + \"_opponent\")]) / max_count\n observation_index += 2\n\n # 1x research points [cur player]\n # 1x researched coal [cur player]\n # 1x researched uranium [cur player]\n obs[observation_index] = 
game.state[\"teamStates\"][team][\"researchPoints\"] / 200.0\n obs[observation_index+1] = float(game.state[\"teamStates\"][team][\"researched\"][\"coal\"])\n obs[observation_index+2] = float(game.state[\"teamStates\"][team][\"researched\"][\"uranium\"])\n\n return obs\n\n def action_code_to_action(self, action_code, game, unit=None, city_tile=None, team=None):\n \"\"\"\n Takes an action in the environment according to actionCode:\n action_code: Index of action to take into the action array.\n Returns: An action.\n \"\"\"\n # Map action_code index into to a constructed Action object\n try:\n x = None\n y = None\n if city_tile is not None:\n x = city_tile.pos.x\n y = city_tile.pos.y\n elif unit is not None:\n x = unit.pos.x\n y = unit.pos.y\n \n if city_tile != None:\n action = self.actions_cities[action_code%len(self.actions_cities)](\n game=game,\n unit_id=unit.id if unit else None,\n unit=unit,\n city_id=city_tile.city_id if city_tile else None,\n citytile=city_tile,\n team=team,\n x=x,\n y=y\n )\n else:\n action = self.actions_units[action_code%len(self.actions_units)](\n game=game,\n unit_id=unit.id if unit else None,\n unit=unit,\n city_id=city_tile.city_id if city_tile else None,\n citytile=city_tile,\n team=team,\n x=x,\n y=y\n )\n \n return action\n except Exception as e:\n # Not a valid action\n print(e)\n return None\n\n def take_action(self, action_code, game, unit=None, city_tile=None, team=None):\n \"\"\"\n Takes an action in the environment according to actionCode:\n actionCode: Index of action to take into the action array.\n \"\"\"\n action = self.action_code_to_action(action_code, game, unit, city_tile, team)\n self.match_controller.take_action(action)\n\n def game_start(self, game):\n \"\"\"\n This function is called at the start of each game. Use this to\n reset and initialize per game. Note that self.team may have\n been changed since last game. The game map has been created\n and starting units placed.\n\n Args:\n game ([type]): Game.\n \"\"\"\n self.units_last = 0\n self.city_tiles_last = 0\n self.fuel_collected_last = 0\n\n def get_reward(self, game, is_game_finished, is_new_turn, is_game_error):\n \"\"\"\n Returns the reward function for this step of the game. Reward should be a\n delta increment to the reward, not the total current reward.\n \"\"\"\n if is_game_error:\n # Game environment step failed, assign a game lost reward to not incentivise this\n print(\"Game failed due to error\")\n return -1.0\n\n if not is_new_turn and not is_game_finished:\n # Only apply rewards at the start of each turn or at game end\n return 0\n\n # Get some basic stats\n unit_count = len(game.state[\"teamStates\"][self.team][\"units\"])\n\n city_count = 0\n city_count_opponent = 0\n city_tile_count = 0\n city_tile_count_opponent = 0\n for city in game.cities.values():\n if city.team == self.team:\n city_count += 1\n else:\n city_count_opponent += 1\n\n for cell in city.city_cells:\n if city.team == self.team:\n city_tile_count += 1\n else:\n city_tile_count_opponent += 1\n \n rewards = {}\n \n # Give a reward for unit creation/death. 0.05 reward per unit.\n rewards[\"rew/r_units\"] = (unit_count - self.units_last) * 0.05\n self.units_last = unit_count\n\n # Give a reward for city creation/death. 
0.1 reward per city.\n rewards[\"rew/r_city_tiles\"] = (city_tile_count - self.city_tiles_last) * 0.1\n self.city_tiles_last = city_tile_count\n\n # Reward collecting fuel\n fuel_collected = game.stats[\"teamStats\"][self.team][\"fuelGenerated\"]\n rewards[\"rew/r_fuel_collected\"] = ( (fuel_collected - self.fuel_collected_last) / 20000 )\n self.fuel_collected_last = fuel_collected\n \n # Give a reward of 1.0 per city tile alive at the end of the game\n rewards[\"rew/r_city_tiles_end\"] = 0\n if is_game_finished:\n self.is_last_turn = True\n rewards[\"rew/r_city_tiles_end\"] = city_tile_count\n\n '''\n # Example of a game win/loss reward instead\n if game.get_winning_team() == self.team:\n rewards[\"rew/r_game_win\"] = 100.0 # Win\n else:\n rewards[\"rew/r_game_win\"] = -100.0 # Loss\n '''\n \n reward = 0\n for name, value in rewards.items():\n reward += value\n\n return reward\n\n def turn_heurstics(self, game, is_first_turn):\n \"\"\"\n This is called pre-observation actions to allow for hardcoded heuristics\n to control a subset of units. Any unit or city that gets an action from this\n callback, will not create an observation+action.\n\n Args:\n game ([type]): Game in progress\n is_first_turn (bool): True if it's the first turn of a game.\n \"\"\"\n return\n\n \n\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.argmin", "numpy.argmax", "numpy.delete", "numpy.array", "numpy.concatenate" ] ]
pritesh-mehta/dwi-utilities
[ "f1e307fcf51ef4e4cc95ac311f031e3521c1fbbf" ]
[ "dwi_utilities/comp_high_b.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\n@author: pritesh-mehta\n\"\"\"\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom pathlib import Path\nfrom argparse import ArgumentParser\n\nfrom dwi_utilities.monoexponential_decay import log_func, func\nimport dwi_utilities.nifti_utilities as nutil\n\ndef comp_high_b_case(case_dir, target_bval, save_case=False, output_dir=None, extension='.nii.gz'):\n \"\"\"Generate high b-value DWI using low b-value DWI (case)\n \"\"\"\n eps = 1e-8 \n \n data_stack = []\n bval_list = []\n filepaths = nutil.path_generator(case_dir)\n for path in filepaths:\n name, nii, data = nutil.load(path)\n data_stack.append(data)\n bval_list.append(name.replace('.nii.gz','').replace('b',''))\n \n # order data stack in order of ascending b-value\n bval_list, data_stack = \\\n zip(*sorted(zip(bval_list, data_stack)))\n \n # generate high b-value\n bval_list = np.array(bval_list)\n data = np.array(data_stack)\n \n shape = np.shape(data[0])\n highb_data = np.zeros(shape)\n \n for i in range(shape[0]):\n for j in range(shape[1]):\n for k in range(shape[2]):\n y = []\n for array in data:\n y.append(array[i][j][k])\n x = bval_list\n y = np.array(y) + eps\n z = np.log(y)\n popt, pcov = curve_fit(log_func, x, z)\n if popt[1] < 0:\n highb_data[i][j][k] = 0\n else:\n highb_data[i][j][k] = func(target_bval, np.exp(popt[0]), popt[1]) \n \n if save_case:\n case_name = Path(case_dir).parts[-1]\n save_path = Path(output_dir) / (case_name + extension)\n nutil.save(save_path, nii, highb_data)\n \n return highb_data\n\ndef comp_high_b_dir(cases_dir, target_bval, output_dir, extension='.nii.gz'):\n \"\"\"Generate high b-value DWI using low b-value DWI (directory)\n \"\"\"\n for case_dir in Path(cases_dir).iterdir():\n print(\"Processing:\", case_dir)\n comp_high_b_case(case_dir, target_bval, save_case=True, output_dir=output_dir, extension=extension)\n return None\n\ndef process():\n parser = ArgumentParser()\n parser.add_argument('--input_dir', required=True, type=str)\n parser.add_argument('--target_bval', required=True, type=int)\n parser.add_argument('--output_dir', required=True, type=str)\n parser.add_argument('--case', required=False, action=\"store_true\")\n parser.add_argument('--extension', required=False, type=str, default='.nii.gz')\n \n args = parser.parse_args()\n \n if args.case:\n comp_high_b_case(args.input_dir, args.target_bval, save_case=True, output_dir=args.output_dir, \n extension=args.extension)\n else:\n comp_high_b_dir(args.input_dir, args.target_bval, args.output_dir,\n extension=args.extension)\n \nif __name__ == \"__main__\":\n process()\n" ]
[ [ "numpy.zeros", "scipy.optimize.curve_fit", "numpy.exp", "numpy.log", "numpy.shape", "numpy.array" ] ]
SarderLab/HistomicsTK_PodoSighter
[ "9a75302f645bfb3dfd9688d247388c9948f4eadb" ]
[ "histomicstk/deeplab/utils/get_dataset_colormap.py" ]
[ "# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Visualizes the segmentation results via specified color map.\n\nVisualizes the semantic segmentation results by the color map\ndefined by the different datasets. Supported colormaps are:\n\n* ADE20K (http://groups.csail.mit.edu/vision/datasets/ADE20K/).\n\n* Cityscapes dataset (https://www.cityscapes-dataset.com).\n\n* Mapillary Vistas (https://research.mapillary.com).\n\n* PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom six.moves import range\n\n# Dataset names.\n_ADE20K = 'ade20k'\n_CITYSCAPES = 'cityscapes'\n_MAPILLARY_VISTAS = 'mapillary_vistas'\n_PASCAL = 'pascal'\n_PC1 = 'PC1'\n\n# Max number of entries in the colormap for each dataset.\n_DATASET_MAX_ENTRIES = {\n _ADE20K: 151,\n _CITYSCAPES: 256,\n _MAPILLARY_VISTAS: 66,\n _PASCAL: 512,\n _PC1: 256,\n}\n\ndef create_pc1_label_colormap():\n \"\"\"Creates a label colormap used in PC1 segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((256, 3), dtype=np.uint8)\n colormap[0] = [128, 64, 128]\n colormap[1] = [244, 35, 232]\n colormap[2] = [70, 70, 70]\n colormap[3] = [102, 102, 156]\n return colormap\n\ndef create_ade20k_label_colormap():\n \"\"\"Creates a label colormap used in ADE20K segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n return np.asarray([\n [0, 0, 0],\n [120, 120, 120],\n [180, 120, 120],\n [6, 230, 230],\n [80, 50, 50],\n [4, 200, 3],\n [120, 120, 80],\n [140, 140, 140],\n [204, 5, 255],\n [230, 230, 230],\n [4, 250, 7],\n [224, 5, 255],\n [235, 255, 7],\n [150, 5, 61],\n [120, 120, 70],\n [8, 255, 51],\n [255, 6, 82],\n [143, 255, 140],\n [204, 255, 4],\n [255, 51, 7],\n [204, 70, 3],\n [0, 102, 200],\n [61, 230, 250],\n [255, 6, 51],\n [11, 102, 255],\n [255, 7, 71],\n [255, 9, 224],\n [9, 7, 230],\n [220, 220, 220],\n [255, 9, 92],\n [112, 9, 255],\n [8, 255, 214],\n [7, 255, 224],\n [255, 184, 6],\n [10, 255, 71],\n [255, 41, 10],\n [7, 255, 255],\n [224, 255, 8],\n [102, 8, 255],\n [255, 61, 6],\n [255, 194, 7],\n [255, 122, 8],\n [0, 255, 20],\n [255, 8, 41],\n [255, 5, 153],\n [6, 51, 255],\n [235, 12, 255],\n [160, 150, 20],\n [0, 163, 255],\n [140, 140, 140],\n [250, 10, 15],\n [20, 255, 0],\n [31, 255, 0],\n [255, 31, 0],\n [255, 224, 0],\n [153, 255, 0],\n [0, 0, 255],\n [255, 71, 0],\n [0, 235, 255],\n [0, 173, 255],\n [31, 0, 255],\n [11, 200, 200],\n [255, 82, 0],\n [0, 255, 245],\n [0, 61, 255],\n [0, 255, 112],\n [0, 255, 133],\n [255, 0, 0],\n [255, 163, 0],\n [255, 102, 0],\n [194, 255, 0],\n [0, 143, 255],\n [51, 255, 0],\n [0, 82, 255],\n [0, 255, 41],\n [0, 255, 173],\n [10, 0, 255],\n [173, 255, 0],\n 
[0, 255, 153],\n [255, 92, 0],\n [255, 0, 255],\n [255, 0, 245],\n [255, 0, 102],\n [255, 173, 0],\n [255, 0, 20],\n [255, 184, 184],\n [0, 31, 255],\n [0, 255, 61],\n [0, 71, 255],\n [255, 0, 204],\n [0, 255, 194],\n [0, 255, 82],\n [0, 10, 255],\n [0, 112, 255],\n [51, 0, 255],\n [0, 194, 255],\n [0, 122, 255],\n [0, 255, 163],\n [255, 153, 0],\n [0, 255, 10],\n [255, 112, 0],\n [143, 255, 0],\n [82, 0, 255],\n [163, 255, 0],\n [255, 235, 0],\n [8, 184, 170],\n [133, 0, 255],\n [0, 255, 92],\n [184, 0, 255],\n [255, 0, 31],\n [0, 184, 255],\n [0, 214, 255],\n [255, 0, 112],\n [92, 255, 0],\n [0, 224, 255],\n [112, 224, 255],\n [70, 184, 160],\n [163, 0, 255],\n [153, 0, 255],\n [71, 255, 0],\n [255, 0, 163],\n [255, 204, 0],\n [255, 0, 143],\n [0, 255, 235],\n [133, 255, 0],\n [255, 0, 235],\n [245, 0, 255],\n [255, 0, 122],\n [255, 245, 0],\n [10, 190, 212],\n [214, 255, 0],\n [0, 204, 255],\n [20, 0, 255],\n [255, 255, 0],\n [0, 153, 255],\n [0, 41, 255],\n [0, 255, 204],\n [41, 0, 255],\n [41, 255, 0],\n [173, 0, 255],\n [0, 245, 255],\n [71, 0, 255],\n [122, 0, 255],\n [0, 255, 184],\n [0, 92, 255],\n [184, 255, 0],\n [0, 133, 255],\n [255, 214, 0],\n [25, 194, 194],\n [102, 255, 0],\n [92, 0, 255],\n ])\n\n\ndef create_cityscapes_label_colormap():\n \"\"\"Creates a label colormap used in CITYSCAPES segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((256, 3), dtype=np.uint8)\n colormap[0] = [128, 64, 128]\n colormap[1] = [244, 35, 232]\n colormap[2] = [70, 70, 70]\n colormap[3] = [102, 102, 156]\n colormap[4] = [190, 153, 153]\n colormap[5] = [153, 153, 153]\n colormap[6] = [250, 170, 30]\n colormap[7] = [220, 220, 0]\n colormap[8] = [107, 142, 35]\n colormap[9] = [152, 251, 152]\n colormap[10] = [70, 130, 180]\n colormap[11] = [220, 20, 60]\n colormap[12] = [255, 0, 0]\n colormap[13] = [0, 0, 142]\n colormap[14] = [0, 0, 70]\n colormap[15] = [0, 60, 100]\n colormap[16] = [0, 80, 100]\n colormap[17] = [0, 0, 230]\n colormap[18] = [119, 11, 32]\n return colormap\n\n\ndef create_mapillary_vistas_label_colormap():\n \"\"\"Creates a label colormap used in Mapillary Vistas segmentation benchmark.\n\n Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n return np.asarray([\n [165, 42, 42],\n [0, 192, 0],\n [196, 196, 196],\n [190, 153, 153],\n [180, 165, 180],\n [102, 102, 156],\n [102, 102, 156],\n [128, 64, 255],\n [140, 140, 200],\n [170, 170, 170],\n [250, 170, 160],\n [96, 96, 96],\n [230, 150, 140],\n [128, 64, 128],\n [110, 110, 110],\n [244, 35, 232],\n [150, 100, 100],\n [70, 70, 70],\n [150, 120, 90],\n [220, 20, 60],\n [255, 0, 0],\n [255, 0, 0],\n [255, 0, 0],\n [200, 128, 128],\n [255, 255, 255],\n [64, 170, 64],\n [128, 64, 64],\n [70, 130, 180],\n [255, 255, 255],\n [152, 251, 152],\n [107, 142, 35],\n [0, 170, 30],\n [255, 255, 128],\n [250, 0, 30],\n [0, 0, 0],\n [220, 220, 220],\n [170, 170, 170],\n [222, 40, 40],\n [100, 170, 30],\n [40, 40, 40],\n [33, 33, 33],\n [170, 170, 170],\n [0, 0, 142],\n [170, 170, 170],\n [210, 170, 100],\n [153, 153, 153],\n [128, 128, 128],\n [0, 0, 142],\n [250, 170, 30],\n [192, 192, 192],\n [220, 220, 0],\n [180, 165, 180],\n [119, 11, 32],\n [0, 0, 142],\n [0, 60, 100],\n [0, 0, 142],\n [0, 0, 90],\n [0, 0, 230],\n [0, 80, 100],\n [128, 64, 64],\n [0, 0, 110],\n [0, 0, 70],\n [0, 0, 192],\n [32, 32, 32],\n [0, 0, 0],\n [0, 0, 0],\n ])\n\n\ndef create_pascal_label_colormap():\n \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n\n 
Returns:\n A colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((_DATASET_MAX_ENTRIES[_PASCAL], 3), dtype=int)\n ind = np.arange(_DATASET_MAX_ENTRIES[_PASCAL], dtype=int)\n\n for shift in reversed(list(range(8))):\n for channel in range(3):\n colormap[:, channel] |= bit_get(ind, channel) << shift\n ind >>= 3\n\n return colormap\n\n\ndef get_ade20k_name():\n return _ADE20K\n\n\ndef get_cityscapes_name():\n return _CITYSCAPES\n\n\ndef get_mapillary_vistas_name():\n return _MAPILLARY_VISTAS\n\n\ndef get_pascal_name():\n return _PASCAL\n\ndef get_pc1_name():\n return _PC1\n\n\ndef bit_get(val, idx):\n \"\"\"Gets the bit value.\n\n Args:\n val: Input value, int or numpy int array.\n idx: Which bit of the input val.\n\n Returns:\n The \"idx\"-th bit of input val.\n \"\"\"\n return (val >> idx) & 1\n\n\ndef create_label_colormap(dataset=_PC1):\n \"\"\"Creates a label colormap for the specified dataset.\n\n Args:\n dataset: The colormap used in the dataset.\n\n Returns:\n A numpy array of the dataset colormap.\n\n Raises:\n ValueError: If the dataset is not supported.\n \"\"\"\n if dataset == _ADE20K:\n return create_ade20k_label_colormap()\n elif dataset == _CITYSCAPES:\n return create_cityscapes_label_colormap()\n elif dataset == _MAPILLARY_VISTAS:\n return create_mapillary_vistas_label_colormap()\n elif dataset == _PASCAL:\n return create_pascal_label_colormap()\n elif dataset == _PC1:\n return create_pc1_label_colormap()\n else:\n raise ValueError('Unsupported dataset.')\n\n\ndef label_to_color_image(label, dataset=_PC1):\n \"\"\"Adds color defined by the dataset colormap to the label.\n\n Args:\n label: A 2D array with integer type, storing the segmentation label.\n dataset: The colormap used in the dataset.\n\n Returns:\n result: A 2D array with floating type. The element of the array\n is the color indexed by the corresponding element in the input label\n to the dataset color map.\n\n Raises:\n ValueError: If label is not of rank 2 or its value is larger than color\n map maximum entry.\n \"\"\"\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))\n\n if np.max(label) >= _DATASET_MAX_ENTRIES[dataset]:\n raise ValueError(\n 'label value too large: {} >= {}.'.format(\n np.max(label), _DATASET_MAX_ENTRIES[dataset]))\n\n colormap = create_label_colormap(dataset)\n return colormap[label]\n\n\ndef get_dataset_colormap_max_entries(dataset):\n return _DATASET_MAX_ENTRIES[dataset]\n" ]
[ [ "numpy.arange", "numpy.max", "numpy.asarray", "numpy.zeros" ] ]
Jasonandy/Python-X
[ "2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe" ]
[ "cn/opencv/finger/finger.py" ]
[ "import cv2 as cv\nimport numpy as np\nimport math\nimport time\n\ncapture = cv.VideoCapture(0)\n\n# video = \"http://admin:[email protected]:8081/\" # admin是账号:admin是密码 后面是局域网\n# capture = cv.VideoCapture(video)\n\n\n# 获得欧几里距离\ndef _get_eucledian_distance(vect1, vect2):\n distant = vect1[0] - vect2[0]\n dist = np.sqrt(np.sum(np.square(distant)))\n # 或者用numpy内建方法\n # vect1 = list(vect1)\n # vect2 = list(vect2)\n # dist = np.linalg.norm(vect1 - vect2)\n return dist\n\n\ndef gesture_recognition():\n\n while True:\n ret, frame = capture.read() # 读取摄像头\n # frame = cv.flip(frame, 1)\n fgbg = cv.createBackgroundSubtractorMOG2() # 利用BackgroundSubtractorMOG2算法消除背景\n # fgmask = bgModel.apply(frame)\n fgmask = fgbg.apply(frame)\n # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n # res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n kernel = np.ones((5, 5), np.uint8)\n fgmask = cv.erode(fgmask, kernel, iterations=1) # 膨胀\n res = cv.bitwise_and(frame, frame, mask=fgmask)\n ycrcb = cv.cvtColor(res, cv.COLOR_BGR2YCrCb) # 分解为YUV图像,得到CR分量\n (_, cr, _) = cv.split(ycrcb)\n cr1 = cv.GaussianBlur(cr, (5, 5), 0) # 高斯滤波\n _, skin = cv.threshold(cr1, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU) # OTSU图像二值化\n # dst = cv.GaussianBlur(frame, (3, 3), 0)\n # gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n # ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n # cv.imshow(\"binary_image\", binary)\n # hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) # hsv 色彩空间 分割肤色\n # ycrcb = cv.cvtColor(frame, cv.COLOR_BGR2YCrCb) # Ycrcb 色彩空间 分割肤色\n # # lower_hsv = np.array([0, 15, 0])\n # # upper_hsv = np.array([17, 170, 255])\n # lower_ycrcb = np.array([0, 135, 85])\n # upper_ycrcb = np.array([255, 180, 135])\n # # mask = cv.inRange(hsv, lowerb=lower_hsv, upperb=upper_hsv) # hsv 掩码\n # mask = cv.inRange(ycrcb, lowerb=lower_ycrcb, upperb=upper_ycrcb) # ycrcb 掩码\n # dst = cv.GaussianBlur(mask, (11, 11), 0) # 高斯去噪\n # gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n\n # edge_output = cv.Canny(gray, 50, 150) # 图像边缘提取\n # kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3)) # 获取图像结构化元素\n # # dst = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel) # 开操作\n # dst = cv.erode(skin, kernel) # 膨胀操作\n gesture_roi = skin[0:350, 380:700]\n cv.imshow(\"dst_demo\", skin)\n # cv.imshow(\"gesture_roi\", gesture_roi)\n contours, heriachy = cv.findContours(gesture_roi, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) # 获取轮廓点集(坐标)\n # if contours[0] < [30, 260]:\n # cnt = contours[0]\n # elif 270 <= contours[0] < [60, 260]:\n # cnt = contours[1]\n # else:\n # cnt = contours[2]\n # cnt = contours[0]\n # print(cnt)\n # print(contours)\n # cnt = contours[0]\n for i, contour in enumerate(contours): # 获取轮廓\n cv.drawContours(frame[0:350, 380:700], contours, i, (255, 0, 0), 1) # 绘制轮廓\n # 得到面积\n # area = cv.contourArea(contour)\n # 得到外接矩形\n # x, y, w, h = cv.boundingRect(contour)\n # 得到的几何距是字典类型的\n # mm = cv.moments(contour)\n # cx = mm['m10']/mm['m00']\n # cy = mm['m01']/mm['m00']\n # center, radius = cv.minEnclosingCircle(contour)\n # center = (int(x), int(y))\n # radius = int(radius)\n # cv.circle(frame, center, radius, (0, 255, 255), 2)\n # cv.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\n # print(i)\n # cv.imshow(\"measure_contures\", frame)\n x, y, w, h = cv.boundingRect(contour)\n # center = (int(x), int(y))\n cv.rectangle(frame[0:350, 380:700], (x, y), (x + w, y + h), (100, 100, 0), 1)\n # approxcurve = cv.approxPolyDP(contour, 4, False)\n # if approxcurve.shape[0] < 5:\n # cv.drawContours(frame, contours, -1, (0, 
255, 0), 3)\n\n hull = cv.convexHull(contour, True, returnPoints=False) # 获得凸包点 x, y坐标\n defects = cv.convexityDefects(contour, hull) # 计算轮廓的凹点\n # print(hull, defects)\n # cv.polylines(frame[0:350, 380:700], [hull], True, (0, 255, 0), 3)\n \"\"\"\n defect反馈的是Nx4的数组,\n 第一列表示的是起点(轮廓集合中点的编号)\n 第二列表示的是终点(轮廓集合中点的编号)\n 第三列表示的是最远点(轮廓集合中点的编号)\n 第四列表示的是最远点到凸轮廓的最短距离\n \"\"\"\n # cv.drawContours(frame[0:350, 380:700], hull, -1, (255, 0, 0), 5, 8) # 绘制凸包\n\n # dist = np.sqrt(np.sum(np.square(vect1 - vect2)))\n ndefects = 0\n if defects is not None: # 重要!\n\n for i in range(defects.shape[0]):\n s, e, f, d = defects[i, 0]\n # float(s)\n # float(e)\n # float(f)\n # float(d)\n start = tuple(contour[s][0]) # 起点\n end = tuple(contour[e][0]) # 终点\n far = tuple(contour[f][0]) # 最远点\n a = _get_eucledian_distance(start, end)\n b = _get_eucledian_distance(start, far)\n c = _get_eucledian_distance(end, far)\n angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n cv.line(frame[0:350, 380:700], start, end, [255, 255, 0], 2)\n cv.circle(frame[0:350, 380:700], far, 5, [0, 0, 255], -1)\n if angle <= math.pi / 5: # <30度:\n ndefects = ndefects + 1\n print(\"数字 = %f\" % ndefects)\n\n\n # cv.polylines(frame[50:350, 380:700], [hull], True, (0, 255, 0), 2)\n # retval = cv.pointPolygonTest(contour, center, True)\n # cv.drawContours(frame, defects, -1, (0, 255, 0), 3)\n # cv.imshow(\"defects\", defects)\n cv.imshow(\"video\", frame)\n c = cv.waitKey(50)\n if c == 27:\n\n break\n\n\ndef gesture_recognition_two():\n img = cv.imread(\"E:/pictureprocessing/practice/picture/practice_one.png\")\n img = cv.flip(img, 1)\n # dst = cv.GaussianBlur(frame, (3, 3), 0)\n # gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n # ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n # cv.imshow(\"binary_image\", binary)\n # hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV) # 通过hsv将颜色过滤出来\n # lower_hsv = np.array([100, 43, 46])\n # upper_hsv = np.array([124, 255, 255])\n # mask = cv.inRange(hsv, lowerb=lower_hsv, upperb=upper_hsv)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n dst = cv.GaussianBlur(binary, (1, 1), 0) # 高斯去噪\n # cv.imshow(\"dst_demo\", dst)\n contours, heriachy = cv.findContours(dst, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) # 获取轮廓本身\n for i, contour in enumerate(contours): # 获取轮廓\n cv.drawContours(img, contours, i, (0, 255, 0), 3) # 绘制轮廓\n print(i)\n\n cv.imshow(\"img_demo\", img)\n\n\ncv.namedWindow(\"video\")\ngesture_recognition()\n# gesture_recognition_two()\n\ncv.waitKey(0)\ncapture.release()\ncv.destroyAllWindows()" ]
[ [ "numpy.ones", "numpy.square" ] ]
winnerineast/pythia
[ "b6fe288405490f6e02a3e59dbf32a181aee35645" ]
[ "pythia/utils/general.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nimport collections\nimport gc\nimport os\nfrom bisect import bisect\n\nimport requests\nimport torch\nimport tqdm\nimport yaml\nfrom torch import nn\n\n\ndef lr_lambda_update(i_iter, cfg):\n if (\n cfg[\"training_parameters\"][\"use_warmup\"] is True\n and i_iter <= cfg[\"training_parameters\"][\"warmup_iterations\"]\n ):\n alpha = float(i_iter) / float(cfg[\"training_parameters\"][\"warmup_iterations\"])\n return cfg[\"training_parameters\"][\"warmup_factor\"] * (1.0 - alpha) + alpha\n else:\n idx = bisect(cfg[\"training_parameters\"][\"lr_steps\"], i_iter)\n return pow(cfg[\"training_parameters\"][\"lr_ratio\"], idx)\n\n\ndef clip_gradients(model, i_iter, writer, config):\n # TODO: Fix question model retrieval\n max_grad_l2_norm = config[\"training_parameters\"][\"max_grad_l2_norm\"]\n clip_norm_mode = config[\"training_parameters\"][\"clip_norm_mode\"]\n\n if max_grad_l2_norm is not None:\n if clip_norm_mode == \"all\":\n norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_l2_norm)\n\n writer.add_scalars({\"grad_norm\": norm}, i_iter)\n\n elif clip_norm_mode == \"question\":\n question_embedding = model.module.question_embedding_module\n norm = nn.utils.clip_grad_norm(\n question_embedding.parameters(), max_grad_l2_norm\n )\n\n writer.add_scalars({\"question_grad_norm\": norm}, i_iter)\n else:\n raise NotImplementedError(\n \"Clip norm mode %s not implemented\" % clip_norm_mode\n )\n\n\ndef ckpt_name_from_core_args(config):\n return \"%s_%s_%s_%d\" % (\n config[\"tasks\"],\n config[\"datasets\"],\n config[\"model\"],\n config[\"training_parameters\"][\"seed\"],\n )\n\n\ndef foldername_from_config_override(args):\n cfg_override = None\n if hasattr(args, \"config_override\"):\n cfg_override = args.config_override\n elif \"config_override\" in args:\n cfg_override = args[\"config_override\"]\n\n folder_name = \"\"\n if cfg_override is not None and len(cfg_override) > 0:\n folder_name = yaml.safe_dump(cfg_override, default_flow_style=True)\n folder_name = folder_name.replace(\":\", \".\").replace(\"\\n\", \" \")\n folder_name = folder_name.replace(\"/\", \"_\")\n folder_name = \" \".join(folder_name.split())\n folder_name = folder_name.replace(\". 
\", \".\").replace(\" \", \"_\")\n folder_name = \"_\" + folder_name\n return folder_name\n\n\ndef get_pythia_root():\n from pythia.common.registry import registry\n\n pythia_root = registry.get(\"pythia_root\", no_warning=True)\n if pythia_root is None:\n pythia_root = os.path.dirname(os.path.abspath(__file__))\n pythia_root = os.path.abspath(os.path.join(pythia_root, \"..\"))\n registry.register(\"pythia_root\", pythia_root)\n return pythia_root\n\n\ndef download_file(url, output_dir=\".\", filename=\"\"):\n if len(filename) == 0:\n filename = os.path.join(\".\", url.split(\"/\")[-1])\n\n os.makedirs(output_dir, exist_ok=True)\n\n filename = os.path.join(output_dir, filename)\n r = requests.get(url, stream=True)\n\n file_size = int(r.headers[\"Content-Length\"])\n chunk_size = 1024 * 1024\n num_bars = int(file_size / chunk_size)\n\n with open(filename, \"wb\") as fh:\n for chunk in tqdm.tqdm(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit=\"MB\",\n desc=filename,\n leave=True,\n ):\n fh.write(chunk)\n\n\ndef get_optimizer_parameters(model, config):\n parameters = model.parameters()\n\n has_custom = hasattr(model, \"get_optimizer_parameters\")\n if has_custom:\n parameters = model.get_optimizer_parameters(config)\n\n is_parallel = isinstance(model, nn.DataParallel)\n\n if is_parallel and hasattr(model.module, \"get_optimizer_parameters\"):\n parameters = model.module.get_optimizer_parameters(config)\n\n return parameters\n\n\ndef dict_to_string(dictionary):\n logs = []\n if dictionary is None:\n return \"\"\n for key, val in dictionary.items():\n if hasattr(val, \"item\"):\n val = val.item()\n # if key.count('_') == 2:\n # key = key[key.find('_') + 1:]\n logs.append(\"%s: %.4f\" % (key, val))\n\n return \", \".join(logs)\n\n\ndef get_overlap_score(candidate, target):\n \"\"\"Takes a candidate word and a target word and returns the overlap\n score between the two.\n\n Parameters\n ----------\n candidate : str\n Candidate word whose overlap has to be detected.\n target : str\n Target word against which the overlap will be detected\n\n Returns\n -------\n float\n Overlap score betwen candidate and the target.\n\n \"\"\"\n if len(candidate) < len(target):\n temp = candidate\n candidate = target\n target = temp\n overlap = 0.0\n while len(target) >= 2:\n if target in candidate:\n overlap = len(target)\n return overlap * 1.0 / len(candidate)\n else:\n target = target[:-1]\n return 0.0\n\n\ndef updir(d, n):\n \"\"\"Given path d, go up n dirs from d and return that path\"\"\"\n ret_val = d\n for _ in range(n):\n ret_val = os.path.dirname(ret_val)\n return ret_val\n\n\ndef print_cuda_usage():\n print(\"Memory Allocated:\", torch.cuda.memory_allocated() / (1024 * 1024))\n print(\"Max Memory Allocated:\", torch.cuda.max_memory_allocated() / (1024 * 1024))\n print(\"Memory Cached:\", torch.cuda.memory_cached() / (1024 * 1024))\n print(\"Max Memory Cached:\", torch.cuda.max_memory_cached() / (1024 * 1024))\n\n\ndef get_current_tensors():\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (\n hasattr(obj, \"data\") and torch.is_tensor(obj.data)\n ):\n print(type(obj), obj.size())\n except:\n pass\n" ]
[ [ "torch.cuda.memory_cached", "torch.is_tensor", "torch.cuda.max_memory_allocated", "torch.cuda.memory_allocated", "torch.cuda.max_memory_cached" ] ]
Qianna00/mmdetection
[ "31e7dff4c61000002d27117543b85e68d2619b4c" ]
[ "mmdet/models/detectors/two_stage_with_MetaEmbedding.py" ]
[ "import torch\nimport torch.nn as nn\nimport numpy as np\n\n# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\nfrom tqdm import tqdm\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmcv import Config\nfrom mmdet.core import bbox2roi\nfrom functools import partial\nfrom torch.utils.data.dataloader import DataLoader\n\n\[email protected]_module()\nclass TwoStageDetectorMetaEmbedding(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n init_centroids=False,\n pretrained=None):\n super(TwoStageDetectorMetaEmbedding, self).__init__()\n self.backbone = build_backbone(backbone)\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.init_centroids = init_centroids\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n \"\"\"if self.init_centroids:\n for p in self.parameters():\n p.requires_grad = False\"\"\"\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n self.roi_head = build_head(roi_head)\n\n if self.init_centroids:\n self.centroids = self.roi_head.loss_feat.centroids.data\n else:\n self.centroids = None\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n if roi_head[\"type\"] == \"MetaEmbedding_RoIHead\":\n # calculate init_centroids using training dataset\n if self.train_cfg is not None:\n if init_centroids:\n cfg = Config.fromfile(\n \"/mmdetection/configs/faster_rcnn_meta/faster_rcnn_r50_c4_meta_smd_stage2.py\")\n dataset = build_dataset(cfg.centroids_cal)\n # data = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=0, num_gpus=1, shuffle=False)\n # print(data[0])\n self.roi_head.loss_feat.centroids.data = self.centroids_cal(dataset)\n\n @property\n def with_rpn(self):\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def init_weights(self, pretrained=None):\n super(TwoStageDetectorMetaEmbedding, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_roi_head:\n self.roi_head.init_weights(pretrained)\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck\n \"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n 
outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs, )\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n \"\"\"roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\"\"\"\n\n\n roi_losses = self.roi_head(x,\n centroids=self.centroids,\n img_metas=img_metas,\n proposal_list=proposal_list,\n gt_bboxes=gt_bboxes,\n gt_labels=gt_labels,\n gt_bboxes_ignore=gt_bboxes_ignore,\n gt_masks=gt_masks,\n test=False,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n # assert self.with_bbox, 'Bbox head must be implemented.'\n\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head(x,\n centroids=self.centroids,\n proposal_list=proposal_list,\n img_metas=img_metas,\n test=True)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # recompute feats to save memory\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def centroids_cal(self, data):\n\n 
centroids = torch.zeros(self.roi_head.num_classes,\n self.roi_head.feat_dim,\n 14,\n 14).cuda()\n\n print('Calculating centroids.')\n\n # Calculate initial centroids only on training data.\n with torch.set_grad_enabled(False):\n self.backbone.cuda()\n self.rpn_head.cuda()\n self.roi_head.cuda()\n class_data_num = [0, 0, 0, 0, 0, 0]\n # class_data_num = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for i in tqdm(range(len(data))):\n \"\"\"imgs, gt_labels, gt_bboxes, img_metas = inputs[\"img\"], \\\n inputs[\"gt_labels\"], \\\n inputs[\"gt_bboxes\"],\\\n inputs[\"img_metas\"]\"\"\"\n imgs, gt_labels, gt_bboxes, img_metas = \\\n torch.unsqueeze(data[i]['img'], 0).to(next(self.backbone.parameters()).device), \\\n [data[i]['gt_labels'].to(next(self.backbone.parameters()).device)], \\\n [data[i]['gt_bboxes'].to(next(self.backbone.parameters()).device)], \\\n [data[i]['img_metas']]\n # Calculate Features of each training data\n feats = self.backbone(imgs)\n \"\"\"proposal_list = self.rpn_head.simple_test_rpn(feats, img_metas)\n num_imgs = len(img_metas)\n # if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = self.roi_head.std_roi_head.bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = self.roi_head.std_roi_head.bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in feats])\n sampling_results.append(sampling_result)\n\n rois = bbox2roi([res.bboxes for res in sampling_results])\"\"\"\n rois = bbox2roi(gt_bboxes)\n bbox_feats = self.roi_head.std_roi_head.bbox_roi_extractor(\n feats[:self.roi_head.std_roi_head.bbox_roi_extractor.num_inputs], rois)\n\n \"\"\"labels = self.roi_head.std_roi_head.bbox_head.get_targets(sampling_results, gt_bboxes,\n gt_labels, self.train_cfg.rcnn)[0]\n # Add all calculated features to center tensor\n for i in range(len(labels)):\n label = labels[i]\n if label < self.roi_head.num_classes:\n centroids[label] += bbox_feats[i]\n class_data_num[label] += 1\"\"\"\n for j in range(len(gt_labels[0])):\n label = gt_labels[0][j]\n centroids[label] += bbox_feats[j]\n class_data_num[label] += 1\n for i in range(len(class_data_num)):\n if class_data_num[i] == 0:\n class_data_num[i] = 1\n\n # Average summed features with class count\n centroids /= torch.tensor(class_data_num).float().unsqueeze(1).unsqueeze(2).\\\n unsqueeze(3).repeat(1, 1024, 14, 14).cuda()\n\n return centroids\n\n\ndef class_count(data):\n labels = np.array(data.dataset.labels)\n class_data_num = []\n for l in np.unique(labels):\n class_data_num.append(len(labels[labels == l]))\n return class_data_num" ]
[ [ "torch.unsqueeze", "torch.randn", "torch.set_grad_enabled", "torch.zeros", "torch.tensor", "torch.cuda.is_available", "numpy.array", "numpy.unique" ] ]
BUPT-GAMMA/GammaGL
[ "2b9f32e1ac3533cb75a063243e8a2fa654466d18" ]
[ "profiler/mpops/ms_gpu.py" ]
[ "# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n# @Time : 2022/04/14 08:36\n# @Author : clear\n# @FileName: ms_gpu.py\n\nimport os\nos.environ['TL_BACKEND'] = 'mindspore'\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\nimport time\nimport numpy as np\nimport tensorlayerx as tlx\nfrom gammagl.mpops import *\n\nedge_index = np.load('edge_index.npy')\nnum_nodes = int(np.max(edge_index))+1\nsrc = edge_index[0,:]\ndst = edge_index[1,:]\nsrc = tlx.convert_to_tensor(src, tlx.int32)\ndst = tlx.convert_to_tensor(dst, tlx.int32)\nmsg = tlx.convert_to_tensor(np.random.randn(edge_index.shape[1], 500), dtype=tlx.float32)\n\n\nstart_t = time.time()\nfor j in range(200):\n # msg = tlx.gather(x, src)\n # unsorted_segment_sum(msg, dst, num_nodes)\n # unsorted_segment_mean(msg, dst, num_nodes)\n unsorted_segment_max(msg, dst, num_nodes)\nprint(\"{:.3f}\".format(time.time()-start_t))\n# pf.stop()\n# print(pf.output_text(unicode=True, color=True))\n\n\ndst = tlx.convert_to_numpy(dst)\nidx = np.argsort(dst)\ndst = tlx.gather(tlx.convert_to_tensor(dst, dtype=tlx.int32), tlx.convert_to_tensor(idx,dtype=tlx.int32))\n\n# pf.start()\nstart_t = time.time()\nfor j in range(200):\n # msg = tlx.gather(x, src)\n # segment_sum(msg, dst, num_nodes)\n # segment_mean(msg, dst, num_nodes)\n segment_max(msg, dst, num_nodes)\nprint(\"{:.3f}\".format(time.time()-start_t))\n# pf.stop()\n# print(pf.output_text(unicode=True, color=True))" ]
[ [ "numpy.random.randn", "numpy.load", "numpy.max", "numpy.argsort" ] ]
karimul/ebm-sampling
[ "c8c8565a21fde52ac71598f20625857c4ccb8b67" ]
[ "sampling/resgld.py" ]
[ "import torch\nimport numpy as np\nfrom autograd.numpy import sqrt\n\ndef gen_image_resgld(label, FLAGS, model, im_neg, num_steps, sample=False):\n\n im_noise = torch.randn_like(im_neg).detach()\n\n T_multiply=0.9\n T = 0.9\n var=0.1\n resgld_beta_high = im_neg\n resgld_beta_low = im_neg\n swaps = 0\n\n noise_scale = sqrt(2e-6 * FLAGS.step_lr * T)\n\n print(\"noise_scale : \", noise_scale)\n print(\"noise_scale * T_multiply: \", noise_scale* T_multiply)\n\n im_negs_samples = []\n\n for i in range(num_steps):\n im_noise.normal_()\n\n resgld_beta_low = resgld_beta_low + noise_scale * im_noise\n resgld_beta_high = resgld_beta_high + noise_scale * T_multiply * im_noise\n\n resgld_beta_high.requires_grad_(requires_grad=True)\n energy_high = model.forward(resgld_beta_high, label)\n\n resgld_beta_low.requires_grad_(requires_grad=True)\n energy_low = model.forward(resgld_beta_low, label)\n\n im_grad_low = torch.autograd.grad([energy_low.sum()], [resgld_beta_low])[0]\n im_grad_high = torch.autograd.grad([energy_high.sum()], [resgld_beta_high])[0]\n \n if i == num_steps - 1:\n im_neg_orig = resgld_beta_low\n resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low \n resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high \n\n if FLAGS.dataset in (\"cifar10\", \"celeba\", \"cats\"):\n n = 128\n elif FLAGS.dataset == \"celebahq\":\n # Save space\n n = 128\n elif FLAGS.dataset == \"lsun\":\n # Save space\n n = 32\n elif FLAGS.dataset == \"object\":\n # Save space\n n = 32\n elif FLAGS.dataset == \"mnist\":\n n = 128\n elif FLAGS.dataset == \"imagenet\":\n n = 32\n elif FLAGS.dataset == \"stl\":\n n = 32\n\n im_neg_kl = im_neg_orig[:n]\n if sample:\n pass\n else:\n energy = model.forward(im_neg_kl, label)\n im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]\n\n im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]\n im_neg_kl = torch.clamp(im_neg_kl, 0, 1)\n else:\n resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low\n resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high * T_multiply\n\n dT = 1 / T - 1 / (T * T_multiply)\n swap_rate = torch.exp(dT * (energy_low - energy_high - dT * var))\n intensity_r = 0.1\n # print(\"swap_rate\", swap_rate)\n swap_rate = swap_rate.mean().item()\n print(\"swap_rate\", swap_rate)\n random = np.random.uniform(0, 1)\n print(\"random\", random)\n if random < intensity_r * swap_rate:\n resgld_beta_high, resgld_beta_low = resgld_beta_low, resgld_beta_high\n swaps += 1\n print(\"swaps : \", swaps)\n\n im_neg = resgld_beta_low.detach()\n\n if sample:\n im_negs_samples.append(im_neg)\n\n im_neg = torch.clamp(im_neg, 0, 1)\n\n if sample:\n return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad_low.detach().cpu().numpy()).mean()\n else:\n return im_neg, im_neg_kl, np.abs(im_grad_low.detach().cpu().numpy()).mean()" ]
[ [ "numpy.random.uniform", "torch.clamp", "torch.randn_like", "torch.exp" ] ]
jkkummerfeld/lamb
[ "769adaa76394dc74746c2fd8d31afe9c3ca7895b" ]
[ "lamb/nascell.py" ]
[ "# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"rnn_cell.NASCell adapted to support transforms.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\n\nclass NASCell(tf.nn.rnn_cell.RNNCell):\n \"\"\"Neural Architecture Search (NAS) recurrent network cell.\n\n This implements the recurrent cell from the paper:\n\n https://arxiv.org/abs/1611.01578\n\n Barret Zoph and Quoc V. Le.\n \"Neural Architecture Search with Reinforcement Learning\" Proc. ICLR 2017.\n\n The class uses an optional projection layer.\n \"\"\"\n\n def __init__(self, num_units, num_proj=None,\n use_biases=False, reuse=None,\n initializer=None,\n input_transform=None,\n state_transform=None,\n update_transform=None):\n \"\"\"Initialize the parameters for a NAS cell.\n\n Args:\n num_units: int, The number of units in the NAS cell\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n use_biases: (optional) bool, If True then use biases within the cell. This\n is False by default.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n initializer: Initializer for the variables.\n input_transform: None, or a function of one argument that\n massages the input in some way. 
For example, variational\n dropout can be implemted by passing a Dropout object here.\n state_transform: Similar to input_transform, this is\n applied to the recurrent state.\n update_transform: Similar to input_transform, this is\n applied to the proposed update ('j').\n \"\"\"\n super(NASCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._num_proj = num_proj\n self._use_biases = use_biases\n self._reuse = reuse\n\n if num_proj is not None:\n self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_proj)\n self._output_size = num_proj\n else:\n self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_units)\n self._output_size = num_units\n self._initializer = initializer\n self._input_transform = input_transform\n self._state_transform = state_transform\n assert update_transform is None\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def call(self, inputs, state):\n \"\"\"Run one step of NAS Cell.\n\n Args:\n inputs: input Tensor, 2D, batch x num_units.\n state: This must be a tuple of state Tensors, both `2-D`, with column\n sizes `c_state` and `m_state`.\n\n Returns:\n A tuple containing:\n - A `2-D, [batch x output_dim]`, Tensor representing the output of the\n NAS Cell after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of NAS Cell after reading `inputs`\n when the previous state was `state`. Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n sigmoid = tf.sigmoid\n tanh = tf.tanh\n relu = tf.nn.relu\n\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n\n def maybe_transform(transform, x):\n if transform is None:\n return x\n else:\n return transform(x)\n\n (c_prev, m_prev) = state\n m_prev = maybe_transform(self._state_transform, m_prev)\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n inputs = maybe_transform(self._input_transform, inputs)\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n # Variables for the NAS cell. 
W_m is all matrices multiplying the\n # hiddenstate and W_inputs is all matrices multiplying the inputs.\n concat_w_m = tf.get_variable(\n \"recurrent_kernel\", [num_proj, 8 * self._num_units],\n initializer=self._initializer, dtype=dtype)\n concat_w_inputs = tf.get_variable(\n \"kernel\", [input_size.value, 8 * self._num_units],\n initializer=self._initializer, dtype=dtype)\n\n m_matrix = tf.matmul(m_prev, concat_w_m)\n inputs_matrix = tf.matmul(inputs, concat_w_inputs)\n\n if self._use_biases:\n b = tf.get_variable(\n \"bias\",\n shape=[8 * self._num_units],\n initializer=tf.zeros_initializer(),\n dtype=dtype)\n m_matrix = tf.nn.bias_add(m_matrix, b)\n\n # The NAS cell branches into 8 different splits for both the hiddenstate\n # and the input\n m_matrix_splits = tf.split(axis=1, num_or_size_splits=8,\n value=m_matrix)\n inputs_matrix_splits = tf.split(axis=1, num_or_size_splits=8,\n value=inputs_matrix)\n\n # First layer\n layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])\n layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])\n layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])\n layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])\n layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])\n layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])\n layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])\n layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])\n\n # Second layer\n l2_0 = tanh(layer1_0 * layer1_1)\n l2_1 = tanh(layer1_2 + layer1_3)\n l2_2 = tanh(layer1_4 * layer1_5)\n l2_3 = sigmoid(layer1_6 + layer1_7)\n\n # Inject the cell\n l2_0 = tanh(l2_0 + c_prev)\n\n # Third layer\n l3_0_pre = l2_0 * l2_1\n new_c = l3_0_pre # create new cell\n l3_0 = l3_0_pre\n l3_1 = tanh(l2_2 + l2_3)\n\n # Final layer\n new_m = tanh(l3_0 * l3_1)\n\n # Projection layer if specified\n if self._num_proj is not None:\n concat_w_proj = tf.get_variable(\n \"projection_weights\", [self._num_units, self._num_proj],\n dtype)\n new_m = tf.matmul(new_m, concat_w_proj)\n\n new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_m)\n return new_m, new_state\n" ]
[ [ "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.split", "tensorflow.compat.v1.nn.rnn_cell.LSTMStateTuple", "tensorflow.compat.v1.nn.bias_add", "tensorflow.compat.v1.get_variable" ] ]
sert121/ivy
[ "286f86e487b0c83d46a3ef8d30aa96316337db32" ]
[ "ivy/functional/backends/tensorflow/array_api/linear_algebra.py" ]
[ "# global\nimport tensorflow as tf\nfrom tensorflow.python.types.core import Tensor\nfrom typing import Union, Optional, Tuple, Literal\n\n# local\nfrom ivy import inf\n\n\n# noinspection PyUnusedLocal,PyShadowingBuiltins\ndef vector_norm(x: Tensor,\n axis: Optional[Union[int, Tuple[int]]] = None, \n keepdims: bool = False,\n ord: Union[int, float, Literal[inf, - inf]] = 2)\\\n -> Tensor:\n\n if ord == -float('inf'):\n tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)\n elif ord == -1:\n tn_normalized_vector = tf.reduce_sum(tf.abs(x)**ord, axis, keepdims)**(1./ord)\n\n elif ord == 0:\n tn_normalized_vector = tf.reduce_sum(tf.cast(x != 0, 'float32'), axis, keepdims).numpy()\n\n else:\n tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)\n\n if tn_normalized_vector.shape == tuple():\n return tf.expand_dims(tn_normalized_vector, 0)\n return tn_normalized_vector\n" ]
[ [ "tensorflow.cast", "tensorflow.abs", "tensorflow.linalg.norm", "tensorflow.expand_dims" ] ]
ddboline/kaggle_predict_west_nile
[ "b4dbb3eed450beaf2b73d2a772e0fb3266926418" ]
[ "feature_extraction.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 1 16:28:06 2015\n\n@author: ddboline\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport csv\nimport gzip\n\nimport numpy as np\nimport pandas as pd\n\nfrom dateutil.parser import parse\n\nWEATHER_VARS_WITH_M_T = (u'Tmax', u'Tmin', u'Tavg', u'Depart', u'DewPoint',\n u'WetBulb', u'Heat', u'Cool', u'Snowfall',\n u'PrecipTotal', u'StnPressure', u'SeaLevel',\n u'ResultSpeed', u'ResultDir', u'AvgSpeed', u'Water1')\n\nWEATHER_PHENOMENA = ('BCFG', 'BLDU', 'BLSN', 'BR', 'DU', 'DZ', 'FG', 'FG+',\n 'FU', 'FZDZ', 'FZFG', 'FZRA', 'GR', 'GS', 'HZ', 'MIFG',\n 'PL', 'PRFG', 'RA', 'SG', 'SN', 'SQ', 'TS', 'TSRA',\n 'TSSN', 'UP', 'VCFG', 'VCTS')\n\ndef haversine_distance(lat1, lon1, lat2, lon2):\n r_earth = 6371.\n dlat = np.abs(lat1-lat2)*np.pi/180.\n dlon = np.abs(lon1-lon2)*np.pi/180.\n lat1 *= np.pi/180.\n lat2 *= np.pi/180.\n dist = 2. * r_earth * np.arcsin(\n np.sqrt(\n np.sin(dlat/2.)**2 +\n np.cos(lat1) * np.cos(lat2) *\n np.sin(dlon/2.)**2))\n return dist\n\ndef lat_lon_box(lat, lon, dist):\n r_earth = 6371.\n d_2r = dist/(2.*r_earth)\n dlat = 2. * (d_2r)\n dlon = 2. * np.arcsin((np.sin(d_2r))/(np.cos(lat)))\n dlat *= 180./np.pi\n dlon *= 180./np.pi\n return abs(dlat), abs(dlon)\n\ndef feature_extraction():\n spray_df = pd.read_csv('spray.csv.gz', compression='gzip')\n\n spray_lat_lon_list = []\n for idx, row in spray_df.iterrows():\n spray_lat_lon_list.append((row['Latitude'], row['Longitude']))\n\n weather_features = []\n cumu_labels = ('Tmax', 'Tmin', 'PrecipTotal')\n cumu_features = {}\n cumu_total = 0\n current_year = -1\n with gzip.open('weather.csv.gz', 'r') as wfile:\n wcsv = csv.reader(wfile)\n weather_labels = next(wcsv)\n for row in wcsv:\n rowdict = dict(zip(weather_labels, row))\n rowdict['Date'] = parse(rowdict['Date'])\n current_date = rowdict['Date']\n if current_date.year != current_year:\n current_year = current_date.year\n cumu_features = {k: 0 for k in cumu_labels}\n cumu_total = 0\n for k in WEATHER_VARS_WITH_M_T:\n if k in rowdict:\n rowdict[k] = rowdict[k].replace('M', 'nan')\n rowdict[k] = rowdict[k].replace('T', '0.0')\n for k in rowdict:\n if rowdict[k] == '-':\n rowdict[k] = 'nan'\n if type(rowdict[k]) == str:\n rowdict[k] = rowdict[k].strip()\n for ph in WEATHER_PHENOMENA:\n rowdict['wp%s' % ph] = '0'\n for ph in rowdict['CodeSum'].split():\n if ph in WEATHER_PHENOMENA:\n rowdict['wp%s' % ph] = '1'\n for lab in cumu_labels:\n _tmp = float(rowdict[lab])\n if not np.isnan(_tmp):\n cumu_features[lab] += _tmp\n cumu_total += 1\n for lab in ('Tmax', 'Tmin', 'PrecipTotal'):\n rowdict['%s_cumu' % lab] = cumu_features[lab] / cumu_total\n weather_features.append(rowdict)\n# print('\\n'.join(['%s: %s' % (k, rowdict[k]) for k in rowdict]))\n# exit(0)\n for ph in WEATHER_PHENOMENA:\n weather_labels.append('wp%s' % ph)\n for lab in cumu_labels:\n weather_labels.append('%s_cumu' % lab)\n\n\n for prefix in 'train', 'test':\n with gzip.open('%s.csv.gz' % prefix, 'rb') as csvfile:\n outfile = gzip.open('%s_full.csv.gz' % prefix, 'wb')\n csv_reader = csv.reader(csvfile)\n labels = next(csv_reader)\n\n out_labels = labels +\\\n ['n_spray_%d' % x for x in range(1,11)]\n for lab in weather_labels:\n if lab == 'Date':\n continue\n out_labels.append(lab)\n\n csv_writer = csv.writer(outfile)\n csv_writer.writerow(out_labels)\n\n for idx, row in enumerate(csv_reader):\n if idx % 1000 == 0:\n 
print('processed %d' % idx)\n# if idx > 100:\n# exit(0)\n row_dict = dict(zip(labels, row))\n\n current_date = parse(row_dict['Date'])\n cur_lat = float(row_dict['Latitude'])\n cur_lon = float(row_dict['Longitude'])\n\n for idx in range(1, 11):\n row_dict['n_spray_%d' % idx] = 0\n dlat, dlon = lat_lon_box(cur_lat, cur_lon, 1.5)\n for slat, slon in spray_lat_lon_list:\n# print(dlat, dlon, abs(slat-cur_lat), abs(slon-cur_lon))\n if abs(slat-cur_lat) > dlat or abs(slon-cur_lon) > dlon:\n continue\n sdist = haversine_distance(cur_lat, cur_lon, slat, slon)\n for idx in range(1,11):\n if sdist < idx/10.0:\n row_dict['n_spray_%d' % idx] += 1\n\n for lab in ['Tmax_cumu', 'Tmin_cumu', 'PrecipTotal_cumu']:\n row_dict[lab] = 0\n most_recent = 1000000\n most_recent_w = weather_features[0]\n for wfeat in weather_features:\n wdate = wfeat['Date']\n if current_date.year != wdate.year:\n continue\n wdur = abs((current_date - wdate).days)\n if wdur < most_recent:\n most_recent = wdur\n most_recent_w = wfeat\n for lab in weather_labels:\n if lab == 'Date':\n continue\n row_dict[lab] = most_recent_w[lab]\n row_val = [row_dict[col] for col in out_labels]\n csv_writer.writerow(row_val)\n# outfile.flush()\n# print('\\n'.join(['%s: %s' % (k, row_dict[k]) for k in row_dict]))\n# exit(0)\n return\n\nif __name__ == '__main__':\n feature_extraction()\n" ]
[ [ "pandas.read_csv", "numpy.abs", "numpy.cos", "numpy.isnan", "numpy.sin" ] ]
neulab/idiomata-bot
[ "f397e49fb9d1d59b9b74e0e528a72307637a18e9" ]
[ "lang_id.py" ]
[ "import numpy as np\nimport iso639\nfrom collections import defaultdict\n\nall_langs = ('cay', 'dan', 'deu', 'eng', 'fra', 'kwk', 'see', 'swe')\n\ncodelang = [('cay', 'Cayuga'), ('see', 'Seneca'), ('other', 'Other')]\ncode2lang_dict = {c:l for (c,l) in codelang}\nlang2code_dict = {l:c for (c,l) in codelang}\n\ndef code2lang(code):\n if code in code2lang_dict:\n return code2lang_dict[code]\n elif code in iso639.languages.terminology:\n return iso639.languages.terminology[code].inverted\n else:\n return None\n\ndef lang2code(lang):\n if lang in lang2code_dict:\n return lang2code_dict[lang]\n elif lang in iso639.languages.inverted:\n return iso639.languages.inverted[lang].terminology\n else:\n return None\n\nclass LanguageID(object):\n\n def __init__(self, langs=all_langs):\n \"\"\"\n Create a language identifier for the specified languages.\n\n Args:\n langs: The ISO-639 lexographic language codes for each language.\n Defaults to all_langs.\n \"\"\"\n self.langs = langs\n raise NotImplementedError('Need to implement in a subclass')\n\n def predict_word(word):\n \"\"\"\n Calculate the log probability of a word belonging to a particular language specified in `langs`. If `langs` is not specified, it will use `all_langs`.\n\n Args:\n word: A single word string\n\n Returns:\n A numpy array with the log probability of each language\n \"\"\"\n raise NotImplementedError('Need to implement in a subclass')\n\n def predict_words(self, words):\n \"\"\"\n Calculate the log probability of words in a sentence belonging to a particular language specified in `langs`. If `langs` is not specified, it will use `all_langs`.\n\n Args:\n words: A tokenized list of word strings\n langs: A list of three-letter language codes\n\n Returns:\n A numpy array with the log probability of each word (rows) for each language or other (columns)\n \"\"\"\n ret = np.zeros( (len(words), len(self.langs)+1) )\n for i, word in enumerate(words):\n ret[i] = self.predict_word(word)\n return ret\n\n def id_words(self, words, id_type='pos'):\n ret = list(np.argmax(self.predict_words(words), axis=1))\n if id_type == 'pos': return ret\n ret = ['other' if pos == len(self.langs) else self.langs[pos] for pos in ret]\n if id_type == 'code': return ret\n ret = [code2lang(code) for code in ret]\n return ret\n\n\nclass WordCountBasedLanguageID(LanguageID):\n\n def __init__(self, langs=all_langs, other_alpha=1.0e-9, lang_alpha=1.0e-10):\n self.langs = langs\n self.other_alpha = other_alpha\n self.lang_alpha = lang_alpha\n self.counts = [self.load_counts(lang) for lang in langs]\n\n def load_counts(self, lang):\n counts = {}\n with open(f'data/word_counts/{lang}.txt', 'r') as f:\n for line in f:\n word, count = line.strip().split()\n counts[word.lower()] = int(count)\n my_sum = float(sum(counts.values()))\n counts = {word: count/my_sum for (word, count) in counts.items()}\n return counts\n\n def predict_word(self, word):\n my_counts = np.zeros(len(self.langs)+1)\n my_counts[len(self.langs)] = self.other_alpha\n for i, counts in enumerate(self.counts):\n my_counts[i] = counts.get(word.lower(), self.lang_alpha)\n return np.log(my_counts/np.sum(my_counts))\n\nclass WordClassifierLanguageID(LanguageID):\n\n def __init__(self, langs=all_langs, alpha=0.5, ns=(3,4,5), other_bias=1):\n self.langs = langs\n self.alpha = alpha\n self.other_bias = other_bias\n self.ns = ns\n self.ngram_probs = defaultdict(lambda: np.zeros(len(langs)+1) + alpha)\n for i, lang in enumerate(langs):\n with open(f'data/word_counts/{lang}.txt', 'r') as f:\n for line in f:\n word, 
count = line.strip().split()\n for ngram in self.get_ngrams(word):\n self.ngram_probs[ngram][i] += 1\n for k, v in self.ngram_probs.items():\n self.ngram_probs[k] = np.log(v/np.sum(v))\n\n def predict_word(self, word):\n my_counts = np.zeros(len(self.langs)+1)\n my_counts[len(self.langs)] = self.other_bias\n for ngram in self.get_ngrams(word):\n if ngram in self.ngram_probs:\n my_counts += self.ngram_probs[ngram]\n my_counts -= np.max(my_counts)\n my_counts -= np.log(np.sum(np.exp(my_counts)))\n print(my_counts)\n return my_counts\n\n def get_ngrams(self, word):\n word = word.lower()\n for n in self.ns:\n for i in range(len(word)-n+1):\n yield word[i:i+n]\n\nif __name__ == \"__main__\":\n my_lid = WordClassifierLanguageID()\n words = 'Danke , Bonjour'.split()\n print(' '.join([str(x) for x in my_lid.id_words(words, id_type='name')]))\n" ]
[ [ "numpy.sum", "numpy.max", "numpy.exp" ] ]
drat/Neural-Voice-Cloning-With-Few-Samples
[ "4febde43ccc143fc88d74d5fa0c5a117636778b4" ]
[ "Modules/Attention.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\n\nfrom Modules.MultiHeadAttention import MultiHeadAttention\n\nclass Attention(nn.Module):\n def __init__(self, dim):\n super(Attention, self).__init__()\n\n self.encoders = self._build_model(dim)\n\n def _build_model(self, dim):\n layers = []\n dim = dim\n layers.append(MultiHeadAttention(dim, dim, dim))\n\n return nn.ModuleList(layers)\n\n def forward(self, inputs):\n net_inputs = inputs\n net_inputs.contiguous()\n for enc in self.encoders:\n net_inputs = enc(net_inputs, net_inputs)\n return net_inputs\n" ]
[ [ "torch.nn.ModuleList" ] ]
galvinw/fairmotdocker
[ "032d50a4025788b97ca36b0d97b7df15ddb5986c" ]
[ "monoloco/monoloco/visuals/plot_3d_box.py" ]
[ "\nimport numpy as np\n\n\ndef correct_boxes(boxes, hwls, xyzs, yaws, path_calib):\n\n with open(path_calib, \"r\") as ff:\n file = ff.readlines()\n p2_str = file[2].split()[1:]\n p2_list = [float(xx) for xx in p2_str]\n P = np.array(p2_list).reshape(3, 4)\n boxes_new = []\n for idx in range(boxes):\n hwl = hwls[idx]\n xyz = xyzs[idx]\n yaw = yaws[idx]\n corners_2d, _ = compute_box_3d(hwl, xyz, yaw, P)\n box_new = project_8p_to_4p(corners_2d).reshape(-1).tolist()\n boxes_new.append(box_new)\n return boxes_new\n\n\ndef compute_box_3d(hwl, xyz, ry, P):\n \"\"\" Takes an object and a projection matrix (P) and projects the 3d\n bounding box into the image plane.\n Returns:\n corners_2d: (8,2) array in left image coord.\n corners_3d: (8,3) array in in rect camera coord.\n \"\"\"\n # compute rotational matrix around yaw axis\n R = roty(ry)\n\n # 3d bounding box dimensions\n l = hwl[2]\n w = hwl[1]\n h = hwl[0]\n\n # 3d bounding box corners\n x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]\n y_corners = [0, 0, 0, 0, -h, -h, -h, -h]\n z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]\n\n # rotate and translate 3d bounding box\n corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))\n # print corners_3d.shape\n corners_3d[0, :] = corners_3d[0, :] + xyz[0]\n corners_3d[1, :] = corners_3d[1, :] + xyz[1]\n corners_3d[2, :] = corners_3d[2, :] + xyz[2]\n # print 'cornsers_3d: ', corners_3d\n # only draw 3d bounding box for objs in front of the camera\n if np.any(corners_3d[2, :] < 0.1):\n corners_2d = None\n return corners_2d, np.transpose(corners_3d)\n\n # project the 3d bounding box into the image plane\n corners_2d = project_to_image(np.transpose(corners_3d), P)\n # print 'corners_2d: ', corners_2d\n return corners_2d, np.transpose(corners_3d)\n\n\ndef roty(t):\n \"\"\" Rotation about the y-axis. \"\"\"\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])\n\n\ndef project_to_image(pts_3d, P):\n \"\"\" Project 3d points to image plane.\n Usage: pts_2d = projectToImage(pts_3d, P)\n input: pts_3d: nx3 matrix\n P: 3x4 projection matrix\n output: pts_2d: nx2 matrix\n P(3x4) dot pts_3d_extended(4xn) = projected_pts_2d(3xn)\n => normalize projected_pts_2d(2xn)\n <=> pts_3d_extended(nx4) dot P'(4x3) = projected_pts_2d(nx3)\n => normalize projected_pts_2d(nx2)\n \"\"\"\n n = pts_3d.shape[0]\n pts_3d_extend = np.hstack((pts_3d, np.ones((n, 1))))\n # print(('pts_3d_extend shape: ', pts_3d_extend.shape))\n pts_2d = np.dot(pts_3d_extend, np.transpose(P)) # nx3\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n return pts_2d[:, 0:2]\n\n\ndef project_8p_to_4p(pts_2d):\n x0 = np.min(pts_2d[:, 0])\n x1 = np.max(pts_2d[:, 0])\n y0 = np.min(pts_2d[:, 1])\n y1 = np.max(pts_2d[:, 1])\n x0 = max(0, x0)\n y0 = max(0, y0)\n return np.array([x0, y0, x1, y1])\n" ]
[ [ "numpy.vstack", "numpy.ones", "numpy.transpose", "numpy.any", "numpy.cos", "numpy.max", "numpy.min", "numpy.array", "numpy.sin" ] ]
Ditskih/Project
[ "87170245e55e615b0a14966d60afe41caece0434" ]
[ "processingfcmsvd.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 21 15:38:52 2019\r\n\r\n@author: Ditskih\r\n\"\"\"\r\nimport os\r\nimport json\r\nimport re\r\nimport csv\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n#from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.random_projection import GaussianRandomProjection as GRP\r\nimport numpy as np\r\nimport sys\r\nsys.path.insert(0, 'FCMeans')\r\nfrom fcmeans import fcmeans\r\nfrom sklearn.decomposition import TruncatedSVD\r\nfrom scipy.sparse import csr_matrix\r\nimport pandas as pd\r\n\r\ndef my_preprocessor(tweet):\r\n #Convert to lower case\r\n tweet = tweet.lower()\r\n #Convert www.* or https?://* to URL\r\n tweet = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','URL',tweet)\r\n #Convert @username to AT_USER\r\n tweet = re.sub('@[^\\s]+','AT_USER',tweet)\r\n #Remove additional white spaces\r\n tweet = re.sub('[\\s]+', ' ', tweet)\r\n #Replace #word with word\r\n tweet = re.sub(r'#([^\\s]+)', r'\\1', tweet)\r\n #trim\r\n tweet = tweet.strip('\\'\"')\r\n return tweet\r\n\r\ndef my_tokenizer(tweet):\r\n words = word_tokenize(tweet)\r\n tokens=[]\r\n for w in words:\r\n #replace two or more with two occurrences\r\n pattern = re.compile(r\"(.)\\1{1,}\", re.DOTALL)\r\n w = pattern.sub(r\"\\1\\1\", w)\r\n #strip punctuation\r\n w = w.strip('\\'\"?,.')\r\n #choose words with a pattern\r\n val = re.search(r\"^[a-zA-Z0-9][a-zA-Z0-9]*$\", w)\r\n #add tokens\r\n if(w in ['AT_USER','URL'] or val is None):\r\n continue\r\n else:\r\n tokens.append(w.lower())\r\n\r\n return tokens\r\n\r\n\r\nfor i in range (1):\r\n\r\n # -------\r\n # Loading\r\n # -------\r\n print (\"Loading dataset .... 
\")\r\n df = csv.reader(open(\"D:\\\\Private Property\\\\Data Kuliah\\\\Akademis\\\\Skripsweet\\\\program\\\\Program1\\\\Program\\\\nyoba\\\\dicobaduluajafix.csv\", encoding=\"utf8\"))\r\n data = []\r\n for column in df:\r\n data.append(column[0].strip() + ' ' + column[1].strip())\r\n\r\n # -----------\r\n # Vectorizing : Preprocessing, Tokenizing, Filtering, Weighting\r\n # -----------\r\n print (\"Vectorizing .....\")\r\n\r\n data_file = csv.reader(open('D:\\Private Property\\Data Kuliah\\Akademis\\Skripsweet\\program\\Program1\\Program\\\\nyoba\\\\stopwords_id.csv'))\r\n stopwords = []\r\n for column in data_file:\r\n stopwords.append(column[0])\r\n my_stop_words = stopwords + ['untuk','toko','nya','false','none''0', '01', '02', '0223', '03', '04', '05', '06', '07', '08', '09',\r\n '0pertandingan', '1', '10', '100', '1001', '101', '102', '1020', '103', '104', '105', '106', '108', '109',\r\n '10th', '11', '110', '112', '113', '115', '12', '120', '121', '122', '123', '125', '129', '13', '130', '131',\r\n '132', '135', '136', '137', '138', '139', '14', '140', '141', '142', '145', '148', '15', '150', '1500',\r\n '152', '153', '154', '155', '157', '16', '160', '161', '162', '165', '166', '168', '17', '170', '1700',\r\n '172', '1731', '175', '1763', '18', '180', '1800', '181', '184', '1848', '185', '187', '19', '190',\r\n '1906', '191', '1930', '1936', '1945', '1947', '1948', '1949', '1950', '1954', '1955', '1958', '196',\r\n '1961', '1962', '1964', '1965', '1967', '1968', '1972', '1973', '1974', '1984', '1985', '1987', '199',\r\n '1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1a', '1musim', '1st', '2', '20',\r\n '200', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '200cc', '201', '2010',\r\n '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020', '2021', '2022', '2025',\r\n '2041', '2045', '205', '2050', '207', '21', '210', '211', '215', '22', '221', '223', '225', '227', '229',\r\n '23', '230', '234', '235', '238', '239', '24', '240', '241', '25', '250', '250cc', '2560x1440', '258', '259',\r\n '26', '260', '263', '265', '267', '268', '27', '278', '28', '280', '282', '283', '284', '286', '29',\r\n '2pm', '3', '30', '300', '306', '308', '31', '310', '315', '32', '33', '330', '34', '345', '35', '350',\r\n '359', '36', '360', '369', '37', '370', '378', '38', '386', '387', '39', '399', '3c', '3d', '3s', '4',\r\n '40', '400', '407', '41', '410', '42', '43', '44', '45', '450', '46', '4640', '47', '4720', '48', '480',\r\n '49', '4g', '4minute', '4x2', '4x4', '5', '50', '500', '500c', '508', '50mp', '51', '52', '53', '54', '55',\r\n '550', '56', '560', '57', '58', '59', '595', '5c', '5g', '5s', '5th', '6', '60', '600', '61', '62', '623',\r\n '625', '63', '634', '64', '640', '65', '650', '656', '66', '67', '68', '69', '69053', '6a', '6x6', '7', '70',\r\n '700', '71', '72', '720', '73', '737', '74', '7442', '75', '750', '7569', '76', '77', '78', '79', '8', '80',\r\n '800', '80an', '81', '814', '816', '82', '83', '84', '85', '8500', '86', '865', '86th', '87', '88', '889',\r\n '89', '8gb', '9', '90', '900', '91', '911', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'a3', 'a320', 'a66s', 'aa']\r\n\r\n vectorizer = TfidfVectorizer(preprocessor=my_preprocessor,tokenizer=my_tokenizer,\r\n stop_words=my_stop_words,min_df=2,max_df=0.95)\r\n data = vectorizer.fit_transform(data)\r\n feature_names = vectorizer.get_feature_names()\r\n \r\n #print (feature_names)\r\n #break\r\n #print (data)\r\n\r\n # 
------------------------------------------\r\n # Model to Transform Data into a Lower Space\r\n # ------------------------------------------\r\n grps = GRP(n_components = 5)\r\n new_data = grps.fit_transform(data)\r\n\r\n # Learning\r\n # --------\r\n for n_topics in range(100,110,10):\r\n print (\"Learning ....\" + str(n_topics))\r\n \r\n #membership (u) calculation in the lower space\r\n m=1.5\r\n cntr, u= fcmeans(new_data.T, n_topics, m, error=0.005, maxiter=1000)\r\n\r\n #centroid (cntr) calculation in the original space\r\n temp = csr_matrix(np.ones((data.shape[1],1)).dot(np.atleast_2d(u.sum(axis=1))).T)\r\n u = csr_matrix(u)\r\n cntr = np.asarray(u.dot(data) / temp)\r\n \r\n ''' \r\n # Find centroids for initialization\r\n svd = TruncatedSVD(n_components = n_topics)\r\n svd.fit(new_data)\r\n cntr = svd.components_\r\n #cntr[cntr<0.001]=0.0\r\n \r\n # Find centroids by FCM\r\n cntr, u = fcmeans(new_data.T, n_topics, m=1.5, error=0.005, maxiter=1000, init=cntr.T)\r\n cntr = np.asarray(cntr)\r\n ''' \r\n # Prints topics\r\n n_top_words = 10\r\n hasil = open('D:\\\\Private Property\\\\Data Kuliah\\\\Akademis\\\\Skripsweet\\\\program\\\\Program1\\\\Program\\\\nyoba\\\\topikgrp' + str(n_topics) + \".txt\", 'w')\r\n for topic_idx, topic in enumerate(cntr):\r\n print(\"Topic \" + str(topic_idx) + \" : \" + \" \".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))\r\n hasil.write(\"\"+\" \".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) + \"\\n\")\r\n hasil.close()\r\n" ]
[ [ "scipy.sparse.csr_matrix", "numpy.ones", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.random_projection.GaussianRandomProjection" ] ]
hengma1001/molecules
[ "c6694cc77ef1eb246f3fdab1f201481d1bcaa07c" ]
[ "molecules/utils/callback.py" ]
[ "import os\nimport time\nimport torch\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass Callback:\n def __init__(self): pass\n def on_train_begin(self, logs): pass\n def on_train_end(self, logs): pass\n def on_epoch_begin(self, epoch, logs): pass\n def on_epoch_end(self, epoch, logs): pass\n def on_batch_begin(self, batch, epoch, logs): pass\n def on_batch_end(self, batch, epoch, logs): pass\n\n\n# TODO: need way to share SummaryWriter among multiple callbacks for a model\n# could make writer global variable\n\nclass LossCallback(Callback):\n def on_train_begin(self, logs):\n #from torch.utils.tensorboard import SummaryWriter\n #self.writer = SummaryWriter()\n\n self.train_losses = []\n self.valid_losses = []\n\n def on_epoch_end(self, epoch, logs):\n\n # self.writer.add_scalar('epoch training loss',\n # logs['train_loss'],\n # logs['global_step'])\n # self.writer.add_scalar('epoch validation loss',\n # logs['valid_loss'],\n # logs['global_step'])\n\n self.train_losses.append(logs['train_loss'])\n self.valid_losses.append(logs['valid_loss'])\n\n def save(self, path):\n \"\"\"\n Save train and validation loss from the end of each epoch.\n\n Parameters\n ----------\n path: str\n Path to save train and validation loss history\n \"\"\"\n torch.save({'loss': self.train_losses, 'valid': self.valid_losses}, path)\n\nclass CheckpointCallback(Callback):\n def __init__(self, interval=0,\n directory=os.path.join('.', 'checkpoints')):\n \"\"\"\n Checkpoint interface for saving dictionary objects to disk\n during training. Typically used to save model state_dict\n and optimizer state_dict in order to resume training and\n record model weight history.\n\n Parameters\n ----------\n directory : str\n Directory to store checkpoint files.\n Files are named 'epoch-{e}-%Y%m%d-%H%M%S.pt'\n\n interval : int\n Checkpoints model every interval batches, default is once per epoch.\n \"\"\"\n\n if interval < 0:\n raise ValueError('Checkpoint interval must be non-negative')\n\n os.makedirs(directory, exist_ok=True)\n\n self.interval = interval\n self.directory = directory\n\n def on_batch_end(self, batch, epoch, logs):\n if self.interval and batch % self.interval == 0:\n self._save(epoch, logs)\n\n def on_epoch_end(self, epoch, logs):\n if not self.interval:\n self._save(epoch, logs)\n\n def _save(self, epoch, logs):\n \"\"\"Saves optimizer state and encoder/decoder weights.\"\"\"\n\n checkpoint = {\n 'encoder_state_dict': logs['model'].encoder.state_dict(),\n 'decoder_state_dict': logs['model'].decoder.state_dict(),\n 'optimizer_state_dict': logs['optimizer'].state_dict(),\n 'epoch': epoch\n }\n\n time_stamp = time.strftime(f'epoch-{epoch}-%Y%m%d-%H%M%S.pt')\n path = os.path.join(self.directory, time_stamp)\n torch.save(checkpoint, path)\n\n\nclass EmbeddingCallback(Callback):\n \"\"\"\n Saves embeddings of random samples.\n\n Parameters\n ----------\n data : torch.Tensor\n Dataset from which to sample for embeddings.\n\n \"\"\"\n def __init__(self, data):\n self.data = data\n\n def on_train_begin(self, logs):\n self.embeddings = []\n self.data_index = []\n\n def on_epoch_end(self, epoch, logs):\n # TODO: may need to change the torch device\n idx = torch.randint(len(self.data), (1,))\n embedding = logs['model'].encode(self.data[idx].to(device))\n self.data_index.append(idx)\n self.embeddings.append(embedding)\n\n def save(self, path):\n \"\"\"\n Save embeddings and index of associated data point.\n\n Parameters\n ----------\n path: str\n Path to save embeddings and indices\n\n 
\"\"\"\n\n torch.save({'embeddings': self.embeddings, 'indices': self.data_index}, path)\n" ]
[ [ "torch.save", "torch.cuda.is_available" ] ]
bemrdo/CTF-2019
[ "424512f7c43278d72091aa737da78907c14f9fc1" ]
[ "watevrCTF-2019/challenges/web/NewPwd/train.py" ]
[ "import requests\nimport urllib.parse\nimport base64\nimport json\nimport io\nimport numpy as np\nfrom PIL import Image\nimport cv2.cv2 as cv\nfrom solve import *\n\ndef combine_and_show_alphabet():\n imgTop = np.empty((50, 0))\n imgBottom = np.empty((50, 0))\n for char in alphabet[:16]:\n imgTop = np.append(imgTop, np.min(trained_key[char], axis=0), axis=1)\n for char in alphabet[16:]:\n imgBottom = np.append(imgBottom, np.min(trained_key[char], axis=0), axis=1)\n img = np.rot90(np.append(np.rot90(imgTop), np.rot90(imgBottom), axis=1), 3)\n cv.imshow(\"alphabet\", img)\n\n\ncombine_and_show_alphabet()\n\nlastchar = 0\ncount = 0\ncheat_amount = 0\n\nwhile True:\n captcha = get_captcha()\n solution = list(captcha[2])\n captcha_no_overlay = remove_overlay(captcha)\n chars = []\n for i in range(5):\n chars.append(captcha_no_overlay[:, i * 40 : (i + 1) * 40])\n\n while len(chars) != 0:\n cv.imshow(\"character\", chars[0])\n if cheat_amount <= 0:\n key = cv.waitKey(0)\n else:\n key = ord(solution[0].lower())\n if key not in [ord(char) for char in alphabet.lower()] + [8, 13, 27, 225]:\n continue\n if key == 8: # backspace\n trained_key[lastchar].pop()\n combine_and_show_alphabet()\n elif key == 27: # escape\n for char in alphabet:\n cv.imwrite(\"training/%s.png\" % char, np.min(trained_key[char], axis=0))\n cv.destroyAllWindows()\n exit()\n elif key == 13: # enter\n for char in alphabet:\n cv.imwrite(\"training/%s.png\" % char, np.min(trained_key[char], axis=0))\n elif key == 225: # left shift\n key = ord(solution[0].lower())\n cheat_amount = 10\n if key not in [8, 13, 27, 225]:\n trained_key[chr(key).upper()].append(chars[0])\n chars.pop(0)\n solution.pop(0)\n lastchar = chr(key).upper()\n combine_and_show_alphabet()\n count += 1\n cheat_amount -= 1\n print(count)\n" ]
[ [ "numpy.rot90", "numpy.min", "numpy.empty" ] ]
ToddSmall/beanmachine
[ "85768bd1785bf6a8b3760a04f37a8fca69b4e4ca" ]
[ "src/beanmachine/ppl/inference/tests/inference_test.py" ]
[ "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nimport sys\n\nimport beanmachine.ppl as bm\nimport pytest\nimport torch\nimport torch.distributions as dist\nfrom beanmachine.ppl.inference.proposer.base_proposer import (\n BaseProposer,\n)\nfrom beanmachine.ppl.world import World, init_from_prior\n\n\nclass SampleModel:\n @bm.random_variable\n def foo(self):\n return dist.Normal(0.0, 1.0)\n\n @bm.random_variable\n def bar(self):\n return dist.Normal(self.foo(), 1.0)\n\n @bm.functional\n def baz(self):\n return self.bar() * 2.0\n\n\nclass SampleDoubleModel:\n @bm.random_variable\n def foo(self):\n return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())\n\n @bm.random_variable\n def bar(self):\n return dist.Normal(self.foo(), torch.tensor(1.0).double())\n\n\[email protected](\"multiprocess\", [False, True])\ndef test_inference(multiprocess):\n if multiprocess and sys.platform.startswith(\"win\"):\n pytest.skip(\n \"Windows does not support fork-based multiprocessing (which is necessary \"\n \"for running parallel inference within pytest.\"\n )\n\n model = SampleModel()\n mh = bm.SingleSiteAncestralMetropolisHastings()\n queries = [model.foo(), model.baz()]\n observations = {model.bar(): torch.tensor(0.5)}\n num_samples = 30\n num_chains = 2\n samples = mh.infer(\n queries,\n observations,\n num_samples,\n num_adaptive_samples=num_samples,\n num_chains=num_chains,\n run_in_parallel=multiprocess,\n mp_context=\"fork\",\n )\n\n assert model.foo() in samples\n assert isinstance(samples[model.foo()], torch.Tensor)\n assert samples[model.foo()].shape == (num_chains, num_samples)\n assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2\n # make sure that the RNG state for each chain is different\n assert not torch.equal(\n samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]\n )\n\n\ndef test_get_proposers():\n world = World()\n model = SampleModel()\n world.call(model.bar())\n nuts = bm.GlobalNoUTurnSampler()\n proposers = nuts.get_proposers(world, world.latent_nodes, 10)\n assert all(isinstance(proposer, BaseProposer) for proposer in proposers)\n\n\ndef test_initialize_world():\n model = SampleModel()\n nuts = bm.GlobalNoUTurnSampler()\n world = nuts._initialize_world([model.bar()], {})\n assert model.foo() in world\n assert model.bar() in world\n\n\ndef test_initialize_from_prior():\n mh = bm.SingleSiteAncestralMetropolisHastings()\n model = SampleModel()\n queries = [model.foo()]\n\n samples_from_prior = []\n for _ in range(10000):\n world = mh._initialize_world(queries, {}, init_from_prior)\n val = world.get(model.foo())\n samples_from_prior.append(val.item())\n\n assert samples_from_prior[0] != samples_from_prior[1]\n assert math.isclose(sum(samples_from_prior) / 10000.0, 0.0, abs_tol=1e-2)\n\n\ndef test_initialization_resampling():\n mh = bm.SingleSiteAncestralMetropolisHastings()\n\n @bm.random_variable\n def foo():\n return dist.Uniform(3.0, 5.0)\n\n # verify that the method re-sample as expected\n retries = 0\n\n def init_after_three_tries(d: dist.Distribution):\n nonlocal retries\n retries += 1\n return torch.tensor(float(\"nan\")) if retries < 3 else d.sample()\n\n sampler = mh.sampler(\n [foo()], {}, num_samples=10, initialize_fn=init_after_three_tries\n )\n for world in sampler:\n assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())\n\n # an extreme case 
where the init value is always out of the support\n def init_to_zero(d: dist.Distribution):\n return torch.zeros_like(d.sample())\n\n with pytest.raises(ValueError, match=\"Cannot find a valid initialization\"):\n mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)\n\n\[email protected](\n \"algorithm\",\n [\n bm.GlobalNoUTurnSampler(),\n bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),\n bm.SingleSiteAncestralMetropolisHastings(),\n bm.SingleSiteNewtonianMonteCarlo(),\n bm.SingleSiteUniformMetropolisHastings(),\n ],\n)\ndef test_inference_with_double_dtype(algorithm):\n model = SampleDoubleModel()\n queries = [model.foo()]\n bar_val = torch.tensor(0.5).double()\n # make sure that the inference can run successfully\n samples = algorithm.infer(\n queries,\n {model.bar(): bar_val},\n num_samples=20,\n num_chains=1,\n )\n assert samples[model.foo()].dtype == bar_val.dtype\n" ]
[ [ "torch.distributions.Uniform", "torch.tensor", "torch.distributions.Normal" ] ]
hzyjerry/InfoGAIL
[ "89bf3bee42242f4a8a41401d17296773294e6b6a" ]
[ "wgail_info_2/preprocess.py" ]
[ "from keras.applications.resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.applications.resnet50 import preprocess_input\nfrom keras.models import Model\nimport numpy as np\nimport time\nimport cv2\n\n\ndef collect_demo(path, num_patch, aux_dim, action_dim):\n\n for i in range(num_patch):\n path_patch = path + str(i) + \"/\"\n demo_name = path_patch + \"demo.txt\"\n demo_raw = open(demo_name, 'r').readlines()\n state_name = path_patch + \"states.txt\"\n state_raw = open(state_name, 'r').readlines()\n\n pa = np.zeros(6, dtype=np.float32)\n\n print(\"Loading patch %d ...\" % i)\n for j in range(0, len(demo_raw)):\n action_data = np.array(demo_raw[j].strip().split(\" \")).astype(np.float32)\n state_data = np.array(state_raw[j].strip().split(\" \")).astype(np.float32)\n\n aux = np.expand_dims([state_data[-3], state_data[-1]], axis=0).astype(np.float32)\n action = np.expand_dims(action_data[:], axis=0).astype(np.float32)\n \n img_path = path_patch + str(j) + \".jpg\"\n img = image.load_img(img_path)\n img = image.img_to_array(img)\n img = cv2.resize(img, (256, 256))\n #img = img[40:, :, :]\n\n '''\n if j < 130 and i == 1:\n img_cv2 = cv2.imread(img_path)\n img_cv2 = cv2.resize(img_cv2, (200, 150))\n img_cv2 = img_cv2[40:, :, :]\n cv2.imshow('image', cv2.cvtColor(img, cv2.COLOR_RGB2BGR)/255.0)\n cv2.waitKey(0)\n '''\n img = np.expand_dims(img, axis=0).astype(np.uint8)\n\n\n if j == 0:\n auxs_tmp = aux\n actions_tmp = action\n imgs_tmp = img\n else:\n auxs_tmp = np.concatenate((auxs_tmp, aux), axis=0)\n actions_tmp = np.concatenate((actions_tmp, action), axis=0)\n imgs_tmp = np.concatenate((imgs_tmp, img), axis=0)\n\n if i == 0:\n auxs = auxs_tmp\n actions = actions_tmp\n imgs = imgs_tmp\n else:\n auxs = np.concatenate((auxs, auxs_tmp), axis=0)\n actions = np.concatenate((actions, actions_tmp), axis=0)\n imgs = np.concatenate((imgs, imgs_tmp), axis=0)\n\n print(\"Current total:\", imgs.shape, auxs.shape, actions.shape)\n\n print(\"Images:\", imgs.shape, \"Auxs:\", auxs.shape, \"Actions:\", actions.shape)\n\n return imgs, auxs, actions\n\n\ndef normalize(x):\n x[:, 0:4] /= 200.\n return x\n\n\ndef main():\n aux_dim = 66\n action_dim = 3\n num_patch = 240\n #demo_path = \"/home/yunzhu/Desktop/human_low_case_1/demo_\"\n demo_path = \"/home/zhiyang/Desktop/intention/reacher/rl_demo/demo_\"\n\n imgs, auxs, actions = collect_demo(demo_path, num_patch, aux_dim, action_dim)\n auxs = normalize(auxs)\n\n #np.savez_compressed(\"/home/zhiyang/Desktop/intention/reacher/rl_demo/demo.npz\",\n #imgs=imgs, auxs=auxs, actions=actions)\n print(\"Finished.\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.concatenate", "numpy.expand_dims", "numpy.zeros" ] ]
anlavandier/dask-image
[ "a858c61ac5beb7de7d7644d7e85714b5c16c2a7a" ]
[ "tests/test_dask_image/test_ndfilters/test__conv.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\nimport numpy as np\nimport scipy.ndimage\n\nimport dask.array as da\n\nimport dask_image.ndfilters\n\n\[email protected](\n \"da_func\",\n [\n (dask_image.ndfilters.convolve),\n (dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n \"err_type, weights, origin\",\n [\n (ValueError, np.ones((1,)), 0),\n (ValueError, np.ones((1, 0)), 0),\n (RuntimeError, np.ones((1, 1)), (0,)),\n (RuntimeError, np.ones((1, 1)), [(0,)]),\n (ValueError, np.ones((1, 1)), 1),\n (TypeError, np.ones((1, 1)), 0.0),\n (TypeError, np.ones((1, 1)), (0.0, 0.0)),\n (TypeError, np.ones((1, 1)), 1+0j),\n (TypeError, np.ones((1, 1)), (0+0j, 1+0j)),\n ]\n)\ndef test_convolutions_params(da_func,\n err_type,\n weights,\n origin):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n with pytest.raises(err_type):\n da_func(d,\n weights,\n origin=origin)\n\n\[email protected](\n \"da_func\",\n [\n dask_image.ndfilters.convolve,\n dask_image.ndfilters.correlate,\n ]\n)\ndef test_convolutions_shape_type(da_func):\n weights = np.ones((1, 1))\n\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n assert all([(type(s) is int) for s in d.shape])\n\n d2 = da_func(d, weights)\n\n assert all([(type(s) is int) for s in d2.shape])\n\n\[email protected](\n \"da_func\",\n [\n dask_image.ndfilters.convolve,\n dask_image.ndfilters.correlate,\n ]\n)\ndef test_convolutions_comprehensions(da_func):\n np.random.seed(0)\n\n a = np.random.random((3, 12, 14))\n d = da.from_array(a, chunks=(3, 6, 7))\n\n weights = np.ones((1, 1))\n\n l2s = [da_func(d[i], weights) for i in range(len(d))]\n l2c = [da_func(d[i], weights)[None] for i in range(len(d))]\n\n da.utils.assert_eq(np.stack(l2s), da.stack(l2s))\n da.utils.assert_eq(np.concatenate(l2c), da.concatenate(l2c))\n\n\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),\n (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n \"weights\",\n [\n np.ones((1, 1)),\n ]\n)\ndef test_convolutions_identity(sp_func,\n da_func,\n weights):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n da.utils.assert_eq(\n d, da_func(d, weights)\n )\n\n da.utils.assert_eq(\n sp_func(a, weights),\n da_func(d, weights)\n )\n\n\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),\n (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n \"weights, origin\",\n [\n (np.ones((2, 2)), 0),\n (np.ones((2, 3)), 0),\n (np.ones((2, 3)), (0, 1)),\n (np.ones((2, 3)), (0, -1)),\n ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),\n ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),\n ((np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),\n (np.ones((5, 5)), 0),\n (np.ones((7, 7)), 0),\n (np.ones((8, 8)), 0),\n (np.ones((10, 10)), 0),\n (np.ones((5, 5)), 2),\n (np.ones((5, 5)), -2),\n ]\n)\ndef test_convolutions_compare(sp_func,\n da_func,\n weights,\n origin):\n a = np.arange(140.0).reshape(10, 14)\n d = da.from_array(a, chunks=(5, 7))\n\n da.utils.assert_eq(\n sp_func(\n a, weights, origin=origin\n ),\n da_func(\n d, weights, origin=origin\n )\n )\[email protected](\n \"sp_func, da_func\",\n [\n (scipy.ndimage.filters.convolve, dask_image.ndfilters.convolve),\n (scipy.ndimage.filters.correlate, dask_image.ndfilters.correlate),\n ]\n)\[email protected](\n 
\"weights\",\n [\n np.ones((1,5)),\n np.ones((5,1)),\n ]\n)\[email protected](\n \"mode\",\n [\"reflect\",\"wrap\",\"nearest\",\"constant\",\"mirror\"])\ndef test_convolutions_modes(sp_func,\n da_func,\n weights,\n mode):\n a = np.arange(140).reshape(10,14)\n d = da.from_array(a,chunks =(5, 7))\n \n da.utils.assert_eq(\n sp_func(\n a, weights, mode = mode\n ),\n da_func(\n d, weights, mode = mode\n )\n )" ]
[ [ "numpy.ones", "numpy.random.seed", "numpy.random.random", "numpy.arange", "numpy.stack", "numpy.concatenate" ] ]
ViniViniAntunes/Prevendo_Valor_de_Aluguel_em_SP
[ "e37d54da0b2c8ce3c6ddb4ec45191b069834427c" ]
[ "Previsao_valor_aluguel/app.py" ]
[ "# Importando as bibliotecas necessárias\nimport pandas as pd\nimport streamlit as st\nimport plotly.express as px\nfrom sklearn.ensemble import RandomForestRegressor\n\n# Criando uma função para carregar o dataset\n#@st.cache # Notação para ficar em cache\ndef get_data():\n return pd.read_csv(\"model/data_deploy.csv\")\n\n# Criando uma função para treinar o nosso modelo\ndef train_model():\n data = get_data()\n X = data.drop([\"valor\", \"bairro\"], axis=1)\n y = data[\"valor\"]\n rf_regressor = RandomForestRegressor(n_estimators=100)\n rf_regressor.fit(X, y)\n return rf_regressor\n\ndef get_villages_and_id():\n data = get_data()\n names_and_id = dict(zip(data['bairro'], data['bairro_id']))\n return names_and_id\n\ndef return_id_village(village):\n return get_villages_and_id()[village]\n\n# Armazenando o dataframe na variável 'data'\ndata = get_data().drop(\"bairro_id\", axis=1)\n\n# Treinando o modelo\nmodel = train_model()\n\n# Configurando o título do Data App\nst.title(\"Data App - Prevendo Valores de Imóveis\")\n\n# Configurando o subtítulo do data app\nst.markdown(\"Este é um Data App utilizado para exibir a solução de Machine Learning que prevê valores de aluguel de apartamentos na capital de São Paulo.\")\nst.markdown('Criado por: Vini Antunes')\nst.markdown('LinkedIn: https://www.linkedin.com/in/vini-antunes')\n\n# Verificando o dataset\nst.subheader(\"Selecionando apenas um pequeno conjunto de atributos\")\n\n# Selecionando atributos para serem exibidos por padrão\ndefault_cols = [\"quartos\",\"bairro\",\"valor\"]\n\n# Defindo atributos a partir do multiselect\ncols = st.multiselect(\"Atributos\", data.columns.tolist(), default=default_cols)\n\n# Exibindo os top 10 registros do DataFrame\nst.dataframe(data[cols].head(10))\n\n# Configurando outro subtítulo\nst.subheader(\"Distribuição de imóveis por preço do aluguel\")\n\n# Definindo a faixa de valores\nfaixa_valores = st.slider(\"Faixa de preço\", float(data['valor'].min()), float(data['valor'].max()), (1000.0, 2000.0))\n\n# Filtrando os dados\nfiltered_data = data[data['valor'].between(left=faixa_valores[0], right=faixa_valores[1])]\n\n# Plotando a distribuição dos dados\nf = px.histogram(filtered_data, x=\"valor\", nbins=20, title=\"Distribuição de Preços do Aluguel\")\nf.update_xaxes(title=\"valor\")\nf.update_yaxes(title=\"Total Imóveis\")\nst.plotly_chart(f)\n\n# Configurando subtítulo da lateral\nst.sidebar.subheader(\"Defina os atributos do imóvel para predição\")\n\n####### Mapeando dados #######\n# Armazena os nomes dos bairros e seus respectivos ids\nvillages = get_villages_and_id().keys()\n\n# Selecionando o bairro\nvillage = st.sidebar.selectbox(\"Em qual bairro?\", sorted(list(villages)))\n\n# Trocando o nome do bairro' pelo seus respectivo id\nid_village = return_id_village(village)\n\n# Selecionando a área do apartamento\narea = st.sidebar.number_input(\"Área (em m²)?\", min_value=float(data['area'].min()), max_value=float(data['area'].max()), step=1.0, format=\"%.0f\")\n\n# Selecionando a quantidade de quartos\nrooms = st.sidebar.number_input(\"Quantos quartos?\", min_value=float(data['quartos'].min()), max_value=float(data['quartos'].max()), step=1.0, format=\"%.0f\")\n\n# Selecionando a quantidade de suites\nsuites = st.sidebar.number_input(\"Quantas suítes?\", min_value=float(data['suites'].min()), max_value=float(data['suites'].max()), step=1.0, format=\"%.0f\")\n\n# Selecionando a quantidade de vagas de garagem\nparking_spaces = st.sidebar.number_input(\"Quantas vagas de garagem?\", 
min_value=float(data['vagas'].min()), max_value=float(data['vagas'].max()), step=1.0, format=\"%.0f\")\n\n# inserindo um botão na tela\nbtn_predict = st.sidebar.button(\"Realizar Predição\")\n\n# verifica se o botão foi acionado\nif btn_predict:\n result = model.predict([[area, rooms, suites, parking_spaces, id_village]])\n st.sidebar.subheader(\"O valor previsto para do aluguel é:\")\n st.sidebar.subheader(\"\")\n result = f\"R$ {str(round(result[0], 2))}\"\n st.sidebar.subheader(result)" ]
[ [ "pandas.read_csv", "sklearn.ensemble.RandomForestRegressor" ] ]
LucasLorenc/tensorflow
[ "10a7b61cdf55d13c85c2a3cc5ca669e3d9ea8e11" ]
[ "tensorflow/python/keras/layers/core.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Core Keras layers.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport sys\nimport types as python_types\nimport warnings\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import standard_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.Masking')\nclass Masking(Layer):\n \"\"\"Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n Example:\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you\n lack data for these timesteps. 
You can:\n\n - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n samples, timesteps, features = 32, 10, 8\n inputs = np.random.random([samples, timesteps, features]).astype(np.float32)\n inputs[:, 3, :] = 0.\n inputs[:, 5, :] = 0.\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Masking(mask_value=0.,\n input_shape=(timesteps, features)))\n model.add(tf.keras.layers.LSTM(32))\n\n output = model(inputs)\n # The time step 3 and 5 will be skipped from LSTM calculation.\n ```\n\n See [the masking and padding\n guide](https://www.tensorflow.org/guide/keras/masking_and_padding)\n for more details.\n \"\"\"\n\n def __init__(self, mask_value=0., **kwargs):\n super(Masking, self).__init__(**kwargs)\n self.supports_masking = True\n self.mask_value = mask_value\n self._compute_output_and_mask_jointly = True\n\n def compute_mask(self, inputs, mask=None):\n return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)\n\n def call(self, inputs):\n boolean_mask = K.any(\n math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)\n outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)\n # Compute the mask and outputs simultaneously.\n outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1) # pylint: disable=protected-access\n return outputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'mask_value': self.mask_value}\n base_config = super(Masking, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Dropout')\nclass Dropout(Layer):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting\n a fraction `rate` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n\n Arguments:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n \"\"\"\n\n def __init__(self, rate, noise_shape=None, seed=None, **kwargs):\n super(Dropout, self).__init__(**kwargs)\n self.rate = rate\n self.noise_shape = noise_shape\n self.seed = seed\n self.supports_masking = True\n\n def _get_noise_shape(self, inputs):\n # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,\n # which will override `self.noise_shape`, and allows for custom noise\n # shapes with dynamically sized inputs.\n if self.noise_shape is None:\n return None\n\n concrete_inputs_shape = array_ops.shape(inputs)\n noise_shape = []\n for i, value in enumerate(self.noise_shape):\n noise_shape.append(concrete_inputs_shape[i] if value is None else value)\n return ops.convert_to_tensor(noise_shape)\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n\n def dropped_inputs():\n return nn.dropout(\n inputs,\n noise_shape=self._get_noise_shape(inputs),\n seed=self.seed,\n rate=self.rate)\n\n output = tf_utils.smart_cond(training,\n dropped_inputs,\n lambda: array_ops.identity(inputs))\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'rate': self.rate,\n 'noise_shape': self.noise_shape,\n 'seed': self.seed\n }\n base_config = super(Dropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.SpatialDropout1D')\nclass SpatialDropout1D(Dropout):\n \"\"\"Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n\n Call arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape\n\n\n@keras_export('keras.layers.SpatialDropout2D')\nclass SpatialDropout2D(Dropout):\n \"\"\"Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 4D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout2D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('data_format must be in '\n '{\"channels_last\", \"channels_first\"}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=4)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n if self.data_format == 'channels_first':\n return (input_shape[0], input_shape[1], 1, 1)\n elif self.data_format == 'channels_last':\n return (input_shape[0], 1, 1, input_shape[3])\n\n\n@keras_export('keras.layers.SpatialDropout3D')\nclass SpatialDropout3D(Dropout):\n \"\"\"Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. 
In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n Arguments:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Call arguments:\n inputs: A 5D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n Output shape:\n Same as input.\n\n References:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout3D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('data_format must be in '\n '{\"channels_last\", \"channels_first\"}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=5)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n if self.data_format == 'channels_first':\n return (input_shape[0], input_shape[1], 1, 1, 1)\n elif self.data_format == 'channels_last':\n return (input_shape[0], 1, 1, 1, input_shape[4])\n\n\n@keras_export('keras.layers.Activation')\nclass Activation(Layer):\n \"\"\"Applies an activation function to an output.\n\n Arguments:\n activation: Activation function, such as `tf.nn.relu`, or string name of\n built-in activation function, such as \"relu\".\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n \"\"\"\n\n def __init__(self, activation, **kwargs):\n super(Activation, self).__init__(**kwargs)\n self.supports_masking = True\n self.activation = activations.get(activation)\n\n def call(self, inputs):\n return self.activation(inputs)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'activation': activations.serialize(self.activation)}\n base_config = super(Activation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Reshape')\nclass Reshape(Layer):\n \"\"\"Reshapes an output to a certain shape.\n\n Arguments:\n target_shape: Target shape. 
Tuple of integers,\n does not include the samples dimension (batch size).\n\n Input shape:\n Arbitrary, although all dimensions in the input shaped must be fixed.\n Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n `(batch_size,) + target_shape`\n\n Example:\n\n ```python\n # as first layer in a Sequential model\n model = Sequential()\n model.add(Reshape((3, 4), input_shape=(12,)))\n # now: model.output_shape == (None, 3, 4)\n # note: `None` is the batch dimension\n\n # as intermediate layer in a Sequential model\n model.add(Reshape((6, 2)))\n # now: model.output_shape == (None, 6, 2)\n\n # also supports shape inference using `-1` as dimension\n model.add(Reshape((-1, 2, 2)))\n # now: model.output_shape == (None, None, 2, 2)\n ```\n \"\"\"\n\n def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = tuple(target_shape)\n\n def _fix_unknown_dimension(self, input_shape, output_shape):\n \"\"\"Find and replace a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n Arguments:\n input_shape: Shape of array being reshaped\n output_shape: Desired shape of the array with at most\n a single -1 which indicates a dimension that should be\n derived from the input shape.\n\n Returns:\n The new output shape with a -1 replaced with its computed value.\n\n Raises:\n ValueError: If the total array size of the output_shape is\n different than the input_shape, or more than one unknown dimension\n is specified.\n \"\"\"\n output_shape = list(output_shape)\n msg = 'total size of new array must be unchanged'\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError('Can only specify one unknown dimension.')\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n return output_shape\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if None in input_shape[1:]:\n output_shape = [input_shape[0]]\n # input shape (partially) unknown? replace -1's with None's\n output_shape += tuple(s if s != -1 else None for s in self.target_shape)\n else:\n output_shape = [input_shape[0]]\n output_shape += self._fix_unknown_dimension(input_shape[1:],\n self.target_shape)\n return tensor_shape.TensorShape(output_shape)\n\n def call(self, inputs):\n return array_ops.reshape(inputs,\n (array_ops.shape(inputs)[0],) + self.target_shape)\n\n def get_config(self):\n config = {'target_shape': self.target_shape}\n base_config = super(Reshape, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Permute')\nclass Permute(Layer):\n \"\"\"Permutes the dimensions of the input according to a given pattern.\n\n Useful for e.g. connecting RNNs and convnets together.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n Arguments:\n dims: Tuple of integers. 
Permutation pattern, does not include the\n samples dimension. Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimensions\n of the input.\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n \"\"\"\n\n def __init__(self, dims, **kwargs):\n super(Permute, self).__init__(**kwargs)\n self.dims = tuple(dims)\n if sorted(dims) != list(range(1, len(dims) + 1)):\n raise ValueError(\n 'Invalid permutation `dims` for Permute Layer: %s. '\n 'The set of indices in `dims` must be consecutive and start from 1.' %\n (dims,))\n self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n output_shape = copy.copy(input_shape)\n for i, dim in enumerate(self.dims):\n target_dim = input_shape[dim]\n output_shape[i + 1] = target_dim\n return tensor_shape.TensorShape(output_shape)\n\n def call(self, inputs):\n return array_ops.transpose(inputs, perm=(0,) + self.dims)\n\n def get_config(self):\n config = {'dims': self.dims}\n base_config = super(Permute, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Flatten')\nclass Flatten(Layer):\n \"\"\"Flattens the input. Does not affect the batch size.\n\n If inputs are shaped `(batch,)` without a channel dimension, then flattening\n adds an extra channel dimension and output shapes are `(batch, 1)`.\n\n Arguments:\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Convolution2D(64, 3, 3,\n border_mode='same',\n input_shape=(3, 32, 32)))\n # now: model.output_shape == (None, 64, 32, 32)\n\n model.add(Flatten())\n # now: model.output_shape == (None, 65536)\n ```\n \"\"\"\n\n def __init__(self, data_format=None, **kwargs):\n super(Flatten, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.input_spec = InputSpec(min_ndim=1)\n\n def call(self, inputs):\n if (self.data_format == 'channels_first'\n and K.ndim(inputs) is not None and K.ndim(inputs) > 1):\n permutation = [0]\n permutation.extend([i for i in\n range(2, K.ndim(inputs))])\n permutation.append(1)\n inputs = array_ops.transpose(inputs, perm=permutation)\n\n input_shape = inputs.shape\n if input_shape[1:].is_fully_defined():\n flattened_dim = tensor_shape.dimension_value(\n np.prod(input_shape[1:], dtype=int))\n # Temporary fix for integer overflow issue.\n if flattened_dim > np.iinfo(np.int32).max:\n shape_dtype = dtypes.int64\n else:\n shape_dtype = dtypes.int32\n outputs = array_ops.reshape(\n inputs, constant_op.constant((-1, flattened_dim), dtype=shape_dtype))\n else:\n batch_size = tensor_shape.dimension_value(inputs.shape[0])\n if batch_size:\n # Temporary fix for integer overflow issue.\n if batch_size > np.iinfo(np.int32).max:\n shape_dtype = dtypes.int64\n 
else:\n shape_dtype = dtypes.int32\n outputs = array_ops.reshape(\n inputs, constant_op.constant((batch_size, -1), dtype=shape_dtype))\n else:\n outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))\n if not context.executing_eagerly():\n outputs.set_shape(self.compute_output_shape(inputs.shape))\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.as_shape(input_shape).as_list()\n if not input_shape:\n output_shape = tensor_shape.TensorShape([1])\n else:\n output_shape = [input_shape[0]]\n if all(input_shape[1:]):\n output_shape += [np.prod(input_shape[1:], dtype=int)]\n else:\n output_shape += [None]\n return tensor_shape.TensorShape(output_shape)\n\n def get_config(self):\n config = {'data_format': self.data_format}\n base_config = super(Flatten, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.RepeatVector')\nclass RepeatVector(Layer):\n \"\"\"Repeats the input n times.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n # now: model.output_shape == (None, 32)\n # note: `None` is the batch dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n Arguments:\n n: Integer, repetition factor.\n\n Input shape:\n 2D tensor of shape `(num_samples, features)`.\n\n Output shape:\n 3D tensor of shape `(num_samples, n, features)`.\n \"\"\"\n\n def __init__(self, n, **kwargs):\n super(RepeatVector, self).__init__(**kwargs)\n self.n = n\n self.input_spec = InputSpec(ndim=2)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])\n\n def call(self, inputs):\n return K.repeat(inputs, self.n)\n\n def get_config(self):\n config = {'n': self.n}\n base_config = super(RepeatVector, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Lambda')\nclass Lambda(Layer):\n \"\"\"Wraps arbitrary expressions as a `Layer` object.\n\n The `Lambda` layer exists so that arbitrary TensorFlow functions\n can be used when constructing `Sequential` and Functional API\n models. `Lambda` layers are best suited for simple operations or\n quick experimentation. For more advanced usecases, follow \n [this guide](https://www.tensorflow.org/alpha/guide/keras/custom_layers_and_models) \n for subclassing `tf.keras.layers.Layer`. \n \n The main reason to subclass `tf.keras.layers.Layer` instead of using a \n `Lambda` layer is saving and inspecting a Model. `Lambda` layers \n are saved by serializing the Python bytecode, whereas subclassed \n Layers can be saved via overriding their `get_config` method. Overriding \n `get_config` improves the portability of Models. Models that rely on \n subclassed Layers are also often easier to visualize and reason about.\n\n Examples:\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n model.add(Lambda(antirectifier))\n ```\n\n Variables can be created within a `Lambda` layer. 
Like with\n other layers, these variables will be created only once and reused\n if the `Lambda` layer is called on new inputs. If creating more\n than one variable in a given `Lambda` instance, be sure to use\n a different name for each variable. Note that calling sublayers\n from within a `Lambda` is not supported.\n\n Example of variable creation:\n\n ```python\n def linear_transform(x):\n v1 = tf.Variable(1., name='multiplier')\n v2 = tf.Variable(0., name='bias')\n return x*v1 + v2\n\n linear_layer = Lambda(linear_transform)\n model.add(linear_layer)\n model.add(keras.layers.Dense(10, activation='relu'))\n model.add(linear_layer) # Reuses existing Variables\n ```\n\n Note that creating two instances of `Lambda` using the same function\n will *not* share Variables between the two instances. Each instance of\n `Lambda` will create and manage its own weights.\n\n Arguments:\n function: The function to be evaluated. Takes input tensor as first\n argument.\n output_shape: Expected output shape from function. This argument can be\n inferred if not explicitly provided. Can be a tuple or function. If a\n tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input: `output_shape =\n (input_shape[0], ) + output_shape` or, the input is `None` and\n the sample dimension is also `None`: `output_shape = (None, ) +\n output_shape` If a function, it specifies the entire shape as a function\n of the\n input shape: `output_shape = f(input_shape)`\n mask: Either None (indicating no masking) or a callable with the same\n signature as the `compute_mask` layer method, or a tensor that will be\n returned as output mask regardless what the input is.\n arguments: Optional dictionary of keyword arguments to be passed to the\n function.\n Input shape: Arbitrary. Use the keyword argument input_shape (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n Output shape: Specified by `output_shape` argument\n \"\"\"\n\n def __init__(self, function, output_shape=None, mask=None, arguments=None,\n **kwargs):\n super(Lambda, self).__init__(**kwargs)\n self.function = function\n self.arguments = arguments if arguments else {}\n if mask is not None:\n self.supports_masking = True\n self.mask = mask\n self._supports_ragged_inputs = True\n self._output_shape = output_shape\n self._variable_dict = {}\n # These attributes are inherited from `Layer`.\n self._trainable_weights = []\n self._non_trainable_weights = []\n\n function_args = tf_inspect.getfullargspec(self.function).args\n self._fn_expects_training_arg = 'training' in function_args\n self._fn_expects_mask_arg = 'mask' in function_args\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self._output_shape is None:\n # Make use of existing autocomputation but provide Lambda-specific\n # error message. This is always safe to run even when the outer context\n # is Graph mode because Lambda layers don't have side effects such as\n # `add_loss`.\n with context.eager_mode():\n try:\n return super(Lambda, self).compute_output_shape(input_shape)\n except NotImplementedError:\n raise NotImplementedError(\n 'We could not automatically infer the shape of the Lambda\\'s '\n 'output. 
Please specify `output_shape` for this Lambda.')\n\n if callable(self._output_shape):\n output_shapes = self._output_shape(input_shape)\n return tf_utils.convert_shapes(output_shapes, to_tuples=False)\n\n # Output shapes are passed directly and don't include batch dimension.\n input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None\n\n def _add_batch(shape):\n return tensor_shape.TensorShape([batch_size] + shape.as_list())\n\n output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)\n return nest.map_structure(_add_batch, output_shapes)\n\n def call(self, inputs, mask=None, training=None):\n arguments = self.arguments\n if self._fn_expects_mask_arg:\n arguments['mask'] = mask\n if self._fn_expects_training_arg:\n arguments['training'] = training\n with variable_scope.variable_creator_scope(self._variable_creator):\n return self.function(inputs, **arguments)\n\n def _variable_creator(self, next_creator, **kwargs):\n name = kwargs['name']\n if name in self._variable_dict:\n return self._variable_dict[name]\n var = next_creator(**kwargs)\n self._variable_dict[name] = var\n if var.trainable:\n self._trainable_weights.append(var)\n else:\n self._non_trainable_weights.append(var)\n K.track_variable(var)\n return var\n\n def compute_mask(self, inputs, mask=None):\n if callable(self.mask):\n return self.mask(inputs, mask)\n return self.mask\n\n def get_config(self):\n function_config = self._serialize_function_to_config(self.function)\n output_shape_config = self._serialize_function_to_config(self._output_shape,\n allow_raw=True)\n config = {\n 'function': function_config[0],\n 'function_type': function_config[1],\n 'module': function_config[2],\n 'output_shape': output_shape_config[0],\n 'output_shape_type': output_shape_config[1],\n 'output_shape_module': output_shape_config[2],\n }\n if self.mask is not None:\n mask_config = self._serialize_function_to_config(self.mask)\n config.update({\n 'mask': mask_config[0],\n 'mask_type': mask_config[1],\n 'mask_module': mask_config[2]\n })\n config['arguments'] = self.arguments\n\n base_config = super(Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def _serialize_function_to_config(self, inputs, allow_raw=False):\n if isinstance(inputs, python_types.LambdaType):\n output = generic_utils.func_dump(inputs)\n output_type = 'lambda'\n module = inputs.__module__\n elif callable(inputs):\n output = inputs.__name__\n output_type = 'function'\n module = inputs.__module__\n elif allow_raw:\n output = inputs\n output_type = 'raw'\n module = None\n else:\n raise ValueError(\n 'Invalid input for serialization, type: %s ' % type(inputs))\n\n return output, output_type, module\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = config.copy()\n function = cls._parse_function_from_config(\n config, custom_objects, 'function', 'module', 'function_type')\n\n output_shape = cls._parse_function_from_config(\n config, custom_objects, 'output_shape', 'output_shape_module',\n 'output_shape_type')\n if 'mask' in config:\n mask = cls._parse_function_from_config(\n config, custom_objects, 'mask', 'mask_module', 'mask_type')\n else:\n mask = None\n\n config['function'] = function\n config['output_shape'] = output_shape\n config['mask'] = mask\n\n # If arguments were numpy array, they have been saved as\n # list. 
We need to recover the ndarray\n if 'arguments' in config:\n for key in config['arguments']:\n if isinstance(config['arguments'][key], dict):\n arg_dict = config['arguments'][key]\n if 'type' in arg_dict and arg_dict['type'] == 'ndarray':\n # Overwrite the argument with its numpy translation\n config['arguments'][key] = np.array(arg_dict['value'])\n\n return cls(**config)\n\n @classmethod\n def _parse_function_from_config(\n cls, config, custom_objects, func_attr_name, module_attr_name,\n func_type_attr_name):\n globs = globals()\n module = config.pop(module_attr_name, None)\n if module in sys.modules:\n globs.update(sys.modules[module].__dict__)\n elif module is not None:\n # Note: we don't know the name of the function if it's a lambda.\n warnings.warn('{} is not loaded, but a Lambda layer uses it. '\n 'It may cause errors.'.format(module)\n , UserWarning)\n if custom_objects:\n globs.update(custom_objects)\n function_type = config.pop(func_type_attr_name)\n if function_type == 'function':\n # Simple lookup in custom objects\n function = generic_utils.deserialize_keras_object(\n config[func_attr_name],\n custom_objects=custom_objects,\n printable_module_name='function in Lambda layer')\n elif function_type == 'lambda':\n # Unsafe deserialization from bytecode\n function = generic_utils.func_load(\n config[func_attr_name], globs=globs)\n elif function_type == 'raw':\n function = config[func_attr_name]\n else:\n raise TypeError('Unknown function type:', function_type)\n return function\n\n\n@keras_export('keras.layers.Dense')\nclass Dense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: If the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n Example:\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n N-D tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n Output shape:\n N-D tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n def __init__(self,\n units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n\n super(Dense, self).__init__(\n activity_regularizer=regularizers.get(activity_regularizer), **kwargs)\n\n self.units = int(units) if not isinstance(units, int) else units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.supports_masking = True\n self.input_spec = InputSpec(min_ndim=2)\n\n def build(self, input_shape):\n dtype = dtypes.as_dtype(self.dtype or K.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError('Unable to build `Dense` layer with non-floating point '\n 'dtype %s' % (dtype,))\n input_shape = tensor_shape.TensorShape(input_shape)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError('The last dimension of the inputs to `Dense` '\n 'should be defined. 
Found `None`.')\n last_dim = tensor_shape.dimension_value(input_shape[-1])\n self.input_spec = InputSpec(min_ndim=2,\n axes={-1: last_dim})\n self.kernel = self.add_weight(\n 'kernel',\n shape=[last_dim, self.units],\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n 'bias',\n shape=[self.units,],\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs):\n rank = len(inputs.shape)\n if rank > 2:\n # Broadcasting is required for the inputs.\n outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])\n # Reshape the output back to the original ndim of the input.\n if not context.executing_eagerly():\n shape = inputs.shape.as_list()\n output_shape = shape[:-1] + [self.units]\n outputs.set_shape(output_shape)\n else:\n inputs = math_ops.cast(inputs, self._compute_dtype)\n if K.is_sparse(inputs):\n outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel)\n else:\n outputs = gen_math_ops.mat_mul(inputs, self.kernel)\n if self.use_bias:\n outputs = nn.bias_add(outputs, self.bias)\n if self.activation is not None:\n return self.activation(outputs) # pylint: disable=not-callable\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n input_shape = input_shape.with_rank_at_least(2)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError(\n 'The innermost dimension of input_shape must be defined, but saw: %s'\n % input_shape)\n return input_shape[:-1].concatenate(self.units)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(Dense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.ActivityRegularization')\nclass ActivityRegularization(Layer):\n \"\"\"Layer that applies an update to the cost function based input activity.\n\n Arguments:\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n \"\"\"\n\n def __init__(self, l1=0., l2=0., **kwargs):\n super(ActivityRegularization, self).__init__(\n activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'l1': self.l1, 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.DropConnectDense')\nclass DropConnectDense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: If the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n Example:\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n kernel_dropout: Float between 0 and 1.\n Fraction of the weight units to drop.\n unit_dropout: Float between 0 and 1.\n Fraction of the inputs to drop.\n use_mc_dropout: Bool when True layer always acts like in \"train mode\"\n so dropout can be applied also in inference mode\n\n Input shape:\n N-D tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n Output shape:\n N-D tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n def __init__(self,\n units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n kernel_dropout=0.,\n unit_dropout=0.,\n use_mc_dropout=False,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n\n super(DropConnectDense, self).__init__(\n activity_regularizer=regularizers.get(activity_regularizer), **kwargs)\n\n self.units = int(units) if not isinstance(units, int) else units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.kernel_dropout = min(1., max(0., kernel_dropout))\n self.unit_dropout = min(1., max(0., unit_dropout))\n self.use_mc_dropout = use_mc_dropout\n\n self.supports_masking = True\n self.input_spec = InputSpec(min_ndim=2)\n\n def build(self, input_shape):\n dtype = dtypes.as_dtype(self.dtype or K.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError('Unable to build `Dense` layer with non-floating point '\n 'dtype %s' % (dtype,))\n input_shape = tensor_shape.TensorShape(input_shape)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError('The last dimension of the inputs to `Dense` '\n 'should be defined. 
Found `None`.')\n last_dim = tensor_shape.dimension_value(input_shape[-1])\n self.input_spec = InputSpec(min_ndim=2,\n axes={-1: last_dim})\n self.kernel = self.add_weight(\n 'kernel',\n shape=[last_dim, self.units],\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n 'bias',\n shape=[self.units,],\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, training=None):\n if training is None:\n training = K.learning_phase()\n if self.use_mc_dropout:\n training = True\n\n #units dropout\n def drop_inputs():\n return K.dropout(inputs, self.unit_dropout)\n if 0. < self.unit_dropout < 1.:\n inputs = K.in_train_phase(drop_inputs, inputs, training=training)\n\n #kernel dropout\n ones = array_ops.ones_like(self.kernel)\n def dropped_weight_connections():\n return K.dropout(ones, self.kernel_dropout) * (1 - self.kernel_dropout)\n if 0. < self.kernel_dropout < 1.:\n kern_dp_mask = K.in_train_phase(dropped_weight_connections, ones, training=training)\n else:\n kern_dp_mask = ones\n\n rank = len(inputs.shape)\n if rank > 2:\n # Broadcasting is required for the inputs.\n outputs = standard_ops.tensordot(inputs, self.kernel * kern_dp_mask, [[rank - 1], [0]])\n # Reshape the output back to the original ndim of the input.\n if not context.executing_eagerly():\n shape = inputs.shape.as_list()\n output_shape = shape[:-1] + [self.units]\n outputs.set_shape(output_shape)\n else:\n inputs = math_ops.cast(inputs, self._compute_dtype)\n if K.is_sparse(inputs):\n outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel * kern_dp_mask)\n else:\n outputs = gen_math_ops.mat_mul(inputs, self.kernel * kern_dp_mask)\n if self.use_bias:\n outputs = nn.bias_add(outputs, self.bias)\n if self.activation is not None:\n return self.activation(outputs) # pylint: disable=not-callable\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n input_shape = input_shape.with_rank_at_least(2)\n if tensor_shape.dimension_value(input_shape[-1]) is None:\n raise ValueError(\n 'The innermost dimension of input_shape must be defined, but saw: %s'\n % input_shape)\n return input_shape[:-1].concatenate(self.units)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'kernel_dropout': self.kernel_dropout,\n 'unit_dropout': self.unit_dropout,\n 'use_mc_dropout': self.use_mc_dropout\n }\n base_config = super(DropConnectDense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))" ]
[ [ "tensorflow.python.keras.backend.track_variable", "tensorflow.python.util.nest.flatten", "tensorflow.python.keras.constraints.get", "tensorflow.python.keras.backend.image_data_format", "tensorflow.python.keras.regularizers.L1L2", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.keras.constraints.serialize", "tensorflow.python.keras.backend.learning_phase", "tensorflow.python.keras.initializers.serialize", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.keras.backend.ndim", "tensorflow.python.keras.activations.get", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.keras.regularizers.serialize", "tensorflow.python.ops.nn.bias_add", "tensorflow.python.keras.utils.tf_utils.convert_shapes", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.keras.utils.generic_utils.func_load", "tensorflow.python.keras.backend.in_train_phase", "tensorflow.python.ops.gen_math_ops.mat_mul", "tensorflow.python.keras.regularizers.get", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.sparse_ops.sparse_tensor_dense_matmul", "tensorflow.python.keras.utils.conv_utils.normalize_data_format", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.standard_ops.tensordot", "tensorflow.python.keras.utils.generic_utils.func_dump", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.activations.serialize", "numpy.prod", "tensorflow.python.keras.backend.floatx", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.keras.utils.generic_utils.deserialize_keras_object", "tensorflow.python.keras.backend.dropout", "tensorflow.python.ops.math_ops.not_equal", "tensorflow.python.keras.backend.repeat", "tensorflow.python.ops.variable_scope.variable_creator_scope", "tensorflow.python.ops.array_ops.ones_like", "numpy.iinfo", "tensorflow.python.util.tf_inspect.getfullargspec", "tensorflow.python.keras.engine.input_spec.InputSpec", "numpy.array", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.util.nest.map_structure", "tensorflow.python.keras.backend.is_sparse", "tensorflow.python.keras.initializers.get", "tensorflow.python.ops.array_ops.transpose" ] ]
QuantumQuadrate/Rearrangement
[ "5f8d64bd18a471a488747ed8d17b00304b4ab293" ]
[ "PythonRearrangement/setup.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 22 13:22:01 2018\n\n@author: Cody\n\"\"\"\n\nfrom setuptools import setup\nfrom setuptools import Extension\nfrom Cython.Distutils import build_ext\nimport numpy as np\n\n\nsetup(\n cmdclass = {'build_ext': build_ext},\n ext_modules = [Extension(\"Rearranger\", sources= [\"pyRearranger.pyx\",\"../CPPrearrangement/Rearrangement.cpp\"],language='c++',include_dirs=[np.get_include()])])\n" ]
[ [ "numpy.get_include" ] ]
Lechatelia/Welding_Joints
[ "7cb5b8ac4c961c4080e1590934c24130bfde3a26" ]
[ "ceshi.py" ]
[ "import cv2\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport random\r\n\r\ny = tf.constant([1,2,3,4,5,6], name='y',dtype=tf.float32)\r\ny_ = tf.constant([0,1,2,3,4,5], name='Y_',dtype=tf.float32)\r\ny = tf.reshape(y,[2,3])\r\ny_ = tf.reshape(y_,[2,3])\r\nz= tf.constant([1,2], name='z',dtype=tf.float32)\r\nz=tf.reshape(z,[2,-1])\r\n\r\n\r\nresult=[]\r\nresult.append(tf.subtract(y,y_))\r\nresult.append(tf.multiply(y,y_))\r\nresult.append(tf.multiply(y,z))\r\nwith tf.Session() as sess:\r\n result=sess.run(result)\r\n for i in result:\r\n\r\n print(i)\r\n # result=sess.run(multiply)\r\n # print(result)\r\n# y = tf.constant(0.5, shape=[7],name='y',dtype=tf.float32)\r\n# y_ = tf.constant([0.6, 0.3,0.4,0.6,0.6,0.5,0.8], name='Y_',dtype=tf.float32)\r\n# y_ = tf.constant([[9, 8], [7, 6], [10, 11]], name='x')\r\n# b = tf.constant(1, name='b')\r\n\r\n# a = tf.Variable(tf.zeros([3,3]))\r\n# result=tf.zeros(y.get_shape().as_list()[0])\r\n\r\n# result = tf.where(tf.greater(tf.abs((y-y_),\"abs\"),tf.constant(0.15,shape=y.get_shape(),dtype=tf.float32)),tf.constant(0,shape=y.get_shape(),dtype=tf.float32),tf.constant(1,shape=y.get_shape(),dtype=tf.float32))\r\n# y=23\r\n# y_=24\r\n# # result = tf.where(tf.greater(y,y_),tf.abs(y-y_)*10,tf.abs(y-y_))\r\n# result = tf.where(tf.greater(y,y_),y,y_)\r\n# z = tf.where(tf.greater(y,y_),y_,y)\r\n# z1=tf.to_int32(z)\r\n# z2=tf.to_int32(result)\r\n# #\r\n#\r\n# # result_mean=tf.reduce_mean(result)\r\n# # Create a session to compute\r\n# with tf.Session() as sess:\r\n# result=sess.run(result)\r\n# z=sess.run(z)\r\n# print(result)\r\n# # print(sess.run(result_mean))\r\n# print(z)\r\n\r\n# img = cv2.imread(\"test.jpg\")\r\n#\r\n# # img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n# img = np.float32(img)/255\r\n# cv2.imshow(\"Image\",img)\r\n# mask = np.zeros((50,50,1),dtype=np.float32)\r\n# for i in range(20):\r\n# for j in range(20):\r\n# mask[i][j]=-0.5\r\n# mask[i+30][j+30]=0.5\r\n#\r\n# mask = cv2.resize(mask,(658,832))\r\n#\r\n# mask=cv2.cvtColor(mask,cv2.COLOR_GRAY2RGB)\r\n# cv2.imshow(\"a\",mask)\r\n# cv2.addWeighted(img,0.5,mask,0.5,0,mask)\r\n# cv2.imshow('hunh',mask)\r\n# cv2.waitKey(0)\r\n\r\n\r\n# cv2.destroyAllWindows()\r\n# for i in range(10):\r\n# print(random.randint(0, 1))\r\n#\r\n# a=[[[i*j*k for i in range(0,3)]for j in range(0,3)] for k in range(0,3)]\r\n# # b=[[j*i for i in range(0,3)]for j in range(0,3)]\r\n# print(a)\r\n# # print(b)\r\n# a=np.array(a)\r\n# # b=np.array(b)\r\n# print((list(a.shape)))\r\n# # print(a+b);\r\n# for n in a:\r\n# print(n)\r\n# np.random.shuffle(a)\r\n#\r\n# print(len(a))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n# print(random.randint(0, 2))\r\n\r\n\r\n# c=[i for i in range(7)]\r\n# print(c[-2:])\r\n\r\nr1 = np.array([1.2, 2, 3, 4],dtype=np.float32)\r\nr2 = np.array([1.1, 1.8, 3.3, 4.4],dtype=np.float32)\r\ncha = r1 - r2\r\nprint(cha)\r\nerror = np.mean(np.abs(cha), axis=0)\r\nprint(error)" ]
[ [ "tensorflow.reshape", "tensorflow.subtract", "tensorflow.multiply", "numpy.abs", "tensorflow.Session", "numpy.array", "tensorflow.constant" ] ]
WeiyuCheng/FIA-KDD-19
[ "18f29f8babbf1c505973a8a62ac48c6ca34ccd8a" ]
[ "src/scripts/RQ1.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport numpy as np\nimport argparse\nimport os\nfrom scipy.stats import pearsonr\nimport sys\n\nsys.path.append(\"..\")\nfrom scripts.load_movielens import load_movielens\nfrom scripts.load_yelp import load_yelp\nimport influence.experiments as experiments\nfrom influence.matrix_factorization import MF\nfrom influence.NCF import NCF\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--avextol', type=float, default=1e-3,\n help='threshold for optimization in influence function')\n parser.add_argument('--damping', type=float, default=1e-6,\n help='damping term in influence function')\n parser.add_argument('--weight_decay', type=float, default=1e-3,\n help='l2 regularization term for training MF or NCF model')\n parser.add_argument('--lr', type=float, default=1e-3,\n help='initial learning rate for training MF or NCF model')\n parser.add_argument('--embed_size', type=int, default=16,\n help='embedding size')\n parser.add_argument('--maxinf', type=int, default=1,\n help='remove type of train indices')\n parser.add_argument('--dataset', type=str, default='movielens',\n help='name of dataset: movielens or yelp')\n parser.add_argument('--model', type=str, default='NCF',\n help='model type: MF or NCF')\n parser.add_argument('--num_test', type=int, default=5,\n help='number of test points of retraining')\n parser.add_argument('--num_steps_train', type=int, default=180000,\n help='training steps')\n parser.add_argument('--num_steps_retrain', type=int, default=27000,\n help='retraining steps')\n parser.add_argument('--reset_adam', type=int, default=0)\n parser.add_argument('--load_checkpoint', type=int, default=1)\n parser.add_argument('--retrain_times', type=int, default=4)\n parser.add_argument('--sort_test_case', type=int, default=0)\n return parser.parse_args()\n\n\nargs = parse_args()\nif args.dataset == 'movielens':\n data_sets = load_movielens('../../data')\n batch_size = 3020\nelif args.dataset == 'yelp':\n data_sets = load_yelp('../../data')\n batch_size = 3009\nelse:\n raise NotImplementedError\nweight_decay = args.weight_decay\ninitial_learning_rate = args.lr\nnum_users = int(np.max(data_sets.train._x[:, 0])+1)\nnum_items = int(np.max(data_sets.train._x[:, 1])+1)\nprint(\"number of users: %d\" % num_users)\nprint(\"number of items: %d\" % num_items)\nprint(\"number of training examples: %d\" % data_sets.train._x.shape[0])\nprint(\"number of testing examples: %d\" % data_sets.test._x.shape[0])\navextol = args.avextol\ndamping = args.damping\nprint(\"Using avextol of %.0e\" % avextol)\nprint(\"Using damping of %.0e\" % damping)\nprint(\"Using embedding size of %d\" % args.embed_size)\nif args.model == 'MF':\n Model = MF\nelif args.model == 'NCF':\n Model = NCF\nelse:\n raise NotImplementedError\n\nmodel = Model(\n num_users=num_users,\n num_items=num_items,\n embedding_size=args.embed_size,\n weight_decay=weight_decay,\n num_classes=1,\n batch_size=batch_size,\n data_sets=data_sets,\n initial_learning_rate=initial_learning_rate,\n damping=damping,\n decay_epochs=[10000, 20000],\n mini_batch=True,\n train_dir='output',\n log_dir='log',\n avextol=avextol,\n model_name='%s_%s_explicit_damping%.0e_avextol%.0e_embed%d_maxinf%d_wd%.0e' % (\n args.dataset, args.model, damping, avextol, args.embed_size, args.maxinf, weight_decay))\nprint(f'Model name is: {model.model_name}')\n\nnum_steps = 
args.num_steps_train\niter_to_load = num_steps - 1\nif os.path.isfile(\"%s-%s.index\" % (model.checkpoint_file, iter_to_load)):\n print('Checkpoint found, loading...')\n model.load_checkpoint(iter_to_load=iter_to_load)\nelse:\n print('Checkpoint not found, start training...')\n model.train(\n num_steps=num_steps)\n model.saver.save(model.sess, model.checkpoint_file, global_step=num_steps - 1)\n\nif args.maxinf:\n remove_type = 'maxinf'\nelse:\n remove_type = 'random'\n\ntest_size = data_sets.test.num_examples\nnum_test = args.num_test\ntest_indices = np.random.choice(test_size, num_test, replace=False)\nif args.sort_test_case:\n num_related_ratings = []\n for i in range(test_size):\n num_related_ratings += [model.get_train_indices_of_test_case([i]).shape[0]]\n test_indices = np.argsort(np.array(num_related_ratings))[:num_test]\n\nactual_y_diff = np.zeros(num_test)\npredicted_y_diff = np.zeros(num_test)\nremoved_indices = np.zeros(num_test)\n\nfor i, test_idx in enumerate(test_indices):\n print(f'test point====={i}=====')\n actual_y_diffs, predicted_y_diffs, indices_to_remove = experiments.test_retraining(\n model,\n test_idx=test_idx,\n iter_to_load=iter_to_load,\n retrain_times=args.retrain_times,\n num_to_remove=1,\n num_steps=args.num_steps_retrain,\n remove_type=remove_type,\n force_refresh=True,\n reset_adam=args.reset_adam,\n load_checkpoint=args.load_checkpoint)\n actual_y_diff[i] = actual_y_diffs[0]\n predicted_y_diff[i] = predicted_y_diffs[0]\n removed_indices[i] = indices_to_remove[0]\n\nnp.savez(\n 'output/RQ1-%s-%s.npz' % (args.model, args.dataset),\n actual_loss_diffs=actual_y_diff,\n predicted_loss_diffs=predicted_y_diff,\n indices_to_remove=removed_indices\n)\nprint('Correlation is %s' % pearsonr(actual_y_diff, predicted_y_diff)[0])\n" ]
[ [ "scipy.stats.pearsonr", "numpy.zeros", "numpy.savez", "numpy.random.choice", "numpy.max", "numpy.array" ] ]
eyyupdirek/Rawpythn
[ "e66f0adcb8be514f349796d8ecad0a398412409d" ]
[ "rawpython.py" ]
[ "import pandas as pd\nimport numpy as np\nts = pd.Series(np.random.randn(1000),index=pd.date_range('1/1/2000', periods=1000))\n\nts\n" ]
[ [ "numpy.random.randn", "pandas.date_range" ] ]