repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
robertjankowski/reproducing-dl-papers | [
"01ad85eac333b87358b3d2e2276292333cacf0e0"
] | [
"homophily_structural_balance/plotting/plot_positive_edge_density.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\n\ndef extract_name(word: str):\n return word.split('=')[-1]\n\n\ndef extract_info(filename: str):\n filename_splitted = filename.split('_')\n assert len(filename_splitted) == 7\n p = float(extract_name(filename_splitted[1]))\n iterations = int(extract_name(filename_splitted[2]))\n size = int(extract_name(filename_splitted[3]))\n G = int(extract_name(filename_splitted[4]))\n return p, iterations, size, G\n\n\ndef load_metrics(filename: str) -> list:\n with open(filename, 'r') as f:\n return [float(line.strip()) for line in f]\n\n\ndef plot_metrics(filename: str, metrics: list, output_path: str = None):\n p, iterations, size, G = extract_info(filename)\n x = np.linspace(0, iterations, len(metrics))\n\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.figure(figsize=(8, 5))\n plt.grid(True, alpha=0.3)\n plt.plot(x, metrics, label=f'p = {p}, N = {size}, G = {G}')\n plt.ylabel(r'$\\rho$', fontsize=14)\n plt.xlabel('$t$', fontsize=14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.legend(fontsize=13)\n if output_path is not None:\n plt.savefig(output_path, bbox_inches='tight')\n else:\n plt.show()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Plot positive edge density (rho)')\n parser.add_argument('--metrics-file', type=str, required=True, help='Path to calculated positive edge density')\n parser.add_argument('--output-figure', type=str, required=False, default=None, help='Where to save output figure')\n args = parser.parse_args()\n metrics = load_metrics(args.metrics_file)\n plot_metrics(args.metrics_file, metrics, args.output_figure)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elephantscale/facies | [
"ea78a4917ebb5dbbe478b9fc27200c67b6e5576f"
] | [
"code/faciesplot.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n#Key:\n# 1=sandstone 2=c_siltstone 3=f_siltstone \n# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite\n# 8=packstone 9=bafflestone\n\n\nfacies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',\n 'WS', 'D','PS', 'BS']\n\nfacies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']\n\n#facies_color_map is a dictionary that maps facies labels\n#to their respective colors\n\n\nfacies_color_map = {}\nfor ind, label in enumerate(facies_labels):\n facies_color_map[label] = facies_colors[ind]\n\n \n \ndef label_facies(row, labels):\n return labels[ row['Facies'] -1]\n\ndef make_facies_log_plot(logs, facies_colors):\n #make sure logs are sorted by depth\n logs = logs.sort_values(by='Depth')\n cmap_facies = colors.ListedColormap(\n facies_colors[0:len(facies_colors)], 'indexed')\n \n ztop=logs.Depth.min(); zbot=logs.Depth.max()\n \n cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)\n \n f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))\n ax[0].plot(logs.GR, logs.Depth, '-g')\n ax[1].plot(logs.ILD_log10, logs.Depth, '-')\n ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')\n ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')\n ax[4].plot(logs.PE, logs.Depth, '-', color='black')\n im=ax[5].imshow(cluster, interpolation='none', aspect='auto',\n cmap=cmap_facies,vmin=1,vmax=9)\n \n divider = make_axes_locatable(ax[5])\n cax = divider.append_axes(\"right\", size=\"20%\", pad=0.05)\n cbar=plt.colorbar(im, cax=cax)\n cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', \n 'SiSh', ' MS ', ' WS ', ' D ', \n ' PS ', ' BS ']))\n cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')\n \n for i in range(len(ax)-1):\n ax[i].set_ylim(ztop,zbot)\n ax[i].invert_yaxis()\n ax[i].grid()\n ax[i].locator_params(axis='x', nbins=3)\n \n ax[0].set_xlabel(\"GR\")\n ax[0].set_xlim(logs.GR.min(),logs.GR.max())\n ax[1].set_xlabel(\"ILD_log10\")\n ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())\n ax[2].set_xlabel(\"DeltaPHI\")\n ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())\n ax[3].set_xlabel(\"PHIND\")\n ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())\n ax[4].set_xlabel(\"PE\")\n ax[4].set_xlim(logs.PE.min(),logs.PE.max())\n ax[5].set_xlabel('Facies')\n \n ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])\n ax[4].set_yticklabels([]); ax[5].set_yticklabels([])\n ax[5].set_xticklabels([])\n f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)\n\n\ndef compare_facies_plot(logs, compadre, facies_colors):\n \"\"\"plot the facies plot as a function of depth for both the prediction\n and the actual lithofacies labels.\n \"\"\"\n \n #make sure logs are sorted by depth\n logs = logs.sort_values(by='Depth')\n cmap_facies = colors.ListedColormap(\n facies_colors[0:len(facies_colors)], 'indexed')\n \n ztop=logs.Depth.min(); zbot=logs.Depth.max()\n \n cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)\n cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)\n \n f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))\n ax[0].plot(logs.GR, logs.Depth, '-g')\n ax[1].plot(logs.ILD_log10, logs.Depth, '-')\n ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')\n ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')\n ax[4].plot(logs.PE, logs.Depth, '-', color='black')\n im1 = ax[5].imshow(cluster1, interpolation='none', 
aspect='auto',\n cmap=cmap_facies,vmin=1,vmax=9)\n im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',\n cmap=cmap_facies,vmin=1,vmax=9)\n \n divider = make_axes_locatable(ax[6])\n cax = divider.append_axes(\"right\", size=\"20%\", pad=0.05)\n cbar=plt.colorbar(im2, cax=cax)\n cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', \n 'SiSh', ' MS ', ' WS ', ' D ', \n ' PS ', ' BS ']))\n cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')\n \n for i in range(len(ax)-2):\n ax[i].set_ylim(ztop,zbot)\n ax[i].invert_yaxis()\n ax[i].grid()\n ax[i].locator_params(axis='x', nbins=3)\n \n ax[0].set_xlabel(\"GR\")\n ax[0].set_xlim(logs.GR.min(),logs.GR.max())\n ax[1].set_xlabel(\"ILD_log10\")\n ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())\n ax[2].set_xlabel(\"DeltaPHI\")\n ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())\n ax[3].set_xlabel(\"PHIND\")\n ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())\n ax[4].set_xlabel(\"PE\")\n ax[4].set_xlim(logs.PE.min(),logs.PE.max())\n ax[5].set_xlabel('Facies')\n ax[6].set_xlabel(compadre)\n \n ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])\n ax[4].set_yticklabels([]); ax[5].set_yticklabels([])\n ax[5].set_xticklabels([])\n ax[6].set_xticklabels([])\n f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)\n"
] | [
[
"matplotlib.pyplot.colorbar",
"numpy.expand_dims",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
siyemuxu888/imagepy | [
"a933526483a15da282bacac54608d44d2173beb4",
"a933526483a15da282bacac54608d44d2173beb4",
"a933526483a15da282bacac54608d44d2173beb4"
] | [
"imagepy/tools/Transform/scale_tol.py",
"imagepy/menus/Plugins/Surf/surf_plg.py",
"imagepy/menus/Process/Binary/binary_plgs.py"
] | [
"import wx\nimport numpy as np\nfrom imagepy.core.engine import Tool, Filter\nimport scipy.ndimage as nimg\n\nclass ScaleTool(Tool):\n def __init__(self, plg):\n self.plg = plg\n self.para = plg.para\n self.moving = False\n \n def snap(self, x, y, lim):\n plg = self.plg\n if abs(x-plg.lt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'l'\n if abs(x-plg.rt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'r'\n if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.tp)<lim:return 't'\n if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.bm)<lim:return 'b'\n if abs(x-plg.lt)<lim and abs(y-plg.tp)<lim:return 'lt'\n if abs(x-plg.rt)<lim and abs(y-plg.bm)<lim:return 'rb'\n if abs(x-plg.rt)<lim and abs(y-plg.tp)<lim:return 'rt'\n if abs(x-plg.lt)<lim and abs(y-plg.bm)<lim:return 'lb'\n if (x-plg.lt)*(x-plg.rt)<0 and (y-plg.tp)*(y-plg.bm)<0:\n self.ox, self.oy = x, y\n return True\n return False\n \n def mouse_down(self, ips, x, y, btn, **key): \n lim = 5.0/key['canvas'].get_scale() \n self.moving = self.snap(x, y, lim)\n print(self.moving)\n \n def mouse_up(self, ips, x, y, btn, **key):\n if self.moving : self.plg.preview(ips, self.para)\n \n def mouse_move(self, ips, x, y, btn, **key):\n lim = 5.0/key['canvas'].get_scale()\n if btn==None:\n self.cursor = wx.CURSOR_CROSS\n if isinstance(self.snap(x, y, lim), str):\n self.cursor = wx.CURSOR_HAND\n elif self.moving==True:\n self.plg.lt+=x-self.ox\n self.plg.rt+=x-self.ox\n self.plg.bm+=y-self.oy\n self.plg.tp+=y-self.oy\n self.ox, self.oy = x, y\n self.plg.count()\n self.plg.dialog.reset()\n ips.update = True\n elif self.moving != False:\n print(\"scale_tol.ScaleTool.mouse_move\")\n if 'l' in self.moving:self.plg.lt = x\n if 'r' in self.moving:self.plg.rt = x\n if 't' in self.moving:self.plg.tp = y\n if 'b' in self.moving:self.plg.bm = y\n self.plg.count()\n self.plg.dialog.reset()\n ips.update = True\n\nclass Plugin(Filter):\n modal = False\n title = 'Scale'\n note = ['all', 'auto_msk', 'auto_snap', 'preview']\n para = {'kx': 1, 'ky':1, 'ox':0, 'oy':0, 'img':True, 'msk':False}\n view = [(float, (-100,100), 3, 'KX', 'kx', ''),\n (float, (-100,100), 3, 'KY', 'ky', ''),\n (int, (-10000,10000), 0, 'OffX', 'ox', 'pix'),\n (int, (-10000,10000), 0, 'OffY', 'oy', 'pix'),\n (bool, 'scale image', 'img'),\n (bool, 'scale mask', 'msk')]\n\n \n def draw(self, dc, f, **key):\n body = [(self.lt,self.bm),(self.rt,self.bm),\n (self.rt,self.tp),(self.lt,self.tp),(self.lt,self.bm)]\n dc.SetPen(wx.Pen((0,255,0), width=1, style=wx.SOLID))\n dc.DrawLines([f(*i) for i in body])\n for i in body:dc.DrawCircle(f(*i),2)\n dc.DrawCircle(f(self.lt, (self.tp+self.bm)/2),2)\n dc.DrawCircle(f(self.rt, (self.tp+self.bm)/2),2)\n dc.DrawCircle(f((self.lt+self.rt)/2, self.tp),2)\n dc.DrawCircle(f((self.lt+self.rt)/2, self.bm),2)\n \n def load(self, ips): \n self.bufroi = ips.roi\n self.lt, self.tp, self.rt, self.bm = 0, 0, ips.size[1], ips.size[0]\n \n if ips.roi!=None:\n box = ips.roi.get_box()\n if box[0]!=box[2] and box[1]!=box[3]:\n self.lt, self.tp, self.rt, self.bm = box\n\n self.orio = ((self.lt+self.rt)/2,(self.tp+self.bm)/2)\n self.oriw, self.orih = self.rt - self.lt, self.tp - self.bm\n\n self.para['ox'] = (self.lt+self.rt)/2\n self.para['oy'] = (self.tp+self.bm)/2\n self.para['kx'] = self.para['ky'] = 1\n \n ips.mark = self\n ips.update = True\n ips.tool = ScaleTool(self)\n return True\n \n def count(self, dir=True):\n if dir:\n self.para['ox'] = int((self.lt+self.rt)/2)\n self.para['oy'] = int((self.tp+self.bm)/2)\n self.para['kx'] = (self.rt-self.lt)*1.0/self.oriw\n self.para['ky'] = 
(self.tp-self.bm)*1.0/self.orih\n else:\n self.lt = self.para['ox']-self.oriw*self.para['kx']/2\n self.rt = self.para['ox']+self.oriw*self.para['kx']/2\n self.bm = self.para['oy']-self.orih*self.para['ky']/2\n self.tp = self.para['oy']+self.orih*self.para['ky']/2\n\n def ok(self, ips, para=None):\n Filter.ok(self, ips, para)\n ips.mark = None\n ips.tool = None\n \n def cancel(self, ips):\n Filter.cancel(self, ips)\n ips.roi = self.bufroi\n ips.mark = None\n ips.tool = None\n ips.update = 'pix'\n \n def run(self, ips, img, buf, para = None):\n if para == None: para = self.para\n self.count(False)\n trans = np.array([[1/self.para['ky'],0],[0,1/self.para['kx']]])\n o = np.array([self.para['oy'], self.para['ox']])\n offset = self.orio[::-1]-trans.dot(o)\n if self.para['img']:\n nimg.affine_transform(img, trans, output=buf, offset=offset)\n trans = np.array([[self.para['kx'],0],[0, self.para['ky']]])\n offset = o[::-1]-trans.dot(self.orio)\n if self.para['msk'] and self.bufroi!=None:ips.roi = self.bufroi.affine(trans, offset)\n if self.para['img'] and not ips.get_msk('out') is None: \n buf[ips.get_msk('out')] = img[ips.get_msk('out')]\n ips.update = True\n",
"import cv2, wx\nfrom imagepy.core.engine import Filter, Simple, Tool\nfrom imagepy.core.manager import WindowsManager\nfrom .matcher import Matcher\nimport numpy as np\nfrom imagepy import IPy\n\nCVSURF = cv2.xfeatures2d.SURF_create if cv2.__version__[0] ==\"3\" else cv2.SURF\n\nclass FeatMark:\n def __init__(self, feats):\n self.feats = feats\n\n def draw(self, dc, f, **key):\n for i in self.feats:\n dc.DrawCircle(f(i.pt), 3)\n\nclass Surf(Filter):\n title = 'Surf Detect'\n note = ['all', 'not-slice']\n\n para = {'upright':False, 'oct':3, 'int':4, 'thr':1000, 'ext':False}\n view = [(int, (0,5), 0, 'octaves', 'oct', ''),\n (int, (0,5), 0, 'intervals', 'int',''),\n (int, (500,2000), 0, 'threshold', 'thr','1-100'),\n (bool, 'extended', 'ext'),\n (bool, 'upright', 'upright')]\n\n def run(self, ips, snap, img, para):\n detector = CVSURF(hessianThreshold=para['thr'], nOctaves=para['oct'],\n nOctaveLayers=para['int'], upright=para['upright'],extended=para['ext'])\n kps = detector.detect(img)\n ips.surf_keypoint = kps\n ips.mark = FeatMark(kps)\n IPy.write(\"Detect completed, {} points found!\".format(len(kps)), 'Surf')\n\nclass Pick(Tool):\n title = 'Key Point Pick Tool'\n def __init__(self, pts1, pts2, pair, msk, ips1, ips2, host, style):\n self.pts1, self.pts2 = pts1, pts2\n self.ips1, self.ips2 = ips1, ips2\n self.pair, self.msk = pair, msk\n self.cur, self.host = -1, host\n self.pts = self.pts1 if host else self.pts2\n self.style = style\n\n def nearest(self, x, y):\n mind, mini = 1000, -1\n for i1, i2 in self.pair:\n i = i1 if self.host else i2\n d = np.sqrt((x-self.pts[i].pt[0])**2+(y-self.pts[i].pt[1])**2)\n if d<mind: mind, mini = d, (i1, i2)\n return mini if mind<5 else None\n\n def mouse_down(self, ips, x, y, btn, **key):\n cur = self.nearest(x, y)\n if cur==None:return\n self.ips1.tool.cur, self.ips2.tool.cur = cur\n self.ips1.update, self.ips2.update = True, True\n\n def mouse_up(self, ips, x, y, btn, **key):\n pass\n\n def mouse_move(self, ips, x, y, btn, **key):\n pass\n\n def mouse_wheel(self, ips, x, y, d, **key):\n pass\n\n def draw(self, dc, f, **key):\n #dc.SetPen(wx.TRANSPARENT_PEN)\n dc.SetBrush(wx.Brush((0,0,255)))\n if self.style:\n for i in self.pts:dc.DrawCircle(f(*i.pt), 3)\n tidx = self.pair[:,1-self.host][self.msk]\n dc.SetBrush(wx.Brush((255,255,0)))\n for i in tidx:\n dc.DrawCircle(f(*self.pts[i].pt), 3)\n if self.cur!=-1:\n dc.SetBrush(wx.Brush((255,0,0)))\n dc.DrawCircle(f(*self.pts[self.cur].pt), 3)\n\nclass Match(Simple):\n title = 'Surf Matcher'\n note = ['all']\n\n #parameter\n para = {'img1':'','img2':'','upright':False, 'log':False,\n 'oct':3, 'int':4, 'thr':1000, 'ext':False,\n 'trans':'None', 'std':1, 'style':'Blue/Yellow'}\n\n def load(self, ips):\n titles = WindowsManager.get_titles()\n self.para['img1'] = titles[0]\n self.para['img2'] = titles[0]\n Match.view = [('lab','========= two image in 8-bit ========='),\n (list, titles, str, 'image1', 'img1', ''),\n (list, titles, str, 'image2', 'img2', ''),\n ('lab',''),\n ('lab','====== parameter about the surf ======'),\n (int, (0,5), 0, 'octaves', 'oct', ''),\n (int, (0,5), 0, 'intervals', 'int',''),\n (int, (500,2000), 0, 'threshold', 'thr','1-100'),\n (bool, 'extended', 'ext'),\n (bool, 'upright', 'upright'),\n ('lab',''),\n ('lab','====== how to match and display ======'),\n (list, ['None', 'Affine', 'Homo'], str, 'transform', 'trans',''),\n (int, (1, 5), 0, 'Std', 'std', 'torlerance'),\n (list, ['Blue/Yellow', 'Hide'], str, 'Aspect', 'style', 'color'),\n (bool, 'Show log', 'log')]\n return True\n\n def 
filter_matches(self, kp1, kp2, matches, ratio = 0.75):\n mkp1, mkp2 = [], []\n for m in matches:\n if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n m = m[0]\n mkp1.append( kp1[m.queryIdx] )\n mkp2.append( kp2[m.trainIdx] )\n p1 = np.float32([kp.pt for kp in mkp1])\n p2 = np.float32([kp.pt for kp in mkp2])\n kp_pairs = list(zip(mkp1, mkp2))\n return p1, p2, kp_pairs\n\n #process\n def run(self, ips, imgs, para = None):\n ips1 = WindowsManager.get(para['img1']).ips\n ips2 = WindowsManager.get(para['img2']).ips\n\n detector = CVSURF(hessianThreshold=para['thr'], nOctaves=para['oct'],\n nOctaveLayers=para['int'], upright=para['upright'],extended=para['ext'])\n kps1, feats1 = detector.detectAndCompute(ips1.img, None)\n kps2, feats2 = detector.detectAndCompute(ips2.img, None)\n dim, std = {'None':0, 'Affine':6, 'Homo':8}[para['trans']], para['std']/100.0\n style = para['style']=='Blue/Yellow'\n idx, msk, m = Matcher(dim, std).filter(kps1,feats1,kps2,feats2)\n picker1 = Pick(kps1, kps2, idx, msk, ips1, ips2, True, style)\n picker2 = Pick(kps1, kps2, idx, msk, ips1, ips2, False, style)\n ips1.tool, ips1.mark = picker1, picker1\n ips2.tool, ips2.mark = picker2, picker2\n if para['log']:self.log(kps1, kps2, msk, m, dim)\n ips1.update, ips2.update = True, True\n\n def log(self, pts1, pts2, msk, v, dim):\n sb = []\n sb.append('Image1:{} points detected!'.format(len(pts1)))\n sb.append('Image2:{} points detected!\\r\\n'.format(len(pts2)))\n sb.append('Matched Point:{0}/{1}\\r\\n'.format(msk.sum(),len(msk)))\n if dim == 0: return\n sb.append('Transformation:')\n sb.append('%15.4f%15.4f%15.4f'%tuple(v.A1[:3]))\n sb.append('%15.4f%15.4f%15.4f'%tuple(v.A1[3:6]))\n row = [0,0,1] if dim==6 else list(v[-2:])+[1]\n sb.append('%15.4f%15.4f%15.4f'%tuple(row))\n \n cont = '\\n'.join(sb)\n IPy.write(cont, 'Surf')\n\nplgs = [Surf, Match]\n\nif __name__ == '__main__':\n from .matcher import Matcher\n\n detector = CVSURF(1000, nOctaves=3, nOctaveLayers=4, upright=False,extended=False)\n #img1 = cv2.imread('/home/yxl/opencv-2.4/samples/c/box.png', 0)\n img1 = cv2.imread('/home/auss/Pictures/faces1.png',0)\n pts, des = detector.detectAndCompute(img1, None)\n\n matcher = cv2.BFMatcher(cv2.NORM_L2)\n raw_matches = matcher.knnMatch(des, trainDescriptors = des, k = 1)\n m = raw_matches[0][0]\n lt = [(i[0].distance, i[0].queryIdx, i[0].trainIdx) for i in raw_matches]\n lt = np.array(sorted(lt))\n\n matcher = Matcher(8, 3)\n idx, msk, m = matcher.filter(pts,des,pts,des)",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 18 22:51:57 2016\n\n@author: yxl\n\"\"\"\n\n# -*- coding: utf-8 -*\nimport scipy.ndimage as ndimg\nimport numpy as np\nfrom imagepy.core.engine import Filter\nfrom skimage.morphology import convex_hull_object\n\nclass Closing(Filter):\n \"\"\"Closing: derived from imagepy.core.engine.Filter \"\"\"\n title = 'Binary Closeing'\n note = ['8-bit', 'auto_msk', 'auto_snap','preview']\n para = {'w':3, 'h':3}\n view = [(int, (1,15), 0, 'width', 'w', 'pix'),\n (int, (1,15), 0, 'height', 'h', 'pix')]\n\n def run(self, ips, snap, img, para = None):\n strc = np.ones((para['h'], para['w']), dtype=np.uint8)\n ndimg.binary_closing(snap, strc, output=img)\n img *= 255\n \nclass Opening(Filter):\n \"\"\"Opening: derived from imagepy.core.engine.Filter \"\"\"\n title = 'Binary Opening'\n note = ['8-bit', 'auto_msk', 'auto_snap','preview']\n para = {'w':3, 'h':3}\n view = [(int, (1,15), 0, 'width', 'w', 'pix'),\n (int, (1,15), 0, 'height', 'h', 'pix')]\n\n def run(self, ips, snap, img, para = None):\n strc = np.ones((para['h'], para['w']), dtype=np.uint8)\n ndimg.binary_opening(snap, strc, output=img)\n img *= 255\n \nclass Dilation(Filter):\n \"\"\"Dilation: derived from imagepy.core.engine.Filter \"\"\"\n title = 'Binary Dilation'\n note = ['8-bit', 'auto_msk', 'auto_snap','preview']\n para = {'w':3, 'h':3}\n view = [(int, (1,15), 0, 'width', 'w', 'pix'),\n (int, (1,15), 0, 'height', 'h', 'pix')]\n\n def run(self, ips, snap, img, para = None):\n strc = np.ones((para['h'], para['w']), dtype=np.uint8)\n ndimg.binary_dilation(snap, strc, output=img)\n img *= 255\n \nclass Erosion(Filter):\n \"\"\"Erosion: derived from imagepy.core.engine.Filter \"\"\"\n title = 'Binary Erosion'\n note = ['8-bit', 'auto_msk', 'auto_snap','preview']\n para = {'w':3, 'h':3}\n view = [(int, (1,15), 0, 'width', 'w', 'pix'),\n (int, (1,15), 0, 'height', 'h', 'pix')]\n\n def run(self, ips, snap, img, para = None):\n strc = np.ones((para['h'], para['w']), dtype=np.uint8)\n ndimg.binary_erosion(snap, strc, output=img)\n img *= 255\n \nclass Outline(Filter):\n \"\"\"Outline: derived from imagepy.core.engine.Filter \"\"\"\n title = 'Binary Outline'\n note = ['8-bit', 'auto_msk', 'auto_snap','preview']\n\n def run(self, ips, snap, img, para = None):\n ndimg.binary_dilation(snap, output=img)\n img *= 255\n img -= snap\n \nclass FillHoles(Filter):\n \"\"\"FillHoles: derived from imagepy.core.engine.Filter \"\"\"\n title = 'Fill Holes'\n note = ['8-bit', 'auto_msk', 'auto_snap','preview']\n\n def run(self, ips, snap, img, para = None):\n ndimg.binary_fill_holes(snap, output=img)\n img *= 255\n\nclass Convex(Filter):\n title = 'Binary ConvexHull'\n note = ['8-bit', 'auto_msk', 'auto_snap']\n\n #process\n def run(self, ips, snap, img, para = None):\n img[convex_hull_object(snap)] = 255\n \n\nplgs = [Dilation, Erosion, '-', Closing, Opening, '-', Outline, FillHoles, Convex]"
] | [
[
"numpy.array",
"scipy.ndimage.affine_transform"
],
[
"numpy.sqrt",
"numpy.float32"
],
[
"scipy.ndimage.binary_erosion",
"scipy.ndimage.binary_closing",
"numpy.ones",
"scipy.ndimage.binary_dilation",
"scipy.ndimage.binary_opening",
"scipy.ndimage.binary_fill_holes"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
KedoKudo/jupyter-ht-hedm | [
"b447202fb9800e7b2916b38470db1b9a83357130"
] | [
"seisidd/tomo_plans.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nPredefined bluesky scan plans\n\"\"\"\n\nimport numpy as np\nimport bluesky.plans as bp\nimport bluesky.preprocessors as bpp\nimport bluesky.plan_stubs as bps\n\nfrom .utility import load_config\n\n#@bpp.run_decorator()\ndef collect_white_field(experiment, cfg_tomo, atfront=True):\n \"\"\"\n Collect white/flat field images by moving the sample out of the FOV\n \"\"\"\n # unpack devices\n det = experiment.det\n tomostage = experiment.tomostage\n\n # move sample out of the way\n _x = cfg_tomo['fronte_white_ksamX'] if atfront else cfg_tomo['back_white_ksamX']\n _z = cfg_tomo['fronte_white_ksamZ'] if atfront else cfg_tomo['back_white_ksamZ']\n yield from bps.mv(tomostage.ksamX, _x)\n yield from bps.mv(tomostage.ksamZ, _z)\n\n # setup detector\n yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')\n yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') \n yield from bps.mv(det.proc1.enable, 1)\n yield from bps.mv(det.proc1.reset_filter, 1)\n yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])\n yield from bps.mv(det.cam.trigger_mode, \"Internal\")\n yield from bps.mv(det.cam.image_mode, \"Multiple\")\n yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white'])\n yield from bps.trigger_and_read([det])\n\n # move sample back to FOV\n # NOTE:\n # not sure is this will work or not...\n yield from bps.mv(tomostage.ksamX, cfg_tomo['initial_ksamX'])\n yield from bps.mv(tomostage.ksamZ, cfg_tomo['initial_ksamZ'])\n\n\n#@bpp.run_decorator()\ndef collect_dark_field(experiment, cfg_tomo):\n \"\"\"\n Collect dark field images by close the shutter\n \"\"\"\n det = experiment.det\n\n yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')\n yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') \n yield from bps.mv(det.proc1.enable, 1)\n yield from bps.mv(det.proc1.reset_filter, 1)\n yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])\n yield from bps.mv(det.cam.trigger_mode, \"Internal\")\n yield from bps.mv(det.cam.image_mode, \"Multiple\")\n yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark'])\n yield from bps.trigger_and_read([det])\n\n\n#@bpp.run_decorator()\ndef step_scan(experiment, cfg_tomo):\n \"\"\"\n Collect projects with step motion\n \"\"\"\n # unpack devices\n det = experiment.det\n tomostage = experiment.tomostage\n\n yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')\n yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') \n yield from bps.mv(det.proc1.enable, 1)\n yield from bps.mv(det.proc1.reset_filter, 1)\n yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])\n\n angs = np.arange(\n cfg_tomo['omega_start'], \n cfg_tomo['omega_end']+cfg_tomo['omega_step']/2,\n cfg_tomo['omega_step'],\n )\n for ang in angs:\n yield from bps.checkpoint()\n yield from bps.mv(tomostage.preci, ang)\n yield from bps.trigger_and_read([det])\n\n\n#@bpp.run_decorator()\ndef fly_scan(experiment, cfg_tomo):\n \"\"\"\n Collect projections with fly motion\n \"\"\"\n det = experiment.det\n psofly = experiment.psofly\n \n yield from bps.mv(det.hdf1.nd_array_port, 'PG1')\n yield from bps.mv(det.tiff1.nd_array_port, 'PG1')\n\n # we are assuming that the global psofly is available\n yield from bps.mv(\n psofly.start, cfg_tomo['omega_start'],\n psofly.end, cfg_tomo['omega_end'],\n psofly.scan_delta, abs(cfg_tomo['omega_step']),\n psofly.slew_speed, cfg_tomo['slew_speed'],\n )\n # taxi\n yield from bps.mv(psofly.taxi, \"Taxi\")\n yield from bps.mv(\n det.cam.num_images, cfg_tomo['n_projections'],\n det.cam.trigger_mode, 
\"Overlapped\",\n )\n # start the fly scan\n yield from bps.trigger(det, group='fly')\n yield from bps.abs_set(psofly.fly, \"Fly\", group='fly')\n yield from bps.wait(group='fly')\n\n\ndef tomo_scan(experiment, cfg):\n \"\"\"\n Tomography scan plan based on given configuration\n \"\"\"\n # unpack devices\n det = experiment.det\n tomostage = experiment.tomostage\n shutter = experiment.shutter\n shutter_suspender = experiment.suspend_shutter\n \n cfg = load_config(cfg) if type(cfg) != dict else cfg\n\n # update the cached motor position in the dict in case exp goes wrong\n _cahed_position = experiment.cache_motor_position()\n\n # step 0: preparation\n acquire_time = cfg['tomo']['acquire_time']\n n_white = cfg['tomo']['n_white']\n n_dark = cfg['tomo']['n_dark']\n angs = np.arange(\n cfg['tomo']['omega_start'], \n cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2,\n cfg['tomo']['omega_step'],\n )\n n_projections = len(angs)\n cfg['tomo']['n_projections'] = n_projections\n total_images = n_white + n_projections + n_white + n_dark\n fp = cfg['output']['filepath']\n fn = cfg['output']['fileprefix']\n \n # calculate slew speed for fly scan\n # https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py\n # TODO: considering blue pixels, use 2BM code as ref\n if cfg['tomo']['type'].lower() == 'fly':\n scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections\n slew_speed = (angs.max() - angs.min())/scan_time\n cfg['tomo']['slew_speed'] = slew_speed\n \n # need to make sure that the sample out position is the same for both front and back\n x0, z0 = tomostage.ksamX.position, tomostage.ksamZ.position\n dfx, dfz = cfg['tomo']['sample_out_position']['samX'], cfg['tomo']['sample_out_position']['samZ']\n rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start'])\n rotm = np.array([[ np.cos(rotang), np.sin(rotang)],\n [-np.sin(rotang), np.cos(rotang)]])\n dbxz = np.dot(rotm, np.array([dfx, dfz]))\n dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0\n dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0\n # now put the value to dict\n cfg['tomo']['initial_ksamX'] = x0\n cfg['tomo']['initial_ksamZ'] = z0\n cfg['tomo']['fronte_white_ksamX'] = x0 + dfx\n cfg['tomo']['fronte_white_ksamZ'] = z0 + dfz\n cfg['tomo']['back_white_ksamX'] = x0 + dbx\n cfg['tomo']['back_white_ksamZ'] = z0 + dbz\n \n @bpp.run_decorator()\n @bpp.stage_decorator([det])\n def scan_closure():\n # open shutter for beam\n yield from bps.mv(shutter, 'open')\n yield from bps.install_suspender(shutter_suspender)\n \n # config output\n for me in [det.tiff1, det.hdf1]:\n yield from bps.mv(me.file_path, fp)\n yield from bps.mv(me.file_name, fn)\n yield from bps.mv(me.file_write_mode, 2)\n yield from bps.mv(me.num_capture, total_images)\n yield from bps.mv(me.file_template, \".\".join([r\"%s%s_%06d\",cfg['output']['type'].lower()])) \n\n if cfg['output']['type'] in ['tif', 'tiff']:\n yield from bps.mv(det.tiff1.enable, 1)\n yield from bps.mv(det.tiff1.capture, 1)\n yield from bps.mv(det.hdf1.enable, 0)\n elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:\n yield from bps.mv(det.tiff1.enable, 0)\n yield from bps.mv(det.hdf1.enable, 1)\n yield from bps.mv(det.hdf1.capture, 1)\n else:\n raise ValueError(f\"Unsupported output type {cfg['output']['type']}\")\n\n # collect front white field\n yield from bps.mv(det.cam.frame_type, 0) # for HDF5 dxchange data structure\n yield from collect_white_field(experiment, cfg['tomo'], atfront=True)\n\n # collect projections\n yield from bps.mv(det.cam.frame_type, 1) # for HDF5 
dxchange data structure\n if cfg['tomo']['type'].lower() == 'step':\n yield from step_scan(experiment, cfg['tomo'])\n elif cfg['tomo']['type'].lower() == 'fly':\n yield from fly_scan(experiment, cfg['tomo'])\n else:\n raise ValueError(f\"Unsupported scan type: {cfg['tomo']['type']}\")\n\n # collect back white field\n yield from bps.mv(det.cam.frame_type, 2) # for HDF5 dxchange data structure\n yield from collect_white_field(experiment, cfg['tomo'], atfront=False)\n\n # collect back dark field\n yield from bps.mv(det.cam.frame_type, 3) # for HDF5 dxchange data structure\n yield from bps.remove_suspender(shutter_suspender)\n yield from bps.mv(shutter, \"close\")\n yield from collect_dark_field(experiment, cfg['tomo'])\n\n return (yield from scan_closure())\n"
] | [
[
"numpy.radians",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
petuum/tuun | [
"8eec472dbf0e5e695449b0fa2d98985469fd5b30",
"8eec472dbf0e5e695449b0fa2d98985469fd5b30",
"8eec472dbf0e5e695449b0fa2d98985469fd5b30"
] | [
"tuun/probo/models/gp_stan_transfer.py",
"tuun/util/data_transform.py",
"tuun/probo/acq/acqopt_product.py"
] | [
"\"\"\"\nClasses for GP models with Stan that perform transfer optimization.\n\"\"\"\n\nfrom argparse import Namespace\nimport numpy as np\nimport copy\n\nfrom .gp_stan import StanGp\nfrom .regression.transfer_regression import TransferRegression\nfrom ..util.misc_util import dict_to_namespace\n\n\nclass StanTransferGp(StanGp):\n \"\"\"\n GP model with transferred prior mean based on a regression model.\n \"\"\"\n def __init__(self, params=None, data=None, verbose=None):\n self.set_params(params)\n self.set_verbose(verbose)\n self.set_model(data)\n\n def set_params(self, params):\n \"\"\"Set self.params, the parameters for this model.\"\"\"\n super().set_params(params)\n params = dict_to_namespace(params)\n\n assert hasattr(params, 'transfer_config')\n self.params.transfer_config = params.transfer_config\n\n def set_model(self, data):\n \"\"\"Set GP Stan model and regression model.\"\"\"\n self.model = self.get_model()\n self.regressor = self.get_regressor(data)\n #self.regressor = self.get_proxy_regressor(data) # TODO\n\n def get_regressor(self, data):\n \"\"\"Return transfer (prior mean) regressor.\"\"\"\n\n # Define regressor\n regressor = TransferRegression(self.params.transfer_config)\n\n if len(data.x) < 1:\n regressor = None\n else:\n mean_errors = []\n\n # TODO: remove extra files such as .DS_STORE (or ignore files that break)\n for i, reg in enumerate(regressor.model_fnames):\n try:\n val_acc = regressor.evaluate_model(reg, data.x)\n error = np.mean((data.y - val_acc) ** 2)\n mean_errors.append((error, i))\n except:\n print(f'Transfer model file in tarball did not load: {reg}')\n mean_errors.sort()\n if mean_errors[0][0] > self.params.transfer_config.get('metric_threshold', 0.6):\n regressor.set_best_model(-1)\n else:\n regressor.set_best_model(mean_errors[0][1])\n\n return regressor\n\n def get_proxy_regressor(self, data):\n if not data:\n regressor = None\n else:\n def regressor(x): return np.linalg.norm(x)\n\n return regressor\n\n def transform_data_y(self):\n \"\"\"Transform data.y using PriorMeanDataTransformer.\"\"\"\n self.dt = PriorMeanDataTransformer(self.data, self.regressor, False)\n y_trans = self.dt.transform_y_data()\n self.data = Namespace(x=self.data.x, y=y_trans)\n\n def gen_list(self, x_list, z, s, nsamp):\n \"\"\"\n Draw nsamp samples from generative process, given list of inputs\n x_list, posterior sample z, and seed s.\n\n Parameters\n ----------\n x_list : list\n List of numpy ndarrays each with shape=(self.params.ndimx,)\n z : Namespace\n Namespace of GP hyperparameters.\n s : int\n The seed, a positive integer.\n nsamp : int\n The number of samples to draw from generative process.\n\n Returns\n -------\n list\n A list with len=len(x_list) of numpy ndarrays, each with\n shape=(nsamp,).\n \"\"\"\n x_list = self.transform_xin_list(x_list)\n pred_list = self.sample_gp_pred(nsamp, x_list)\n pred_list = [\n self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)\n ]\n return pred_list\n\n def postgen_list(self, x_list, s, nsamp):\n \"\"\"\n Draw nsamp samples from posterior predictive distribution, given list\n of inputs x_list and seed s.\n\n Parameters\n ----------\n x_list : list\n List of numpy ndarrays each with shape=(self.params.ndimx,).\n s : int\n The seed, a positive integer.\n nsamp : int\n The number of samples to draw from the posterior predictive\n distribution.\n\n Returns\n -------\n list\n A list with len=len(x_list) of numpy ndarrays, each with\n shape=(nsamp,).\n \"\"\"\n x_list = self.transform_xin_list(x_list)\n pred_list = 
self.sample_gp_post_pred(\n nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])\n )\n pred_list = [\n self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)\n ]\n return pred_list\n\n def __str__(self):\n return f'StanTransferGp with params={self.params}'\n\n\nclass PriorMeanDataTransformer:\n \"\"\"\n A class to transform (and inverse transform) data, based on a prior mean regression.\n \"\"\"\n\n def __init__(self, data, prior_mean_f, verbose=True):\n \"\"\"\n Parameters\n ----------\n data : Namespace\n Namespace containing data.\n prior_mean_f : function\n Prior mean function.\n verbose : bool\n If True, print description string.\n \"\"\"\n self._set_data(data)\n self._set_prior_mean_f(prior_mean_f)\n self._set_verbose(verbose)\n\n def _set_data(self, data):\n \"\"\"Set self.data\"\"\"\n self.data = data\n\n def _set_prior_mean_f(self, prior_mean_f):\n \"\"\"Set self.prior_mean_f.\"\"\"\n if prior_mean_f is None:\n # Default prior mean function is constant 0 function\n def prior_mean_f(x): return 0.\n\n self.prior_mean_f = prior_mean_f\n\n def _set_verbose(self, verbose):\n \"\"\"Set verbose options.\"\"\"\n self.verbose = verbose\n if self.verbose:\n self._print_str()\n\n def transform_y_data(self, y_data=None, x_data=None):\n \"\"\"Transform and return self.data.y\"\"\"\n\n # Transform self.data.y into new list\n y_trans = [y - self.prior_mean_f(x) for x, y in zip(self.data.x, self.data.y)]\n return y_trans\n\n def inv_transform_y_data(self, y_arr, x_single_arr):\n \"\"\"Return inverse transform of y_arr.\"\"\"\n\n # Compute prior mean val for the single input\n prior_mean_val = self.prior_mean_f(x_single_arr)\n\n # Inverse transform y_arr into list\n y_inv_trans_list = [y + prior_mean_val for y in list(y_arr)]\n\n # Transform back to array and return\n y_inv_trans = np.array(y_inv_trans_list).reshape(-1)\n return y_inv_trans\n\n def _print_str(self):\n \"\"\"Print a description string.\"\"\"\n print('*PriorMeanDataTransformer')\n",
"\"\"\"\nClasses for transforming data.\n\"\"\"\n\nfrom argparse import Namespace\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\n\nclass DataTransformer(object):\n \"\"\"\n Class for transforming data.\n \"\"\"\n\n def __init__(self, data, verbose=True):\n \"\"\"\n Parameters\n ----------\n data : Namespace\n Namespace containing data.\n verbose : bool\n If True, print description string.\n \"\"\"\n self.set_y_data(data)\n self.set_y_transformers()\n self.set_verbose(verbose)\n\n def set_y_data(self, data):\n \"\"\"Set self.y_data.\"\"\"\n if len(data.y.shape) > 1:\n if not (data.y.shape[0] == 1 or data.y.shape[1] == 1):\n raise ValueError('data.y has incorrect shape.')\n self.y_data_orig_shape = data.y.shape\n self.y_data = data.y.reshape(-1, 1)\n\n def set_y_transformers(self):\n \"\"\"Set transformers for self.y_data.\"\"\"\n self.ss = StandardScaler()\n self.ss.fit(self.y_data)\n\n def set_verbose(self, verbose):\n \"\"\"Set verbose options.\"\"\"\n self.verbose = verbose\n if self.verbose:\n self.print_str()\n\n def transform_y_data(self, y_data=None):\n \"\"\"Return transformed y_data (default self.y_data).\"\"\"\n\n # Set y_data and save y_data_orig_shape\n if y_data is None:\n y_data = self.y_data\n y_data_orig_shape = self.y_data_orig_shape\n else:\n y_data_orig_shape = y_data.shape\n\n # Transform y_data column\n y_data_col = y_data.reshape(-1, 1)\n y_trans_col = self.ss.transform(y_data_col)\n\n # Transform y_trans back to original shape\n y_trans = y_trans_col.reshape(y_data_orig_shape)\n return y_trans\n\n def inv_transform_y_data(self, y_data):\n \"\"\"Return inverse transform of y_data.\"\"\"\n y_data_orig_shape = y_data.shape\n\n # Inverse transform y_data column\n y_data_col = y_data.reshape(-1, 1)\n y_inv_trans_col = self.ss.inverse_transform(y_data_col)\n\n # Transform y_inv_trans back to original shape\n y_inv_trans = y_inv_trans_col.reshape(y_data_orig_shape)\n return y_inv_trans\n\n def print_str(self):\n \"\"\"Print a description string.\"\"\"\n print('*DataTransformer')\n",
"\"\"\"\nAn acqoptimizer for a product domain.\n\"\"\"\n\nfrom argparse import Namespace\nimport copy\nimport numpy as np\n\nfrom ..domain import ProductDomain\nfrom ..util.misc_util import dict_to_namespace\n\n\nclass ProductAcqOptimizer:\n \"\"\"AcqOptimizer for ProductDomain.\"\"\"\n\n def __init__(self, acqoptimizer_list, params=None, print_delta=False, verbose=True):\n \"\"\"\n Parameters\n ----------\n acqoptimizer_list : list\n List of other AcqOptimizers\n params : Namespace_or_dict\n Namespace or dict of parameters.\n print_delta : bool\n If True, print acquisition function deltas at each iteration.\n verbose : bool\n If True, print description string.\n \"\"\"\n self.ao_list = acqoptimizer_list\n self.set_params(params)\n self.set_product_domain()\n self.params.print_delta = print_delta\n self.set_verbose(verbose)\n\n def set_params(self, params):\n \"\"\"Set self.params.\"\"\"\n params = dict_to_namespace(params)\n\n self.params = params\n self.params.n_iter_bcd = getattr(params, 'n_iter_bcd', 3)\n self.params.rand_every = getattr(params, 'rand_every', None)\n self.params.rand_block_init = getattr(params, 'rand_block_init', False)\n self.params.n_init_rs = getattr(params, 'n_init_rs', 0)\n\n def set_product_domain(self):\n \"\"\"Set self.product_domain.\"\"\"\n domain_list = [ao.domain for ao in self.ao_list]\n self.product_domain = ProductDomain(domain_list=domain_list, verbose=False)\n\n def set_verbose(self, verbose):\n \"\"\"Set verbose options.\"\"\"\n self.verbose = verbose\n if self.verbose:\n self.print_str()\n\n def setup_optimize(self):\n # Run setup_optimize() for each AcqOptimizer in self.ao_list\n for ao in self.ao_list:\n ao.setup_optimize()\n\n self.xin_is_list = True\n\n def optimize(self, acqmap, data):\n\n # If there is no data, return a random sample from domain\n if data is None or not list(data.x):\n return self.product_domain.unif_rand_sample(1)[0]\n\n # Optionally initialize with random search\n if len(data.x) <= self.params.n_init_rs:\n return self.product_domain.unif_rand_sample(1)[0]\n\n # NOTE:\n # - Below I assume that the input xin to acqmap (and to the model) is a list,\n # where each element is associated with one sub-domain (this restricts how\n # model must be defined).\n\n domain_list = self.product_domain.get_domain_list()\n\n # Initialize nextpt\n nextpt = self.init_and_get_nextpt(self.product_domain, data)\n\n # Store initial point (used for printing acquisition function delta)\n initpt = copy.deepcopy(nextpt)\n\n for _ in range(self.params.n_iter_bcd):\n for j in range(len(domain_list)):\n\n # Select jth domain and acqoptimizer\n domain = domain_list[j]\n ao = self.ao_list[j]\n\n # Define function that returns a modified nextpt\n get_nextpt_mod = lambda x: self.list_replace_idx(nextpt, x, j)\n\n # Construct am (for ao) from acqmap, for list & non-list cases\n xin_is_list = getattr(ao, 'xin_is_list', False)\n\n if xin_is_list:\n # NOTE:\n # - xin_list is list of domain-pts for sub-domain.\n # - acqmap is for full domain (and takes list of xin)\n # - am is for sub-domain (and also takes list of xin)\n am = lambda xin_list: acqmap(\n [get_nextpt_mod(xin) for xin in xin_list]\n )\n else:\n # NOTE:\n # - xin is a domain-pt for sub-domain.\n # - acqmap is for full domain (and takes list of xin)\n # - am is for sub-domain (and takes single xin)\n am = lambda xin: acqmap([get_nextpt_mod(xin)])[0]\n\n # Convert data into correct form\n data_j = copy.deepcopy(data)\n data_j.x = [x[j] for x in data_j.x]\n\n # init_opt strategy\n data_j.init_opt = 
nextpt[j]\n\n # Checkpoint current nextpt\n nextpt_ckpt = copy.deepcopy(nextpt)\n\n # Update nextpt with ao.optimize\n nextpt[j] = ao.optimize(am, data_j)\n\n # if self.params.print_delta:\n # acq_delta = acqmap([nextpt])[0] - acqmap([nextpt_ckpt])[0]\n # print((' Acq delta: {:.7f} = (final acq - init acq) ' +\n #'[block ckpt]').format(acq_delta))\n\n if self.params.print_delta:\n self.print_acq_delta(acqmap, initpt, nextpt)\n\n return nextpt\n\n def list_replace_idx(self, alist, newitem, idx):\n \"\"\"Replace alist[idx] with newitem.\"\"\"\n newlist = copy.deepcopy(alist)\n newlist[idx] = newitem\n return newlist\n\n def print_acq_delta(self, acqmap, init_point, optima):\n \"\"\"Print acquisition function delta for optima minus initial point.\"\"\"\n init_acq = acqmap([init_point])[0]\n final_acq = acqmap([optima])[0]\n acq_delta = final_acq - init_acq\n print(\n (' Acq delta: {:.7f} = (final acq - init acq) ' + '[product]').format(\n acq_delta\n )\n )\n\n def init_and_get_nextpt(self, product_domain, data):\n \"\"\"Initialize and return nextpt for optimize.\"\"\"\n\n if len(data.x) < 1:\n nextpt = product_domain.unif_rand_sample()[0]\n else:\n if self.params.rand_every is None:\n self.params.rand_every = len(data.x) + 1\n\n if (\n self.params.rand_block_init\n and len(data.x) % self.params.rand_every == 0\n ):\n # Randomize initialize only one block of nextpt (other blocks\n # set to best so far)\n\n min_idx = np.argmin(data.y)\n nextpt = data.x[min_idx]\n\n nextpt_rand = product_domain.unif_rand_sample()[0]\n rand_j_idx = np.random.randint(len(nextpt))\n\n nextpt[rand_j_idx] = nextpt_rand[rand_j_idx]\n if self.params.print_delta:\n print(' RAND-BLOCK init for BCD')\n\n elif len(data.x) % self.params.rand_every == 0:\n # Randomly initialize full nextpt\n nextpt = product_domain.unif_rand_sample()[0]\n if self.params.print_delta:\n print(' RAND init for BCD')\n\n else:\n # Initialize nextpt to best so far\n min_idx = np.argmin(data.y)\n nextpt = data.x[min_idx]\n if self.params.print_delta:\n print(' BSF init for BCD')\n\n return nextpt\n\n def print_str(self):\n \"\"\"Print a description string.\"\"\"\n print('*[INFO] ' + str(self) + ' and ao_list:')\n for idx, ao in enumerate(self.ao_list):\n print(f'*[INFO] {idx}: {ao}')\n\n def __str__(self):\n return f'ProductAcqOptimizer with params = {self.params}'\n"
] | [
[
"numpy.array",
"numpy.mean",
"numpy.linalg.norm",
"numpy.min"
],
[
"sklearn.preprocessing.StandardScaler"
],
[
"numpy.argmin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
K4S4B4/learnable-triangulation-pytorch | [
"94f5121919785bf7c89dd973521a21c01104dbd5"
] | [
"mvn/utils/op.py"
] | [
"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mvn.utils.img import to_numpy, to_torch\nfrom mvn.utils import multiview\n\n\ndef integrate_tensor_2d(heatmaps, softmax=True):\n \"\"\"Applies softmax to heatmaps and integrates them to get their's \"center of masses\"\n\n Args:\n heatmaps torch tensor of shape (batch_size, n_heatmaps, h, w): input heatmaps\n\n Returns:\n coordinates torch tensor of shape (batch_size, n_heatmaps, 2): coordinates of center of masses of all heatmaps\n\n \"\"\"\n batch_size, n_heatmaps, h, w = heatmaps.shape\n\n heatmaps = heatmaps.reshape((batch_size, n_heatmaps, -1))\n if softmax:\n heatmaps = nn.functional.softmax(heatmaps, dim=2)\n else:\n heatmaps = nn.functional.relu(heatmaps)\n\n heatmaps = heatmaps.reshape((batch_size, n_heatmaps, h, w))\n\n mass_x = heatmaps.sum(dim=2)\n mass_y = heatmaps.sum(dim=3)\n\n mass_times_coord_x = mass_x * torch.arange(w).type(torch.float).to(mass_x.device)\n mass_times_coord_y = mass_y * torch.arange(h).type(torch.float).to(mass_y.device)\n\n x = mass_times_coord_x.sum(dim=2, keepdim=True)\n y = mass_times_coord_y.sum(dim=2, keepdim=True)\n\n if not softmax:\n x = x / mass_x.sum(dim=2, keepdim=True)\n y = y / mass_y.sum(dim=2, keepdim=True)\n\n coordinates = torch.cat((x, y), dim=2)\n coordinates = coordinates.reshape((batch_size, n_heatmaps, 2))\n\n return coordinates\n\n\ndef integrate_tensor_3d(volumes, softmax=True):\n batch_size, n_volumes, x_size, y_size, z_size = volumes.shape\n\n volumes = volumes.reshape((batch_size, n_volumes, -1))\n if softmax:\n volumes = nn.functional.softmax(volumes, dim=2)\n else:\n volumes = nn.functional.relu(volumes)\n\n volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))\n\n mass_x = volumes.sum(dim=3).sum(dim=3)\n mass_y = volumes.sum(dim=2).sum(dim=3)\n mass_z = volumes.sum(dim=2).sum(dim=2)\n\n mass_times_coord_x = mass_x * torch.arange(x_size).type(torch.float).to(mass_x.device)\n mass_times_coord_y = mass_y * torch.arange(y_size).type(torch.float).to(mass_y.device)\n mass_times_coord_z = mass_z * torch.arange(z_size).type(torch.float).to(mass_z.device)\n\n x = mass_times_coord_x.sum(dim=2, keepdim=True)\n y = mass_times_coord_y.sum(dim=2, keepdim=True)\n z = mass_times_coord_z.sum(dim=2, keepdim=True)\n\n if not softmax:\n x = x / mass_x.sum(dim=2, keepdim=True)\n y = y / mass_y.sum(dim=2, keepdim=True)\n z = z / mass_z.sum(dim=2, keepdim=True)\n\n coordinates = torch.cat((x, y, z), dim=2)\n coordinates = coordinates.reshape((batch_size, n_volumes, 3))\n\n return coordinates, volumes\n\n\ndef integrate_tensor_3d_with_coordinates(volumes, coord_volumes, softmax=True):\n batch_size, n_volumes, x_size, y_size, z_size = volumes.shape\n\n volumes = volumes.reshape((batch_size, n_volumes, -1))\n if softmax:\n volumes = nn.functional.softmax(volumes, dim=2)\n else:\n volumes = nn.functional.relu(volumes)\n\n volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))\n coordinates = torch.einsum(\"bnxyz, bxyzc -> bnc\", volumes, coord_volumes)\n\n return coordinates #, volumes\n\n\ndef unproject_heatmaps(heatmaps, proj_matricies, coord_volumes, volume_aggregation_method='sum', vol_confidences=None):\n device = heatmaps.device\n batch_size, n_views, n_joints, heatmap_shape = heatmaps.shape[0], heatmaps.shape[1], heatmaps.shape[2], tuple(heatmaps.shape[3:]) # 1,4,32,96x96\n volume_shape = coord_volumes.shape[1:4] #64x64x64\n\n volume_batch = torch.zeros(batch_size, n_joints, *volume_shape, device=device) # 
tensor of shape 1x32x64x64x64\n\n # TODO: speed up this loop\n for batch_i in range(batch_size):\n coord_volume = coord_volumes[batch_i] # Bx64x64x64x3 -> 64x64x64x3\n grid_coord = coord_volume.reshape((-1, 3)) # 262144x3\n\n volume_batch_to_aggregate = torch.zeros(n_views, n_joints, *volume_shape, device=device) # 4x32x64x64x64\n\n for view_i in range(n_views):\n heatmap = heatmaps[batch_i, view_i] # 1x4x32x96x96 -> 32x96x96\n heatmap = heatmap.unsqueeze(0) # 1x32x96x96 (add a dimension at the very front)\n\n grid_coord_proj = multiview.project_3d_points_to_image_plane_without_distortion( # 262144x3\n proj_matricies[batch_i, view_i], grid_coord, convert_back_to_euclidean=False\n )\n\n invalid_mask = grid_coord_proj[:, 2] <= 0.0 # depth must be larger than 0.0 # may happen when the person gets too close to the camera??\n\n grid_coord_proj[grid_coord_proj[:, 2] == 0.0, 2] = 1.0 # not to divide by zero\n grid_coord_proj = multiview.homogeneous_to_euclidean(grid_coord_proj)\n\n # transform to [-1.0, 1.0] range\n grid_coord_proj_transformed = torch.zeros_like(grid_coord_proj) # 262144x2\n grid_coord_proj_transformed[:, 0] = 2 * (grid_coord_proj[:, 0] / heatmap_shape[0] - 0.5) # map (0,0)->(96,96) coordinates to relative coordinates with center (0,0), top-left (-1,-1), bottom-right (1,1)\n grid_coord_proj_transformed[:, 1] = 2 * (grid_coord_proj[:, 1] / heatmap_shape[1] - 0.5)\n grid_coord_proj = grid_coord_proj_transformed\n\n # prepare to F.grid_sample\n grid_coord_proj = grid_coord_proj.unsqueeze(1).unsqueeze(0) # unsqueeze inserts one dimension at the given position: 1x262144x1x2; heatmap is 1x32x96x96\n try:\n current_volume = F.grid_sample(heatmap, grid_coord_proj, align_corners=True) # 1x32x262144x1 = Heatmap(1x32x96x96), grid_coord_proj(1x262144x1x2)\n except TypeError: # old PyTorch\n current_volume = F.grid_sample(heatmap, grid_coord_proj)\n\n # zero out non-valid points\n current_volume = current_volume.view(n_joints, -1) #32x262144\n current_volume[:, invalid_mask] = 0.0\n\n # reshape back to volume\n current_volume = current_volume.view(n_joints, *volume_shape) #32x64x64x64\n\n # collect\n volume_batch_to_aggregate[view_i] = current_volume\n\n # aggregate resulting volume\n if volume_aggregation_method.startswith('conf'):\n volume_batch[batch_i] = (volume_batch_to_aggregate * vol_confidences[batch_i].view(n_views, n_joints, 1, 1, 1)).sum(0)\n elif volume_aggregation_method == 'sum':\n volume_batch[batch_i] = volume_batch_to_aggregate.sum(0)\n elif volume_aggregation_method == 'max':\n volume_batch[batch_i] = volume_batch_to_aggregate.max(0)[0]\n elif volume_aggregation_method == 'softmax':\n volume_batch_to_aggregate_softmin = volume_batch_to_aggregate.clone() # 2x32x64x64x64(n_views, n_joints, *volume_shape)\n volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, -1) # reshape\n volume_batch_to_aggregate_softmin = nn.functional.softmax(volume_batch_to_aggregate_softmin, dim=0)\n volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, n_joints, *volume_shape) #reshape back\n\n volume_batch[batch_i] = (volume_batch_to_aggregate * volume_batch_to_aggregate_softmin).sum(0)\n else:\n raise ValueError(\"Unknown volume_aggregation_method: {}\".format(volume_aggregation_method))\n\n return volume_batch\n\n\ndef gaussian_2d_pdf(coords, means, sigmas, normalize=True):\n normalization = 1.0\n if normalize:\n normalization = (2 * np.pi * sigmas[:, 0] * sigmas[:, 0])\n\n exp = torch.exp(-((coords[:, 0] - means[:, 0]) ** 2 / sigmas[:, 0] ** 2 + (coords[:, 1] - means[:, 1]) ** 2 / sigmas[:, 1] ** 2) / 2)\n return exp / normalization\n\n\ndef render_points_as_2d_gaussians(points, sigmas, 
image_shape, normalize=True):\n device = points.device\n n_points = points.shape[0]\n\n yy, xx = torch.meshgrid(torch.arange(image_shape[0]).to(device), torch.arange(image_shape[1]).to(device))\n grid = torch.stack([xx, yy], dim=-1).type(torch.float32)\n grid = grid.unsqueeze(0).repeat(n_points, 1, 1, 1) # (n_points, h, w, 2)\n grid = grid.reshape((-1, 2))\n\n points = points.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)\n points = points.reshape(-1, 2)\n\n sigmas = sigmas.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)\n sigmas = sigmas.reshape(-1, 2)\n\n images = gaussian_2d_pdf(grid, points, sigmas, normalize=normalize)\n images = images.reshape(n_points, *image_shape)\n\n return images\n"
] | [
[
"torch.nn.functional.softmax",
"torch.zeros",
"torch.cat",
"torch.einsum",
"torch.zeros_like",
"torch.exp",
"torch.nn.functional.relu",
"torch.nn.functional.grid_sample",
"torch.arange",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ctiger34/BASIC-EMOTION-DETECTION | [
"1c2be519c70408159ea6e1093d5f139c99ea6e27"
] | [
"load_and_process.py"
] | [
"import pandas as pd\nimport cv2\nimport numpy as np\n\n\ndataset_path = 'fer2013/fer2013/fer2013.csv'\nimage_size=(48,48)\n\ndef load_fer2013():\n data = pd.read_csv(dataset_path)\n pixels = data['pixels'].tolist()\n width, height = 48, 48\n faces = []\n for pixel_sequence in pixels:\n face = [int(pixel) for pixel in pixel_sequence.split(' ')]\n face = np.asarray(face).reshape(width, height)\n face = cv2.resize(face.astype('uint8'),image_size)\n faces.append(face.astype('float32'))\n faces = np.asarray(faces)\n faces = np.expand_dims(faces, -1)\n emotions = pd.get_dummies(data['emotion']).as_matrix()\n return faces, emotions\n\ndef preprocess_input(x, v2=True):\n x = x.astype('float32')\n x = x / 255.0\n if v2:\n x = x - 0.5\n x = x * 2.0\n return x"
] | [
[
"numpy.asarray",
"pandas.read_csv",
"numpy.expand_dims",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ray-ruisun/FedML | [
"24ff30d636bb70f64e94e9ca205375033597d3dd",
"24ff30d636bb70f64e94e9ca205375033597d3dd",
"24ff30d636bb70f64e94e9ca205375033597d3dd",
"24ff30d636bb70f64e94e9ca205375033597d3dd",
"24ff30d636bb70f64e94e9ca205375033597d3dd",
"24ff30d636bb70f64e94e9ca205375033597d3dd",
"24ff30d636bb70f64e94e9ca205375033597d3dd"
] | [
"app/fedcv/medical_chest_xray_image_clf/data/chexpert/data_loader.py",
"app/fedgraphnn/ego_networks_link_pred/data/utils.py",
"app/fednlp/data/advanced_partition/util/visualization_heatmap_unsort.py",
"app/fedgraphnn/moleculenet_graph_reg/trainer/gcn_trainer_readout_regression.py",
"app/fedcv/object_detection/model/yolo/utils/flask_rest_api/restapi.py",
"app/fednlp/data/raw_data_loader/shakespeare/data_loader.py",
"python/fedml/cross_device/server_mnn_lsa/trainer/my_model_trainer_classification.py"
] | [
"import logging\n\nimport os\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom .dataset import CheXpert\n\n\ndef _get_mean_and_std(dataset: Dataset):\n \"\"\"Compute the mean and std of dataset.\"\"\"\n data_loader = DataLoader(dataset, batch_size=1, shuffle=False)\n mean = torch.zeros(3)\n std = torch.zeros(3)\n for i, (img, _) in enumerate(data_loader):\n if i % 1000 == 0:\n print(i)\n mean += img.mean(dim=(0, 2, 3))\n std += img.std(dim=(0, 2, 3))\n mean /= len(data_loader)\n std /= len(data_loader)\n return mean, std\n\n\nclass Cutout(object):\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1:y2, x1:x2] = 0.0\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\n\ndef _data_transforms_chexpert():\n\n CHEXPERT_MEAN = [0.503, 0.503, 0.503]\n CHEXPERT_STD = [0.291, 0.291, 0.291]\n\n image_size = 256\n train_transform = transforms.Compose(\n [\n # transforms.ToPILImage(),\n transforms.RandomResizedCrop(image_size),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),\n ]\n )\n\n # train_transform.transforms.append(Cutout(16))\n\n test_transform = transforms.Compose(\n [\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),\n ]\n )\n\n return train_transform, test_transform\n\n\n# for centralized training\ndef get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, policy=\"zeros\"):\n return get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs, policy=policy)\n\n\n# for local devices\ndef get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy=\"zeros\"):\n return get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy=policy)\n\n\ndef get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs=None, policy=\"zeros\"):\n dl_obj = CheXpert\n\n transform_train, transform_test = _data_transforms_chexpert()\n\n train_ds = dl_obj(\n datadir,\n dataidxs=dataidxs,\n train=True,\n transform=transform_train,\n download=False,\n policy=policy,\n )\n test_ds = dl_obj(\n datadir,\n dataidxs=None,\n train=False,\n transform=transform_test,\n download=False,\n policy=policy,\n )\n\n train_dl = DataLoader(\n dataset=train_ds,\n batch_size=train_bs,\n shuffle=True,\n drop_last=False,\n pin_memory=True,\n num_workers=4,\n )\n test_dl = DataLoader(\n dataset=test_ds,\n batch_size=test_bs,\n shuffle=False,\n drop_last=False,\n pin_memory=True,\n num_workers=4,\n )\n\n return train_dl, test_dl\n\n\ndef get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None, policy=\"zeros\"):\n dl_obj = CheXpert\n\n transform_train, transform_test = _data_transforms_chexpert()\n\n train_ds = dl_obj(\n datadir,\n dataidxs=dataidxs_train,\n train=True,\n transform=transform_train,\n download=True,\n policy=policy,\n )\n test_ds = dl_obj(\n datadir,\n dataidxs=dataidxs_test,\n train=False,\n transform=transform_test,\n download=True,\n 
policy=policy,\n    )\n\n    train_dl = DataLoader(\n        dataset=train_ds,\n        batch_size=train_bs,\n        shuffle=True,\n        drop_last=False,\n        pin_memory=True,\n        num_workers=4,\n    )\n    test_dl = DataLoader(\n        dataset=test_ds,\n        batch_size=test_bs,\n        shuffle=False,\n        drop_last=False,\n        pin_memory=True,\n        num_workers=4,\n    )\n\n    return train_dl, test_dl\n\n\ndef distributed_centralized_chexpert_loader(dataset, data_dir, world_size, rank, batch_size):\n    \"\"\"\n    Used for generating distributed dataloader for\n    accelerating centralized training\n    \"\"\"\n\n    train_bs = batch_size\n    test_bs = batch_size\n\n    transform_train, transform_test = _data_transforms_chexpert()\n    train_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=True, transform=transform_train)\n    test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test)\n\n    train_sam = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)\n    test_sam = DistributedSampler(test_dataset, num_replicas=world_size, rank=rank)\n\n    train_dl = DataLoader(\n        train_dataset,\n        batch_size=train_bs,\n        sampler=train_sam,\n        pin_memory=True,\n        num_workers=4,\n    )\n    test_dl = DataLoader(\n        test_dataset,\n        batch_size=test_bs,\n        sampler=test_sam,\n        pin_memory=True,\n        num_workers=4,\n    )\n\n    class_num = 1000\n\n    train_data_num = len(train_dataset)\n    test_data_num = len(test_dataset)\n\n    return train_data_num, test_data_num, train_dl, test_dl, None, None, None, class_num\n\n\ndef load_partition_data_chexpert(\n    data_dir,\n    partition_method=\"random\",\n    partition_alpha=None,\n    client_number=100,\n    batch_size=10,\n    policy=\"zeros\",\n):\n    transform_train, transform_test = _data_transforms_chexpert()\n\n    train_dataset = CheXpert(\n        data_dir=data_dir,\n        dataidxs=None,\n        train=True,\n        transform=transform_train,\n        policy=policy,\n    )\n    test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test, policy=policy)\n\n    # get local dataset\n    if partition_method == \"random\":\n        num_train_items = int(len(train_dataset) / client_number)\n        num_test_items = int(len(test_dataset) / client_number)\n        dict_client = {}\n        all_train_idxs = list(range(len(train_dataset)))\n        all_test_idxs = list(range(len(test_dataset)))\n        for client_idx in range(client_number):\n            dict_client[client_idx] = {}\n            dict_client[client_idx][\"train\"] = set(np.random.choice(all_train_idxs, num_train_items, replace=False))\n            dict_client[client_idx][\"test\"] = set(np.random.choice(all_test_idxs, num_test_items, replace=False))\n            all_train_idxs = list(set(all_train_idxs) - dict_client[client_idx][\"train\"])\n            all_test_idxs = list(set(all_test_idxs) - dict_client[client_idx][\"test\"])\n        if len(all_train_idxs) > 0:\n            all_client_idxs = list(range(client_number))\n            np.random.shuffle(all_client_idxs)\n            choiced_client_idxs = all_client_idxs[: len(all_train_idxs)]\n            for idx, client_idx in enumerate(choiced_client_idxs):\n                dict_client[client_idx][\"train\"].add(all_train_idxs[idx])\n        if len(all_test_idxs) > 0:\n            all_client_idxs = list(range(client_number))\n            np.random.shuffle(all_client_idxs)\n            choiced_client_idxs = all_client_idxs[: len(all_test_idxs)]\n            for idx, client_idx in enumerate(choiced_client_idxs):\n                dict_client[client_idx][\"test\"].add(all_test_idxs[idx])\n    else:\n        raise NotImplementedError\n\n    # build dataloader\n    train_dl = []\n    test_dl = []\n    for client_idx in range(client_number):\n        train_data_idxs = list(dict_client[client_idx][\"train\"])\n        test_data_idxs = list(dict_client[client_idx][\"test\"])\n        train_dl_, test_dl_ = 
get_dataloader_test_chexpert(\n datadir=data_dir,\n dataidxs_train=train_data_idxs,\n dataidxs_test=test_data_idxs,\n train_bs=batch_size,\n test_bs=batch_size,\n policy=policy,\n )\n train_dl.append(train_dl_)\n test_dl.append(test_dl_)\n\n logging.info(f\"Client {client_idx} train data num: {len(train_dl_)} test data num: {len(test_dl_)}\")\n\n logging.info(\"Partition data done\")\n # logging.info(\"Partition data for each client: {}\".format(dict_client))\n\n train_data_num = len(train_dataset)\n test_data_num = len(test_dataset)\n train_data_global = train_dataset\n test_data_global = test_dataset\n data_local_num_dict = {\n client_idx: len(dict_client[client_idx][\"train\"]) + len(dict_client[client_idx][\"test\"])\n for client_idx in range(client_number)\n }\n train_data_local_dict = {client_idx: train_dl_ for client_idx, train_dl_ in enumerate(train_dl)}\n test_data_local_dict = {client_idx: test_dl_ for client_idx, test_dl_ in enumerate(test_dl)}\n class_num = train_dataset.num_classes\n\n return (\n train_data_num,\n test_data_num,\n train_data_global,\n test_data_global,\n data_local_num_dict,\n train_data_local_dict,\n test_data_local_dict,\n class_num,\n )\n\n\nif __name__ == \"__main__\":\n data_path = os.path.join(\"D:\\\\\", \"dataset\", \"CheXpert\", \"CheXpert-v1.0-small\")\n data = CheXpert(data_dir=data_path, transform=transforms.ToTensor())\n print(len(data))\n print(data[0][0])\n print(data[0][1])\n\n # mean, std = _get_mean_and_std(data)\n # print(mean, std)\n\n # train_transform, valid_transform = _data_transforms_chexpert()\n # print(train_transform)\n # print(valid_transform)\n\n (\n train_data_num,\n test_data_num,\n train_data_global,\n test_data_global,\n data_local_num_dict,\n train_data_local_dict,\n test_data_local_dict,\n class_num,\n ) = load_partition_data_chexpert(data_dir=data_path, client_number=10, batch_size=10, policy=\"zeros\")\n\n print(train_data_num, test_data_num, class_num)\n",
"import numpy as np\nimport scipy.sparse as sp\nimport torch\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nfrom torch_geometric.utils import to_networkx, degree\nimport torch.nn.functional as F\n\n\ndef convert_to_nodeDegreeFeatures(graphs):\n # print(graph.x)\n graph_infos = []\n maxdegree = 0\n for i, graph in enumerate(graphs):\n g = to_networkx(graph, to_undirected=True)\n gdegree = max(dict(g.degree).values())\n if gdegree > maxdegree:\n maxdegree = gdegree\n graph_infos.append(\n (graph, g.degree, graph.num_nodes)\n ) # (graph, node_degrees, num_nodes)\n\n new_graphs = []\n for i, tuple in enumerate(graph_infos):\n idx, x = tuple[0].edge_index[0], tuple[0].x\n deg = degree(idx, tuple[2], dtype=torch.long)\n deg = F.one_hot(deg, num_classes=maxdegree + 1).to(torch.float)\n\n new_graph = tuple[0].clone()\n new_graph.__setitem__(\"x\", deg)\n new_graphs.append(new_graph)\n\n return new_graphs\n\n\ndef split_data(graphs, train=None, test=None, shuffle=True, seed=None):\n y = torch.cat([graph.y for graph in graphs])\n graphs_tv, graphs_test = train_test_split(\n graphs,\n train_size=train,\n test_size=test,\n stratify=y,\n shuffle=shuffle,\n random_state=seed,\n )\n return graphs_tv, graphs_test\n\n\ndef np_uniform_sample_next(compact_adj, tree, fanout):\n last_level = tree[-1] # [batch, f^depth]\n batch_lengths = compact_adj.degrees[last_level]\n nodes = np.repeat(last_level, fanout, axis=1)\n batch_lengths = np.repeat(batch_lengths, fanout, axis=1)\n batch_next_neighbor_ids = np.random.uniform(\n size=batch_lengths.shape, low=0, high=1 - 1e-9\n )\n # Shape = (len(nodes), neighbors_per_node)\n batch_next_neighbor_ids = np.array(\n batch_next_neighbor_ids * batch_lengths, dtype=last_level.dtype\n )\n shape = batch_next_neighbor_ids.shape\n batch_next_neighbor_ids = np.array(\n compact_adj.compact_adj[nodes.reshape(-1), batch_next_neighbor_ids.reshape(-1)]\n ).reshape(shape)\n\n return batch_next_neighbor_ids\n\n\ndef np_traverse(\n compact_adj, seed_nodes, fanouts=(1,), sample_fn=np_uniform_sample_next\n):\n if not isinstance(seed_nodes, np.ndarray):\n raise ValueError(\"Seed must a numpy array\")\n\n if (\n len(seed_nodes.shape) > 2\n or len(seed_nodes.shape) < 1\n or not str(seed_nodes.dtype).startswith(\"int\")\n ):\n raise ValueError(\"seed_nodes must be 1D or 2D int array\")\n\n if len(seed_nodes.shape) == 1:\n seed_nodes = np.expand_dims(seed_nodes, 1)\n\n # Make walk-tree\n forest_array = [seed_nodes]\n for f in fanouts:\n next_level = sample_fn(compact_adj, forest_array, f)\n assert next_level.shape[1] == forest_array[-1].shape[1] * f\n\n forest_array.append(next_level)\n\n return forest_array\n\n\nclass WalkForestCollator(object):\n def __init__(self, normalize_features=False):\n self.normalize_features = normalize_features\n\n def __call__(self, molecule):\n comp_adj, feature_matrix, label, fanouts = molecule[0]\n node_ids = np.array(list(range(feature_matrix.shape[0])), dtype=np.int32)\n forest = np_traverse(comp_adj, node_ids, fanouts)\n torch_forest = [torch.from_numpy(forest[0]).flatten()]\n label = np.where(np.isnan(label), 0.0, label)\n\n for i in range(len(forest) - 1):\n torch_forest.append(torch.from_numpy(forest[i + 1]).reshape(-1, fanouts[i]))\n\n if self.normalize_features:\n mx = sp.csr_matrix(feature_matrix)\n rowsum = np.array(mx.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.0\n r_mat_inv = sp.diags(r_inv)\n normalized_feature_matrix = r_mat_inv.dot(mx)\n 
normalized_feature_matrix = np.array(normalized_feature_matrix.todense())\n else:\n scaler = StandardScaler()\n scaler.fit(feature_matrix)\n normalized_feature_matrix = scaler.transform(feature_matrix)\n\n return (\n torch_forest,\n torch.as_tensor(normalized_feature_matrix, dtype=torch.float32),\n torch.as_tensor(label, dtype=torch.float32),\n )\n\n\nclass DefaultCollator(object):\n def __init__(self, normalize_features=True, normalize_adj=True):\n self.normalize_features = normalize_features\n self.normalize_adj = normalize_adj\n\n def __call__(self, molecule):\n adj_matrix, feature_matrix, label, _ = molecule[0]\n label = np.where(np.isnan(label), 0.0, label)\n\n if self.normalize_features:\n mx = sp.csr_matrix(feature_matrix)\n rowsum = np.array(mx.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.0\n r_mat_inv = sp.diags(r_inv)\n normalized_feature_matrix = r_mat_inv.dot(mx)\n normalized_feature_matrix = np.array(normalized_feature_matrix.todense())\n else:\n scaler = StandardScaler()\n scaler.fit(feature_matrix)\n normalized_feature_matrix = scaler.transform(feature_matrix)\n\n if self.normalize_adj:\n rowsum = np.array(adj_matrix.sum(1))\n r_inv_sqrt = np.power(rowsum, -0.5).flatten()\n r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.0\n r_mat_inv_sqrt = sp.diags(r_inv_sqrt)\n normalized_adj_matrix = (\n adj_matrix.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt)\n )\n else:\n normalized_adj_matrix = adj_matrix\n\n return (\n torch.as_tensor(\n np.array(normalized_adj_matrix.todense()), dtype=torch.float32\n ),\n torch.as_tensor(normalized_feature_matrix, dtype=torch.float32),\n torch.as_tensor(label, dtype=torch.float32),\n )",
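np_traverse above grows a walk forest level by level: seeds of shape (batch, 1) expand so that level i has f_1 * ... * f_i columns. A quick shape check with a stub sampler; the stub and the module name graph_utils are illustrative assumptions, not part of the file above:

import numpy as np

from graph_utils import np_traverse  # hypothetical module name for the file above

def stub_sampler(compact_adj, tree, fanout):
    # Ignores the adjacency entirely and repeats each node `fanout` times,
    # which is enough to exercise np_traverse's shape bookkeeping.
    return np.repeat(tree[-1], fanout, axis=1)

forest = np_traverse(None, np.arange(4), fanouts=(3, 2), sample_fn=stub_sampler)
print([level.shape for level in forest])  # [(4, 1), (4, 3), (4, 6)]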
"import os\r\nimport h5py\r\nimport argparse\r\nimport pandas as pd\r\nimport json\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.spatial import distance\r\n\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument(\r\n \"--partition_name\", type=str, metavar=\"PN\", help=\"name of the method \"\r\n)\r\nparser.add_argument(\r\n \"--partition_file\",\r\n type=str,\r\n default=\"data/partition_files/wikiner_partition.h5\",\r\n metavar=\"PF\",\r\n help=\"data partition path\",\r\n)\r\nparser.add_argument(\r\n \"--data_file\",\r\n type=str,\r\n default=\"data/data_files/wikiner_data.h5\",\r\n metavar=\"DF\",\r\n help=\"data file path\",\r\n)\r\nparser.add_argument(\"--task_name\", type=str, metavar=\"TN\", help=\"task name\")\r\n\r\nparser.add_argument(\r\n \"--cluster_num\", type=int, metavar=\"KN\", help=\"cluster of partition\"\r\n)\r\n\r\nparser.add_argument(\r\n \"--client_number\",\r\n type=int,\r\n metavar=\"CN\",\r\n help=\"client number of this partition method\",\r\n)\r\n\r\nparser.add_argument(\r\n \"--figure_path\", type=str, metavar=\"TN\", help=\"the place to store generated figures\"\r\n)\r\n\r\nparser.add_argument(\r\n \"--task_type\",\r\n type=str,\r\n default=\"name entity recognition\",\r\n metavar=\"TT\",\r\n help=\"task type\",\r\n)\r\n\r\nargs = parser.parse_args()\r\n\r\ntemp = \"kmeans_\" + str(args.cluster_num)\r\nclient_assignment = []\r\n\r\nif args.task_type == \"text_classification\":\r\n data = h5py.File(args.data_file, \"r\")\r\n total_labels = [data[\"Y\"][i][()] for i in data[\"Y\"].keys()]\r\n attributes = json.loads(data[\"attributes\"][()])\r\n label_vocab = attributes[\"label_vocab\"]\r\n client_assignment = [label_vocab[label] for label in total_labels]\r\n data.close()\r\nelse:\r\n f = h5py.File(args.partition_file, \"r\")\r\n for i in f.keys():\r\n if temp in i:\r\n client_assignment = f[i + \"/client_assignment/\"][()]\r\n break\r\n f.close()\r\npartition_data_path = \"/\" + args.partition_name + \"/partition_data/\"\r\n\r\nclient_numbers = args.client_number\r\nclient_index = list(range(client_numbers))\r\nprint(client_index)\r\nclient_data_distribution = []\r\ncluster_num = len(set(client_assignment))\r\n\r\n\r\nf = h5py.File(args.partition_file, \"r\")\r\n\r\nfor i in client_index:\r\n temp = []\r\n single_client_data = []\r\n probability_array = np.zeros(cluster_num)\r\n temp.extend(f[partition_data_path + str(i) + \"/train\"][()])\r\n temp.extend(f[partition_data_path + str(i) + \"/test\"][()])\r\n single_client_data = np.array([client_assignment[i] for i in temp])\r\n unique, counts = np.unique(single_client_data, return_counts=True)\r\n for key, value in dict(zip(unique, counts)).items():\r\n probability_array[key] = value\r\n client_data_distribution.append(probability_array)\r\nf.close()\r\nheat_map_data = np.zeros((client_numbers, client_numbers))\r\n\r\nfor i in range(client_numbers):\r\n for j in range(client_numbers):\r\n heat_map_data[i][j] = distance.jensenshannon(\r\n client_data_distribution[i], client_data_distribution[j]\r\n )\r\n\"\"\" #reorder index based on the sum of distance in each client\r\nclient_data_distribution_reorder_index = [np.where(np.all(heat_map_data == i,axis = 1))[0][0] for i in sorted(heat_map_data, key=lambda client: sum(client), reverse=True)]\r\n#reorder the matrix based on the reorder index\r\nfor index, value in enumerate(heat_map_data):\r\n heat_map_data[index] = value[client_data_distribution_reorder_index]\r\nheat_map_data = 
heat_map_data[client_data_distribution_reorder_index] \"\"\"\r\n\r\n\r\nclient_sum_order = sorted([sum(i) for i in heat_map_data], reverse=True)\r\n\r\ndata_dir = args.figure_path\r\nfig_name = args.task_name + \"_%s_clients_heatmap_unsort.png\" % args.partition_name\r\nfig_dir = os.path.join(data_dir, fig_name)\r\nfig_dims = (30, 22)\r\nfig, ax = plt.subplots(figsize=fig_dims)\r\nsns.set(font_scale=6)\r\nsns.heatmap(heat_map_data, linewidths=0.05, cmap=\"Blues\", cbar=True, vmin=0, vmax=0.8)\r\nax.tick_params(\r\n labelbottom=False,\r\n labelleft=False,\r\n labeltop=False,\r\n left=False,\r\n bottom=False,\r\n top=False,\r\n)\r\nfig.tight_layout(pad=0.1)\r\nplt.savefig(fig_dir)\r\n\r\nsns.set(font_scale=1)\r\n\r\nplt.figure(figsize=(20, 15))\r\nfig = sns.distplot(client_sum_order)\r\nplt.xlim(0, None)\r\nplt.xlabel(\"distance\")\r\nplt.xticks(fig.get_xticks(), fig.get_xticks() / 100)\r\nfig_name = args.task_name + \"_%s_clients_sum_distplot_unsort.png\" % args.partition_name\r\nfig_dir = os.path.join(data_dir, fig_name)\r\nplt.title(args.task_name + \"_%s_clients_sum_distplot_unsort\" % args.partition_name)\r\nplt.savefig(fig_dir, bbox_inches=\"tight\")\r\n",
"import logging\n\nimport numpy as np\nimport torch\nimport wandb\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n\nfrom fedml.core.alg_frame.client_trainer import ClientTrainer\n\n\n# Trainer for MoleculeNet. The evaluation metric is RMSE, by default\n\n\nclass GcnMoleculeNetTrainer(ClientTrainer):\n def get_model_params(self):\n return self.model.cpu().state_dict()\n\n def set_model_params(self, model_parameters):\n logging.info(\"set_model_params\")\n self.model.load_state_dict(model_parameters)\n\n def train(self, train_data, device, args):\n model = self.model\n\n model.to(device)\n model.train()\n\n test_data = None\n try:\n test_data = self.test_data\n except:\n pass\n\n criterion = torch.nn.MSELoss() if args.dataset != \"qm9\" else torch.nn.MAELoss()\n if args.client_optimizer == \"sgd\":\n optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)\n else:\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n\n min_score = np.Inf if args.metric != \"r2\" else -np.Inf\n best_model_params = {}\n # print('Training on {}'.format(torch.cuda.get_device_name()))\n for epoch in range(args.epochs):\n avg_loss = 0\n count = 0\n for mol_idxs, (adj_matrix, feature_matrix, label, _) in enumerate(\n train_data\n ):\n optimizer.zero_grad()\n\n adj_matrix = adj_matrix.to(\n device=device, dtype=torch.float32, non_blocking=True\n )\n feature_matrix = feature_matrix.to(\n device=device, dtype=torch.float32, non_blocking=True\n )\n label = label.to(device=device, dtype=torch.float32, non_blocking=True)\n\n logits = model(adj_matrix, feature_matrix)\n loss = criterion(logits, label)\n loss.backward()\n optimizer.step()\n\n if test_data is not None:\n test_score, _ = self.test(self.test_data, device, args)\n if args.metric != \"r2\":\n print(\n \"Epoch = {}: Test {} = {}\".format(\n epoch, args.metric.upper(), test_score\n )\n )\n if test_score < min_score:\n min_score = test_score\n best_model_params = {\n k: v.cpu() for k, v in model.state_dict().items()\n }\n print(\n \"Current best {}= {}\".format(args.metric.upper(), min_score)\n )\n else:\n print(\"Epoch = {}: Test R2 = {}\".format(epoch, test_score))\n if test_score > min_score:\n min_score = test_score\n best_model_params = {\n k: v.cpu() for k, v in model.state_dict().items()\n }\n print(\"Current best R2= {}\".format(min_score))\n\n #\n # avg_loss += loss.item()\n # count += 1\n # # logging.info(\"training. 
epoch = %d, mol_idxs = %d, avg_loss = %f\" % (epoch, mol_idxs, avg_loss))\n            #\n            # avg_loss /= count\n        return min_score, best_model_params\n\n    def test(self, test_data, device, args):\n        logging.info(\"----------test--------\")\n        model = self.model\n        model.eval()\n        model.to(device)\n\n        with torch.no_grad():\n            y_pred = []\n            y_true = []\n            for mol_idx, (adj_matrix, feature_matrix, label, _) in enumerate(test_data):\n                adj_matrix = adj_matrix.to(\n                    device=device, dtype=torch.float32, non_blocking=True\n                )\n                feature_matrix = feature_matrix.to(\n                    device=device, dtype=torch.float32, non_blocking=True\n                )\n                label = label.to(device=device, dtype=torch.float32, non_blocking=True)\n                logits = model(adj_matrix, feature_matrix)\n                y_pred.append(logits.cpu().numpy())\n                y_true.append(label.cpu().numpy())\n\n        if args.metric == \"rmse\":\n            score = mean_squared_error(\n                np.array(y_true), np.array(y_pred), squared=False\n            )\n        elif args.metric == \"r2\":\n            score = r2_score(np.array(y_true), np.array(y_pred))\n        else:\n            score = mean_absolute_error(np.array(y_true), np.array(y_pred))\n        return score, model\n\n    def test_on_the_server(\n        self, train_data_local_dict, test_data_local_dict, device, args=None\n    ) -> bool:\n        logging.info(\"----------test_on_the_server--------\")\n        # for client_idx in train_data_local_dict.keys():\n        #     train_data = train_data_local_dict[client_idx]\n        #     train_score = self.test(train_data, device, args)\n        #     logging.info('Client {}, Train ROC-AUC score = {}'.format(client_idx, train_score))\n\n        model_list, score_list = [], []\n        for client_idx in test_data_local_dict.keys():\n            test_data = test_data_local_dict[client_idx]\n            score, model = self.test(test_data, device, args)\n            for idx in range(len(model_list)):\n                self._compare_models(model, model_list[idx])\n            model_list.append(model)\n            score_list.append(score)\n            logging.info(\n                \"Client {}, Test {} = {}\".format(client_idx, args.metric.upper(), score)\n            )\n            if args.enable_wandb:\n                wandb.log(\n                    {\"Client {} Test/{}\".format(client_idx, args.metric.upper()): score}\n                )\n        avg_score = np.mean(np.array(score_list))\n        logging.info(\"Test {} score = {}\".format(args.metric.upper(), avg_score))\n        if args.enable_wandb:\n            wandb.log({\"Test/{}\".format(args.metric.upper()): avg_score})\n\n        return True\n\n    def _compare_models(self, model_1, model_2):\n        models_differ = 0\n        for key_item_1, key_item_2 in zip(\n            model_1.state_dict().items(), model_2.state_dict().items()\n        ):\n            if torch.equal(key_item_1[1], key_item_2[1]):\n                pass\n            else:\n                models_differ += 1\n                if key_item_1[0] == key_item_2[0]:\n                    logging.info(\"Mismatch found at %s\", key_item_1[0])\n                else:\n                    raise Exception\n        if models_differ == 0:\n            logging.info(\"Models match perfectly! :)\")\n",
"# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nRun a Flask REST API exposing a YOLOv5s model\n\"\"\"\n\nimport argparse\nimport io\n\nimport torch\nfrom flask import Flask, request\nfrom PIL import Image\n\napp = Flask(__name__)\n\nDETECTION_URL = \"/v1/object-detection/yolov5s\"\n\n\[email protected](DETECTION_URL, methods=[\"POST\"])\ndef predict():\n if request.method != \"POST\":\n return\n\n if request.files.get(\"image\"):\n # Method 1\n # with request.files[\"image\"] as f:\n # im = Image.open(io.BytesIO(f.read()))\n\n # Method 2\n im_file = request.files[\"image\"]\n im_bytes = im_file.read()\n im = Image.open(io.BytesIO(im_bytes))\n\n results = model(im, size=640) # reduce size=320 for faster inference\n return results.pandas().xyxy[0].to_json(orient=\"records\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Flask API exposing YOLOv5 model\")\n parser.add_argument(\"--port\", default=5000, type=int, help=\"port number\")\n opt = parser.parse_args()\n\n # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210\n torch.hub._validate_not_a_forked_repo = lambda a, b, c: True\n\n model = torch.hub.load(\"ultralytics/yolov5\", \"yolov5s\", force_reload=True) # force_reload to recache\n app.run(host=\"0.0.0.0\", port=opt.port) # debug=True causes Restarting with stat\n",
"import json\nimport os\n\nimport numpy as np\nimport torch\n\nfrom data_preprocessing.shakespeare.language_utils import (\n word_to_indices,\n VOCAB_SIZE,\n letter_to_index,\n)\n\n\ndef read_data(train_data_dir, test_data_dir):\n \"\"\"parses data in given train and test data directories\n\n assumes:\n - the data in the input directories are .json files with\n keys 'users' and 'user_data'\n - the set of train set users is the same as the set of test set users\n\n Return:\n clients: list of client ids\n groups: list of group ids; empty list if none found\n train_data: dictionary of train data\n test_data: dictionary of test data\n \"\"\"\n clients = []\n groups = []\n train_data = {}\n test_data = {}\n\n train_files = os.listdir(train_data_dir)\n train_files = [f for f in train_files if f.endswith(\".json\")]\n for f in train_files:\n file_path = os.path.join(train_data_dir, f)\n with open(file_path, \"r\") as inf:\n cdata = json.load(inf)\n clients.extend(cdata[\"users\"])\n if \"hierarchies\" in cdata:\n groups.extend(cdata[\"hierarchies\"])\n train_data.update(cdata[\"user_data\"])\n\n test_files = os.listdir(test_data_dir)\n test_files = [f for f in test_files if f.endswith(\".json\")]\n for f in test_files:\n file_path = os.path.join(test_data_dir, f)\n with open(file_path, \"r\") as inf:\n cdata = json.load(inf)\n test_data.update(cdata[\"user_data\"])\n\n clients = list(sorted(train_data.keys()))\n\n return clients, groups, train_data, test_data\n\n\ndef process_x(raw_x_batch):\n x_batch = [word_to_indices(word) for word in raw_x_batch]\n return x_batch\n\n\ndef process_y(raw_y_batch):\n y_batch = [letter_to_index(c) for c in raw_y_batch]\n return y_batch\n\n\ndef batch_data(data, batch_size):\n \"\"\"\n data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)\n returns x, y, which are both numpy array of length: batch_size\n \"\"\"\n data_x = data[\"x\"]\n data_y = data[\"y\"]\n\n # randomly shuffle data\n np.random.seed(100)\n rng_state = np.random.get_state()\n np.random.shuffle(data_x)\n np.random.set_state(rng_state)\n np.random.shuffle(data_y)\n\n # loop through mini-batches\n batch_data = list()\n for i in range(0, len(data_x), batch_size):\n batched_x = data_x[i : i + batch_size]\n batched_y = data_y[i : i + batch_size]\n batched_x = torch.from_numpy(np.asarray(process_x(batched_x)))\n batched_y = torch.from_numpy(np.asarray(process_y(batched_y)))\n batch_data.append((batched_x, batched_y))\n return batch_data\n\n\ndef load_partition_data_shakespeare(batch_size):\n train_path = \"./data/text_classification/shakespeare/train\"\n test_path = \"./data/text_classification/shakespeare/test\"\n users, groups, train_data, test_data = read_data(train_path, test_path)\n\n if len(groups) == 0:\n groups = [None for _ in users]\n train_data_num = 0\n test_data_num = 0\n train_data_local_dict = dict()\n test_data_local_dict = dict()\n train_data_local_num_dict = dict()\n train_data_global = list()\n test_data_global = list()\n client_idx = 0\n for u, g in zip(users, groups):\n user_train_data_num = len(train_data[u][\"x\"])\n user_test_data_num = len(test_data[u][\"x\"])\n train_data_num += user_train_data_num\n test_data_num += user_test_data_num\n train_data_local_num_dict[client_idx] = user_train_data_num\n\n # transform to batches\n train_batch = batch_data(train_data[u], batch_size)\n test_batch = batch_data(test_data[u], batch_size)\n\n # index using client index\n train_data_local_dict[client_idx] = train_batch\n test_data_local_dict[client_idx] = test_batch\n 
train_data_global += train_batch\n test_data_global += test_batch\n client_idx += 1\n client_num = client_idx\n output_dim = VOCAB_SIZE\n\n return (\n client_num,\n train_data_num,\n test_data_num,\n train_data_global,\n test_data_global,\n train_data_local_num_dict,\n train_data_local_dict,\n test_data_local_dict,\n output_dim,\n )\n",
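batch_data above shuffles x and y in lockstep by saving and restoring the NumPy RNG state; a minimal check of that trick:

import numpy as np

x = np.arange(5)
y = np.arange(5) * 10

rng_state = np.random.get_state()
np.random.shuffle(x)
np.random.set_state(rng_state)  # replay the exact same permutation
np.random.shuffle(y)

assert (y == x * 10).all()  # pairs (x[i], y[i]) stayed aligned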
"import uuid\n\nimport torch\nfrom torch import nn\n\nfrom ....core.alg_frame.client_trainer import ClientTrainer\nimport logging\n\n\nclass MyModelTrainer(ClientTrainer):\n def get_model_params(self):\n return self.model.cpu().state_dict()\n\n def set_model_params(self, model_parameters):\n self.model.load_state_dict(model_parameters)\n\n # TODO: refactor MNN-related file processing\n def get_model_params_file(self):\n model_path = \"/tmp/\" + str(uuid.uuid4()) + \".ckpt\"\n torch.save(self.model.state_dict(), model_path)\n return model_path\n\n # TODO: refactor MNN-related file processing\n def get_model_params_from_file(self, model_params_file):\n return torch.load(model_params_file)\n\n def train(self, train_data, device, args):\n model = self.model\n\n model.to(device)\n model.train()\n\n # train and update\n criterion = nn.CrossEntropyLoss().to(device)\n if args.client_optimizer == \"sgd\":\n optimizer = torch.optim.SGD(\n filter(lambda p: p.requires_grad, self.model.parameters()),\n lr=args.learning_rate,\n )\n else:\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, self.model.parameters()),\n lr=args.learning_rate,\n weight_decay=args.weight_decay,\n amsgrad=True,\n )\n\n epoch_loss = []\n for epoch in range(args.epochs):\n batch_loss = []\n for batch_idx, (x, labels) in enumerate(train_data):\n x, labels = x.to(device), labels.to(device)\n model.zero_grad()\n log_probs = model(x)\n loss = criterion(log_probs, labels)\n loss.backward()\n\n # Uncommet this following line to avoid nan loss\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)\n\n optimizer.step()\n logging.info(\n \"Update Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\".format(\n epoch,\n (batch_idx + 1) * args.batch_size,\n len(train_data) * args.batch_size,\n 100.0 * (batch_idx + 1) / len(train_data),\n loss.item(),\n )\n )\n batch_loss.append(loss.item())\n epoch_loss.append(sum(batch_loss) / len(batch_loss))\n logging.info(\n \"Client Index = {}\\tEpoch: {}\\tLoss: {:.6f}\".format(\n self.id, epoch, sum(epoch_loss) / len(epoch_loss)\n )\n )\n\n def test(self, test_data, device, args):\n model = self.model\n\n model.to(device)\n model.eval()\n\n metrics = {\"test_correct\": 0, \"test_loss\": 0, \"test_total\": 0}\n\n criterion = nn.CrossEntropyLoss().to(device)\n\n with torch.no_grad():\n for batch_idx, (x, target) in enumerate(test_data):\n x = x.to(device)\n target = target.to(device)\n pred = model(x)\n loss = criterion(pred, target)\n\n _, predicted = torch.max(pred, -1)\n correct = predicted.eq(target).sum()\n\n metrics[\"test_correct\"] += correct.item()\n metrics[\"test_loss\"] += loss.item() * target.size(0)\n metrics[\"test_total\"] += target.size(0)\n return metrics\n\n def test_on_the_server(\n self, train_data_local_dict, test_data_local_dict, device, args=None\n ) -> bool:\n return False\n"
] | [
[
"torch.utils.data.distributed.DistributedSampler",
"torch.zeros",
"numpy.clip",
"numpy.random.choice",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.random.shuffle",
"numpy.ones",
"numpy.random.randint"
],
[
"numpy.expand_dims",
"torch.cat",
"numpy.isnan",
"numpy.power",
"scipy.sparse.diags",
"sklearn.model_selection.train_test_split",
"scipy.sparse.csr_matrix",
"torch.from_numpy",
"sklearn.preprocessing.StandardScaler",
"torch.nn.functional.one_hot",
"numpy.random.uniform",
"numpy.repeat",
"numpy.array",
"numpy.isinf",
"torch.as_tensor"
],
[
"matplotlib.pyplot.title",
"numpy.unique",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"scipy.spatial.distance.jensenshannon",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"torch.equal",
"torch.no_grad",
"torch.nn.MAELoss",
"numpy.array",
"torch.nn.MSELoss"
],
[
"torch.hub.load"
],
[
"numpy.random.get_state",
"numpy.random.shuffle",
"numpy.random.set_state",
"numpy.random.seed"
],
[
"torch.nn.CrossEntropyLoss",
"torch.no_grad",
"torch.max",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
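The "random" branch of load_partition_data_chexpert above draws equally sized index sets per client and then hands out any remainder one index per randomly chosen client; a standalone sketch of that scheme over plain index lists (the helper name random_partition is illustrative, not part of the file):

import numpy as np

def random_partition(n_items, client_number, seed=0):
    rng = np.random.default_rng(seed)
    num_items = n_items // client_number
    remaining = list(range(n_items))
    parts = {}
    for client_idx in range(client_number):
        # each client draws num_items indices without replacement
        chosen = rng.choice(remaining, num_items, replace=False)
        parts[client_idx] = {int(i) for i in chosen}
        remaining = list(set(remaining) - parts[client_idx])
    # distribute leftovers, one index per randomly chosen client
    for idx, client_idx in enumerate(rng.permutation(client_number)[: len(remaining)]):
        parts[int(client_idx)].add(remaining[idx])
    return parts

parts = random_partition(103, client_number=10)
assert sum(len(p) for p in parts.values()) == 103  # every index assigned exactly once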
openmcworkshop/paramak | [
"c41dc4c2e68183869556544ee7a72deb1d16a8dc"
] | [
"paramak/reactor.py"
] | [
"\nimport json\nfrom collections import Iterable\nfrom pathlib import Path\n\nimport cadquery as cq\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nfrom cadquery import exporters\n\nimport paramak\nfrom paramak.neutronics_utils import (add_stl_to_moab_core,\n define_moab_core_and_tags)\nfrom paramak.utils import get_hash\n\n\nclass Reactor:\n \"\"\"The Reactor object allows shapes and components to be added and then\n collective operations to be performed on them. Combining all the shapes is\n required for creating images of the whole reactor and creating a Graveyard\n (bounding box) that is needed for neutronics simulations.\n\n Args:\n shapes_and_components (list): list of paramak.Shape\n \"\"\"\n\n def __init__(self, shapes_and_components):\n\n self.material_tags = []\n self.stp_filenames = []\n self.stl_filenames = []\n self.tet_meshes = []\n self.graveyard = None\n self.solid = None\n\n self.shapes_and_components = shapes_and_components\n self.reactor_hash_value = None\n\n self.graveyard_offset = None # set by the make_graveyard method\n\n @property\n def stp_filenames(self):\n values = []\n for shape_or_component in self.shapes_and_components:\n values.append(shape_or_component.stp_filename)\n return values\n\n @stp_filenames.setter\n def stp_filenames(self, value):\n self._stp_filenames = value\n\n @property\n def stl_filenames(self):\n values = []\n for shape_or_component in self.shapes_and_components:\n values.append(shape_or_component.stl_filename)\n return values\n\n @stl_filenames.setter\n def stl_filenames(self, value):\n self._stl_filenames = value\n\n @property\n def largest_dimension(self):\n \"\"\"Calculates a bounding box for the Reactor and returns the largest\n absolute value of the largest dimension of the bounding box\"\"\"\n largest_dimension = 0\n for component in self.shapes_and_components:\n largest_dimension = max(\n largest_dimension,\n component.largest_dimension)\n self._largest_dimension = largest_dimension\n return largest_dimension\n\n @largest_dimension.setter\n def largest_dimension(self, value):\n self._largest_dimension = value\n\n @property\n def material_tags(self):\n \"\"\"Returns a set of all the materials_tags used in the Reactor\n (excluding the plasma)\"\"\"\n values = []\n for shape_or_component in self.shapes_and_components:\n if isinstance(\n shape_or_component,\n (paramak.Plasma,\n paramak.PlasmaFromPoints,\n paramak.PlasmaBoundaries)) is False:\n values.append(shape_or_component.material_tag)\n return values\n\n @material_tags.setter\n def material_tags(self, value):\n self._material_tags = value\n\n @property\n def tet_meshes(self):\n values = []\n for shape_or_componet in self.shapes_and_components:\n values.append(shape_or_componet.tet_mesh)\n return values\n\n @tet_meshes.setter\n def tet_meshes(self, value):\n self._tet_meshes = value\n\n @property\n def shapes_and_components(self):\n \"\"\"Adds a list of parametric shape(s) and or parametric component(s)\n to the Reactor object. This allows collective operations to be\n performed on all the shapes in the reactor. 
When adding a shape or\n        component the stp_filename of the shape or component should be unique\"\"\"\n        if hasattr(self, \"create_solids\"):\n            ignored_keys = [\"reactor_hash_value\"]\n            if get_hash(self, ignored_keys) != self.reactor_hash_value:\n                self.create_solids()\n                self.reactor_hash_value = get_hash(self, ignored_keys)\n        return self._shapes_and_components\n\n    @shapes_and_components.setter\n    def shapes_and_components(self, value):\n        if not isinstance(value, Iterable):\n            raise ValueError(\"shapes_and_components must be a list\")\n        self._shapes_and_components = value\n\n    @property\n    def graveyard_offset(self):\n        return self._graveyard_offset\n\n    @graveyard_offset.setter\n    def graveyard_offset(self, value):\n        if value is None:\n            self._graveyard_offset = None\n        elif not isinstance(value, (float, int)):\n            raise ValueError(\"graveyard_offset must be a number\")\n        elif value < 0:\n            raise ValueError(\"graveyard_offset must be positive\")\n        self._graveyard_offset = value\n\n    @property\n    def solid(self):\n        \"\"\"This combines all the parametric shapes and components in the\n        reactor object and rotates the viewing angle so that .solid displays\n        correctly in a Jupyter notebook.\n        \"\"\"\n\n        list_of_cq_vals = []\n\n        for shape_or_compound in self.shapes_and_components:\n            if isinstance(\n                    shape_or_compound.solid,\n                    cq.occ_impl.shapes.Compound):\n                for solid in shape_or_compound.solid.Solids():\n                    list_of_cq_vals.append(solid)\n            else:\n                list_of_cq_vals.append(shape_or_compound.solid.val())\n\n        compound = cq.Compound.makeCompound(list_of_cq_vals)\n\n        compound = compound.rotate(\n            startVector=(0, 1, 0), endVector=(0, 0, 1), angleDegrees=180\n        )\n        return compound\n\n    @solid.setter\n    def solid(self, value):\n        self._solid = value\n\n    def neutronics_description(self, include_plasma=False,\n                               include_graveyard=True\n                               ):\n        \"\"\"A description of the reactor containing material tags, stp filenames,\n        and tet mesh instructions. This is used for neutronics simulations which\n        require linkage between volumes, materials and identification of which\n        volumes to tet mesh. The plasma geometry is not included by default as\n        it is typically not included in neutronics simulations. The reason for\n        this is that the low number density results in minimal interaction with\n        neutrons. 
However, it can be added if the include_plasma argument is set\n        to True.\n\n        Returns:\n            dictionary: a dictionary of materials and filenames for the reactor\n        \"\"\"\n\n        neutronics_description = []\n\n        for entry in self.shapes_and_components:\n\n            if include_plasma is False and isinstance(\n                    entry,\n                    (paramak.Plasma,\n                     paramak.PlasmaFromPoints,\n                     paramak.PlasmaBoundaries)) is True:\n                continue\n\n            if entry.stp_filename is None:\n                raise ValueError(\n                    \"Set Shape.stp_filename for all the \\\n                    Reactor entries before using this method\"\n                )\n\n            if entry.material_tag is None:\n                raise ValueError(\n                    \"set Shape.material_tag for all the \\\n                    Reactor entries before using this method\"\n                )\n\n            neutronics_description.append(entry.neutronics_description())\n\n        # This adds the neutronics description for the graveyard which is unique\n        # as it is automatically calculated instead of being added by the user.\n        # Also the graveyard must have 'Graveyard' as the material name\n        if include_graveyard is True:\n            self.make_graveyard()\n            neutronics_description.append(\n                self.graveyard.neutronics_description())\n\n        return neutronics_description\n\n    def export_neutronics_description(\n            self,\n            filename=\"manifest.json\",\n            include_plasma=False,\n            include_graveyard=True):\n        \"\"\"\n        Saves Reactor.neutronics_description to a json file. The resulting json\n        file contains a list of dictionaries. Each dictionary entry comprises\n        a material and a filename and optionally a tet_mesh instruction. The\n        json file can then be used with the neutronics workflows to create a\n        neutronics model. Creating the neutronics model requires linkage\n        between volumes, materials and identification of which volumes to\n        tet_mesh. If the filename does not end with .json then .json will be\n        added. The plasma geometry is not included by default as it is\n        typically not included in neutronics simulations. The reason for this\n        is that the low number density results in minimal interactions with\n        neutrons. However, the plasma can be added if the include_plasma\n        argument is set to True.\n\n        Args:\n            filename (str, optional): the filename used to save the neutronics\n                description\n            include_plasma (Boolean, optional): should the plasma be included.\n                Defaults to False as the plasma volume and material have very\n                little impact on the neutronics results due to the low density.\n                Including the plasma does however slow down the simulation.\n            include_graveyard (Boolean, optional): should the graveyard be\n                included. Defaults to True as this is needed for DAGMC models.\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".json\":\n            path_filename = path_filename.with_suffix(\".json\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        with open(path_filename, \"w\") as outfile:\n            json.dump(\n                self.neutronics_description(\n                    include_plasma=include_plasma,\n                    include_graveyard=include_graveyard,\n                ),\n                outfile,\n                indent=4,\n            )\n\n        print(\"saved geometry description to \", path_filename)\n\n        return str(path_filename)\n\n    def export_stp(self, output_folder=\"\", graveyard_offset=100,\n                   mode='solid'):\n        \"\"\"Writes stp files (CAD geometry) for each Shape object in the reactor\n        and the graveyard.\n\n        Args:\n            output_folder (str): the folder for saving the stp files to\n            graveyard_offset (float, optional): the offset between the largest\n                edge of the geometry and inner bounding shell created. 
Defaults\n                to 100.\n            mode (str, optional): the object to export can be either\n                'solid' which exports 3D solid shapes or the 'wire' which\n                exports the wire edges of the shape. Defaults to 'solid'.\n        Returns:\n            list: a list of stp filenames created\n        \"\"\"\n\n        if len(self.stp_filenames) != len(set(self.stp_filenames)):\n            raise ValueError(\n                \"The Reactor already contains a shape or component \\\n                with this stp_filename\",\n                self.stp_filenames,\n            )\n\n        filenames = []\n        for entry in self.shapes_and_components:\n            if entry.stp_filename is None:\n                raise ValueError(\n                    \"set .stp_filename property for \\\n                    Shapes before using the export_stp method\"\n                )\n            filenames.append(\n                str(Path(output_folder) / Path(entry.stp_filename)))\n            entry.export_stp(\n                filename=Path(output_folder) / Path(entry.stp_filename),\n                mode=mode\n            )\n\n        # creates a graveyard (bounding shell volume) which is needed for\n        # neutronics simulations\n        self.make_graveyard(graveyard_offset=graveyard_offset)\n        filenames.append(\n            str(Path(output_folder) / Path(self.graveyard.stp_filename)))\n        self.graveyard.export_stp(\n            Path(output_folder) / Path(self.graveyard.stp_filename)\n        )\n\n        return filenames\n\n    def export_stl(self, output_folder=\"\", tolerance=0.001):\n        \"\"\"Writes stl files (CAD geometry) for each Shape object in the reactor\n\n        Args:\n            output_folder (str): the folder for saving the stl files to\n            tolerance (float): the precision of the faceting\n\n        Returns:\n            list: a list of stl filenames created\n        \"\"\"\n\n        if len(self.stl_filenames) != len(set(self.stl_filenames)):\n            raise ValueError(\n                \"The Reactor already contains a shape or component \\\n                with this stl_filename\",\n                self.stl_filenames,\n            )\n\n        filenames = []\n        for entry in self.shapes_and_components:\n            print(\"entry.stl_filename\", entry.stl_filename)\n            if entry.stl_filename is None:\n                raise ValueError(\n                    \"set .stl_filename property for \\\n                    Shapes before using the export_stl method\"\n                )\n\n            filenames.append(\n                str(Path(output_folder) / Path(entry.stl_filename)))\n            entry.export_stl(\n                Path(output_folder) / Path(entry.stl_filename),\n                tolerance)\n\n        # creates a graveyard (bounding shell volume) which is needed for\n        # neutronics simulations\n        self.make_graveyard()\n        filenames.append(\n            str(Path(output_folder) / Path(self.graveyard.stl_filename)))\n        self.graveyard.export_stl(\n            Path(output_folder) / Path(self.graveyard.stl_filename)\n        )\n\n        print(\"exported stl files \", filenames)\n\n        return filenames\n\n    def export_h5m(\n            self,\n            filename='dagmc.h5m',\n            skip_graveyard=False,\n            tolerance=0.001,\n            graveyard_offset=100):\n        \"\"\"Converts stl files into DAGMC compatible h5m file using PyMOAB. The\n        DAGMC file produced has not been imprinted and merged unlike the other\n        supported method which uses Trelis to produce an imprinted and merged\n        DAGMC geometry. If the provided filename doesn't end with .h5m it will\n        be added\n\n        Args:\n            filename (str, optional): filename of h5m outputfile\n                Defaults to \"dagmc.h5m\".\n            skip_graveyard (boolean, optional): if True the graveyard volume is\n                not included in the h5m file. Defaults to False.\n            tolerance (float, optional): the precision of the faceting\n                Defaults to 0.001.\n            graveyard_offset (float, optional): the offset between the largest\n                edge of the geometry and inner bounding shell created. 
Defaults\n                to 100.\n        Returns:\n            filename: output h5m filename\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".h5m\":\n            path_filename = path_filename.with_suffix(\".h5m\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        moab_core, moab_tags = define_moab_core_and_tags()\n\n        surface_id = 1\n        volume_id = 1\n\n        for item in self.shapes_and_components:\n\n            item.export_stl(item.stl_filename, tolerance=tolerance)\n            moab_core = add_stl_to_moab_core(\n                moab_core,\n                surface_id,\n                volume_id,\n                item.material_tag,\n                moab_tags,\n                item.stl_filename)\n            volume_id += 1\n            surface_id += 1\n\n        if skip_graveyard is False:\n            self.make_graveyard(graveyard_offset=graveyard_offset)\n            self.graveyard.export_stl(self.graveyard.stl_filename)\n            volume_id = 2\n            surface_id = 2\n            moab_core = add_stl_to_moab_core(\n                moab_core,\n                surface_id,\n                volume_id,\n                self.graveyard.material_tag,\n                moab_tags,\n                self.graveyard.stl_filename\n            )\n\n        all_sets = moab_core.get_entities_by_handle(0)\n\n        file_set = moab_core.create_meshset()\n\n        moab_core.add_entities(file_set, all_sets)\n\n        moab_core.write_file(str(path_filename))\n\n        return str(path_filename)\n\n    def export_physical_groups(self, output_folder=\"\"):\n        \"\"\"Exports several JSON files containing a look up table which is\n        useful for identifying faces and volumes. The output file names are\n        generated from .stp_filename properties.\n\n        Args:\n            output_folder (str, optional): directory of output files.\n                Defaults to \"\".\n\n        Raises:\n            ValueError: if one .stp_filename property is set to None\n\n        Returns:\n            list: list of output file names\n        \"\"\"\n        filenames = []\n        for entry in self.shapes_and_components:\n            if entry.stp_filename is None:\n                raise ValueError(\n                    \"set .stp_filename property for \\\n                    Shapes before using the export_physical_groups method\"\n                )\n            filenames.append(\n                str(Path(output_folder) / Path(entry.stp_filename)))\n            entry.export_physical_groups(\n                Path(output_folder) / Path(entry.stp_filename))\n        return filenames\n\n    def export_svg(self, filename):\n        \"\"\"Exports an svg file for the Reactor.solid. If the filename provided\n        doesn't end with .svg it will be added.\n\n        Args:\n            filename (str): the filename of the svg file to be exported\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".svg\":\n            path_filename = path_filename.with_suffix(\".svg\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        with open(path_filename, \"w\") as out_file:\n            exporters.exportShape(self.solid, \"SVG\", out_file)\n        print(\"Saved file as \", path_filename)\n\n    def export_graveyard(\n            self,\n            graveyard_offset=100,\n            filename=\"Graveyard.stp\"):\n        \"\"\"Writes an stp file (CAD geometry) for the reactor graveyard. This\n        is needed for DAGMC simulations. This method also calls\n        Reactor.make_graveyard with the offset.\n\n        Args:\n            filename (str): the filename for saving the stp file\n            graveyard_offset (float): the offset between the largest edge of\n                the geometry and inner bounding shell created. Defaults to\n                Reactor.graveyard_offset\n\n        Returns:\n            str: the stp filename created\n        \"\"\"\n\n        self.make_graveyard(graveyard_offset=graveyard_offset)\n        self.graveyard.export_stp(Path(filename))\n\n        return filename\n\n    def make_graveyard(self, graveyard_offset=100):\n        \"\"\"Creates a graveyard volume (bounding box) that encapsulates all\n        volumes. 
This is required by DAGMC when performing neutronics\n        simulations.\n\n        Args:\n            graveyard_offset (float): the offset between the largest edge of\n                the geometry and inner bounding shell created. Defaults to\n                Reactor.graveyard_offset\n\n        Returns:\n            CadQuery solid: a shell volume that bounds the geometry, referred\n                to as a graveyard in DAGMC\n        \"\"\"\n\n        self.graveyard_offset = graveyard_offset\n\n        for component in self.shapes_and_components:\n            if component.solid is None:\n                component.create_solid()\n\n        graveyard_shape = paramak.HollowCube(\n            length=self.largest_dimension * 2 + graveyard_offset * 2,\n            name=\"Graveyard\",\n            material_tag=\"Graveyard\",\n            stp_filename=\"Graveyard.stp\",\n            stl_filename=\"Graveyard.stl\",\n        )\n\n        self.graveyard = graveyard_shape\n\n        return graveyard_shape\n\n    def export_2d_image(\n            self,\n            filename=\"2d_slice.png\",\n            xmin=0.0,\n            xmax=900.0,\n            ymin=-600.0,\n            ymax=600.0):\n        \"\"\"Creates a 2D slice image (png) of the reactor.\n\n        Args:\n            filename (str): output filename of the image created\n\n        Returns:\n            str: png filename created\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".png\":\n            path_filename = path_filename.with_suffix(\".png\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        fig, ax = plt.subplots()\n\n        # creates individual patches for each Shape which are combined together\n        for entry in self.shapes_and_components:\n            patch = entry._create_patch()\n            ax.add_collection(patch)\n\n        ax.axis(\"equal\")\n        ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))\n        ax.set_aspect(\"equal\", \"box\")\n\n        plt.savefig(path_filename, dpi=100)\n        plt.close()\n\n        print(\"\\n saved 2d image to \", str(path_filename))\n\n        return str(path_filename)\n\n    def export_html(self, filename=\"reactor.html\"):\n        \"\"\"Creates a html graph representation of the points for the Shape\n        objects that make up the reactor. Note: if the filename provided doesn't\n        end with .html then it will be appended.\n\n        Args:\n            filename (str): the filename to save the html graph\n\n        Returns:\n            plotly figure: figure object\n        \"\"\"\n\n        path_filename = Path(filename)\n\n        if path_filename.suffix != \".html\":\n            path_filename = path_filename.with_suffix(\".html\")\n\n        path_filename.parents[0].mkdir(parents=True, exist_ok=True)\n\n        fig = go.Figure()\n        fig.update_layout(\n            {\"title\": \"coordinates of components\", \"hovermode\": \"closest\"}\n        )\n\n        # accesses the Shape traces for each Shape and adds them to the figure\n        for entry in self.shapes_and_components:\n            fig.add_trace(entry._trace())\n\n        fig.write_html(str(path_filename))\n        print(\"Exported html graph to \", str(path_filename))\n\n        return fig\n"
] | [
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
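A minimal usage sketch for the Reactor class above; the shape construction is an assumption about the wider paramak API of this snapshot and is illustrative only:

import paramak

# hypothetical shape; points/filenames/material_tag are placeholder values
blanket = paramak.RotateStraightShape(
    points=[(400, -300), (400, 300), (600, 300), (600, -300)],
    stp_filename="blanket.stp",
    stl_filename="blanket.stl",
    material_tag="blanket_mat",
)

my_reactor = paramak.Reactor(shapes_and_components=[blanket])

# per-shape stp files plus the automatically generated Graveyard.stp
stp_files = my_reactor.export_stp(output_folder="cad", graveyard_offset=100)

# material/filename manifest consumed by the neutronics workflow
my_reactor.export_neutronics_description(filename="cad/manifest.json")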
jaycosaur/spynet | [
"535841bcea761463d27f7f3eb745ffe186d9f763"
] | [
"streaming_helpers.py"
] | [
"import queue\nimport time\nimport numpy as np\n\n\nclass CameraInformation:\n def __init__(self, cam_id: str):\n self._frame_queue: queue.Queue = queue.Queue(maxsize=1)\n self._frame_shape = None\n self._last_frame_time = None\n self.is_online = True\n self.node_id = cam_id\n\n def write_frame(self, frame):\n try:\n self._frame_queue.get_nowait()\n except queue.Empty:\n pass\n self._frame_shape = frame.shape\n self._last_frame_time = time.time()\n self._frame_queue.put_nowait(frame)\n\n def read_frame(self,):\n try:\n frame = self._frame_queue.get(timeout=2)\n if not self.is_online:\n self.is_online = True\n return frame\n except queue.Empty:\n if self.is_online:\n self.is_online = False\n return np.zeros(self._frame_shape)\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
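CameraInformation above keeps only the newest frame (its internal queue has maxsize=1) and flags the camera offline after the 2 s read timeout; a small sketch with a simulated producer thread, assuming the file is importable as streaming_helpers:

import threading
import time

import numpy as np

from streaming_helpers import CameraInformation

cam = CameraInformation("cam-0")

def producer():
    for _ in range(20):  # stand-in for a ~10 fps camera
        cam.write_frame(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
        time.sleep(0.1)

threading.Thread(target=producer, daemon=True).start()

frame = cam.read_frame()  # blocks up to 2 s, then falls back to zeros
print(frame.shape, cam.is_online)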
liiliiliil/ride-hailing-platform-with-simulator | [
"c9eae7f718c9e10c7ba4955e5093d4fb21d16d25",
"c9eae7f718c9e10c7ba4955e5093d4fb21d16d25",
"c9eae7f718c9e10c7ba4955e5093d4fb21d16d25"
] | [
"data_processing/draw_value_map.py",
"algorithm/KM.py",
"simulator/utilities.py"
] | [
"import os\nimport time\nimport pickle\n\nimport math\nimport numpy as np\nimport linecache\nimport matplotlib.pyplot as plt\n# from matplotlib.pyplot import MultipleLocator\nimport grid\n\ndata_path = 'E:/dataset/didi/processed'\nsave_path = 'E:/dataset/didi/processed/order_20161101_sampled_value_map_fig'\ndata_file_name = 'processed_data' # '.pkl' will be added for binary file\nvalue_map_file_name = 'value_map' # '.pkl' will be added for binary file\n\nn_time_unit = 144\nsize_hexagon_to_edge = 0.0048\nhexagon_size_factor_for_plot = 1\nrange_map_longitude = [103.96, 104.18]\nrange_map_latitude = [30.59, 30.77]\n\nsize_hexagon = size_hexagon_to_edge * 2 / math.sqrt(3) # length to the point\n\nif not os.path.exists(save_path):\n os.mkdir(save_path)\n\nwith open(os.path.join(data_path, data_file_name+'.pkl'), 'rb') as f:\n data = pickle.load(f)\nwith open(os.path.join(data_path, value_map_file_name+'.pkl'), 'rb') as f:\n value_map = pickle.load(f)\n\n\n\n# make hexagon\ngrid = grid.Hexagon(size_to_edge=size_hexagon_to_edge*hexagon_size_factor_for_plot)\ngrid_interval_lo = size_hexagon * 1.5\ngrid_interval_la = size_hexagon_to_edge * 2\n\ngrid_centers = []\nfor la in np.arange(range_map_latitude[1]-size_hexagon, range_map_latitude[0]-0.00001, -grid_interval_la):\n row = []\n count = 0\n for lo in np.arange(range_map_longitude[0], range_map_longitude[1]+0.00001, grid_interval_lo):\n if count % 2 == 0:\n row.append([lo, la])\n else:\n row.append([lo, la+size_hexagon_to_edge])\n count += 1\n grid_centers.append(row)\n\ngrid_centers_mat = np.array(grid_centers)\nshape_grid_centers_mat = grid_centers_mat.shape\nn_grids = shape_grid_centers_mat[0]*shape_grid_centers_mat[1]\n\ngrid_index_mat = np.arange(n_grids).reshape(shape_grid_centers_mat[:2])\n\nprint('shape of grids is', shape_grid_centers_mat)\nprint('number of grids is', n_grids)\n\ngrid_centers_flat_T = grid_centers_mat.reshape(n_grids, 2).T\n\n\nmax_value = np.max(value_map)\nmin_value = np.min(value_map)\nprint('maximum value in value_map is', max_value)\nprint('minimum value in value_map is', min_value)\n# value_map = (value_map - min_value) / max_value\n# max_value = np.max(value_map)\n# min_value = np.min(value_map)\n# print('maximum value in value_map after normalization is', max_value)\n# print('minimum value in value_map after normalization is', min_value)\n \n\n\nfor t in range(n_time_unit):\n fig = plt.figure()\n plt.title('value map of time unit %d' % t)\n plt.scatter(grid_centers_flat_T[0], grid_centers_flat_T[1], c=value_map[t], marker='H', s=100, alpha=0.5)\n plt.colorbar()\n fig.savefig(os.path.join(save_path, '%d.jpg'%t))\n \n\n",
"import pickle\nimport numpy as np\n\nINF = np.inf\nNO_PATH = - np.inf\nNOT_MATCH = -1\n\nNEAR_ZERO = 1e-12\n\nclass KM:\n\n def __init__(self, graph):\n\n # weight of each edge\n self.graph = np.array(graph, dtype=float)\n self.min_value = np.min(self.graph)\n self.graph -= self.min_value\n # self.scale = scale\n\n # self.graph = (self.graph*self.scale).astype(int)\n \n \n self.has_transposed = False\n if self.graph.shape[0] > self.graph.shape[1]:\n self.graph = self.graph.T\n self.has_transposed = True\n \n self.n_x = self.graph.shape[0]\n self.n_y = self.graph.shape[1]\n\n # weight of each vertex\n self.w_x = np.zeros(self.n_x, dtype=int)\n self.w_y = np.zeros(self.n_y, dtype=int)\n self.init_w_of_v()\n\n # flag of wether vertex has been added in to path\n self.visited_x = np.zeros(self.n_x, dtype=bool)\n self.visited_y = np.zeros(self.n_y, dtype=bool)\n\n # match_x[i] is j means that vertex of index i in set X matches vertex of index j in set Y\n self.match_x = np.ones(self.n_x, dtype=int) * NOT_MATCH\n self.match_y = np.ones(self.n_y, dtype=int) * NOT_MATCH\n\n self.slack = np.ones(self.n_y) * INF\n \n def init_w_of_v(self):\n\n self.w_x = np.max(self.graph, axis=1)\n self.w_y = np.zeros(self.n_y)\n \n def init_path(self):\n # flag of wether vertex has been added in to path\n self.visited_x = np.zeros(self.n_x, dtype=bool)\n self.visited_y = np.zeros(self.n_y, dtype=bool)\n \n def find_path(self, u):\n \"\"\"\n u: index of the beginning vertex (must in set X) in this path\n \"\"\"\n self.visited_x[u] = True\n for v in range(self.n_y):\n if not self.visited_y[v] and self.graph[u][v] != np.inf:\n delta = self.w_x[u] + self.w_y[v] - self.graph[u][v]\n\n if delta < NEAR_ZERO: # add v into path\n self.visited_y[v] = True\n\n # no conflict in v or path can be found\n if self.match_y[v] == NOT_MATCH or self.find_path(self.match_y[v]):\n self.match_x[u] = v\n self.match_y[v] = u\n\n return True\n \n elif delta > 0: # delta is greater or equal to 0\n self.slack[v] = min(self.slack[v], delta)\n \n return False\n \n def match(self):\n \n for u in range(self.n_x):\n self.slack = np.ones(self.n_y) * INF\n self.init_path()\n while not self.find_path(u):\n min_d = np.min(self.slack[np.logical_not(self.visited_y)])\n # print(u, min_d)\n\n self.w_x[self.visited_x] -= min_d\n self.w_y[self.visited_y] += min_d\n\n # because in these vertexes of set Y, weights of corresponding vertexes in set X\n # have been subtracted by min_d while weights of themselves and weights of corresponding\n # path have not been changed\n self.slack[np.logical_not(self.visited_y)] -= min_d\n\n self.init_path()\n \n return (np.sum(self.graph[np.arange(self.n_x), self.match_x]) + self.min_value*self.n_x)\n \n def get_match_result(self):\n if self.has_transposed:\n return self.match_y, self.match_x\n else:\n return self.match_x, self.match_y\n \n def set_graph(self, graph):\n self.__init__(graph)\n \n\n\ndef test():\n # graph = [[3, NO_PATH, 4],\n # [2, 1, 3],\n # [NO_PATH, NO_PATH, 5]]\n \n # # Not sure about correct result\n # graph = [[3, 4, 6, 4, 9],\n # [6, 4, 5, 3, 8],\n # [7, 5, 3, 4, 2],\n # [6, 3, 2, 2, 5],\n # [8, 4, 5, 4, 7]]\n\n # graph = [[1, 100],\n # [NO_PATH, 1]]\n\n graph_1 = [[1, 100],\n [0, 1]]\n \n graph_2 = [[NO_PATH, 2, NO_PATH, NO_PATH,3],\n [7, NO_PATH, 23, NO_PATH, NO_PATH],\n [17, 24, NO_PATH, NO_PATH, NO_PATH],\n [NO_PATH, 6, 13, 20, NO_PATH]]\n \n km = KM(graph_2)\n res = km.match()\n match_r, match_c = km.get_match_result()\n\n print('match_r is', match_r)\n print('match_c is', match_c)\n 
print('maximum weight is', res)\n\n km.set_graph(graph_1)\n res = km.match()\n match_r, match_c = km.get_match_result()\n\n print('match_r is', match_r)\n print('match_c is', match_c)\n print('maximum weight is', res)\n\n\ndef test_with_given_graph(graph):\n km = KM(graph)\n res = km.match()\n match_r, match_c = km.get_match_result()\n\n print('match_r is', match_r)\n print('match_c is', match_c)\n print('maximum weight is', res)\n\n if len(match_c) >= len(match_r):\n print('Two match result is equal?', np.array([match_c[j] == i for i, j in enumerate(match_r)]).all())\n print('res is correct?', np.sum([graph[i][match_r[i]] for i in range(len(match_r))]) == res)\n print(np.sum([graph[i][match_r[i]] for i in range(len(match_r))]))\n else:\n print('Two match result is equal?', np.array([match_r[j] == i for i, j in enumerate(match_c)]).all())\n print('res is correct?', np.sum([graph[match_c[i]][i] for i in range(len(match_c))]) == res)\n \n \n\n\nif __name__ == '__main__':\n with open(r'E:\\Project\\simulator_for_ride_hailing_platform\\algorithm\\graph_1.pkl', 'rb') as f:\n graph = pickle.load(f)\n \n test_with_given_graph(graph)\n",
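The KM class above solves maximum-weight bipartite assignment (Kuhn-Munkres); a small worked example on a 3x3 weight matrix, assuming KM.py is importable:

import numpy as np
from KM import KM

# rows are workers, columns are jobs; maximise the total weight
graph = [[4, 1, 3],
         [2, 0, 5],
         [3, 2, 2]]

km = KM(graph)
total = km.match()
match_r, match_c = km.get_match_result()

# optimal assignment: row 0 -> col 0, row 1 -> col 2, row 2 -> col 1,
# for a total weight of 4 + 5 + 2 = 11
print(match_r, match_c, total)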
"import numpy as np\nimport os\nimport errno\n\n\n\nfrom datetime import datetime, timedelta\n\ndef datetime_range(start, end, delta):\n current = start\n while current < end:\n yield current\n current += delta\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef ids_2dto1d(i, j, M, N):\n '''\n convert (i,j) in a M by N matrix to index in M*N list. (row wise)\n matrix: [[1,2,3], [4, 5, 6]] (I think it is better to ignore this line)\n list: [0, 1, 2, 3, 4, 5, 6]\n index start from 0\n '''\n assert 0 <= i < M and 0 <= j < N\n # print(type(i), type(j), type(M), type(N))\n index = i * N + j\n return index\n\n\ndef ids_1dto2d(ids, M, N):\n ''' inverse of ids_2dto1d(i, j, M, N)\n index start from 0\n '''\n i = ids // N\n j = ids - N * i\n return (i, j)\n\n\ndef get_neighbor_list(i, j, M, N, n, nodes):\n ''' n: n-sided polygon, construct for a 2d map\n 1\n 6 2\n center\n 5 3\n 4\n return index of neighbor 1, 2, 3, 4, 5,6 in the matrix\n '''\n\n neighbor_list = [None] * n\n if n == 6:\n # hexagonal\n if j % 2 == 0:\n if i - 1 >= 0:\n neighbor_list[0] = nodes[ids_2dto1d(i-1, j, M, N)]\n if j + 1 < N:\n neighbor_list[1] = nodes[ids_2dto1d(i, j+1, M, N)]\n if i + 1 < M and j + 1 < N:\n neighbor_list[2] = nodes[ids_2dto1d(i+1, j+1, M, N)]\n if i + 1 < M:\n neighbor_list[3] = nodes[ids_2dto1d(i+1, j, M, N)]\n if i + 1 < M and j - 1 >= 0:\n neighbor_list[4] = nodes[ids_2dto1d(i+1, j-1, M, N)]\n if j - 1 >= 0:\n neighbor_list[5] = nodes[ids_2dto1d(i, j-1, M, N)]\n elif j % 2 == 1:\n if i - 1 >= 0:\n neighbor_list[0] = nodes[ids_2dto1d(i-1, j, M, N)]\n if i - 1 >= 0 and j + 1 < N:\n neighbor_list[1] = nodes[ids_2dto1d(i-1, j+1, M, N)]\n if j + 1 < N:\n neighbor_list[2] = nodes[ids_2dto1d(i, j+1, M, N)]\n if i + 1 < M:\n neighbor_list[3] = nodes[ids_2dto1d(i+1, j, M, N)]\n if j - 1 >= 0:\n neighbor_list[4] = nodes[ids_2dto1d(i, j-1, M, N)]\n if i - 1 >= 0 and j - 1 >= 0:\n neighbor_list[5] = nodes[ids_2dto1d(i-1, j-1, M, N)]\n elif n == 4:\n # square\n if i - 1 >= 0:\n neighbor_list[0] = nodes[ids_2dto1d(i-1, j, M, N)]\n if j + 1 < N:\n neighbor_list[1] = nodes[ids_2dto1d(i, j+1, M, N)]\n if i + 1 < M:\n neighbor_list[2] = nodes[ids_2dto1d(i+1, j, M, N)]\n if j - 1 >= 0:\n neighbor_list[3] = nodes[ids_2dto1d(i, j-1, M, N)]\n\n return neighbor_list\n\n\ndef get_neighbor_index(i, j):\n \"\"\"\n 1\n 6 2\n center\n 5 3\n 4\n return index of neighbor 1, 2, 3, 4, 5,6 in the matrix\n \"\"\"\n neighbor_matrix_ids = []\n if j % 2 == 0:\n neighbor_matrix_ids = [[i - 1, j ],\n [i, j + 1],\n [i + 1, j + 1],\n [i + 1, j ],\n [i + 1, j - 1],\n [i , j - 1]]\n elif j % 2 == 1:\n neighbor_matrix_ids = [[i - 1, j ],\n [i - 1, j + 1],\n [i , j + 1],\n [i + 1, j ],\n [i , j - 1],\n [i - 1, j - 1]]\n\n return neighbor_matrix_ids\n\n\ndef get_layers_neighbors(i, j, l_max, M, N):\n \"\"\"get neighbors of node layer by layer, todo BFS.\n i, j: center node location\n L_max: max number of layers\n layers_neighbors: layers_neighbors[0] first layer neighbor: 6 nodes: can arrived in 1 time step.\n layers_neighbors[1]: 2nd layer nodes id\n M, N: matrix rows and columns.\n \"\"\"\n assert l_max >= 1\n layers_neighbors = []\n layer1_neighbor = get_neighbor_index(i, j) #[[1,1], [0, 1], ...]\n temp = []\n for item in layer1_neighbor:\n x, y = item\n if 0 <= x <= M-1 and 0 <= y <= N-1:\n temp.append(item)\n layers_neighbors.append(temp)\n\n node_id_neighbors = []\n for item in layer1_neighbor:\n x, y = item\n if 0 <= 
x <= M-1 and 0 <= y <= N-1:\n node_id_neighbors.append(ids_2dto1d(x, y, M, N))\n\n layers_neighbors_set = set(node_id_neighbors)\n curr_ndoe_id = ids_2dto1d(i, j, M, N)\n layers_neighbors_set.add(curr_ndoe_id)\n\n t = 1\n while t < l_max:\n t += 1\n layer_neighbor_temp = []\n for item in layers_neighbors[-1]:\n x, y = item\n if 0 <= x <= M-1 and 0 <= y <= N-1:\n layer_neighbor_temp += get_neighbor_index(x, y)\n\n layer_neighbor = [] # remove previous layer neighbors\n for item in layer_neighbor_temp:\n x, y = item\n if 0 <= x <= M-1 and 0 <= y <= N-1:\n node_id = ids_2dto1d(x, y, M, N)\n if node_id not in layers_neighbors_set:\n layer_neighbor.append(item)\n layers_neighbors_set.add(node_id)\n layers_neighbors.append(layer_neighbor)\n\n return layers_neighbors\n\n\ndef get_driver_status(env):\n idle_driver_dist = np.zeros((env.M, env.N))\n for driver_id, cur_drivers in env.drivers.iteritems():\n if cur_drivers.node is not None:\n node_id = cur_drivers.node.get_node_index()\n row, col = ids_1dto2d(node_id, env.M, env.N)\n if cur_drivers.onservice is False and cur_drivers.online is True:\n idle_driver_dist[row, col] += 1\n\n return idle_driver_dist\n\ndef debug_print_drivers(node):\n print(\"Status of all drivers in the node {}\".format(node.get_node_index()))\n print(\"|{:12}|{:12}|{:12}|{:12}|\".format(\"driver id\", \"driver location\", \"online\", \"onservice\"))\n\n for driver_id, cur_drivers in node.drivers.iteritems():\n if cur_drivers.node is not None:\n node_id = cur_drivers.node.get_node_index()\n else:\n node_id = \"none\"\n print(\"|{:12}|{:12}|{:12}|{:12}|\".format(driver_id, node_id, cur_drivers.online, cur_drivers.onservice))\n\n\n\n"
] | [
[
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.scatter",
"numpy.arange",
"numpy.max",
"matplotlib.pyplot.colorbar",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"numpy.logical_not",
"numpy.min",
"numpy.arange",
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.zeros"
],
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
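A minimal usage sketch for the `KM` (Kuhn-Munkres / Hungarian) matcher defined in the first code entry of the row above. The 3x3 weight matrix is a hypothetical toy instance, `KM` is assumed to be importable from that file, and `scipy.optimize.linear_sum_assignment` serves only as an independent cross-check of the maximum-weight matching:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Hypothetical toy instance: KM maximizes the total weight of a one-to-one
# row-to-column assignment (maximum-weight bipartite matching).
weights = [[3, 1, 4],
           [2, 1, 3],
           [0, 0, 5]]

km = KM(weights)                     # class from the first code entry above
total = km.match()                   # augmenting-path loop with slack updates
match_r, match_c = km.get_match_result()
print('match_r is', match_r)         # match_r[i] = column matched to row i

# Independent cross-check against SciPy's Hungarian implementation.
rows, cols = linear_sum_assignment(np.array(weights), maximize=True)
assert np.isclose(total, np.array(weights)[rows, cols].sum())
```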
Amir-Mehrpanah/hgraph2graph | [
"6d37153afe09f7684381ce56e8366675e22833e9"
] | [
"hgraph/decoder.py"
] | [
"import torch\nimport torch.nn as nn\nimport rdkit.Chem as Chem\nimport torch.nn.functional as F\nfrom hgraph.nnutils import *\nfrom hgraph.encoder import IncHierMPNEncoder\nfrom hgraph.mol_graph import MolGraph\nfrom hgraph.inc_graph import IncTree, IncGraph\n\nclass HTuple():\n def __init__(self, node=None, mess=None, vmask=None, emask=None):\n self.node, self.mess = node, mess\n self.vmask, self.emask = vmask, emask\n\nclass HierMPNDecoder(nn.Module):\n\n def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, latent_size, depthT, depthG, dropout, attention=False):\n super(HierMPNDecoder, self).__init__()\n self.vocab = vocab\n self.avocab = avocab\n self.hidden_size = hidden_size\n self.embed_size = embed_size\n self.latent_size = latent_size\n self.use_attention = attention\n self.itensor = torch.LongTensor([]).cuda()\n\n self.hmpn = IncHierMPNEncoder(vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout)\n self.rnn_cell = self.hmpn.tree_encoder.rnn\n self.E_assm = self.hmpn.E_i \n self.E_order = torch.eye(MolGraph.MAX_POS).cuda()\n\n self.topoNN = nn.Sequential(\n nn.Linear(hidden_size + latent_size, hidden_size),\n nn.ReLU(),\n nn.Dropout(dropout),\n nn.Linear(hidden_size, 1)\n )\n self.clsNN = nn.Sequential(\n nn.Linear(hidden_size + latent_size, hidden_size),\n nn.ReLU(),\n nn.Dropout(dropout),\n nn.Linear(hidden_size, vocab.size()[0])\n )\n self.iclsNN = nn.Sequential(\n nn.Linear(hidden_size + latent_size, hidden_size),\n nn.ReLU(),\n nn.Dropout(dropout),\n nn.Linear(hidden_size, vocab.size()[1])\n )\n self.matchNN = nn.Sequential(\n nn.Linear(hidden_size + embed_size + MolGraph.MAX_POS, hidden_size),\n nn.ReLU(),\n )\n self.W_assm = nn.Linear(hidden_size, latent_size)\n\n if latent_size != hidden_size:\n self.W_root = nn.Linear(latent_size, hidden_size)\n\n if self.use_attention:\n self.A_topo = nn.Linear(hidden_size, latent_size)\n self.A_cls = nn.Linear(hidden_size, latent_size)\n self.A_assm = nn.Linear(hidden_size, latent_size)\n\n self.topo_loss = nn.BCEWithLogitsLoss(size_average=False)\n self.cls_loss = nn.CrossEntropyLoss(size_average=False)\n self.icls_loss = nn.CrossEntropyLoss(size_average=False)\n self.assm_loss = nn.CrossEntropyLoss(size_average=False)\n \n def apply_tree_mask(self, tensors, cur, prev):\n fnode, fmess, agraph, bgraph, cgraph, scope = tensors\n agraph = agraph * index_select_ND(cur.emask, 0, agraph)\n bgraph = bgraph * index_select_ND(cur.emask, 0, bgraph)\n cgraph = cgraph * index_select_ND(prev.vmask, 0, cgraph)\n return fnode, fmess, agraph, bgraph, cgraph, scope\n\n def apply_graph_mask(self, tensors, hgraph):\n fnode, fmess, agraph, bgraph, scope = tensors\n agraph = agraph * index_select_ND(hgraph.emask, 0, agraph)\n bgraph = bgraph * index_select_ND(hgraph.emask, 0, bgraph)\n return fnode, fmess, agraph, bgraph, scope\n\n def update_graph_mask(self, graph_batch, new_atoms, hgraph):\n new_atom_index = hgraph.vmask.new_tensor(new_atoms)\n hgraph.vmask.scatter_(0, new_atom_index, 1)\n\n new_atom_set = set(new_atoms)\n new_bonds = [] #new bonds are the subgraph induced by new_atoms\n for zid in new_atoms:\n for nid in graph_batch[zid]:\n if nid not in new_atom_set: continue\n new_bonds.append( graph_batch[zid][nid]['mess_idx'] )\n\n new_bond_index = hgraph.emask.new_tensor(new_bonds)\n if len(new_bonds) > 0:\n hgraph.emask.scatter_(0, new_bond_index, 1)\n return new_atom_index, new_bond_index\n\n def init_decoder_state(self, tree_batch, tree_tensors, src_root_vecs):\n batch_size = len(src_root_vecs)\n num_mess = 
len(tree_tensors[1])\n agraph = tree_tensors[2].clone()\n bgraph = tree_tensors[3].clone()\n\n for i,tup in enumerate(tree_tensors[-1]):\n root = tup[0]\n assert agraph[root,-1].item() == 0\n agraph[root,-1] = num_mess + i\n for v in tree_batch.successors(root):\n mess_idx = tree_batch[root][v]['mess_idx'] \n assert bgraph[mess_idx,-1].item() == 0\n bgraph[mess_idx,-1] = num_mess + i\n\n new_tree_tensors = tree_tensors[:2] + [agraph, bgraph] + tree_tensors[4:]\n htree = HTuple()\n htree.mess = self.rnn_cell.get_init_state(tree_tensors[1], src_root_vecs)\n htree.emask = torch.cat( [bgraph.new_zeros(num_mess), bgraph.new_ones(batch_size)], dim=0 )\n\n return htree, new_tree_tensors\n\n def attention(self, src_vecs, batch_idx, queries, W_att):\n size = batch_idx.size()\n if batch_idx.dim() > 1:\n batch_idx = batch_idx.view(-1)\n queries = queries.view(-1, queries.size(-1))\n\n src_vecs = src_vecs.index_select(0, batch_idx)\n att_score = torch.bmm( src_vecs, W_att(queries).unsqueeze(-1) )\n att_vecs = F.softmax(att_score, dim=1) * src_vecs\n att_vecs = att_vecs.sum(dim=1)\n return att_vecs if len(size) == 1 else att_vecs.view(size[0], size[1], -1)\n\n def get_topo_score(self, src_tree_vecs, batch_idx, topo_vecs):\n if self.use_attention:\n topo_cxt = self.attention(src_tree_vecs, batch_idx, topo_vecs, self.A_topo)\n else:\n topo_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0)\n return self.topoNN( torch.cat([topo_vecs, topo_cxt], dim=-1) ).squeeze(-1)\n\n def get_cls_score(self, src_tree_vecs, batch_idx, cls_vecs, cls_labs):\n if self.use_attention:\n cls_cxt = self.attention(src_tree_vecs, batch_idx, cls_vecs, self.A_cls)\n else:\n cls_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0)\n\n cls_vecs = torch.cat([cls_vecs, cls_cxt], dim=-1)\n cls_scores = self.clsNN(cls_vecs)\n\n if cls_labs is None: #inference mode\n icls_scores = self.iclsNN(cls_vecs) #no masking\n else:\n vocab_masks = self.vocab.get_mask(cls_labs)\n icls_scores = self.iclsNN(cls_vecs) + vocab_masks #apply mask by log(x + mask): mask=0 or -INF\n return cls_scores, icls_scores\n\n def get_assm_score(self, src_graph_vecs, batch_idx, assm_vecs):\n if self.use_attention:\n assm_cxt = self.attention(src_graph_vecs, batch_idx, assm_vecs, self.A_assm)\n else:\n assm_cxt = index_select_ND(src_graph_vecs, 0, batch_idx)\n return (self.W_assm(assm_vecs) * assm_cxt).sum(dim=-1)\n\n def forward(self, src_mol_vecs, graphs, tensors, orders):\n batch_size = len(orders)\n tree_batch, graph_batch = graphs\n tree_tensors, graph_tensors = tensors\n inter_tensors = tree_tensors\n\n src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs\n init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs)\n\n htree, tree_tensors = self.init_decoder_state(tree_batch, tree_tensors, init_vecs)\n hinter = HTuple(\n mess = self.rnn_cell.get_init_state(inter_tensors[1]),\n emask = self.itensor.new_zeros(inter_tensors[1].size(0))\n )\n hgraph = HTuple(\n mess = self.rnn_cell.get_init_state(graph_tensors[1]),\n vmask = self.itensor.new_zeros(graph_tensors[0].size(0)),\n emask = self.itensor.new_zeros(graph_tensors[1].size(0))\n )\n \n all_topo_preds, all_cls_preds, all_assm_preds = [], [], []\n new_atoms = []\n tree_scope = tree_tensors[-1]\n for i in range(batch_size):\n root = tree_batch.nodes[ tree_scope[i][0] ]\n clab, ilab = self.vocab[ root['label'] ]\n all_cls_preds.append( (init_vecs[i], i, clab, ilab) ) #cluster prediction\n new_atoms.extend(root['cluster'])\n\n subgraph = 
self.update_graph_mask(graph_batch, new_atoms, hgraph)\n graph_tensors = self.hmpn.embed_graph(graph_tensors) + (graph_tensors[-1],) #preprocess graph tensors\n\n maxt = max([len(x) for x in orders])\n max_cls_size = max( [len(attr) * 2 for node,attr in tree_batch.nodes(data='cluster')] )\n\n for t in range(maxt):\n batch_list = [i for i in range(batch_size) if t < len(orders[i])]\n assert htree.emask[0].item() == 0 and hinter.emask[0].item() == 0 and hgraph.vmask[0].item() == 0 and hgraph.emask[0].item() == 0\n\n subtree = [], []\n for i in batch_list:\n xid, yid, tlab = orders[i][t]\n subtree[0].append(xid)\n if yid is not None:\n mess_idx = tree_batch[xid][yid]['mess_idx']\n subtree[1].append(mess_idx)\n\n subtree = htree.emask.new_tensor(subtree[0]), htree.emask.new_tensor(subtree[1]) \n htree.emask.scatter_(0, subtree[1], 1)\n hinter.emask.scatter_(0, subtree[1], 1)\n\n cur_tree_tensors = self.apply_tree_mask(tree_tensors, htree, hgraph)\n cur_inter_tensors = self.apply_tree_mask(inter_tensors, hinter, hgraph)\n cur_graph_tensors = self.apply_graph_mask(graph_tensors, hgraph)\n htree, hinter, hgraph = self.hmpn(cur_tree_tensors, cur_inter_tensors, cur_graph_tensors, htree, hinter, hgraph, subtree, subgraph)\n\n new_atoms = []\n for i in batch_list:\n xid, yid, tlab = orders[i][t]\n all_topo_preds.append( (htree.node[xid], i, tlab) ) #topology prediction\n if yid is not None:\n mess_idx = tree_batch[xid][yid]['mess_idx']\n new_atoms.extend( tree_batch.nodes[yid]['cluster'] ) #NOTE: regardless of tlab = 0 or 1\n\n if tlab == 0: continue\n\n cls = tree_batch.nodes[yid]['smiles']\n clab, ilab = self.vocab[ tree_batch.nodes[yid]['label'] ]\n mess_idx = tree_batch[xid][yid]['mess_idx']\n hmess = self.rnn_cell.get_hidden_state(htree.mess)\n all_cls_preds.append( (hmess[mess_idx], i, clab, ilab) ) #cluster prediction using message\n \n inter_label = tree_batch.nodes[yid]['inter_label']\n inter_label = [ (pos, self.vocab[(cls, icls)][1]) for pos,icls in inter_label ]\n inter_size = self.vocab.get_inter_size(ilab)\n\n if len(tree_batch.nodes[xid]['cluster']) > 2: #uncertainty occurs only when previous cluster is a ring\n nth_child = tree_batch[yid][xid]['label'] #must be yid -> xid (graph order labeling is different from tree)\n cands = tree_batch.nodes[yid]['assm_cands']\n icls = list(zip(*inter_label))[1]\n cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child)\n\n if len(cand_vecs) < max_cls_size:\n pad_len = max_cls_size - len(cand_vecs)\n cand_vecs = F.pad(cand_vecs, (0,0,0,pad_len))\n\n batch_idx = hgraph.emask.new_tensor( [i] * max_cls_size )\n all_assm_preds.append( (cand_vecs, batch_idx, 0) ) #the label is always the first of assm_cands\n\n subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph)\n\n topo_vecs, batch_idx, topo_labels = zip_tensors(all_topo_preds)\n topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, topo_vecs)\n topo_loss = self.topo_loss(topo_scores, topo_labels.float())\n topo_acc = get_accuracy_bin(topo_scores, topo_labels)\n\n cls_vecs, batch_idx, cls_labs, icls_labs = zip_tensors(all_cls_preds)\n cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, cls_vecs, cls_labs)\n cls_loss = self.cls_loss(cls_scores, cls_labs) + self.icls_loss(icls_scores, icls_labs)\n cls_acc = get_accuracy(cls_scores, cls_labs)\n icls_acc = get_accuracy(icls_scores, icls_labs)\n\n if len(all_assm_preds) > 0:\n assm_vecs, batch_idx, assm_labels = zip_tensors(all_assm_preds)\n assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, assm_vecs)\n 
assm_loss = self.assm_loss(assm_scores, assm_labels)\n assm_acc = get_accuracy_sym(assm_scores, assm_labels)\n else:\n assm_loss, assm_acc = 0, 1\n \n loss = (topo_loss + cls_loss + assm_loss) / batch_size\n return loss, cls_acc, icls_acc, topo_acc, assm_acc\n\n def enum_attach(self, hgraph, cands, icls, nth_child):\n cands = self.itensor.new_tensor(cands)\n icls_vecs = self.itensor.new_tensor(icls * len(cands))\n icls_vecs = self.E_assm( icls_vecs )\n\n nth_child = self.itensor.new_tensor([nth_child] * len(cands.view(-1)))\n order_vecs = self.E_order.index_select(0, nth_child)\n\n cand_vecs = hgraph.node.index_select(0, cands.view(-1))\n cand_vecs = torch.cat( [cand_vecs, icls_vecs, order_vecs], dim=-1 )\n cand_vecs = self.matchNN(cand_vecs)\n\n if len(icls) == 2:\n cand_vecs = cand_vecs.view(-1, 2, self.hidden_size).sum(dim=1)\n return cand_vecs\n\n def decode(self, src_mol_vecs, greedy=True, max_decode_step=100, beam=5):\n src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs\n batch_size = len(src_root_vecs)\n\n tree_batch = IncTree(batch_size, node_fdim=2, edge_fdim=3)\n graph_batch = IncGraph(self.avocab, batch_size, node_fdim=self.hmpn.atom_size, edge_fdim=self.hmpn.atom_size + self.hmpn.bond_size)\n stack = [[] for i in range(batch_size)]\n\n init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs)\n batch_idx = self.itensor.new_tensor(range(batch_size))\n cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, init_vecs, None)\n root_cls = cls_scores.max(dim=-1)[1]\n icls_scores = icls_scores + self.vocab.get_mask(root_cls)\n root_cls, root_icls = root_cls.tolist(), icls_scores.max(dim=-1)[1].tolist()\n\n super_root = tree_batch.add_node() \n for bid in range(batch_size):\n clab, ilab = root_cls[bid], root_icls[bid]\n root_idx = tree_batch.add_node( batch_idx.new_tensor([clab, ilab]) )\n tree_batch.add_edge(super_root, root_idx) \n stack[bid].append(root_idx)\n\n root_smiles = self.vocab.get_ismiles(ilab)\n new_atoms, new_bonds, attached = graph_batch.add_mol(bid, root_smiles, [], 0)\n tree_batch.register_cgraph(root_idx, new_atoms, new_bonds, attached)\n \n #invariance: tree_tensors is equal to inter_tensors (but inter_tensor's init_vec is 0)\n tree_tensors = tree_batch.get_tensors()\n graph_tensors = graph_batch.get_tensors()\n\n htree = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) )\n hinter = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) )\n hgraph = HTuple( mess = self.rnn_cell.get_init_state(graph_tensors[1]) )\n h = self.rnn_cell.get_hidden_state(htree.mess)\n h[1 : batch_size + 1] = init_vecs #wiring root (only for tree, not inter)\n \n for t in range(max_decode_step):\n batch_list = [ bid for bid in range(batch_size) if len(stack[bid]) > 0 ]\n if len(batch_list) == 0: break\n\n batch_idx = batch_idx.new_tensor(batch_list)\n cur_tree_nodes = [stack[bid][-1] for bid in batch_list]\n subtree = batch_idx.new_tensor(cur_tree_nodes), batch_idx.new_tensor([])\n subgraph = batch_idx.new_tensor( tree_batch.get_cluster_nodes(cur_tree_nodes) ), batch_idx.new_tensor( tree_batch.get_cluster_edges(cur_tree_nodes) )\n\n htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph)\n topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, htree.node.index_select(0, subtree[0]))\n topo_scores = torch.sigmoid(topo_scores)\n if greedy:\n topo_preds = topo_scores.tolist()\n else:\n topo_preds = torch.bernoulli(topo_scores).tolist()\n\n 
new_mess = []\n expand_list = []\n for i,bid in enumerate(batch_list):\n if topo_preds[i] > 0.5 and tree_batch.can_expand(stack[bid][-1]):\n expand_list.append( (len(new_mess), bid) )\n new_node = tree_batch.add_node() #new node label is yet to be predicted\n edge_feature = batch_idx.new_tensor( [stack[bid][-1], new_node, 0] ) #parent to child is 0\n new_edge = tree_batch.add_edge(stack[bid][-1], new_node, edge_feature) \n stack[bid].append(new_node)\n new_mess.append(new_edge)\n else:\n child = stack[bid].pop()\n if len(stack[bid]) > 0:\n nth_child = tree_batch.graph.in_degree(stack[bid][-1]) #edge child -> father has not established\n edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )\n new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)\n new_mess.append(new_edge)\n\n subtree = subtree[0], batch_idx.new_tensor(new_mess)\n subgraph = [], []\n htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph)\n cur_mess = self.rnn_cell.get_hidden_state(htree.mess).index_select(0, subtree[1])\n\n if len(expand_list) > 0:\n idx_in_mess, expand_list = zip(*expand_list)\n idx_in_mess = batch_idx.new_tensor( idx_in_mess )\n expand_idx = batch_idx.new_tensor( expand_list )\n forward_mess = cur_mess.index_select(0, idx_in_mess)\n cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, expand_idx, forward_mess, None)\n scores, cls_topk, icls_topk = hier_topk(cls_scores, icls_scores, self.vocab, beam)\n if not greedy:\n scores = torch.exp(scores) #score is output of log_softmax\n shuf_idx = torch.multinomial(scores, beam, replacement=True).tolist()\n\n for i,bid in enumerate(expand_list):\n new_node, fa_node = stack[bid][-1], stack[bid][-2]\n success = False\n cls_beam = range(beam) if greedy else shuf_idx[i]\n for kk in cls_beam: #try until one is chemically valid\n if success: break\n clab, ilab = cls_topk[i][kk], icls_topk[i][kk]\n node_feature = batch_idx.new_tensor( [clab, ilab] )\n tree_batch.set_node_feature(new_node, node_feature)\n smiles, ismiles = self.vocab.get_smiles(clab), self.vocab.get_ismiles(ilab)\n fa_cluster, _, fa_used = tree_batch.get_cluster(fa_node)\n inter_cands, anchor_smiles, attach_points = graph_batch.get_assm_cands(fa_cluster, fa_used, ismiles)\n\n if len(inter_cands) == 0:\n continue\n elif len(inter_cands) == 1:\n sorted_cands = [(inter_cands[0], 0)]\n nth_child = 0\n else:\n nth_child = tree_batch.graph.in_degree(fa_node)\n icls = [self.vocab[ (smiles,x) ][1] for x in anchor_smiles]\n cands = inter_cands if len(attach_points) <= 2 else [ (x[0],x[-1]) for x in inter_cands ]\n cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child)\n\n batch_idx = batch_idx.new_tensor( [bid] * len(inter_cands) )\n assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, cand_vecs).tolist()\n sorted_cands = sorted( list(zip(inter_cands, assm_scores)), key = lambda x:x[1], reverse=True )\n\n for inter_label,_ in sorted_cands:\n inter_label = list(zip(inter_label, attach_points))\n if graph_batch.try_add_mol(bid, ismiles, inter_label):\n new_atoms, new_bonds, attached = graph_batch.add_mol(bid, ismiles, inter_label, nth_child)\n tree_batch.register_cgraph(new_node, new_atoms, new_bonds, attached)\n tree_batch.update_attached(fa_node, inter_label)\n success = True\n break\n\n if not success: #force backtrack\n child = stack[bid].pop() #pop the dummy new_node which can't be added\n nth_child = tree_batch.graph.in_degree(stack[bid][-1]) \n edge_feature = batch_idx.new_tensor( [child, 
stack[bid][-1], nth_child] )\n new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)\n\n child = stack[bid].pop() \n if len(stack[bid]) > 0:\n nth_child = tree_batch.graph.in_degree(stack[bid][-1]) \n edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )\n new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)\n\n return graph_batch.get_mol()\n\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.sigmoid",
"torch.LongTensor",
"torch.cat",
"torch.eye",
"torch.multinomial",
"torch.exp",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"torch.bernoulli",
"torch.nn.ReLU",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
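The `decode()` method in the entry above alternates between greedy expansion and stochastic sampling: candidate cluster scores come out of a log-softmax head, are exponentiated back to probabilities, and are drawn with `torch.multinomial`. A self-contained sketch of that selection pattern on toy tensors (batch size, vocabulary size, and beam width here are made up):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
beam = 5
logits = torch.randn(4, 20)                  # toy scores: 4 partial trees x 20 clusters
log_probs = F.log_softmax(logits, dim=-1)    # decoder heads score in log space

scores, topk = log_probs.topk(beam, dim=-1)  # best `beam` candidates per tree

greedy = False
if greedy:
    # try candidates in score order, as decode() does with range(beam)
    order = [list(range(beam)) for _ in range(logits.size(0))]
else:
    probs = torch.exp(scores)                # log_softmax output -> probabilities
    order = torch.multinomial(probs, beam, replacement=True).tolist()

# order[i] indexes into topk[i]; decode() walks these candidates until one
# yields a chemically valid attachment, then moves on to the next tree.
first_choice = topk[torch.arange(4), torch.tensor([o[0] for o in order])]
print(first_choice)
```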
johncliu/Horizon | [
"cfa7a873ada5de3bb01e78e2f237d9849b8270b2",
"cfa7a873ada5de3bb01e78e2f237d9849b8270b2"
] | [
"ml/rl/test/test_normalization.py",
"ml/rl/training/sac_trainer.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport unittest\n\nimport numpy as np\nimport numpy.testing as npt\nimport six\nfrom caffe2.python import core, workspace\nfrom ml.rl.caffe_utils import C2\nfrom ml.rl.preprocessing import identify_types, normalization\nfrom ml.rl.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM\nfrom ml.rl.preprocessing.normalization import (\n NormalizationParameters,\n sort_features_by_normalization,\n)\nfrom ml.rl.preprocessing.preprocessor_net import PreprocessorNet\nfrom ml.rl.test.preprocessing_util import (\n BOXCOX_FEATURE_ID,\n ENUM_FEATURE_ID,\n PROBABILITY_FEATURE_ID,\n id_to_type,\n read_data,\n)\nfrom ml.rl.test.utils import NumpyFeatureProcessor\nfrom scipy import special\n\n\nclass TestNormalization(unittest.TestCase):\n def _feature_type_override(self, feature_id):\n \"\"\"\n This should only be used to test CONTINUOUS_ACTION\n \"\"\"\n if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION:\n return identify_types.CONTINUOUS_ACTION\n return None\n\n def test_prepare_normalization_and_normalize(self):\n feature_value_map = read_data()\n\n normalization_parameters = {}\n for name, values in feature_value_map.items():\n normalization_parameters[name] = normalization.identify_parameter(\n values, 10, feature_type=self._feature_type_override(name)\n )\n for k, v in normalization_parameters.items():\n if id_to_type(k) == CONTINUOUS:\n self.assertEqual(v.feature_type, CONTINUOUS)\n self.assertIs(v.boxcox_lambda, None)\n self.assertIs(v.boxcox_shift, None)\n elif id_to_type(k) == BOXCOX:\n self.assertEqual(v.feature_type, BOXCOX)\n self.assertIsNot(v.boxcox_lambda, None)\n self.assertIsNot(v.boxcox_shift, None)\n else:\n assert v.feature_type == id_to_type(k)\n sorted_features, _ = sort_features_by_normalization(normalization_parameters)\n\n norm_net = core.Net(\"net\")\n C2.set_net(norm_net)\n preprocessor = PreprocessorNet()\n input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32)\n for i, feature in enumerate(sorted_features):\n input_matrix[:, i] = feature_value_map[feature]\n input_matrix_blob = \"input_matrix_blob\"\n workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32))\n output_blob, _ = preprocessor.normalize_dense_matrix(\n input_matrix_blob, sorted_features, normalization_parameters, \"\", False\n )\n workspace.FeedBlob(input_matrix_blob, input_matrix)\n workspace.RunNetOnce(norm_net)\n normalized_feature_matrix = workspace.FetchBlob(output_blob)\n\n normalized_features = {}\n on_column = 0\n for feature in sorted_features:\n norm = normalization_parameters[feature]\n if norm.feature_type == ENUM:\n column_size = len(norm.possible_values)\n else:\n column_size = 1\n normalized_features[feature] = normalized_feature_matrix[\n :, on_column : (on_column + column_size)\n ]\n on_column += column_size\n\n self.assertTrue(\n all(\n [\n np.isfinite(parameter.stddev) and np.isfinite(parameter.mean)\n for parameter in normalization_parameters.values()\n ]\n )\n )\n for k, v in six.iteritems(normalized_features):\n self.assertTrue(np.all(np.isfinite(v)))\n feature_type = normalization_parameters[k].feature_type\n if feature_type == identify_types.PROBABILITY:\n sigmoidv = special.expit(v)\n self.assertTrue(\n np.all(\n np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1))\n )\n )\n elif feature_type == identify_types.ENUM:\n possible_values = normalization_parameters[k].possible_values\n self.assertEqual(v.shape[0], 
len(feature_value_map[k]))\n self.assertEqual(v.shape[1], len(possible_values))\n\n possible_value_map = {}\n for i, possible_value in enumerate(possible_values):\n possible_value_map[possible_value] = i\n\n for i, row in enumerate(v):\n original_feature = feature_value_map[k][i]\n self.assertEqual(\n possible_value_map[original_feature], np.where(row == 1)[0][0]\n )\n elif feature_type == identify_types.QUANTILE:\n for i, feature in enumerate(v[0]):\n original_feature = feature_value_map[k][i]\n expected = NumpyFeatureProcessor.value_to_quantile(\n original_feature, normalization_parameters[k].quantiles\n )\n self.assertAlmostEqual(feature, expected, 2)\n elif feature_type == identify_types.BINARY:\n pass\n elif (\n feature_type == identify_types.CONTINUOUS\n or feature_type == identify_types.BOXCOX\n ):\n one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01)\n zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01)\n zero_mean = np.isclose(np.mean(v), 0, atol=0.01)\n self.assertTrue(\n np.all(zero_mean),\n \"mean of feature {} is {}, not 0\".format(k, np.mean(v)),\n )\n self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev)))\n elif feature_type == identify_types.CONTINUOUS_ACTION:\n less_than_max = v < 1\n more_than_min = v > -1\n self.assertTrue(\n np.all(less_than_max),\n \"values are not less than 1: {}\".format(v[less_than_max == False]),\n )\n self.assertTrue(\n np.all(more_than_min),\n \"values are not more than -1: {}\".format(v[more_than_min == False]),\n )\n else:\n raise NotImplementedError()\n\n def test_normalize_dense_matrix_enum(self):\n normalization_parameters = {\n 1: NormalizationParameters(\n identify_types.ENUM,\n None,\n None,\n None,\n None,\n [12, 4, 2],\n None,\n None,\n None,\n ),\n 2: NormalizationParameters(\n identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None\n ),\n 3: NormalizationParameters(\n identify_types.ENUM, None, None, None, None, [15, 3], None, None, None\n ),\n }\n norm_net = core.Net(\"net\")\n C2.set_net(norm_net)\n preprocessor = PreprocessorNet()\n\n inputs = np.zeros([4, 3], dtype=np.float32)\n feature_ids = [2, 1, 3] # Sorted according to feature type\n inputs[:, feature_ids.index(1)] = [12, 4, 2, 2]\n inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0]\n inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE]\n input_blob = C2.NextBlob(\"input_blob\")\n workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32))\n normalized_output_blob, _ = preprocessor.normalize_dense_matrix(\n input_blob, feature_ids, normalization_parameters, \"\", False\n )\n workspace.FeedBlob(input_blob, inputs)\n workspace.RunNetOnce(norm_net)\n normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob)\n\n np.testing.assert_allclose(\n np.array(\n [\n [1.0, 1, 0, 0, 1, 0],\n [2.0, 0, 1, 0, 0, 1],\n [3.0, 0, 0, 1, 1, 0],\n [3.0, 0, 0, 1, 0, 0], # Missing values should go to all 0\n ]\n ),\n normalized_feature_matrix,\n )\n\n def test_persistency(self):\n feature_value_map = read_data()\n normalization_parameters = {}\n for name, values in feature_value_map.items():\n normalization_parameters[name] = normalization.identify_parameter(\n values, feature_type=self._feature_type_override(name)\n )\n\n s = normalization.serialize(normalization_parameters)\n read_parameters = normalization.deserialize(s)\n # Unfortunately, Thrift serializatin seems to lose a bit of precision.\n # Using `==` will be false.\n self.assertEqual(read_parameters.keys(), normalization_parameters.keys())\n for k in 
normalization_parameters:\n self.assertEqual(\n read_parameters[k].feature_type,\n normalization_parameters[k].feature_type,\n )\n self.assertEqual(\n read_parameters[k].possible_values,\n normalization_parameters[k].possible_values,\n )\n for field in [\n \"boxcox_lambda\",\n \"boxcox_shift\",\n \"mean\",\n \"stddev\",\n \"quantiles\",\n \"min_value\",\n \"max_value\",\n ]:\n if getattr(normalization_parameters[k], field) is None:\n self.assertEqual(\n getattr(read_parameters[k], field),\n getattr(normalization_parameters[k], field),\n )\n else:\n npt.assert_allclose(\n getattr(read_parameters[k], field),\n getattr(normalization_parameters[k], field),\n )\n\n def test_preprocessing_network(self):\n feature_value_map = read_data()\n\n normalization_parameters = {}\n for name, values in feature_value_map.items():\n normalization_parameters[name] = normalization.identify_parameter(\n values, feature_type=self._feature_type_override(name)\n )\n test_features = NumpyFeatureProcessor.preprocess(\n feature_value_map, normalization_parameters\n )\n\n net = core.Net(\"PreprocessingTestNet\")\n C2.set_net(net)\n preprocessor = PreprocessorNet()\n name_preprocessed_blob_map = {}\n for feature_name in feature_value_map:\n workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32))\n preprocessed_blob, _ = preprocessor.preprocess_blob(\n str(feature_name), [normalization_parameters[feature_name]]\n )\n name_preprocessed_blob_map[feature_name] = preprocessed_blob\n\n workspace.CreateNet(net)\n\n for feature_name, feature_value in six.iteritems(feature_value_map):\n feature_value = np.expand_dims(feature_value, -1)\n workspace.FeedBlob(str(feature_name), feature_value)\n workspace.RunNetOnce(net)\n\n for feature_name in feature_value_map:\n normalized_features = workspace.FetchBlob(\n name_preprocessed_blob_map[feature_name]\n )\n if feature_name != ENUM_FEATURE_ID:\n normalized_features = np.squeeze(normalized_features, -1)\n\n tolerance = 0.01\n if feature_name == BOXCOX_FEATURE_ID:\n # At the limit, boxcox has some numerical instability\n tolerance = 0.5\n non_matching = np.where(\n np.logical_not(\n np.isclose(\n normalized_features,\n test_features[feature_name],\n rtol=tolerance,\n atol=tolerance,\n )\n )\n )\n self.assertTrue(\n np.all(\n np.isclose(\n normalized_features,\n test_features[feature_name],\n rtol=tolerance,\n atol=tolerance,\n )\n ),\n \"{} does not match: {} {}\".format(\n feature_name,\n normalized_features[non_matching].tolist(),\n test_features[feature_name][non_matching].tolist(),\n ),\n )\n\n def test_type_override(self):\n # Take a feature that should be identified as probability\n feature_value_map = read_data()\n probability_values = feature_value_map[PROBABILITY_FEATURE_ID]\n\n # And ask for a binary anyways\n parameter = normalization.identify_parameter(\n probability_values, feature_type=identify_types.BINARY\n )\n self.assertEqual(parameter.feature_type, \"BINARY\")\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport logging\nfrom typing import Optional\n\nimport ml.rl.types as rlt\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom ml.rl.tensorboardX import SummaryWriterContext\nfrom ml.rl.thrift.core.ttypes import SACModelParameters\nfrom ml.rl.training._parametric_dqn_predictor import _ParametricDQNPredictor\nfrom ml.rl.training.actor_predictor import ActorPredictor\nfrom ml.rl.training.rl_exporter import ActorExporter, ParametricDQNExporter\nfrom ml.rl.training.rl_trainer_pytorch import RLTrainer, rescale_torch_tensor\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SACTrainer(RLTrainer):\n \"\"\"\n Soft Actor-Critic trainer as described in https://arxiv.org/pdf/1801.01290\n\n The actor is assumed to implement reparameterization trick.\n \"\"\"\n\n def __init__(\n self,\n q1_network,\n value_network,\n value_network_target,\n actor_network,\n parameters: SACModelParameters,\n q2_network=None,\n min_action_range_tensor_training=None,\n max_action_range_tensor_training=None,\n min_action_range_tensor_serving=None,\n max_action_range_tensor_serving=None,\n ) -> None:\n \"\"\"\n Args:\n The four args below are provided for integration with other\n environments (e.g., Gym):\n min_action_range_tensor_training / max_action_range_tensor_training:\n min / max value of actions at training time\n min_action_range_tensor_serving / max_action_range_tensor_serving:\n min / max value of actions at serving time\n \"\"\"\n self.minibatch_size = parameters.training.minibatch_size\n super(SACTrainer, self).__init__(\n parameters,\n use_gpu=False,\n additional_feature_types=None,\n gradient_handler=None,\n )\n\n self.q1_network = q1_network\n self.q1_network_optimizer = self._get_optimizer(\n q1_network, parameters.training.q_network_optimizer\n )\n\n self.q2_network = q2_network\n if self.q2_network is not None:\n self.q2_network_optimizer = self._get_optimizer(\n q2_network, parameters.training.q_network_optimizer\n )\n\n self.value_network = value_network\n self.value_network_optimizer = self._get_optimizer(\n value_network, parameters.training.value_network_optimizer\n )\n self.value_network_target = value_network_target\n\n self.actor_network = actor_network\n self.actor_network_optimizer = self._get_optimizer(\n actor_network, parameters.training.actor_network_optimizer\n )\n\n self.entropy_temperature = parameters.training.entropy_temperature\n self.logged_action_uniform_prior = (\n parameters.training.logged_action_uniform_prior\n )\n\n # These ranges are only for Gym tests\n self.min_action_range_tensor_training = min_action_range_tensor_training\n self.max_action_range_tensor_training = max_action_range_tensor_training\n self.min_action_range_tensor_serving = min_action_range_tensor_serving\n self.max_action_range_tensor_serving = max_action_range_tensor_serving\n\n def warm_start_components(self):\n components = [\n \"q1_network\",\n \"q1_network_optimizer\",\n \"value_network\",\n \"value_network_optimizer\",\n \"value_network_target\",\n \"actor_network\",\n \"actor_network_optimizer\",\n ]\n if self.q2_network:\n components += [\"q2_network\", \"q2_network_optimizer\"]\n return components\n\n def train(self, training_batch) -> None:\n \"\"\"\n IMPORTANT: the input action here is assumed to be preprocessed to match the\n range of the output of the actor.\n \"\"\"\n if hasattr(training_batch, \"as_parametric_sarsa_training_batch\"):\n training_batch = 
training_batch.as_parametric_sarsa_training_batch()\n\n learning_input = training_batch.training_input\n self.minibatch += 1\n\n state = learning_input.state\n action = learning_input.action\n reward = learning_input.reward\n discount = torch.full_like(reward, self.gamma)\n not_done_mask = learning_input.not_terminal\n\n if self._should_scale_action_in_train():\n action = rlt.FeatureVector(\n rescale_torch_tensor(\n action.float_features,\n new_min=self.min_action_range_tensor_training,\n new_max=self.max_action_range_tensor_training,\n prev_min=self.min_action_range_tensor_serving,\n prev_max=self.max_action_range_tensor_serving,\n )\n )\n\n current_state_action = rlt.StateAction(state=state, action=action)\n\n q1_value = self.q1_network(current_state_action).q_value\n min_q_value = q1_value\n\n if self.q2_network:\n q2_value = self.q2_network(current_state_action).q_value\n min_q_value = torch.min(q1_value, q2_value)\n\n # Use the minimum as target, ensure no gradient going through\n min_q_value = min_q_value.detach()\n\n #\n # First, optimize value network; minimizing MSE between\n # V(s) & Q(s, a) - log(pi(a|s))\n #\n\n state_value = self.value_network(state.float_features) # .q_value\n\n if self.logged_action_uniform_prior:\n log_prob_a = torch.zeros_like(min_q_value)\n target_value = min_q_value\n else:\n with torch.no_grad():\n log_prob_a = self.actor_network.get_log_prob(\n state, action.float_features\n )\n log_prob_a = log_prob_a.clamp(-20.0, 20.0)\n target_value = min_q_value - self.entropy_temperature * log_prob_a\n\n value_loss = F.mse_loss(state_value, target_value)\n self.value_network_optimizer.zero_grad()\n value_loss.backward()\n self.value_network_optimizer.step()\n\n #\n # Second, optimize Q networks; minimizing MSE between\n # Q(s, a) & r + discount * V'(next_s)\n #\n\n with torch.no_grad():\n next_state_value = (\n self.value_network_target(learning_input.next_state.float_features)\n * not_done_mask.float()\n )\n\n if self.minibatch < self.reward_burnin:\n target_q_value = reward\n else:\n target_q_value = reward + discount * next_state_value\n\n q1_loss = F.mse_loss(q1_value, target_q_value)\n self.q1_network_optimizer.zero_grad()\n q1_loss.backward()\n self.q1_network_optimizer.step()\n if self.q2_network:\n q2_loss = F.mse_loss(q2_value, target_q_value)\n self.q2_network_optimizer.zero_grad()\n q2_loss.backward()\n self.q2_network_optimizer.step()\n\n #\n # Lastly, optimize the actor; minimizing KL-divergence between action propensity\n # & softmax of value. 
Due to reparameterization trick, it ends up being\n # log_prob(actor_action) - Q(s, actor_action)\n #\n\n actor_output = self.actor_network(rlt.StateInput(state=state))\n\n state_actor_action = rlt.StateAction(\n state=state, action=rlt.FeatureVector(float_features=actor_output.action)\n )\n q1_actor_value = self.q1_network(state_actor_action).q_value\n min_q_actor_value = q1_actor_value\n if self.q2_network:\n q2_actor_value = self.q2_network(state_actor_action).q_value\n min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)\n\n actor_loss = (\n self.entropy_temperature * actor_output.log_prob - min_q_actor_value\n )\n # Do this in 2 steps so we can log histogram of actor loss\n actor_loss_mean = actor_loss.mean()\n self.actor_network_optimizer.zero_grad()\n actor_loss_mean.backward()\n self.actor_network_optimizer.step()\n\n if self.minibatch < self.reward_burnin:\n # Reward burnin: force target network\n self._soft_update(self.value_network, self.value_network_target, 1.0)\n else:\n # Use the soft update rule to update both target networks\n self._soft_update(self.value_network, self.value_network_target, self.tau)\n\n # Logging at the end to schedule all the cuda operations first\n if (\n self.tensorboard_logging_freq is not None\n and self.minibatch % self.tensorboard_logging_freq == 0\n ):\n SummaryWriterContext.add_histogram(\"q1/logged_state_value\", q1_value)\n if self.q2_network:\n SummaryWriterContext.add_histogram(\"q2/logged_state_value\", q2_value)\n\n SummaryWriterContext.add_histogram(\"log_prob_a\", log_prob_a)\n SummaryWriterContext.add_histogram(\"value_network/target\", target_value)\n SummaryWriterContext.add_histogram(\n \"q_network/next_state_value\", next_state_value\n )\n SummaryWriterContext.add_histogram(\n \"q_network/target_q_value\", target_q_value\n )\n SummaryWriterContext.add_histogram(\n \"actor/min_q_actor_value\", min_q_actor_value\n )\n SummaryWriterContext.add_histogram(\n \"actor/action_log_prob\", actor_output.log_prob\n )\n SummaryWriterContext.add_histogram(\"actor/loss\", actor_loss)\n\n self.loss_reporter.report(\n td_loss=q1_loss,\n reward_loss=None,\n logged_rewards=reward,\n model_values_on_logged_actions=q1_value,\n model_propensities=actor_output.log_prob.exp(),\n model_values=min_q_actor_value,\n )\n\n def _should_scale_action_in_train(self):\n if (\n self.min_action_range_tensor_training is not None\n and self.max_action_range_tensor_training is not None\n and self.min_action_range_tensor_serving is not None\n and self.max_action_range_tensor_serving is not None\n ):\n return True\n return False\n\n def internal_prediction(self, states):\n \"\"\" Returns list of actions output from actor network\n :param states states as list of states to produce actions for\n \"\"\"\n self.actor_network.eval()\n state_examples = torch.from_numpy(np.array(states)).type(self.dtype)\n actions = self.actor_network(\n rlt.StateInput(rlt.FeatureVector(float_features=state_examples))\n )\n # clamp actions to make sure actions are in the range\n clamped_actions = torch.max(\n torch.min(actions.action, self.max_action_range_tensor_training),\n self.min_action_range_tensor_training,\n )\n rescaled_actions = rescale_torch_tensor(\n clamped_actions,\n new_min=self.min_action_range_tensor_serving,\n new_max=self.max_action_range_tensor_serving,\n prev_min=self.min_action_range_tensor_training,\n prev_max=self.max_action_range_tensor_training,\n )\n\n self.actor_network.train()\n return rescaled_actions\n"
] | [
[
"numpy.expand_dims",
"numpy.greater",
"scipy.special.expit",
"numpy.isfinite",
"numpy.less",
"numpy.squeeze",
"numpy.all",
"numpy.logical_or",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.isclose"
],
[
"torch.min",
"torch.zeros_like",
"torch.nn.functional.mse_loss",
"torch.no_grad",
"torch.full_like",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
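`SACTrainer.train()` above relies on `RLTrainer._soft_update` to blend the value network into its target copy: a hard copy (tau = 1.0) during reward burn-in, then slow Polyak tracking with the configured tau. A generic sketch of that update rule; the actual Horizon helper may differ in details:

```python
import torch
import torch.nn as nn

def soft_update(source: nn.Module, target: nn.Module, tau: float) -> None:
    # Polyak averaging: target <- tau * source + (1 - tau) * target.
    # tau = 1.0 degenerates to a hard copy, matching the reward-burnin branch.
    with torch.no_grad():
        for p, tp in zip(source.parameters(), target.parameters()):
            tp.copy_(tau * p + (1.0 - tau) * tp)

value_net = nn.Linear(8, 1)
value_target = nn.Linear(8, 1)
soft_update(value_net, value_target, tau=1.0)    # burn-in: force target = source
soft_update(value_net, value_target, tau=0.005)  # afterwards: slow tracking
```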
boycehbz/DMMR | [
"18fcee7ce584fdccfa08bcda883d9b4fcb962c04",
"18fcee7ce584fdccfa08bcda883d9b4fcb962c04",
"18fcee7ce584fdccfa08bcda883d9b4fcb962c04"
] | [
"core/smplx/lbs_.py",
"core/utils/non_linear_solver.py",
"core/smplx/my_smpl_model.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is\n# holder of all proprietary rights on this computer program.\n# You can only use this computer program if you have closed\n# a license agreement with MPG or you get the right to use the computer\n# program from someone who is authorized to grant you that right.\n# Any use of the computer program without a valid license is prohibited and\n# liable to prosecution.\n#\n# Copyright©2019 Max-Planck-Gesellschaft zur Förderung\n# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute\n# for Intelligent Systems. All rights reserved.\n#\n# Contact: [email protected]\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nfrom .utils import rot_mat_to_euler\n\n\ndef find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks\n\n\n To do so, we first compute the rotation of the neck around the y-axis\n and then use a pre-computed look-up table to find the faces and the\n barycentric coordinates that will be used.\n\n Special thanks to Soubhik Sanyal ([email protected])\n for providing the original TensorFlow implementation and for the LUT.\n\n Parameters\n ----------\n vertices: torch.tensor BxVx3, dtype = torch.float32\n The tensor of input vertices\n pose: torch.tensor Bx(Jx3), dtype = torch.float32\n The current pose of the body model\n dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long\n The look-up table from neck rotation to faces\n dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32\n The look-up table from neck rotation to barycentric coordinates\n neck_kin_chain: list\n A python list that contains the indices of the joints that form the\n kinematic chain of the neck.\n dtype: torch.dtype, optional\n\n Returns\n -------\n dyn_lmk_faces_idx: torch.tensor, dtype = torch.long\n A tensor of size BxL that contains the indices of the faces that\n will be used to compute the current dynamic landmarks.\n dyn_lmk_b_coords: torch.tensor, dtype = torch.float32\n A tensor of size BxL that contains the indices of the faces that\n will be used to compute the current dynamic landmarks.\n '''\n\n batch_size = vertices.shape[0]\n\n aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,\n neck_kin_chain)\n rot_mats = batch_rodrigues(\n aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)\n\n rel_rot_mat = torch.eye(3, device=vertices.device,\n dtype=dtype).unsqueeze_(dim=0)\n for idx in range(len(neck_kin_chain)):\n rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)\n\n y_rot_angle = torch.round(\n torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,\n max=39)).to(dtype=torch.long)\n neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)\n mask = y_rot_angle.lt(-39).to(dtype=torch.long)\n neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)\n y_rot_angle = (neg_mask * neg_vals +\n (1 - neg_mask) * y_rot_angle)\n\n dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,\n 0, y_rot_angle)\n dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,\n 0, y_rot_angle)\n\n return dyn_lmk_faces_idx, dyn_lmk_b_coords\n\n\ndef vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):\n ''' Calculates landmarks by barycentric interpolation\n\n Parameters\n 
----------\n vertices: torch.tensor BxVx3, dtype = torch.float32\n The tensor of input vertices\n faces: torch.tensor Fx3, dtype = torch.long\n The faces of the mesh\n lmk_faces_idx: torch.tensor L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the\n landmarks.\n lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32\n The tensor of barycentric coordinates that are used to interpolate\n the landmarks\n\n Returns\n -------\n landmarks: torch.tensor BxLx3, dtype = torch.float32\n The coordinates of the landmarks for each mesh in the batch\n '''\n # Extract the indices of the vertices for each face\n # BxLx3\n batch_size, num_verts = vertices.shape[:2]\n device = vertices.device\n\n lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(\n batch_size, -1, 3)\n\n lmk_faces += torch.arange(\n batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts\n\n lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(\n batch_size, -1, 3, 3)\n\n landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])\n return landmarks\n\n\ndef lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,\n lbs_weights, pose2rot=True, dtype=torch.float32):\n ''' Performs Linear Blend Skinning with the given shape and pose parameters\n\n Parameters\n ----------\n betas : torch.tensor BxNB\n The tensor of shape parameters\n pose : torch.tensor Bx(J + 1) * 3\n The pose parameters in axis-angle format\n v_template torch.tensor BxVx3\n The template mesh that will be deformed\n shapedirs : torch.tensor 1xNB\n The tensor of PCA shape displacements\n posedirs : torch.tensor Px(V * 3)\n The pose PCA coefficients\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from\n the position of the vertices\n parents: torch.tensor J\n The array that describes the kinematic tree for the model\n lbs_weights: torch.tensor N x V x (J + 1)\n The linear blend skinning weights that represent how much the\n rotation matrix of each part affects each vertex\n pose2rot: bool, optional\n Flag on whether to convert the input pose tensor to rotation\n matrices. The default value is True. If False, then the pose tensor\n should already contain rotation matrices and have a size of\n Bx(J + 1)x9\n dtype: torch.dtype, optional\n\n Returns\n -------\n verts: torch.tensor BxVx3\n The vertices of the mesh after applying the shape and pose\n displacements.\n joints: torch.tensor BxJx3\n The joints of the model\n '''\n\n batch_size = max(betas.shape[0], pose.shape[0])\n device = betas.device\n\n # Add shape contribution\n v_shaped = v_template + blend_shapes(betas, shapedirs)\n # v_shaped *= scale\n # Get the joints\n # NxJx3 array\n J = vertices2joints(J_regressor, v_shaped)\n\n # 3. Add pose blend shapes\n # N x J x 3 x 3\n ident = torch.eye(3, dtype=dtype, device=device)\n if pose2rot:\n rot_mats = batch_rodrigues(\n pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])\n\n pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])\n # (N x P) x (P, V * 3) -> N x V x 3\n pose_offsets = torch.matmul(pose_feature, posedirs) \\\n .view(batch_size, -1, 3)\n else:\n pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident\n rot_mats = pose.view(batch_size, -1, 3, 3)\n\n pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),\n posedirs).view(batch_size, -1, 3)\n\n v_posed = pose_offsets + v_shaped\n # 4. 
Get the global joint location\n J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)\n\n # 5. Do skinning:\n # W is N x V x (J + 1)\n W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])\n # (N x V x (J + 1)) x (N x (J + 1) x 16)\n num_joints = J_regressor.shape[0]\n T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \\\n .view(batch_size, -1, 4, 4)\n\n homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],\n dtype=dtype, device=device)\n v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)\n v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))\n\n verts = v_homo[:, :, :3, 0]\n\n return verts, J_transformed\n\n\ndef vertices2joints(J_regressor, vertices):\n ''' Calculates the 3D joint locations from the vertices\n\n Parameters\n ----------\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from the\n position of the vertices\n vertices : torch.tensor BxVx3\n The tensor of mesh vertices\n\n Returns\n -------\n torch.tensor BxJx3\n The location of the joints\n '''\n\n return torch.einsum('bik,ji->bjk', [vertices, J_regressor])\n\n\ndef blend_shapes(betas, shape_disps):\n ''' Calculates the per vertex displacement due to the blend shapes\n\n\n Parameters\n ----------\n betas : torch.tensor Bx(num_betas)\n Blend shape coefficients\n shape_disps: torch.tensor Vx3x(num_betas)\n Blend shapes\n\n Returns\n -------\n torch.tensor BxVx3\n The per-vertex displacement due to shape deformation\n '''\n\n # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]\n # i.e. Multiply each shape displacement by its corresponding beta and\n # then sum them.\n blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])\n return blend_shape\n\n\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n Returns\n -------\n R: torch.tensor Nx3x3\n The rotation matrices for the given axis-angle parameters\n '''\n\n batch_size = rot_vecs.shape[0]\n device = rot_vecs.device\n\n angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)\n rot_dir = rot_vecs / angle\n\n cos = torch.unsqueeze(torch.cos(angle), dim=1)\n sin = torch.unsqueeze(torch.sin(angle), dim=1)\n\n # Bx1 arrays\n rx, ry, rz = torch.split(rot_dir, 1, dim=1)\n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n\n zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))\n\n ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)\n rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)\n return rot_mat\n\n\ndef transform_mat(R, t):\n ''' Creates a batch of transformation matrices\n Args:\n - R: Bx3x3 array of a batch of rotation matrices\n - t: Bx3x1 array of a batch of translation vectors\n Returns:\n - T: Bx4x4 Transformation matrix\n '''\n # No padding left or right, only add an extra row\n return torch.cat([F.pad(R, [0, 0, 0, 1]),\n F.pad(t, [0, 0, 0, 1], value=1)], dim=2)\n\n\ndef batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n \"\"\"\n Applies a batch of rigid transformations to the joints\n\n Parameters\n ----------\n rot_mats : torch.tensor BxNx3x3\n Tensor of rotation matrices\n joints : torch.tensor BxNx3\n Locations of joints\n parents : torch.tensor BxN\n The kinematic tree of each object\n dtype : 
torch.dtype, optional:\n The data type of the created tensors, the default is torch.float32\n\n Returns\n -------\n posed_joints : torch.tensor BxNx3\n The locations of the joints after applying the pose rotations\n rel_transforms : torch.tensor BxNx4x4\n The relative (with respect to the root joint) rigid transformations\n for all the joints\n \"\"\"\n joints = torch.unsqueeze(joints, dim=-1)\n\n rel_joints = joints.clone()\n rel_joints[:, 1:] -= joints[:, parents[1:]]\n\n transforms_mat = transform_mat(\n rot_mats.reshape(-1, 3, 3),\n rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)\n\n # transforms_mat[:, 0][:,:3,:3] *= scale\n transform_chain = [transforms_mat[:, 0]]\n for i in range(1, parents.shape[0]):\n # Subtract the joint location at the rest pose\n # No need for rotation, since it's identity when at rest\n curr_res = torch.matmul(transform_chain[parents[i]],\n transforms_mat[:, i])\n transform_chain.append(curr_res)\n\n transforms = torch.stack(transform_chain, dim=1)\n\n # The last column of the transformations contains the posed joints\n posed_joints = transforms[:, :, :3, 3]\n\n joints_homogen = F.pad(joints, [0, 0, 0, 1])\n\n rel_transforms = transforms - F.pad(\n torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])\n\n return posed_joints, rel_transforms\n",
"'''\n @FileName : non_linear_solver.py\n @EditTime : 2021-12-13 15:44:28\n @Author : Buzhen Huang\n @Email : [email protected]\n @Description : \n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\n\nimport time\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport sys\nimport os\n\nimport numpy as np\nimport torch\n\nfrom tqdm import tqdm\n\nfrom collections import defaultdict\n\nimport cv2\nimport PIL.Image as pil_img\n\nfrom core.optimizers import optim_factory\n\nfrom core.utils import fitting\n\ndef non_linear_solver(\n setting,\n data,\n dataset_obj,\n batch_size=1,\n data_weights=None,\n body_pose_prior_weights=None,\n kinetic_weights=None,\n shape_weights=None,\n coll_loss_weights=None,\n use_joints_conf=False,\n rho=100,\n interpenetration=False,\n loss_type='smplify',\n visualize=False,\n use_vposer=True,\n use_motionprior=False,\n interactive=True,\n use_cuda=True,\n **kwargs):\n\n device = setting['device']\n dtype = setting['dtype']\n vposer = setting['vposer']\n keypoints = data['keypoints']\n flags = data['flags']\n joint_weights = setting['joints_weight']\n models = setting['model']\n cameras = setting['cameras']\n pose_embeddings = setting['pose_embedding']\n\n assert (len(data_weights) ==\n len(body_pose_prior_weights) and len(shape_weights) ==\n len(body_pose_prior_weights) and len(coll_loss_weights) ==\n len(body_pose_prior_weights)), \"Number of weight must match\"\n \n # Load 2D keypoints\n keypoints = torch.tensor(keypoints, dtype=dtype, device=device)\n flags = torch.tensor(flags, dtype=dtype, device=device)\n gt_joints = keypoints[...,:2]\n joints_conf = keypoints[...,2]\n\n # Weights used for the pose prior and the shape prior\n opt_weights_dict = {'data_weight': data_weights,\n 'body_pose_weight': body_pose_prior_weights,\n 'shape_weight': shape_weights,\n 'kinetic_weight': kinetic_weights}\n if interpenetration:\n opt_weights_dict['coll_loss_weight'] = coll_loss_weights\n\n # Get loss weights for each stage\n keys = opt_weights_dict.keys()\n opt_weights = [dict(zip(keys, vals)) for vals in\n zip(*(opt_weights_dict[k] for k in keys\n if opt_weights_dict[k] is not None))]\n for weight_list in opt_weights:\n for key in weight_list:\n weight_list[key] = torch.tensor(weight_list[key],\n device=device,\n dtype=dtype)\n\n # Create fitting loss\n loss = fitting.create_loss(loss_type=loss_type,\n joint_weights=joint_weights,\n rho=rho,\n use_joints_conf=use_joints_conf,\n vposer=vposer,\n pose_embedding=pose_embeddings,\n body_pose_prior=setting['body_pose_prior'],\n shape_prior=setting['shape_prior'],\n angle_prior=setting['angle_prior'],\n interpenetration=interpenetration,\n dtype=dtype,\n frame_length=dataset_obj.frames,\n **kwargs)\n loss = loss.to(device=device)\n\n monitor = fitting.FittingMonitor(\n batch_size=batch_size, visualize=visualize, **kwargs)\n\n # Step 1: Optimize the cameras and motions\n final_loss_val = 0\n opt_start = time.time()\n for opt_idx, curr_weights in enumerate(tqdm(opt_weights, desc='Stage')):\n # Load all parameters for optimization\n body_params = []\n for model, pose_embedding in zip(models, pose_embeddings):\n body_param = list(model.parameters())\n body_params += list(\n filter(lambda x: x.requires_grad, body_param))\n if vposer is not None and opt_idx in [1,2,3]:\n body_params.append(pose_embedding)\n final_params = list(\n filter(lambda x: x.requires_grad, body_params))\n if opt_idx in [0,2,3]:\n for cam in cameras:\n if 
cam.translation.requires_grad:\n final_params.append(cam.translation)\n if cam.rotation.requires_grad:\n final_params.append(cam.rotation)\n body_optimizer, body_create_graph = optim_factory.create_optimizer(\n final_params, **kwargs)\n body_optimizer.zero_grad()\n\n loss.reset_loss_weights(curr_weights)\n\n closure = monitor.create_fitting_closure(\n body_optimizer, models,\n camera=cameras, gt_joints=gt_joints,\n joints_conf=joints_conf,\n flags=flags,\n joint_weights=joint_weights,\n loss=loss, create_graph=body_create_graph,\n use_vposer=use_vposer, vposer=vposer,\n use_motionprior=use_motionprior,\n pose_embeddings=pose_embeddings,\n return_verts=True, return_full_pose=True)\n\n if interactive:\n if use_cuda and torch.cuda.is_available():\n torch.cuda.synchronize()\n stage_start = time.time()\n final_loss_val = monitor.run_fitting(\n body_optimizer,\n closure, final_params,\n models,\n pose_embeddings=pose_embeddings, vposer=vposer, cameras=cameras,\n use_vposer=use_vposer, use_motionprior=use_motionprior)\n\n if interactive:\n if use_cuda and torch.cuda.is_available():\n torch.cuda.synchronize()\n elapsed = time.time() - stage_start\n if interactive:\n tqdm.write('Stage {:03d} done after {:.4f} seconds'.format(\n opt_idx, elapsed))\n\n if interactive:\n if use_cuda and torch.cuda.is_available():\n torch.cuda.synchronize()\n elapsed = time.time() - opt_start\n tqdm.write(\n 'Body fitting done after {:.4f} seconds'.format(elapsed))\n tqdm.write('Body final loss val = {:.5f}'.format(\n final_loss_val))\n\n result = {}\n for idx, (model, pose_embedding) in enumerate(zip(models, pose_embeddings)):\n # Get the result of the fitting process\n model_result = {key: val.detach().cpu().numpy()\n for key, val in model.named_parameters()}\n model_result['loss'] = final_loss_val\n model_result['pose_embedding'] = pose_embedding\n result['person%02d' %idx] = model_result\n\n # Get the optimized cameras\n rots, trans, intris = [], [], []\n for cam in cameras:\n rots.append(cam.rotation.detach().cpu().numpy())\n trans.append(cam.translation.detach().cpu().numpy())\n intri = np.eye(3)\n intri[0][0] = cam.focal_length_x.detach().cpu().numpy()\n intri[1][1] = cam.focal_length_y.detach().cpu().numpy()\n intri[:2,2] = cam.center.detach().cpu().numpy()\n intris.append(intri)\n result['cam_rots'] = rots\n result['cam_trans'] = trans\n result['intris'] = intris \n return result\n",
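The per-stage weights in non_linear_solver above are built with a transpose-by-zip idiom that is easy to misread. A self-contained toy version (the weight values below are made up for illustration):

# Each key maps to one value per optimization stage.
opt_weights_dict = {
    'data_weight': [1.0, 1.0, 1.0],
    'body_pose_weight': [404.0, 404.0, 78.0],
    'shape_weight': [100.0, 50.0, 10.0],
}
keys = opt_weights_dict.keys()
# zip(*...) transposes the per-key lists into per-stage tuples, one dict per stage.
opt_weights = [dict(zip(keys, vals))
               for vals in zip(*(opt_weights_dict[k] for k in keys))]
print(len(opt_weights))  # 3 stages
print(opt_weights[0])    # {'data_weight': 1.0, 'body_pose_weight': 404.0, 'shape_weight': 100.0}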
"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport os.path as osp\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn as nn\n\nfrom .lbs import (\n lbs, vertices2landmarks, find_dynamic_lmk_idx_and_bcoords, my_lbs)\n\nfrom .vertex_ids import vertex_ids as VERTEX_IDS\nfrom .utils import Struct, to_np, to_tensor\nfrom .vertex_joint_selector import VertexJointSelector\n\n\nModelOutput = namedtuple('ModelOutput',\n ['vertices', 'joints', 'full_pose', 'betas',\n 'global_orient',\n 'body_pose', 'expression',\n 'left_hand_pose', 'right_hand_pose',\n 'jaw_pose'])\nModelOutput.__new__.__defaults__ = (None,) * len(ModelOutput._fields)\n\n\ndef create_scale(model_path, model_type='smpl',\n **kwargs):\n\n # If it's a folder, assume\n if osp.isdir(model_path):\n model_path = os.path.join(model_path, 'smpl')\n if model_type.lower() == 'smpl' or model_type.lower() == 'smpllsp' or model_type.lower() == 'smplhalpe':\n return SMPL(model_path, model_type=model_type, **kwargs)\n else:\n raise ValueError('Unknown model type {}, exiting!'.format(model_type))\n\n\nclass SMPL(nn.Module):\n\n NUM_JOINTS = 23\n NUM_BODY_JOINTS = 23\n NUM_BETAS = 10\n\n def __init__(self, model_path, \n data_struct=None,\n create_betas=True,\n betas=None,\n create_global_orient=True,\n global_orient=None,\n create_body_pose=True,\n body_pose=None,\n create_transl=True,\n transl=None,\n create_scale=True,\n scale=None,\n dtype=torch.float32,\n batch_size=1,\n joint_mapper=None,\n model_type='smpl',\n gender='neutral',\n vertex_ids=None,\n **kwargs):\n ''' SMPL model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. 
MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n '''\n\n self.model_type = model_type\n self.gender = gender\n\n if data_struct is None:\n if osp.isdir(model_path):\n model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl')\n smpl_path = os.path.join(model_path, model_fn)\n else:\n smpl_path = model_path\n assert osp.exists(smpl_path), 'Path {} does not exist!'.format(\n smpl_path)\n\n with open(smpl_path, 'rb') as smpl_file:\n data_struct = Struct(**pickle.load(smpl_file,\n encoding='latin1'))\n\n super(SMPL, self).__init__()\n self.batch_size = batch_size\n\n if vertex_ids is None:\n # SMPL and SMPL-H share the same topology, so any extra joints can\n # be drawn from the same place\n vertex_ids = VERTEX_IDS['smplh']\n\n self.dtype = dtype\n\n self.joint_mapper = joint_mapper\n\n self.vertex_joint_selector = VertexJointSelector(\n vertex_ids=vertex_ids, **kwargs)\n\n self.faces = data_struct.f\n self.register_buffer('faces_tensor',\n to_tensor(to_np(self.faces, dtype=np.int64),\n dtype=torch.long))\n\n if create_betas:\n if betas is None: # the sequence captures same person\n default_betas = torch.zeros([1, self.NUM_BETAS],\n dtype=dtype)\n else:\n if 'torch.Tensor' in str(type(betas)):\n default_betas = betas.clone().detach()\n else:\n default_betas = torch.tensor(betas,\n dtype=dtype)\n\n self.register_parameter('betas', nn.Parameter(default_betas,\n requires_grad=True))\n\n # The tensor that contains the global rotation of the model\n # It is separated from the pose of the joints in case we wish to\n # optimize only over one of them\n if create_global_orient:\n if global_orient is None:\n default_global_orient = torch.zeros([batch_size, 3],\n dtype=dtype)\n else:\n if 'torch.Tensor' in str(type(global_orient)):\n default_global_orient = global_orient.clone().detach()\n else:\n default_global_orient = torch.tensor(global_orient,\n dtype=dtype)\n\n global_orient = nn.Parameter(default_global_orient,\n requires_grad=True)\n self.register_parameter('global_orient', global_orient)\n\n if create_body_pose:\n if body_pose is None:\n default_body_pose = torch.zeros(\n [batch_size, self.NUM_BODY_JOINTS * 3], dtype=dtype)\n else:\n if 'torch.Tensor' in str(type(body_pose)):\n default_body_pose = body_pose.clone().detach()\n else:\n default_body_pose = torch.tensor(body_pose,\n dtype=dtype)\n self.register_parameter(\n 'body_pose',\n nn.Parameter(default_body_pose, requires_grad=True))\n\n if create_transl:\n if transl is None:\n default_transl = torch.zeros([batch_size, 3],\n dtype=dtype,\n requires_grad=True)\n else:\n default_transl = torch.tensor(transl, dtype=dtype)\n self.register_parameter(\n 'transl',\n nn.Parameter(default_transl, requires_grad=True))\n\n if create_scale: # the sequence captures same person\n if scale is None:\n default_scale = torch.ones([1, 1],\n dtype=dtype,\n requires_grad=True)\n else:\n default_scale = torch.tensor(scale, dtype=dtype)\n self.register_parameter(\n 'scale',\n nn.Parameter(default_scale, requires_grad=True))\n\n\n # The vertices of the template model\n self.register_buffer('v_template',\n to_tensor(to_np(data_struct.v_template),\n dtype=dtype))\n\n # The shape components\n shapedirs = data_struct.shapedirs\n # The shape components\n self.register_buffer(\n 'shapedirs',\n to_tensor(to_np(shapedirs), dtype=dtype))\n\n if self.model_type == 'smpllsp':\n lsp_regressor = np.load('data/J_regressor_lsp.npy')\n 
joint_regressor = to_tensor(lsp_regressor, dtype=dtype)\n self.register_buffer('joint_regressor', joint_regressor)\n elif self.model_type == 'smplhalpe':\n lsp_regressor = np.load('data/J_regressor_halpe.npy')\n joint_regressor = to_tensor(lsp_regressor, dtype=dtype)\n self.register_buffer('joint_regressor', joint_regressor)\n\n j_regressor = to_tensor(to_np(\n data_struct.J_regressor), dtype=dtype)\n self.register_buffer('J_regressor', j_regressor)\n\n # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207\n num_pose_basis = data_struct.posedirs.shape[-1]\n # 207 x 20670\n posedirs = np.reshape(data_struct.posedirs, [-1, num_pose_basis]).T\n self.register_buffer('posedirs',\n to_tensor(to_np(posedirs), dtype=dtype))\n\n # indices of parents for each joints\n parents = to_tensor(to_np(data_struct.kintree_table[0])).long()\n parents[0] = -1\n self.register_buffer('parents', parents)\n\n self.register_buffer('lbs_weights',\n to_tensor(to_np(data_struct.weights), dtype=dtype))\n\n def create_mean_pose(self, data_struct):\n pass\n\n @torch.no_grad()\n def reset_params(self, **params_dict):\n for param_name, param in self.named_parameters():\n if param_name in params_dict:\n param[:] = params_dict[param_name].clone().detach()#torch.tensor(params_dict[param_name])\n else:\n param.fill_(0)\n\n def get_num_verts(self):\n return self.v_template.shape[0]\n\n def get_num_faces(self):\n return self.faces.shape[0]\n\n def extra_repr(self):\n return 'Number of betas: {}'.format(self.NUM_BETAS)\n\n def forward(self, betas=None, body_pose=None, global_orient=None,\n transl=None, scale=None, return_verts=True, return_full_pose=False,\n **kwargs):\n ''' Forward pass for the SMPL model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape Bx10\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. 
(default=True)\n            return_full_pose: bool, optional\n                Returns the full axis-angle pose vector (default=False)\n\n            Returns\n            -------\n        '''\n        # If no shape and pose parameters are passed along, then use the\n        # ones from the module\n        global_orient = (global_orient if global_orient is not None else\n                         self.global_orient)\n        body_pose = body_pose if body_pose is not None else self.body_pose\n        betas = betas if betas is not None else self.betas\n\n        apply_trans = transl is not None or hasattr(self, 'transl')\n        if transl is None and hasattr(self, 'transl'):\n            transl = self.transl\n\n        apply_scale = scale is not None or hasattr(self, 'scale')\n        if scale is None and hasattr(self, 'scale'):\n            scale = self.scale\n\n        full_pose = torch.cat([global_orient, body_pose], dim=1)\n\n        if not apply_scale:\n            scale = torch.ones([1, 1], dtype=self.dtype,\n                                requires_grad=False)\n        \n        vertices, joints = my_lbs(betas, full_pose, self.v_template,\n                               self.shapedirs, self.posedirs,\n                               self.J_regressor, self.parents,\n                               self.lbs_weights, scale, dtype=self.dtype)\n\n        if self.model_type == 'smpllsp':\n            joints = torch.matmul(self.joint_regressor, vertices)\n            # 24 + 5\n            joints = self.vertex_joint_selector(vertices, joints)\n\n        if self.model_type == 'smplhalpe':\n            joints = torch.matmul(self.joint_regressor, vertices)\n        # Map the joints to the current dataset\n        if self.joint_mapper is not None:\n            joints = self.joint_mapper(joints)\n\n        if apply_trans:\n            joints += transl.unsqueeze(dim=1)\n            vertices += transl.unsqueeze(dim=1)\n\n        if not return_verts:\n            vertices = None\n\n        output = ModelOutput(vertices=vertices if return_verts else None,\n                             global_orient=global_orient,\n                             body_pose=body_pose,\n                             joints=joints,\n                             betas=self.betas,\n                             full_pose=full_pose if return_full_pose else None)\n\n        return output"
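Note that the forward pass above computes joints two ways: my_lbs regresses them via vertices2joints (an einsum), while the LSP/Halpe branches apply the regressor with a plain torch.matmul. A toy shape check (random tensors, arbitrary sizes) confirming the two forms agree:

import torch

B, V, J = 2, 100, 17
J_regressor = torch.rand(J, V)     # stand-in for a JxV joint regressor
vertices = torch.randn(B, V, 3)

by_einsum = torch.einsum('bik,ji->bjk', [vertices, J_regressor])  # vertices2joints form
by_matmul = torch.matmul(J_regressor, vertices)                   # broadcasts over batch
print(by_matmul.shape)                                            # torch.Size([2, 17, 3])
print(torch.allclose(by_einsum, by_matmul, atol=1e-5))            # True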
] | [
[
"torch.cos",
"torch.norm",
"torch.ones",
"torch.cat",
"torch.zeros",
"torch.einsum",
"torch.sin",
"torch.eye",
"torch.arange",
"torch.unsqueeze",
"torch.matmul",
"torch.bmm",
"torch.split",
"torch.stack",
"torch.index_select",
"torch.nn.functional.pad"
],
[
"torch.cuda.synchronize",
"numpy.eye",
"torch.cuda.is_available",
"torch.tensor"
],
[
"torch.nn.Parameter",
"torch.ones",
"torch.cat",
"numpy.reshape",
"torch.zeros",
"torch.tensor",
"torch.matmul",
"torch.no_grad",
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ardhanii/covid19-sir | [
"59d95156b375c41259c46ce4e656b86903f92ec2",
"59d95156b375c41259c46ce4e656b86903f92ec2"
] | [
"covsirphy/loading/db_owid.py",
"covsirphy/regression/param_decision_tree.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport country_converter as coco\nimport pandas as pd\nfrom covsirphy.util.term import Term\nfrom covsirphy.loading.db_base import _RemoteDatabase\n\n\nclass _OWID(_RemoteDatabase):\n \"\"\"\n Access \"Our World In Data\".\n https://github.com/owid/covid-19-data/tree/master/public/data\n https://ourworldindata.org/coronavirus\n\n Args:\n filename (str): CSV filename to save records\n \"\"\"\n # URL for vaccine data\n URL_V = \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/\"\n URL_V_REC = f\"{URL_V}vaccinations.csv\"\n URL_V_LOC = f\"{URL_V}locations.csv\"\n # URL for PCR data\n URL_P = \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/\"\n URL_P_REC = f\"{URL_P}covid-testing-all-observations.csv\"\n # Citation\n CITATION = \"Hasell, J., Mathieu, E., Beltekian, D. et al.\" \\\n \" A cross-country database of COVID-19 testing. Sci Data 7, 345 (2020).\" \\\n \" https://doi.org/10.1038/s41597-020-00688-8\"\n # Column names and data types\n # {\"name in database\": \"name defined in Term class\"}\n COL_DICT = {\n \"date\": Term.DATE,\n \"location\": Term.COUNTRY,\n Term.PROVINCE: Term.PROVINCE,\n \"iso_code\": Term.ISO3,\n \"vaccines\": Term.PRODUCT,\n \"total_vaccinations\": Term.VAC,\n \"people_vaccinated\": Term.V_ONCE,\n \"people_fully_vaccinated\": Term.V_FULL,\n \"tests\": Term.TESTS,\n }\n\n def download(self, verbose):\n \"\"\"\n Download the dataset from the server and set the list of primary sources.\n\n Args:\n verbose (int): level of verbosity\n\n Returns:\n pandas.DataFrame\n Index\n reset index\n Columns\n defined by the first values of self.COL_DICT.values()\n\n Note:\n If @verbose is equal to or over 1, how to show the list will be explained.\n \"\"\"\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import GridSearchCV, TimeSeriesSplit\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.tree import DecisionTreeRegressor\nfrom covsirphy.regression.regbase import _RegressorBase\nfrom covsirphy.regression.reg_rate_converter import _RateConverter\n\n\nclass _ParamDecisionTreeRegressor(_RegressorBase):\n \"\"\"\n Predict parameter values of ODE models with decision tree regressor.\n\n Args:\n - X_train (pandas.DataFrame): X for training with time index\n - X_test (pandas.DataFrame): X for test with time index\n - Y_train (pandas.DataFrame): Y for training with time index\n - Y_test (pandas.DataFrame): Y for test with time index\n - X_target (pandas.DataFrame): X for prediction with time index\n \"\"\"\n # Description of regressor\n DESC = \"Indicators -> Parameters with Decision Tree Regressor\"\n\n def _fit(self):\n \"\"\"\n Fit regression model with training dataset, update self._pipeline and self._param.\n \"\"\"\n # Paramters of the steps\n param_grid = {\n \"converter__to_convert\": [True, False],\n \"pca__n_components\": [0.3, 0.5, 0.7, 0.9],\n \"regressor__max_depth\": list(range(1, 10)),\n }\n # Fit with pipeline\n steps = [\n (\"converter\", _RateConverter()),\n (\"scaler\", MinMaxScaler()),\n (\"pca\", PCA(random_state=0)),\n (\"regressor\", DecisionTreeRegressor(random_state=0)),\n ]\n tscv = TimeSeriesSplit(n_splits=5).split(self._X_train)\n pipeline = GridSearchCV(Pipeline(steps=steps), param_grid, n_jobs=-1, cv=tscv)\n pipeline.fit(self._X_train, self._Y_train)\n # Update regressor\n self._pipeline = pipeline\n # Update param\n self._param.update(**{k: type(v) for (k, v) in steps})\n"
] | [
[
"pandas.read_csv"
],
[
"sklearn.tree.DecisionTreeRegressor",
"sklearn.pipeline.Pipeline",
"sklearn.model_selection.TimeSeriesSplit",
"sklearn.decomposition.PCA",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
egonrian/google-research | [
"9049acf9246c1b75170f0c6757e62a8f619a9db6",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"9049acf9246c1b75170f0c6757e62a8f619a9db6",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"8177adbe9ca0d7e5a9463b54581fe6dd27be0974",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467",
"2c0043ecd507e75e2df9973a3015daf9253e1467"
] | [
"task_set/tasks/fixed/fixed_text_rnn_classification_test.py",
"depth_and_motion_learning/consistency_losses.py",
"protein_lm/embed_test.py",
"group_agnostic_fairness/robust_learning_model.py",
"cluster_gcn/train.py",
"stochastic_to_deterministic/hashing.py",
"fairness_teaching/rl/data.py",
"neural_guided_symbolic_regression/utils/arithmetic_grammar_test.py",
"tunas/tools/infer_mobile_cost_model.py",
"flax_models/cifar/datasets/augmentation.py",
"depth_and_motion_learning/dataset/data_processing_util.py",
"simulation_research/traffic/evacuation_simulation.py",
"smith/loss_fns_test.py",
"dql_grasping/tf_modules.py",
"flax_models/bert/main.py",
"soft_sort/matrix_factorization/train.py",
"drfact/analysis/data_analysis.py",
"amortized_bo/domains.py",
"hyperbolic/utils/learn.py",
"fair_survival_analysis/coupled_deep_cph_vae.py",
"task_set/tasks/fixed/fixed_text_rnn_classification.py",
"kws_streaming/layers/windowing.py",
"goemotions/bert/modeling.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for task_set.tasks.fixed_text_rnn_classification.\"\"\"\nfrom absl.testing import parameterized\n\nfrom task_set import registry\nfrom task_set.tasks import family_test_utils\nfrom task_set.tasks.fixed import fixed_text_rnn_classification # pylint: disable=unused-import\nimport tensorflow.compat.v1 as tf\n\n\nclass FixedTextRNNClassificationTest(family_test_utils.SingleTaskTestCase):\n\n def test_right_number_of_tasks(self):\n task_names = registry.task_registry.get_all_fixed_config_names()\n self.assertLen(task_names, 12)\n\n @parameterized.parameters(registry.task_registry.get_all_fixed_config_names())\n def test_tasks(self, task_name):\n self.task_test(registry.task_registry.get_instance(task_name))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loss functions that impose RGB and depth motion-consistency across frames.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom depth_and_motion_learning import resampler\nfrom depth_and_motion_learning import transform_utils\n\n\ndef rgbd_consistency_loss(frame1transformed_depth,\n frame1rgb,\n frame2depth,\n frame2rgb,\n validity_mask=None):\n \"\"\"Computes a loss that penalizes RGBD inconsistencies between frames.\n\n This function computes 3 losses that penalize inconsistencies between two\n frames: depth, RGB, and structural similarity. It IS NOT SYMMETRIC with\n respect to both frames. In particular, to address occlusions, it only\n penalizes depth and RGB inconsistencies at pixels where frame1 is closer to\n the camera than frame2 (Why? see https://arxiv.org/abs/1904.04998). Therefore\n the intended usage pattern is running it twice - second time with the two\n frames swapped.\n\n Args:\n frame1transformed_depth: A transform_depth_map.TransformedDepthMap object\n representing the depth map of frame 1 after it was motion-transformed to\n frame 2, a motion transform that accounts for all camera and object motion\n that occurred between frame1 and frame2. The tensors inside\n frame1transformed_depth are of shape [B, H, W].\n frame1rgb: A tf.Tensor of shape [B, H, W, C] containing the RGB image at\n frame1.\n frame2depth: A tf.Tensor of shape [B, H, W] containing the depth map at\n frame2.\n frame2rgb: A tf.Tensor of shape [B, H, W, C] containing the RGB image at\n frame2.\n validity_mask: a tf.Tensor of a floating point type and a shape of\n [B, H, W, 1] containing a validity mask.\n\n Returns:\n A dicionary from string to tf.Tensor, with the following entries:\n depth_error: A tf scalar, the depth mismatch error between the two frames.\n rgb_error: A tf scalar, the rgb mismatch error between the two frames.\n ssim_error: A tf scalar, the strictural similarity mismatch error between\n the two frames.\n depth_proximity_weight: A tf.Tensor of shape [B, H, W], representing a\n function that peaks (at 1.0) for pixels where there is depth consistency\n between the two frames, and is small otherwise.\n frame1_closer_to_camera: A tf.Tensor of shape [B, H, W, 1], a mask that is\n 1.0 when the depth map of frame 1 has smaller depth than frame 2.\n \"\"\"\n frame2rgbd = tf.concat(\n [frame2rgb, tf.expand_dims((frame2depth), -1)], axis=-1)\n frame2rgbd_resampled = resampler.resampler_with_unstacked_warp(\n frame2rgbd,\n frame1transformed_depth.pixel_x,\n frame1transformed_depth.pixel_y,\n safe=False)\n frame2rgb_resampled, frame2depth_resampled = tf.split(\n frame2rgbd_resampled, [3, 1], axis=-1)\n frame2depth_resampled = tf.squeeze(frame2depth_resampled, axis=-1)\n\n # f1td.depth is the predicted depth at [pixel_y, pixel_x] for frame2. 
Now we\n # generate (by interpolation) the actual depth values for frame2's depth, at\n # the same locations, so that we can compare the two depths.\n\n # We penalize inconsistencies between the two frames' depth maps only if the\n # transformed depth map (of frame 1) falls closer to the camera than the\n # actual depth map (of frame 2). This is intended for avoiding penalizing\n # points that become occluded because of the transform.\n # So what about depth inconsistencies where frame1's depth map is FARTHER from\n # the camera than frame2's? These will be handled when we swap the roles of\n # frame 1 and 2 (more in https://arxiv.org/abs/1904.04998).\n frame1_closer_to_camera = tf.to_float(\n tf.logical_and(\n frame1transformed_depth.mask,\n tf.less(frame1transformed_depth.depth, frame2depth_resampled)))\n frames_l1_diff = tf.abs(frame2depth_resampled - frame1transformed_depth.depth)\n if validity_mask is not None:\n frames_l1_diff = frames_l1_diff * tf.squeeze(validity_mask, axis=[3])\n depth_error = tf.reduce_mean(\n tf.math.multiply_no_nan(frames_l1_diff, frame1_closer_to_camera))\n\n frames_rgb_l1_diff = tf.abs(frame2rgb_resampled - frame1rgb)\n if validity_mask is not None:\n frames_rgb_l1_diff = frames_rgb_l1_diff * validity_mask\n rgb_error = tf.math.multiply_no_nan(\n frames_rgb_l1_diff, tf.expand_dims(frame1_closer_to_camera, -1))\n rgb_error = tf.reduce_mean(rgb_error)\n\n # We generate a weight function that peaks (at 1.0) for pixels where when the\n # depth difference is less than its standard deviation across the frame, and\n # fall off to zero otherwise. This function is used later for weighing the\n # structural similarity loss term. We only want to demand structural\n # similarity for surfaces that are close to one another in the two frames.\n depth_error_second_moment = _weighted_average(\n tf.square(frame2depth_resampled - frame1transformed_depth.depth),\n frame1_closer_to_camera) + 1e-4\n depth_proximity_weight = tf.math.multiply_no_nan(\n depth_error_second_moment /\n (tf.square(frame2depth_resampled - frame1transformed_depth.depth) +\n depth_error_second_moment), tf.to_float(frame1transformed_depth.mask))\n\n if validity_mask is not None:\n depth_proximity_weight = depth_proximity_weight * tf.squeeze(\n validity_mask, axis=[3])\n\n # If we don't stop the gradient training won't start. The reason is presumably\n # that then the network can push the depths apart instead of seeking RGB\n # consistency.\n depth_proximity_weight = tf.stop_gradient(depth_proximity_weight)\n\n ssim_error, avg_weight = weighted_ssim(\n frame2rgb_resampled,\n frame1rgb,\n depth_proximity_weight,\n c1=float('inf'), # These values of c1 and c2 seemed to work better than\n c2=9e-6) # defaults. 
TODO(gariel): Make them parameters rather\n # than hard coded.\n ssim_error_mean = tf.reduce_mean(\n tf.math.multiply_no_nan(ssim_error, avg_weight))\n\n endpoints = {\n 'depth_error': depth_error,\n 'rgb_error': rgb_error,\n 'ssim_error': ssim_error_mean,\n 'depth_proximity_weight': depth_proximity_weight,\n 'frame1_closer_to_camera': frame1_closer_to_camera\n }\n return endpoints\n\n\ndef motion_field_consistency_loss(frame1transformed_pixelx,\n frame1transformed_pixely, mask, rotation1,\n translation1, rotation2, translation2):\n \"\"\"Computes a cycle consistency loss between two motion maps.\n\n Given two rotation and translation maps (of two frames), and a mapping from\n one frame to the other, this function assists in imposing that the fields at\n frame 1 represent the opposite motion of the ones in frame 2.\n\n In other words: At any given pixel on frame 1, if we apply the translation and\n rotation designated at that pixel, we land on some pixel in frame 2, and if we\n apply the translation and rotation designated there, we land back at the\n original pixel at frame 1.\n\n Args:\n frame1transformed_pixelx: A tf.Tensor of shape [B, H, W] representing the\n motion-transformed x-location of each pixel in frame 1.\n frame1transformed_pixely: A tf.Tensor of shape [B, H, W] representing the\n motion-transformed y-location of each pixel in frame 1.\n mask: A tf.Tensor of shape [B, H, W, 2] expressing the weight of each pixel\n in the calculation of the consistency loss.\n rotation1: A tf.Tensor of shape [B, 3] representing rotation angles.\n translation1: A tf.Tensor of shape [B, H, W, 3] representing translation\n vectors.\n rotation2: A tf.Tensor of shape [B, 3] representing rotation angles.\n translation2: A tf.Tensor of shape [B, H, W, 3] representing translation\n vectors.\n\n Returns:\n A dicionary from string to tf.Tensor, with the following entries:\n rotation_error: A tf scalar, the rotation consistency error.\n translation_error: A tf scalar, the translation consistency error.\n\n \"\"\"\n\n translation2resampled = resampler.resampler_with_unstacked_warp(\n translation2,\n tf.stop_gradient(frame1transformed_pixelx),\n tf.stop_gradient(frame1transformed_pixely),\n safe=False)\n rotation1field = tf.broadcast_to(\n _expand_dims_twice(rotation1, -2), tf.shape(translation1))\n rotation2field = tf.broadcast_to(\n _expand_dims_twice(rotation2, -2), tf.shape(translation2))\n rotation1matrix = transform_utils.matrix_from_angles(rotation1field)\n rotation2matrix = transform_utils.matrix_from_angles(rotation2field)\n\n rot_unit, trans_zero = transform_utils.combine(rotation2matrix,\n translation2resampled,\n rotation1matrix, translation1)\n eye = tf.eye(3, batch_shape=tf.shape(rot_unit)[:-2])\n\n # We normalize the product of rotations by the product of their norms, to make\n # the loss agnostic of their magnitudes, only wanting them to be opposite in\n # directions. 
Otherwise the loss has a tendency to drive the rotations to\n # zero.\n rot_error = tf.reduce_mean(tf.square(rot_unit - eye), axis=(3, 4))\n rot1_scale = tf.reduce_mean(tf.square(rotation1matrix - eye), axis=(3, 4))\n rot2_scale = tf.reduce_mean(tf.square(rotation2matrix - eye), axis=(3, 4))\n rot_error /= (1e-24 + rot1_scale + rot2_scale)\n rotation_error = tf.reduce_mean(rot_error)\n\n def norm(x):\n return tf.reduce_sum(tf.square(x), axis=-1)\n\n # Here again, we normalize by the magnitudes, for the same reason.\n translation_error = tf.reduce_mean(tf.math.multiply_no_nan(\n mask, norm(trans_zero) /\n (1e-24 + norm(translation1) + norm(translation2resampled))))\n\n return {\n 'rotation_error': rotation_error,\n 'translation_error': translation_error\n }\n\n\ndef rgbd_and_motion_consistency_loss(frame1transformed_depth,\n frame1rgb,\n frame2depth,\n frame2rgb,\n rotation1,\n translation1,\n rotation2,\n translation2,\n validity_mask=None):\n \"\"\"A helper that bundles rgbd and motion consistency losses together.\"\"\"\n endpoints = rgbd_consistency_loss(\n frame1transformed_depth,\n frame1rgb,\n frame2depth,\n frame2rgb,\n validity_mask=validity_mask)\n # We calculate the loss only for when frame1transformed_depth is closer to the\n # camera than frame2 (occlusion-awareness). See explanation in\n # rgbd_consistency_loss above.\n mask = endpoints['frame1_closer_to_camera']\n if validity_mask is not None:\n mask *= tf.squeeze(validity_mask, axis=3)\n endpoints.update(\n motion_field_consistency_loss(frame1transformed_depth.pixel_x,\n frame1transformed_depth.pixel_y, mask,\n rotation1, translation1, rotation2,\n translation2))\n return endpoints\n\n\ndef weighted_ssim(x, y, weight, c1=0.01**2, c2=0.03**2, weight_epsilon=0.01):\n \"\"\"Computes a weighted structured image similarity measure.\n\n See https://en.wikipedia.org/wiki/Structural_similarity#Algorithm. The only\n difference here is that not all pixels are weighted equally when calculating\n the moments - they are weighted by a weight function.\n\n Args:\n x: A tf.Tensor representing a batch of images, of shape [B, H, W, C].\n y: A tf.Tensor representing a batch of images, of shape [B, H, W, C].\n weight: A tf.Tensor of shape [B, H, W], representing the weight of each\n pixel in both images when we come to calculate moments (means and\n correlations).\n c1: A floating point number, regularizes division by zero of the means.\n c2: A floating point number, regularizes division by zero of the second\n moments.\n weight_epsilon: A floating point number, used to regularize division by the\n weight.\n\n Returns:\n A tuple of two tf.Tensors. First, of shape [B, H-2, W-2, C], is scalar\n similarity loss oer pixel per channel, and the second, of shape\n [B, H-2. W-2, 1], is the average pooled `weight`. It is needed so that we\n know how much to weigh each pixel in the first tensor. For example, if\n `'weight` was very small in some area of the images, the first tensor will\n still assign a loss to these pixels, but we shouldn't take the result too\n seriously.\n \"\"\"\n if c1 == float('inf') and c2 == float('inf'):\n raise ValueError('Both c1 and c2 are infinite, SSIM loss is zero. 
This is '\n                     'likely unintended.')\n  weight = tf.expand_dims(weight, -1)\n  average_pooled_weight = _avg_pool3x3(weight)\n  weight_plus_epsilon = weight + weight_epsilon\n  inverse_average_pooled_weight = 1.0 / (average_pooled_weight + weight_epsilon)\n\n  def weighted_avg_pool3x3(z):\n    weighted_avg = _avg_pool3x3(z * weight_plus_epsilon)\n    return weighted_avg * inverse_average_pooled_weight\n\n  mu_x = weighted_avg_pool3x3(x)\n  mu_y = weighted_avg_pool3x3(y)\n  sigma_x = weighted_avg_pool3x3(x**2) - mu_x**2\n  sigma_y = weighted_avg_pool3x3(y**2) - mu_y**2\n  sigma_xy = weighted_avg_pool3x3(x * y) - mu_x * mu_y\n  if c1 == float('inf'):\n    ssim_n = (2 * sigma_xy + c2)\n    ssim_d = (sigma_x + sigma_y + c2)\n  elif c2 == float('inf'):\n    ssim_n = 2 * mu_x * mu_y + c1\n    ssim_d = mu_x**2 + mu_y**2 + c1\n  else:\n    ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)\n    ssim_d = (mu_x**2 + mu_y**2 + c1) * (sigma_x + sigma_y + c2)\n  result = ssim_n / ssim_d\n  return tf.clip_by_value((1 - result) / 2, 0, 1), average_pooled_weight\n\n\ndef _avg_pool3x3(x):\n  return tf.nn.avg_pool(x, [1, 3, 3, 1], [1, 1, 1, 1], 'VALID')\n\n\ndef _weighted_average(x, w, epsilon=1.0):\n  weighted_sum = tf.reduce_sum(x * w, axis=(1, 2), keepdims=True)\n  sum_of_weights = tf.reduce_sum(w, axis=(1, 2), keepdims=True)\n  return weighted_sum / (sum_of_weights + epsilon)\n\n\ndef _expand_dims_twice(x, dim):\n  return tf.expand_dims(tf.expand_dims(x, dim), dim)\n",
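The epsilon in _weighted_average above doubles as a regularizer for near-empty weight masks. A numpy re-statement on a toy array (same axis convention, independent of TF):

import numpy as np

def weighted_average(x, w, epsilon=1.0):
    # Weighted mean over the spatial axes; epsilon keeps the division finite
    # when the weights sum to ~0 (e.g., a fully masked-out image).
    weighted_sum = (x * w).sum(axis=(1, 2), keepdims=True)
    sum_of_weights = w.sum(axis=(1, 2), keepdims=True)
    return weighted_sum / (sum_of_weights + epsilon)

x = np.arange(16, dtype=np.float32).reshape(1, 4, 4)
w = np.ones((1, 4, 4), dtype=np.float32)
print(weighted_average(x, w))  # sum 0..15 = 120, divided by 16 + 1 -> ~7.0588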
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for embedding functions.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport mock\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom protein_lm import data\nfrom protein_lm import embed\nfrom protein_lm import models\n\n\nclass EncodingTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n (10, None, 4),\n (None, 10, 4),\n (None, 3, 3),\n (3, None, 3))\n def test_encode_string_sequences(self, domain_length, length,\n expected_output_length):\n seqs = ['ABCD', 'EFG']\n domain = data.make_protein_domain(\n length=domain_length) if domain_length else None\n\n output_batch = embed._encode_string_sequences(\n seqs, domain=domain, length=length)\n self.assertEqual(output_batch.shape, (2, expected_output_length))\n\n\ndef _get_model(domain):\n return models.FlaxLM(\n domain=domain,\n num_layers=1,\n num_heads=1,\n qkv_dim=32,\n emb_dim=32,\n mlp_dim=32)\n\n\nclass EmbedTest(parameterized.TestCase, tf.test.TestCase):\n\n def setUp(self):\n self._domain = data.make_protein_domain(length=12)\n self._model = _get_model(self._domain)\n self._embed_fn = embed.get_embed_fn(\n model=self._model,\n domain=self._domain,\n reduce_fn=embed.masked_reduce_fn)\n super().setUp()\n\n def test_get_embed_fn_int_sequences(self):\n p1 = 'ACDEFHIKLNQP'\n p2 = 'ALNQP'\n encoded = self._domain.encode([p1, p2])\n int_embs = self._embed_fn(encoded)\n self.assertEqual((2, 32), int_embs.shape)\n\n def test_embed_strings(self):\n p1 = 'ACDEFHIKLNQP'\n p2 = 'ALNQP'\n str_embs = self._embed_fn([p1, p2])\n self.assertEqual((2, 32), str_embs.shape)\n\n @parameterized.parameters(\n (embed.sum_reducer, [[5, 7, 9]]),\n (embed.mean_reducer, [[2.5, 3.5, 4.5]]),\n (embed.max_reducer, [[4, 5, 6]]),\n )\n def test_reducer(self, reduce_fn, expected):\n embedding = np.array([[[1, 2, 6], [4, 5, 3], [7, 10, 9]]])\n mask = np.array([[1, 1, 0]])\n reduced = reduce_fn(embedding, mask)\n self.assertAllClose(reduced, expected)\n\n @parameterized.parameters(\n (True, True, True, True, [[1, 2]]),\n (False, True, True, True, [[3, 4]]),\n (True, False, True, True, [[7, 8]]),\n (True, True, False, True, [[9, 10]]),\n (True, True, True, False, [[46, 11]]),\n )\n def test_masked_reduce_fn(self, ignore_eos, ignore_bos, ignore_pad,\n ignore_mask, expected):\n embedding = np.array([[[1, 2], [13, 14], [5, 6], [17, 18], [91, 20]]])\n domain = self._domain\n inputs = np.array([[0, domain.vocab.bos, domain.vocab.eos, domain.vocab.pad,\n domain.vocab.mask]])\n reduced = embed.masked_reduce_fn(embedding=embedding,\n inputs=inputs,\n ignore_eos=ignore_eos,\n ignore_bos=ignore_bos,\n ignore_pad=ignore_pad,\n ignore_mask=ignore_mask)\n self.assertAllClose(reduced, expected)\n\n def test_validate_input_int_sequences(self):\n with self.assertRaisesRegex(ValueError, 'Input int-encoded sequences'):\n self._embed_fn([np.ones(14)])\n\n\nclass 
ProteinLMEmbedderTest(parameterized.TestCase, tf.test.TestCase):\n\n  def setUp(self):\n    self._domain = data.make_protein_domain(length=12)\n    self._model = _get_model(self._domain)\n    super().setUp()\n\n  # TODO(gandreea): get this test to pass by fixing domain.encode padding issues\n  # def test_embedder_encoding(self):\n  #   seqs = ['ACDEFHIKLNQP', 'ALNQP']\n  #   str_embs = self._embed_fn(seqs)\n  #   int_embs = self._embed_fn(self._domain.encode(seqs))\n  #   self.assertAllClose(int_embs, str_embs)\n\n  def test_embedder_batching(self):\n    \"\"\"Asserts that the model is always called with fixed-size batches.\"\"\"\n    batch_size = 4\n    embedder = embed.ProteinLMEmbedder(\n        model=self._model, output_head='output_emb', batch_size=batch_size)\n    embedder._embed_fn = mock.Mock(wraps=embedder._embed_fn)\n    # Call on various differently-sized batches.\n    expected_call_shapes = []\n    expected_batch_shape = (batch_size, self._domain.length)\n    for num_seqs in [1, 3, 5, 10]:\n      embedder(self._domain.sample_uniformly(num_seqs))\n      expected_num_batches = int(np.ceil(num_seqs / batch_size))\n      expected_call_shapes.extend([expected_batch_shape] * expected_num_batches)\n\n    actual_call_shapes = [\n        call[0][0].shape for call in embedder._embed_fn.call_args_list\n    ]\n    self.assertAllEqual(actual_call_shapes, expected_call_shapes)\n\n\nif __name__ == '__main__':\n  absltest.main()\n",
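The reducer tests above pin down the masked pooling semantics. A small numpy sketch of the masked mean they encode (toy values copied from the test_reducer table; this mirrors mean_reducer, not the library's exact implementation):

import numpy as np

def mean_reducer(embedding, mask):
    # embedding: [B, L, D]; mask: [B, L], 1 keeps a position, 0 drops it.
    mask = mask[..., None].astype(embedding.dtype)
    return (embedding * mask).sum(axis=1) / mask.sum(axis=1)

embedding = np.array([[[1., 2., 6.], [4., 5., 3.], [7., 10., 9.]]])
mask = np.array([[1, 1, 0]])
print(mean_reducer(embedding, mask))  # [[2.5, 3.5, 4.5]], as asserted in test_reducer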
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n# pylint: disable=dangerous-default-value\n\"\"\"A custom estimator for adversarial robust learning.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.contrib import framework as contrib_framework\nfrom tensorflow.contrib import layers as contrib_layers\nfrom tensorflow.contrib import metrics as contrib_metrics\n\n\nclass _RobustModel():\n \"\"\"TensorFlow RobustModel base class.\n\n RobustModel class can be used to define a robust estimator.\n\n Robust estimator can be used to train a robust model with two DNNs:\n A primary DNN that trains for the main task.\n A adversarial DNN that aims to assign weights to examples based on the\n primary's example loss.\n\n The two models are jointly trained to optimize for a min max problem between\n primary and adversary by alternating between the two loss functions.\n \"\"\"\n\n def __init__(\n self,\n feature_columns,\n label_column_name,\n config,\n model_dir,\n primary_hidden_units=[64, 32],\n adversary_hidden_units=[32],\n batch_size=256,\n primary_learning_rate=0.01,\n adversary_learning_rate=0.01,\n optimizer='Adagrad',\n activation=tf.nn.relu,\n adversary_loss_type='ce_loss',\n adversary_include_label=True,\n upweight_positive_instance_only=False,\n pretrain_steps=5000\n ):\n \"\"\"Initializes a robust estimator.\n\n Args:\n feature_columns: list of feature_columns.\n label_column_name: (string) name of the target variable.\n config: `RunConfig` object to configure the runtime settings.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into an estimator\n to continue training a previously saved model.\n primary_hidden_units: List with number of hidden units per layer for the\n shared bottom. All layers are fully connected.\n Ex. `[64, 32]` means first layer has 64 nodes and second one has 32.\n adversary_hidden_units: List with number of hidden units per layer for the\n shared bottom. All layers are fully connected.\n Ex. `[32]` means first layer has 32 nodes.\n batch_size: (int) batch size.\n primary_learning_rate: learning rate of primary DNN.\n adversary_learning_rate: learning rate of adversary DNN.\n optimizer: An instance of `tf.Optimizer` used to train the model.\n activation: Activation function applied to each layer.\n adversary_loss_type: (string) specifying the type of loss function to be\n used in adversary. Takes values in [\"hinge_loss\", \"ce_loss\"], which\n stand for hinge loss, and sigmoid cross entropy loss, respectively.\n adversary_include_label: Boolean flag. If set, adds label as input to the\n adversary feature columns.\n upweight_positive_instance_only: Boolean flag. 
If set, weights only\n positive examples in adversary hinge_loss.\n pretrain_steps: (int) The number of training steps for whih the model\n should train only primary model, before switching to alternate training\n between primary and adversary.\n\n Raises:\n ValueError: if label_column_name not specified.\n ValueError: if primary_hidden_units is not a list.\n ValueError: if adversary_hidden_units is not a list.\n\n \"\"\"\n if not label_column_name:\n raise ValueError('Need to specify a label_column_name.')\n\n if not isinstance(primary_hidden_units, list):\n raise ValueError('primary_hidden_units should be a list of size 2.')\n\n if not isinstance(adversary_hidden_units, list):\n raise ValueError('adversary_hidden_units should be a list of size 1.')\n\n self._feature_columns = feature_columns\n self._primary_learning_rate = primary_learning_rate\n self._adversary_learning_rate = adversary_learning_rate\n self._optimizer = optimizer\n self._model_dir = model_dir\n self._primary_hidden_units = primary_hidden_units\n self._adversary_hidden_units = adversary_hidden_units\n self._config = config\n self._activation = activation\n self._batch_size = batch_size\n self._label_column_name = label_column_name\n self._adversary_include_label = adversary_include_label\n self._adversary_loss_type = adversary_loss_type\n self._pretrain_steps = pretrain_steps\n self._upweight_positive_instance_only = upweight_positive_instance_only\n\n def _primary_loss(self, labels, logits, example_weights):\n \"\"\"Computes weighted sigmoid cross entropy loss.\n\n Args:\n labels: Labels.\n logits: Logits.\n example_weights: a float tensor of shape [batch_size, 1] for the\n reweighting values for each example in the batch.\n\n Returns:\n loss: (scalar) loss\n \"\"\"\n with tf.name_scope(None, 'primary_loss', (logits, labels)) as name:\n sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=labels, logits=logits, name=name)\n primary_weighted_loss = (example_weights * sigmoid_loss)\n primary_weighted_loss = tf.reduce_mean(primary_weighted_loss)\n return primary_weighted_loss\n\n def _get_hinge_loss(self, labels, logits, pos_weights):\n \"\"\"Computes hinge loss over labels and logits from primary task.\n\n Args:\n labels: Labels.\n logits: Logits.\n pos_weights: a float tensor of shape [batch_size, 1]. 
Assigns weight 1\n for positive examples, and weight 0 for negative examples in the batch.\n\n Returns:\n loss: a float tensor of shape [batch_size, 1] containing hinge loss.\n \"\"\"\n # If set, gives weight to only positive instances\n if self._upweight_positive_instance_only:\n hinge_loss = tf.losses.hinge_loss(\n labels=labels, logits=logits, weights=pos_weights, reduction='none')\n else:\n hinge_loss = tf.losses.hinge_loss(labels=labels,\n logits=logits,\n reduction='none')\n # To avoid numerical errors at loss = ``0''\n hinge_loss = tf.maximum(hinge_loss, 0.1)\n return hinge_loss\n\n def _get_cross_entropy_loss(self, labels, logits):\n \"\"\"Computes cross-entropy loss over labels and logits from primary task.\"\"\"\n return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)\n\n def _adversary_loss(self,\n labels,\n logits,\n pos_weights,\n example_weights,\n adversary_loss_type='hinge_loss'):\n \"\"\"Computes (negative) adversary loss.\n\n At the end of this function, the calculated loss\n is multiplied with -1, so that it can be maximized later on by minimizing\n the output of this function.\n\n Args:\n labels: Labels.\n logits: Logits.\n pos_weights: a float tensor of shape [batch_size, 1]\n to compute weighted hinge_loss\n example_weights: a float tensor of shape [batch_size, 1] for the\n reweighting values for each example in the batch.\n adversary_loss_type: (string) flag defining which loss type to use.\n Takes values in [\"hinge_loss\",\"ce_loss\"].\n\n Returns:\n loss: (scalar) loss\n \"\"\"\n with tf.name_scope(None, 'adversary_loss', (logits, labels, pos_weights)):\n if adversary_loss_type == 'hinge_loss':\n loss = self._get_hinge_loss(labels, logits, pos_weights)\n tf.summary.histogram('hinge_loss', loss)\n elif adversary_loss_type == 'ce_loss':\n loss = self._get_cross_entropy_loss(labels, logits)\n tf.summary.histogram('ce_loss', loss)\n\n # Multiplies loss by -1 so that the adversary loss is maximimized.\n adversary_weighted_loss = -(example_weights * loss)\n\n return tf.reduce_mean(adversary_weighted_loss)\n\n def _get_or_create_global_step_var(self):\n \"\"\"Return the global_step variable, creating it if it does not exist.\n\n Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.\n\n Returns:\n The global_step variable, or a new created one if it does not exist.\n \"\"\"\n return tf.train.get_or_create_global_step()\n\n def _get_adversary_features_and_feature_columns(self, features, targets):\n \"\"\"Return adversary features and feature columns.\"\"\"\n adversarial_features = features.copy()\n adversary_feature_columns = self._feature_columns[:]\n # Adds label to adversarial features\n if self._adversary_include_label:\n adversary_feature_columns.append(\n tf.feature_column.numeric_column(self._label_column_name))\n adversarial_features[self._label_column_name] = targets[\n self._label_column_name]\n\n return adversarial_features, adversary_feature_columns\n\n def _compute_example_weights(self, adv_output_layer):\n \"\"\"Applies sigmoid to adversary output layer and returns normalized example weight.\"\"\"\n example_weights = tf.nn.sigmoid(adv_output_layer)\n mean_example_weights = tf.reduce_mean(example_weights)\n example_weights /= tf.maximum(mean_example_weights, 1e-4)\n example_weights = tf.ones_like(example_weights)+example_weights\n return example_weights\n\n def _get_model_fn(self):\n \"\"\"Method that gets a model_fn for creating an `Estimator` Object.\"\"\"\n\n def model_fn(features, labels, mode):\n \"\"\"robustModel 
model_fn.\n\n Args:\n features: `Tensor` or `dict` of `Tensor`.\n labels: A `dict` of `Tensor` Objects. Expects to have a key/value pair\n for the key self.label_column_name.\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`. Currently PREDICT mode is not implemented.\n\n Returns:\n An instance of `tf.estimator.EstimatorSpec', which encapsulates the\n `mode`, `predictions`, `loss` and the `train_op`. Note that here\n `predictions` is either a `Tensor` or a `dict` of `Tensor` objects,\n representing the prediction of the bianry classification model.\n 'loss` is a scalar containing the loss of the step and `train_op` is the\n op for training.\n \"\"\"\n\n # Instantiates a tensor with weight for positive class examples only\n pos_weights = tf.cast(tf.equal(labels[self._label_column_name], 1),\n dtype=tf.float32)\n\n # Instantiates a tensor with true class labels\n class_labels = labels[self._label_column_name]\n\n # Initialize a global step variable used for alternate training\n current_step = self._get_or_create_global_step_var()\n\n if mode == tf.estimator.ModeKeys.EVAL:\n tf.logging.info('model_fn: EVAL, {}'.format(mode))\n elif mode == tf.estimator.ModeKeys.TRAIN:\n tf.logging.info('model_fn: TRAIN, {}'.format(mode))\n\n # Creates a DNN architecture for primary binary classification task\n with tf.name_scope('primary_NN'):\n with tf.variable_scope('primary'):\n input_layer = tf.feature_column.input_layer(features,\n self._feature_columns)\n h1 = tf.layers.Dense(self._primary_hidden_units[0],\n activation=self._activation)(input_layer)\n h2 = tf.layers.Dense(self._primary_hidden_units[1],\n activation=self._activation)(h1)\n logits = tf.layers.Dense(1)(h2)\n sigmoid_output = tf.nn.sigmoid(logits, name='sigmoid')\n class_predictions = tf.cast(\n tf.greater(sigmoid_output, 0.5), tf.float32)\n tf.summary.histogram('class_predictions', class_predictions)\n\n # Creates a network architecture for the adversarial regression task\n with tf.name_scope('adversary_NN'):\n with tf.variable_scope('adversary'):\n # Gets adversary features and features columns\n adversarial_features, adversary_feature_columns = self._get_adversary_features_and_feature_columns(features, labels) # pylint: disable=line-too-long\n adv_input_layer = tf.feature_column.input_layer(\n adversarial_features, adversary_feature_columns)\n adv_h1 = tf.layers.Dense(self._adversary_hidden_units[0])(\n adv_input_layer)\n adv_output_layer = tf.layers.Dense(1, use_bias=True)(adv_h1)\n example_weights = tf.cond(\n tf.greater(current_step, self._pretrain_steps),\n true_fn=lambda: self._compute_example_weights(adv_output_layer),\n false_fn=lambda: tf.ones_like(class_labels))\n\n # Adds summary variables to tensorboard\n with tf.name_scope('example_weights'):\n tf.summary.histogram('example_weights', example_weights)\n tf.summary.histogram('label', class_labels)\n\n # Initializes Loss Functions\n primary_loss = self._primary_loss(class_labels, logits, example_weights)\n adversary_loss = self._adversary_loss(class_labels, logits, pos_weights,\n example_weights,\n self._adversary_loss_type)\n\n # Sets up dictionaries used for computing performance metrics\n predictions = {\n (self._label_column_name, 'class_ids'):\n tf.reshape(class_predictions, [-1]),\n (self._label_column_name, 'logistic'):\n tf.reshape(sigmoid_output, [-1]),\n ('example_weights'):\n tf.reshape(example_weights, [-1])\n }\n\n class_id_kwargs = {\n 'labels': class_labels,\n 'predictions': class_predictions\n }\n logistics_kwargs = {'labels': 
class_labels, 'predictions': sigmoid_output}\n\n # EVAL Mode\n if mode == tf.estimator.ModeKeys.EVAL:\n with tf.name_scope('eval_metrics'):\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(**class_id_kwargs),\n 'precision': tf.metrics.precision(**class_id_kwargs),\n 'recall': tf.metrics.recall(**class_id_kwargs),\n 'fp': tf.metrics.false_positives(**class_id_kwargs),\n 'fn': tf.metrics.false_negatives(**class_id_kwargs),\n 'tp': tf.metrics.true_positives(**class_id_kwargs),\n 'tn': tf.metrics.true_negatives(**class_id_kwargs),\n 'fpr': contrib_metrics.streaming_false_positive_rate(**class_id_kwargs), # pylint: disable=line-too-long\n 'fnr': contrib_metrics.streaming_false_negative_rate(**class_id_kwargs), # pylint: disable=line-too-long\n 'auc': tf.metrics.auc(curve='ROC', **logistics_kwargs),\n 'aucpr': tf.metrics.auc(curve='PR', **logistics_kwargs)\n }\n\n # EstimatorSpec object for evaluation\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=primary_loss,\n eval_metric_ops=eval_metric_ops)\n\n # TRAIN Mode\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Filters trainable variables for each task\n all_trainable_vars = tf.trainable_variables()\n primary_trainable_vars = [\n v for v in all_trainable_vars if 'primary' in v.op.name\n ]\n adversary_trainable_vars = [\n v for v in all_trainable_vars if 'adversary' in v.op.name\n ]\n\n # TRAIN_OP for adversary DNN\n train_op_adversary = contrib_layers.optimize_loss(\n loss=adversary_loss,\n variables=adversary_trainable_vars,\n global_step=contrib_framework.get_global_step(),\n learning_rate=self._adversary_learning_rate,\n optimizer=self._optimizer)\n\n # TRAIN_OP for primary DNN\n train_op_primary = contrib_layers.optimize_loss(\n loss=primary_loss,\n variables=primary_trainable_vars,\n global_step=contrib_framework.get_global_step(),\n learning_rate=self._primary_learning_rate,\n optimizer=self._optimizer)\n\n # Upto ``pretrain_steps'' trains primary only.\n # Beyond ``pretrain_steps'' alternates between primary and adversary.\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=primary_loss + adversary_loss,\n train_op=tf.cond(\n tf.greater(current_step, self._pretrain_steps),\n true_fn=lambda: tf.group([train_op_primary, train_op_adversary]), # pylint: disable=line-too-long\n false_fn=lambda: tf.group([train_op_primary])))\n\n return estimator_spec\n\n return model_fn\n\n\nclass _RobustEstimator(tf.estimator.Estimator):\n \"\"\"An estimator based on the core estimator.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initializes the estimator.\"\"\"\n self.model = _RobustModel(*args, **kwargs)\n super(_RobustEstimator, self).__init__(\n model_fn=self.model._get_model_fn(), # pylint: disable=protected-access\n model_dir=self.model._model_dir, # pylint: disable=protected-access\n config=self.model._config # pylint: disable=protected-access\n )\n\n\ndef get_estimator(*args, **kwargs):\n return _RobustEstimator(*args, **kwargs)\n",
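A minimal NumPy sketch (the function name and sample values are ours, not from the file above) of the reweighting rule in _compute_example_weights: apply a sigmoid to the adversary's output, normalize by the batch mean floored at 1e-4, then add 1 so no example falls below its original weight.

import numpy as np

def compute_example_weights_np(adv_output_layer):
  # Mirrors _compute_example_weights for a [batch_size, 1] float array.
  weights = 1.0 / (1.0 + np.exp(-np.asarray(adv_output_layer)))  # sigmoid
  weights = weights / max(float(weights.mean()), 1e-4)           # mean ~= 1
  return 1.0 + weights                                           # never < 1

# Examples the adversary scores highly receive the largest weights.
print(compute_example_weights_np([[2.0], [-1.0], [0.5]]))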
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main script for training GCN models.\"\"\"\n\nimport time\nimport models\nimport numpy as np\nimport partition_utils\nimport tensorflow.compat.v1 as tf\nimport utils\n\ntf.logging.set_verbosity(tf.logging.INFO)\n# Set random seed\nseed = 1\nnp.random.seed(seed)\n\n# Settings\nflags = tf.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('save_name', './mymodel.ckpt', 'Path for saving model')\nflags.DEFINE_string('dataset', 'ppi', 'Dataset string.')\nflags.DEFINE_string('data_prefix', 'data/', 'Datapath prefix.')\nflags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\nflags.DEFINE_integer('epochs', 400, 'Number of epochs to train.')\nflags.DEFINE_integer('hidden1', 2048, 'Number of units in hidden layer 1.')\nflags.DEFINE_float('dropout', 0.2, 'Dropout rate (1 - keep probability).')\nflags.DEFINE_float('weight_decay', 0, 'Weight for L2 loss on embedding matrix.')\nflags.DEFINE_integer('early_stopping', 1000,\n 'Tolerance for early stopping (# of epochs).')\nflags.DEFINE_integer('num_clusters', 50, 'Number of clusters.')\nflags.DEFINE_integer('bsize', 1, 'Number of clusters for each batch.')\nflags.DEFINE_integer('num_clusters_val', 5,\n 'Number of clusters for validation.')\nflags.DEFINE_integer('num_clusters_test', 1, 'Number of clusters for test.')\nflags.DEFINE_integer('num_layers', 5, 'Number of GCN layers.')\nflags.DEFINE_float(\n 'diag_lambda', 1,\n 'A positive number for diagonal enhancement, -1 indicates normalization without diagonal enhancement'\n)\nflags.DEFINE_bool('multilabel', True, 'Multilabel or multiclass.')\nflags.DEFINE_bool('layernorm', True, 'Whether to use layer normalization.')\nflags.DEFINE_bool(\n 'precalc', True,\n 'Whether to pre-calculate the first layer (AX preprocessing).')\nflags.DEFINE_bool('validation', True,\n 'Print validation accuracy after each epoch.')\n\n\ndef load_data(data_prefix, dataset_str, precalc):\n \"\"\"Return the required data formats for GCN models.\"\"\"\n (num_data, train_adj, full_adj, feats, train_feats, test_feats, labels,\n train_data, val_data,\n test_data) = utils.load_graphsage_data(data_prefix, dataset_str)\n visible_data = train_data\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_data, :] = labels[train_data, :]\n y_val[val_data, :] = labels[val_data, :]\n y_test[test_data, :] = labels[test_data, :]\n\n train_mask = utils.sample_mask(train_data, labels.shape[0])\n val_mask = utils.sample_mask(val_data, labels.shape[0])\n test_mask = utils.sample_mask(test_data, labels.shape[0])\n\n if precalc:\n train_feats = train_adj.dot(feats)\n train_feats = np.hstack((train_feats, feats))\n test_feats = full_adj.dot(feats)\n test_feats = np.hstack((test_feats, feats))\n\n return (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, train_data, val_data, test_data,\n num_data, 
visible_data)\n\n\n# Define model evaluation function\ndef evaluate(sess, model, val_features_batches, val_support_batches,\n y_val_batches, val_mask_batches, val_data, placeholders):\n \"\"\"evaluate GCN model.\"\"\"\n total_pred = []\n total_lab = []\n total_loss = 0\n total_acc = 0\n\n num_batches = len(val_features_batches)\n for i in range(num_batches):\n features_b = val_features_batches[i]\n support_b = val_support_batches[i]\n y_val_b = y_val_batches[i]\n val_mask_b = val_mask_batches[i]\n num_data_b = np.sum(val_mask_b)\n if num_data_b == 0:\n continue\n else:\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_val_b,\n val_mask_b, placeholders)\n outs = sess.run([model.loss, model.accuracy, model.outputs],\n feed_dict=feed_dict)\n\n total_pred.append(outs[2][val_mask_b])\n total_lab.append(y_val_b[val_mask_b])\n total_loss += outs[0] * num_data_b\n total_acc += outs[1] * num_data_b\n\n total_pred = np.vstack(total_pred)\n total_lab = np.vstack(total_lab)\n loss = total_loss / len(val_data)\n acc = total_acc / len(val_data)\n\n micro, macro = utils.calc_f1(total_pred, total_lab, FLAGS.multilabel)\n return loss, acc, micro, macro\n\n\ndef main(unused_argv):\n \"\"\"Main function for running experiments.\"\"\"\n # Load data\n (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, _, val_data, test_data, num_data,\n visible_data) = load_data(FLAGS.data_prefix, FLAGS.dataset, FLAGS.precalc)\n\n # Partition graph and do preprocessing\n if FLAGS.bsize > 1:\n _, parts = partition_utils.partition_graph(train_adj, visible_data,\n FLAGS.num_clusters)\n parts = [np.array(pt) for pt in parts]\n else:\n (parts, features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess(train_adj, train_feats, y_train,\n train_mask, visible_data,\n FLAGS.num_clusters,\n FLAGS.diag_lambda)\n\n (_, val_features_batches, val_support_batches, y_val_batches,\n val_mask_batches) = utils.preprocess(full_adj, test_feats, y_val, val_mask,\n np.arange(num_data),\n FLAGS.num_clusters_val,\n FLAGS.diag_lambda)\n\n (_, test_features_batches, test_support_batches, y_test_batches,\n test_mask_batches) = utils.preprocess(full_adj, test_feats, y_test,\n test_mask, np.arange(num_data),\n FLAGS.num_clusters_test,\n FLAGS.diag_lambda)\n idx_parts = list(range(len(parts)))\n\n # Some preprocessing\n model_func = models.GCN\n\n # Define placeholders\n placeholders = {\n 'support':\n tf.sparse_placeholder(tf.float32),\n 'features':\n tf.placeholder(tf.float32),\n 'labels':\n tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\n 'labels_mask':\n tf.placeholder(tf.int32),\n 'dropout':\n tf.placeholder_with_default(0., shape=()),\n 'num_features_nonzero':\n tf.placeholder(tf.int32) # helper variable for sparse dropout\n }\n\n # Create model\n model = model_func(\n placeholders,\n input_dim=test_feats.shape[1],\n logging=True,\n multilabel=FLAGS.multilabel,\n norm=FLAGS.layernorm,\n precalc=FLAGS.precalc,\n num_layers=FLAGS.num_layers)\n\n # Initialize session\n sess = tf.Session()\n tf.set_random_seed(seed)\n\n # Init variables\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n cost_val = []\n total_training_time = 0.0\n # Train model\n for epoch in range(FLAGS.epochs):\n t = time.time()\n np.random.shuffle(idx_parts)\n if FLAGS.bsize > 1:\n (features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess_multicluster(\n train_adj, parts, train_feats, y_train, train_mask,\n 
FLAGS.num_clusters, FLAGS.bsize, FLAGS.diag_lambda)\n for pid in range(len(features_batches)):\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n else:\n np.random.shuffle(idx_parts)\n for pid in idx_parts:\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n\n total_training_time += time.time() - t\n print_str = 'Epoch: %04d ' % (epoch + 1) + 'training time: {:.5f} '.format(\n total_training_time) + 'train_acc= {:.5f} '.format(outs[2])\n\n # Validation\n if FLAGS.validation:\n cost, acc, micro, macro = evaluate(sess, model, val_features_batches,\n val_support_batches, y_val_batches,\n val_mask_batches, val_data,\n placeholders)\n cost_val.append(cost)\n print_str += 'val_acc= {:.5f} '.format(\n acc) + 'mi F1= {:.5f} ma F1= {:.5f} '.format(micro, macro)\n\n tf.logging.info(print_str)\n\n if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(\n cost_val[-(FLAGS.early_stopping + 1):-1]):\n tf.logging.info('Early stopping...')\n break\n\n tf.logging.info('Optimization Finished!')\n\n # Save model\n saver.save(sess, FLAGS.save_name)\n\n # Load model (using CPU for inference)\n with tf.device('/cpu:0'):\n sess_cpu = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))\n sess_cpu.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess_cpu, FLAGS.save_name)\n # Testing\n test_cost, test_acc, micro, macro = evaluate(\n sess_cpu, model, test_features_batches, test_support_batches,\n y_test_batches, test_mask_batches, test_data, placeholders)\n print_str = 'Test set results: ' + 'cost= {:.5f} '.format(\n test_cost) + 'accuracy= {:.5f} '.format(\n test_acc) + 'mi F1= {:.5f} ma F1= {:.5f}'.format(micro, macro)\n tf.logging.info(print_str)\n\n\nif __name__ == '__main__':\n tf.app.run(main)\n",
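The early-stopping condition in the training loop above is easy to misread; a standalone restatement of the same test (the function name is ours):

import numpy as np

def should_early_stop(cost_val, epoch, early_stopping):
  # Stop once the newest validation cost exceeds the mean of the previous
  # `early_stopping` costs -- the same rule used in the loop above.
  if epoch <= early_stopping:
    return False
  return cost_val[-1] > np.mean(cost_val[-(early_stopping + 1):-1])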
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hashing function to make a stochastic classifier deterministic.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport hashlib\nfrom absl import app\nimport numpy as np\n\n\ndef compute_hash(features, hash_matrix, hash_vector):\n \"\"\"Compute hash values for features using the hash function (A * x + c) mod 2.\n\n Args:\n features: NumPy float array of shape (n, d), the features to hash.\n hash_matrix: NumPy float array of shape (num_feature_bits, num_hash_bits),\n a random matrix A to construct the hash function.\n hash_vector: NumPy float array of shape (1, num_hash_bits),\n a random vector c to construct the hash function.\n\n Returns:\n NumPy float array of shape (n, 1) containing the hashed values in [0, 1].\n \"\"\"\n # Helper function to convert an int array to a bit string array.\n def convert_int_to_bin(x, dimension):\n # Converts x to an array of bit strings of size dimension.\n return '{:b}'.format(x).zfill(dimension)[-dimension:]\n convert_int_to_bin = np.vectorize(convert_int_to_bin)\n\n # Helper function to convert a bit string array to an into array.\n convert_bin_to_int = np.vectorize(lambda x: int(x, 2))\n\n # Number of features and hash bits.\n num_features = features.shape[0]\n num_feature_bits, num_hash_bits = hash_matrix.shape\n\n # Concatenate features and apply MD5 hash to get a fixed length encoding.\n feature_sum_str = [''.join(x) for x in features.astype('str')]\n feature_sum_hex = [hashlib.md5(s.encode('utf-8')).hexdigest()\n for s in feature_sum_str]\n feature_sum_int = [int(h, 16) for h in feature_sum_hex]\n\n # Binarize features\n feature_sum_bin = convert_int_to_bin(\n feature_sum_int, dimension=num_feature_bits)\n feature_sum_bin_matrix = np.array(\n [[int(c) for c in s] for s in feature_sum_bin])\n\n # Compute hash (Ax + c) mod 2.\n feature_hashed = (\n np.dot(feature_sum_bin_matrix, hash_matrix) +\n np.repeat(hash_vector, repeats=num_features, axis=0))\n feature_hashed_bits = np.mod(feature_hashed, 2)\n\n # Convert hash to bit string.\n feature_hashed_bit_char = convert_int_to_bin(feature_hashed_bits, 1)\n feature_hashed_bit_str = [''.join(s) for s in feature_hashed_bit_char]\n feature_hashed_int = convert_bin_to_int(feature_hashed_bit_str)\n hashed_val = feature_hashed_int * 1. 
/ 2 ** num_hash_bits\n\n # Return normalized hashed values in [0, 1].\n return hashed_val.reshape(-1, 1)\n\n\ndef main(argv):\n \"\"\"Example usage of hash function.\"\"\"\n del argv\n\n num_feature_bits = 128\n num_hash_bits = 32\n\n # Random hash matrix and vector to construct hash function.\n hash_matrix = (np.random.rand(\n num_feature_bits, num_hash_bits) > 0.5).astype('int')\n hash_vector = (np.random.rand(1, num_hash_bits) > 0.5).astype('int')\n\n # Generate random features.\n num_examples = 10\n dimension = 4\n features = np.random.normal(size=(num_examples, dimension)).astype(np.float32)\n\n # Compute hash.\n hash_val = compute_hash(features, hash_matrix, hash_vector)\n\n print('Feature matrix:')\n print(features)\n print('\\nHashed values:')\n print(hash_val)\n\n\nif __name__ == '__main__':\n app.run(main)\n\n",
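One hedged illustration of how such hashes can be used (the routing scheme and all names below are ours, not from this file): a stochastic classifier that picks model A with probability prob_a and model B otherwise can be made deterministic by thresholding each example's hash value, which acts as a fixed pseudo-random draw per input.

import numpy as np

def derandomized_predict(features, hash_matrix, hash_vector,
                         predict_a, predict_b, prob_a):
  # Route each example to model A iff its hash value falls below prob_a;
  # deterministic per example, but matches the mixture rate in aggregate.
  hash_vals = compute_hash(features, hash_matrix, hash_vector)  # (n, 1)
  preds_a = np.asarray(predict_a(features)).reshape(-1, 1)
  preds_b = np.asarray(predict_b(features)).reshape(-1, 1)
  return np.where(hash_vals < prob_a, preds_a, preds_b)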
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n# pylint: skip-file\n\nATT_ID = {'5_o_Clock_Shadow': 0, 'Arched_Eyebrows': 1, 'Attractive': 2,\n 'Bags_Under_Eyes': 3, 'Bald': 4, 'Bangs': 5, 'Big_Lips': 6,\n 'Big_Nose': 7, 'Black_Hair': 8, 'Blond_Hair': 9, 'Blurry': 10,\n 'Brown_Hair': 11, 'Bushy_Eyebrows': 12, 'Chubby': 13,\n 'Double_Chin': 14, 'Eyeglasses': 15, 'Goatee': 16,\n 'Gray_Hair': 17, 'Heavy_Makeup': 18, 'High_Cheekbones': 19,\n 'Male': 20, 'Mouth_Slightly_Open': 21, 'Mustache': 22,\n 'Narrow_Eyes': 23, 'No_Beard': 24, 'Oval_Face': 25,\n 'Pale_Skin': 26, 'Pointy_Nose': 27, 'Receding_Hairline': 28,\n 'Rosy_Cheeks': 29, 'Sideburns': 30, 'Smiling': 31,\n 'Straight_Hair': 32, 'Wavy_Hair': 33, 'Wearing_Earrings': 34,\n 'Wearing_Hat': 35, 'Wearing_Lipstick': 36,\n 'Wearing_Necklace': 37, 'Wearing_Necktie': 38, 'Young': 39}\nID_ATT = {v: k for k, v in ATT_ID.items()}\n\nCENTRAL_FRACTION = 0.89\nLOAD_SIZE = 142 #286\nCROP_SIZE = 128 #256\n\ndef cal_eo(a, y_label, y_pred):\n a = np.array(a)\n y_label = np.array(y_label)\n y_pred = np.array(y_pred)\n\n\n idx00 = np.logical_and(a==0,y_label==0)\n idx01 = np.logical_and(a==0,y_label==1)\n idx10 = np.logical_and(a==1,y_label==0)\n idx11 = np.logical_and(a==1,y_label==1)\n\n if y_pred[idx00].shape[0] ==0:\n d00=0.5\n else:\n d00 = 1 - np.sum(y_pred[idx00])/y_pred[idx00].shape[0]\n\n if y_pred[idx01].shape[0] ==0:\n d01=0.5\n else:\n d01 = np.sum(y_pred[idx01])/y_pred[idx01].shape[0]\n\n\n if y_pred[idx10].shape[0] ==0:\n d10=0.5\n else:\n d10 = 1 - np.sum(y_pred[idx10])/y_pred[idx10].shape[0]\n\n if y_pred[idx11].shape[0] ==0:\n d11=0.5\n else:\n d11 = np.sum(y_pred[idx11])/y_pred[idx11].shape[0]\n\n eo = np.abs(d00-d10)+np.abs(d01-d11)\n\n return (d00,d01,d10,d11,eo)\n\ndef reorg(label_path,af,bf):\n img_names = np.genfromtxt(label_path, dtype=str, usecols=0)\n labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))\n entry = np.concatenate((img_names[:, np.newaxis], labels), axis=1)\n a = np.asarray((labels[:,ATT_ID[af]]+1)//2)\n b = np.asarray((labels[:,ATT_ID[bf]]+1)//2)\n d00 = []\n d01 = []\n d10 = []\n d11 = []\n for i in range(labels.shape[0]):\n if a[i]==0:\n if b[i]==0: d00.append(entry[i])\n elif b[i]==1: d01.append(entry[i])\n elif a[i]==1:\n if b[i]==0: d10.append(entry[i])\n elif b[i]==1: d11.append(entry[i])\n min_leng = np.min([len(d00),len(d01),len(d10),len(d11)])\n new_list = d00[:min_leng]+d01[:3*min_leng]+d10[:3*min_leng]+d11[:min_leng]\n # new_list = d00[:min_leng]+d01[:min_leng]+d10[:min_leng]+d11[:min_leng]\n return np.array(new_list)\n\ndef load_train(image_path, label, att):\n image = tf.io.read_file(image_path)\n image = tf.image.decode_jpeg(image)\n image = tf.image.resize(image, [LOAD_SIZE, LOAD_SIZE])\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_crop(image, [CROP_SIZE, CROP_SIZE, 3])\n image = tf.clip_by_value(image, 0, 255) / 127.5 - 1\n label = (label 
+ 1) // 2\n att = (att + 1) // 2\n image = tf.cast(image, tf.float32)\n label = tf.cast(label, tf.int32)\n att = tf.cast(att, tf.int32)\n return (image, label, att)\n\ndef load_test(image_path, label, att):\n image = tf.io.read_file(image_path)\n image = tf.image.decode_jpeg(image)\n image = tf.image.resize(image, [LOAD_SIZE, LOAD_SIZE])\n image = tf.image.central_crop(image, CENTRAL_FRACTION)\n image = tf.clip_by_value(image, 0, 255) / 127.5 - 1\n label = (label + 1) // 2\n att = (att + 1) // 2\n image = tf.cast(image, tf.float32)\n label = tf.cast(label, tf.int32)\n att = tf.cast(att, tf.int32)\n return (image, label, att)\n\n# load partial training dataset\ndef data_train(image_path, label_path, batch_size):\n a = 'Male'\n b = 'Arched_Eyebrows'\n new_entry = reorg(label_path,a,b)\n n_examples = new_entry.shape[0]\n img_names = new_entry[:,0]\n img_paths = np.array([os.path.join(image_path, img_name) for img_name in img_names])\n img_labels = new_entry[:,1:]\n labels = img_labels[:,ATT_ID['Arched_Eyebrows']].astype(int)\n att = img_labels[:,ATT_ID['Male']].astype(int)\n\n train_dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))\n train_dataset = train_dataset.map(load_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n train_dataset = train_dataset.shuffle(n_examples, seed=0)\n train_dataset = train_dataset.batch(batch_size, drop_remainder=True)\n train_dataset = train_dataset.repeat().prefetch(1)\n\n train_iter = train_dataset.make_one_shot_iterator()\n batch = train_iter.get_next()\n\n return batch, int(np.ceil(n_examples/batch_size))\n\n# load entire training dataset\n# def data_train(image_path, label_path, batch_size):\n# img_names = np.genfromtxt(label_path, dtype=str, usecols=0)\n# img_paths = np.array([os.path.join(image_path, img_name) for img_name in img_names])\n# img_labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))\n# n_examples = img_names.shape[0]\n# labels = img_labels[:,ATT_ID['Arched_Eyebrows']]\n# att = img_labels[:,ATT_ID['Male']]\n\n\n# train_dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))\n# train_dataset = train_dataset.map(load_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n# train_dataset = train_dataset.shuffle(n_examples, seed=0)\n# train_dataset = train_dataset.batch(batch_size, drop_remainder=True)\n# train_dataset = train_dataset.repeat().prefetch(1)\n\n# train_iter = train_dataset.make_one_shot_iterator()\n# batch = train_iter.get_next()\n\n# return batch, int(np.ceil(n_examples/batch_size))\n\ndef data_test(image_path, label_path, batch_size):\n img_names = np.genfromtxt(label_path, dtype=str, usecols=0)\n img_paths = np.array([os.path.join(image_path, img_name) for img_name in img_names])\n img_labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))\n n_examples = img_names.shape[0]\n labels = img_labels[:,ATT_ID['Arched_Eyebrows']]\n att = img_labels[:,ATT_ID['Male']]\n\n test_dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))\n test_dataset = test_dataset.map(load_test, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n test_dataset = test_dataset.batch(batch_size, drop_remainder=False)\n test_dataset = test_dataset.repeat().prefetch(1)\n\n test_iter = test_dataset.make_one_shot_iterator()\n batch = test_iter.get_next()\n\n return batch, int(np.ceil(n_examples/batch_size))\n\n\n\n\n",
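A toy check of cal_eo above (the values are made up): d00 and d10 are per-group true-negative rates, d01 and d11 are per-group true-positive rates, and eo is the equalized-odds gap |d00 - d10| + |d01 - d11|.

import numpy as np

a      = np.array([0, 0, 1, 1])   # group attribute
y_true = np.array([0, 1, 0, 1])
y_pred = np.array([0, 1, 1, 1])
d00, d01, d10, d11, eo = cal_eo(a, y_true, y_pred)
print(d00, d01, d10, d11, eo)     # 1.0 1.0 0.0 1.0 -> eo = 1.0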
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for arithmetic_grammar.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom six.moves import map\nimport tensorflow.compat.v1 as tf\n\nfrom neural_guided_symbolic_regression.utils import arithmetic_grammar\n\n\nclass ReadGrammarFromFileTest(tf.test.TestCase):\n\n def setUp(self):\n super(ReadGrammarFromFileTest, self).setUp()\n # NLTK grammar use double quotes for production rules.\n # pylint: disable=g-inconsistent-quotes\n self.expected_set = set([\n \"S -> S '+' T\",\n \"S -> S '-' T\",\n \"S -> S '*' T\",\n \"S -> S '/' T\",\n \"S -> T\",\n \"T -> '(' S ')'\",\n \"T -> 'x'\",\n \"T -> '1'\",\n ])\n # pylint: enable=g-inconsistent-quotes\n\n def test_read_grammar_return_grammar(self):\n grammar = arithmetic_grammar.read_grammar_from_file(\n 'third_party/google_research/google_research/'\n 'neural_guided_symbolic_regression/grammar/'\n 'univariate_one_constant_grammar.txt',\n return_list=False)\n production_rules_set = set(map(str, grammar.productions()))\n self.assertEqual(production_rules_set, self.expected_set)\n\n def test_read_grammar_return_list(self):\n grammar = arithmetic_grammar.read_grammar_from_file(\n 'third_party/google_research/google_research/'\n 'neural_guided_symbolic_regression/grammar/'\n 'univariate_one_constant_grammar.txt',\n return_list=True)\n production_rules_set = set(map(str, grammar))\n self.assertEqual(production_rules_set, self.expected_set)\n\n\nclass ArithmeticGrammarTest(parameterized.TestCase, tf.test.TestCase):\n\n def test_input_grammar_rules_not_list(self):\n with self.assertRaisesRegex(ValueError,\n 'The input grammar_rules should be list.'):\n arithmetic_grammar.Grammar('foo')\n\n def test_input_grammar_rules_not_unique(self):\n with self.assertRaisesRegex(ValueError,\n 'The grammar production rules are not unique.'):\n arithmetic_grammar.Grammar(['foo', 'foo'])\n\n def test_input_grammar_rules_contain_padding_dummy_production_rule(self):\n # If dummy production rule exists in the input grammar rules, it will be\n # duplicated with the dummy production rule appended in the\n # arithmetic_grammar.\n with self.assertRaisesRegex(ValueError,\n 'The grammar production rules are not unique.'):\n arithmetic_grammar.Grammar(['foo', 'Nothing -> None'])\n\n def test_input_grammar_rules_not_change(self):\n grammar_rules = ['S -> T', 'T -> \"x\"']\n arithmetic_grammar.Grammar(grammar_rules)\n self.assertListEqual(grammar_rules, ['S -> T', 'T -> \"x\"'])\n\n def test_basic_production_rules(self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(grammar_rules)\n\n self.assertLen(grammar.prod_rules, 5)\n self.assertEqual(grammar.num_production_rules, 5)\n self.assertEqual(grammar.padding_rule_index, 4)\n 
self.assertEqual(grammar.start_index.symbol(), 'S')\n self.assertEqual(str(grammar.start_rule), \"S -> S '+' T\")\n self.assertEqual(grammar.unique_lhs, ['Nothing', 'S', 'T'])\n self.assertEqual(grammar.num_unique_lhs, 3)\n np.testing.assert_allclose(\n grammar.masks,\n [[0., 0., 0., 0., 1.], [1., 1., 0., 0., 0.], [0., 0., 1., 1., 0.]])\n np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index,\n [1, 1, 2, 2, 0])\n self.assertEqual(grammar.prod_rule_rhs_indices, [[1, 2], [2], [1], [], []])\n self.assertEqual(grammar.max_rhs_indices_size, 2)\n\n def test_basic_production_rules_add_unique_production_rule_to_start(self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(\n grammar_rules, add_unique_production_rule_to_start=True)\n\n self.assertLen(grammar.prod_rules, 6)\n self.assertEqual(grammar.num_production_rules, 6)\n self.assertEqual(grammar.padding_rule_index, 5)\n self.assertEqual(grammar.start_index.symbol(), 'O')\n self.assertEqual(str(grammar.start_rule), 'O -> S')\n self.assertEqual(grammar.unique_lhs, ['Nothing', 'O', 'S', 'T'])\n self.assertEqual(grammar.num_unique_lhs, 4)\n np.testing.assert_allclose(\n grammar.masks,\n [[0., 0., 0., 0., 0., 1.],\n [1., 0., 0., 0., 0., 0.],\n [0., 1., 1., 0., 0., 0.],\n [0., 0., 0., 1., 1., 0.]])\n np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index,\n [1, 2, 2, 3, 3, 0])\n self.assertEqual(grammar.prod_rule_rhs_indices,\n [[2], [2, 3], [3], [2], [], []])\n self.assertEqual(grammar.max_rhs_indices_size, 2)\n\n def test_basic_production_rules_padding_at_end_false(self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(grammar_rules, padding_at_end=False)\n\n self.assertLen(grammar.prod_rules, 5)\n self.assertEqual(grammar.num_production_rules, 5)\n self.assertEqual(grammar.padding_rule_index, 0)\n self.assertEqual(grammar.start_index.symbol(), 'S')\n self.assertEqual(str(grammar.start_rule), \"S -> S '+' T\")\n self.assertEqual(grammar.unique_lhs, ['Nothing', 'S', 'T'])\n self.assertEqual(grammar.num_unique_lhs, 3)\n np.testing.assert_allclose(\n grammar.masks,\n [[1., 0., 0., 0., 0.], [0., 1., 1., 0., 0.], [0., 0., 0., 1., 1.]])\n np.testing.assert_allclose(grammar.prod_rule_index_to_lhs_index,\n [0, 1, 1, 2, 2])\n self.assertEqual(grammar.prod_rule_rhs_indices, [[], [1, 2], [2], [1], []])\n self.assertEqual(grammar.max_rhs_indices_size, 2)\n\n @parameterized.parameters([\n (True, True, \"\\t0: S -> T\\n\\t1: T -> 'x'\\n\\t2: Nothing -> None\\n\"),\n (True, False, \"0: S -> T\\n1: T -> 'x'\\n2: Nothing -> None\\n\"),\n (False, True, \"\\t0: Nothing -> None\\n\\t1: S -> T\\n\\t2: T -> 'x'\\n\"),\n (False, False, \"0: Nothing -> None\\n1: S -> T\\n2: T -> 'x'\\n\"),\n ])\n def test_grammar_to_string(self, padding_at_end, indent, expected_string):\n grammar_rules = [\n 'S -> T',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(\n grammar_rules, padding_at_end=padding_at_end)\n\n self.assertEqual(grammar.grammar_to_string(indent=indent), expected_string)\n\n def test_invalid_grammar_string_no_space_before_arrow(self):\n with self.assertRaisesRegex(ValueError, 'Unable to parse'):\n # No space between arrow and left hand side symbol.\n arithmetic_grammar.Grammar(['a-> b'])\n\n def test_invalid_grammar_string_no_space_after_arrow(self):\n # No space between arrow and right hand side symbol.\n # This is a valid input and should not raise error.\n 
arithmetic_grammar.Grammar(['a ->b'])\n\n def test_invalid_grammar_string_no_arrow(self):\n with self.assertRaisesRegex(ValueError, 'Unable to parse'):\n # Invalid input with no arrow.\n arithmetic_grammar.Grammar(['a b'])\n\n def test_invalid_grammar_string_two_left_hand_side_symbols(self):\n with self.assertRaisesRegex(ValueError, 'Unable to parse'):\n # Invalid input with more than one left hand side symbol.\n arithmetic_grammar.Grammar(['a b -> c'])\n\n def test_invalid_grammar_string_no_left_hand_side_symbol(self):\n with self.assertRaisesRegex(ValueError, 'Unable to parse'):\n # Invalid input with no left hand side symbol.\n arithmetic_grammar.Grammar([' -> c'])\n\n def test_invalid_grammar_string_empty_right_hand_side_symbol(self):\n # No right hand side symbol.\n # This is a valid input and should not raise error.\n arithmetic_grammar.Grammar(['a -> '])\n\n def test_parse_expressions_to_indices_sequences_input_not_list(self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(grammar_rules)\n\n with self.assertRaisesRegex(\n ValueError, 'expression_strings is expected to be list, but got'):\n grammar.parse_expressions_to_indices_sequences(\n # Note the input expression_strings is a string not a list of strings.\n expression_strings='x + ( x )',\n max_length=8\n )\n\n def test_parse_expressions_to_indices_sequences_short_max_length(self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(grammar_rules)\n\n with self.assertRaisesRegex(\n ValueError,\n r'The number of production rules to parse expression .* '\n 'can not be greater than max_length'):\n grammar.parse_expressions_to_indices_sequences(\n expression_strings=['x + ( x )'],\n max_length=2\n )\n\n def test_parse_expressions_to_indices_sequences_invalid_expression_string(\n self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(grammar_rules)\n with self.assertRaisesRegex(\n ValueError, 'cannot be parsed to production rules'):\n grammar.parse_expressions_to_indices_sequences(\n expression_strings=['x x'],\n max_length=8\n )\n\n def test_grammar_with_callables(self):\n grammar_rules = [\n 'S -> S \"+\" S', # index 0\n 'S -> S \"-\" S', # index 1\n 'S -> \"FUNCTION1(\" P \")\"', # index 2\n 'P -> T', # index 3\n 'P -> \"1\" \"+\" T', # index 4\n 'S -> T', # index 5\n 'T -> \"FUNCTION2(\" \"x\" \",\" \"c\" \")\"', # index 6\n ] # padding rule index 7\n grammar = arithmetic_grammar.Grammar(grammar_rules)\n indices_sequences = grammar.parse_expressions_to_indices_sequences(\n expression_strings=[\n 'FUNCTION1( FUNCTION2( x , c ) ) - '\n 'FUNCTION2( x , c ) + FUNCTION2( x , c )'],\n max_length=10\n )\n np.testing.assert_equal(\n indices_sequences,\n [\n # Preorder traversal of parsing tree.\n # S\n # |\n # S '+' S\n # | |\n # S '-' S T\n # | | |\n # 'FUNCTION1(' P ')' T 'FUNCTION2( x , c )'\n # | |\n # T 'FUNCTION2( x , c )'\n # |\n # 'FUNCTION2( x , c )'\n [\n 0, # 'S -> S \"+\" S'\n 1, # 'S -> S \"-\" S'\n 2, # 'S -> \"FUNCTION1(\" P \")\"'\n 3, # 'P -> T'\n 6, # 'T -> \"FUNCTION2(\" \"x\" \",\" \"c\" \")\"'\n 5, # 'S -> T'\n 6, # 'T -> \"FUNCTION2(\" \"x\" \",\" \"c\" \")\"'\n 5, # 'S -> T'\n 6, # 'T -> \"FUNCTION2(\" \"x\" \",\" \"c\" \")\"'\n 7, # Padding dummy production rule.\n ]\n ]\n )\n\n def test_parse_expressions_to_indices_sequences(self):\n grammar_rules = [\n 
'S -> S \"+\" T', # index 0\n 'S -> T', # index 1\n 'T -> \"(\" S \")\"', # index 2\n 'T -> \"x\"', # index 3\n ] # padding rule index 4\n\n grammar = arithmetic_grammar.Grammar(grammar_rules)\n indices_sequences = grammar.parse_expressions_to_indices_sequences(\n expression_strings=['x + ( x )'],\n max_length=8\n )\n\n np.testing.assert_equal(\n indices_sequences,\n [\n # Expression string: 'x + ( x )'\n # Preorder traversal of parsing tree.\n # S\n # |\n # S '+' T\n # | |\n # T '(' S ')'\n # | |\n # 'x' 'x'\n [\n 0, # 'S -> S \"+\" T'\n 1, # 'S -> T'\n 3, # 'T -> \"x\"'\n 2, # 'T -> \"(\" S \")\"'\n 1, # 'S -> T'\n 3, # 'T -> \"x\"'\n 4, # Padding dummy production rule.\n 4, # Padding dummy production rule.\n ]\n ]\n )\n\n def test_parse_expressions_to_indices_sequences_padding_at_end_false(self):\n grammar_rules = [\n 'S -> S \"+\" T', # index 1\n 'S -> T', # index 2\n 'T -> \"(\" S \")\"', # index 3\n 'T -> \"x\"', # index 4\n ] # padding rule index 0\n\n grammar = arithmetic_grammar.Grammar(grammar_rules, padding_at_end=False)\n indices_sequences = grammar.parse_expressions_to_indices_sequences(\n expression_strings=['x + ( x )'],\n max_length=8\n )\n\n np.testing.assert_equal(\n indices_sequences,\n [\n # Expression string: 'x + ( x )'\n # Preorder traversal of parsing tree.\n # S\n # |\n # S '+' T\n # | |\n # T '(' S ')'\n # | |\n # 'x' 'x'\n [\n 1, # 'S -> S \"+\" T'\n 2, # 'S -> T'\n 4, # 'T -> \"x\"'\n 3, # 'T -> \"(\" S \")\"'\n 2, # 'S -> T'\n 4, # 'T -> \"x\"'\n 0, # Padding dummy production rule.\n 0, # Padding dummy production rule.\n ]\n ]\n )\n\n def test_parse_expressions_to_indices_sequences_pad_front_unique_start(self):\n grammar_rules = [\n 'S -> S \"+\" T', # index 2\n 'S -> T', # index 3\n 'T -> \"(\" S \")\"', # index 4\n 'T -> \"x\"', # index 5\n ] # padding rule index 0\n # 'O -> S' will be added with index 1.\n\n grammar = arithmetic_grammar.Grammar(\n grammar_rules,\n padding_at_end=False,\n add_unique_production_rule_to_start=True)\n indices_sequences = grammar.parse_expressions_to_indices_sequences(\n expression_strings=['x + ( x )'],\n max_length=8\n )\n\n np.testing.assert_equal(\n indices_sequences,\n [\n # Expression string: 'x + ( x )'\n # Preorder traversal of parsing tree.\n # O\n # |\n # S\n # |\n # S '+' T\n # | |\n # T '(' S ')'\n # | |\n # 'x' 'x'\n [\n 1, # 'O -> S'\n 2, # 'S -> S \"+\" T'\n 3, # 'S -> T'\n 5, # 'T -> \"x\"'\n 4, # 'T -> \"(\" S \")\"'\n 3, # 'S -> T'\n 5, # 'T -> \"x\"'\n 0, # Padding dummy production rule.\n ]\n ]\n )\n\n def test_parse_expressions_to_tensor(self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(grammar_rules)\n\n expression_tensor = grammar.parse_expressions_to_tensor(\n expression_strings=['x + ( x )'],\n max_length=8\n )\n\n np.testing.assert_allclose(\n expression_tensor,\n [\n # Expression string: 'x + ( x )'\n # Preorder traversal of parsing tree.\n # S\n # |\n # S '+' T\n # | |\n # T '(' S ')'\n # | |\n # 'x' 'x'\n [\n [1., 0., 0., 0., 0.], # 'S -> S \"+\" T'\n [0., 1., 0., 0., 0.], # 'S -> T'\n [0., 0., 0., 1., 0.], # 'T -> \"x\"'\n [0., 0., 1., 0., 0.], # 'T -> \"(\" S \")\"'\n [0., 1., 0., 0., 0.], # 'S -> T'\n [0., 0., 0., 1., 0.], # 'T -> \"x\"'\n [0., 0., 0., 0., 1.], # Padding dummy production rule.\n [0., 0., 0., 0., 1.], # Padding dummy production rule.\n ]\n ]\n )\n\n def test_parse_expressions_to_tensor_padding_at_end_false(self):\n grammar_rules = [\n 'S -> S \"+\" T',\n 'S -> T',\n 'T -> \"(\" S \")\"',\n 
'T -> \"x\"',\n ]\n\n grammar = arithmetic_grammar.Grammar(grammar_rules, padding_at_end=False)\n\n expression_tensor = grammar.parse_expressions_to_tensor(\n expression_strings=['x + ( x )'],\n max_length=8\n )\n\n np.testing.assert_allclose(\n expression_tensor,\n [\n # Expression string: 'x + ( x )'\n # Preorder traversal of parsing tree.\n # S\n # |\n # S '+' T\n # | |\n # T '(' S ')'\n # | |\n # 'x' 'x'\n [\n [0., 1., 0., 0., 0.], # 'S -> S \"+\" T'\n [0., 0., 1., 0., 0.], # 'S -> T'\n [0., 0., 0., 0., 1.], # 'T -> \"x\"'\n [0., 0., 0., 1., 0.], # 'T -> \"(\" S \")\"'\n [0., 0., 1., 0., 0.], # 'S -> T'\n [0., 0., 0., 0., 1.], # 'T -> \"x\"'\n [1., 0., 0., 0., 0.], # Padding dummy production rule.\n [1., 0., 0., 0., 0.], # Padding dummy production rule.\n ]\n ]\n )\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
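The tests above imply a relationship worth stating directly: parse_expressions_to_tensor is the one-hot encoding of parse_expressions_to_indices_sequences. A sketch using the same grammar and expression as the tests:

import numpy as np
from neural_guided_symbolic_regression.utils import arithmetic_grammar

grammar = arithmetic_grammar.Grammar(
    ['S -> S "+" T', 'S -> T', 'T -> "(" S ")"', 'T -> "x"'])
indices = grammar.parse_expressions_to_indices_sequences(
    expression_strings=['x + ( x )'], max_length=8)
tensor = grammar.parse_expressions_to_tensor(
    expression_strings=['x + ( x )'], max_length=8)
# Rows of the tensor are one-hot vectors over the production rules.
one_hot = np.eye(grammar.num_production_rules)[np.asarray(indices)]
np.testing.assert_allclose(tensor, one_hot)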
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Binary that runs inference on a pre-trained cost model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow.compat.v1 as tf\n\nfrom tunas import mobile_cost_model\nfrom tunas import mobile_search_space_v3\nfrom tunas import search_space_utils\n\nflags.DEFINE_string(\n 'indices', '',\n 'Colon-separated list of integers specifying the network architecture '\n 'to evaluate.')\nflags.DEFINE_string(\n 'ssd', mobile_search_space_v3.PROXYLESSNAS_SEARCH,\n 'Search space definition to use.')\n\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n indices = search_space_utils.parse_list(FLAGS.indices, int)\n ssd = FLAGS.ssd\n cost = mobile_cost_model.estimate_cost(indices, ssd)\n print('estimated cost: {:f}'.format(cost))\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.ERROR)\n tf.disable_v2_behavior()\n tf.app.run(main)\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Implements data augmentations for cifar10/cifar100.\"\"\"\n\nfrom typing import Dict\nfrom absl import flags\nimport tensorflow as tf\nfrom flax_models.cifar.datasets import auto_augment\n\n\nFLAGS = flags.FLAGS\n\n\nflags.DEFINE_integer('cutout_length', 16,\n 'Length (in pixels) of the cutout patch. Default value of '\n '16 is used to get SOTA on cifar10/cifar100')\n\n\ndef weak_image_augmentation(example,\n random_crop_pad = 4):\n \"\"\"Applies random crops and horizontal flips.\n\n Simple data augmentations that are (almost) always used with cifar. Pad the\n image with `random_crop_pad` before randomly cropping it to its original\n size. Also randomly apply horizontal flip.\n\n Args:\n example: An example dict containing an image and a label.\n random_crop_pad: By how many pixels should the image be padded on each side\n before cropping.\n\n Returns:\n An example with the same label and an augmented version of the image.\n \"\"\"\n image, label = example['image'], example['label']\n image = tf.image.random_flip_left_right(image)\n image_shape = tf.shape(image)\n image = tf.pad(\n image, [[random_crop_pad, random_crop_pad],\n [random_crop_pad, random_crop_pad], [0, 0]],\n mode='REFLECT')\n image = tf.image.random_crop(image, image_shape)\n return {'image': image, 'label': label}\n\n\ndef auto_augmentation(example,\n dataset_name):\n \"\"\"Applies the AutoAugment policy found for the dataset.\n\n AutoAugment: Learning Augmentation Policies from Data\n https://arxiv.org/abs/1805.09501\n\n Args:\n example: An example dict containing an image and a label.\n dataset_name: Name of the dataset for which we should return the optimal\n policy.\n\n Returns:\n An example with the same label and an augmented version of the image.\n \"\"\"\n image, label = example['image'], example['label']\n image = auto_augment.get_autoaugment_fn(dataset_name)(image)\n return {'image': image, 'label': label}\n\n\ndef cutout(batch):\n \"\"\"Applies cutout to a batch of images.\n\n The cut out patch will be replaced by zeros (thus the batch should be\n normalized before cutout is applied).\n\n Reference:\n Improved Regularization of Convolutional Neural Networks with Cutout\n https://arxiv.org/abs/1708.04552\n\n Implementation inspired by:\n third_party/cloud_tpu/models/efficientnet/autoaugment.py\n\n Args:\n batch: A batch of images and labels.\n\n Returns:\n The same batch where cutout has been applied to the images.\n \"\"\"\n length, replace = FLAGS.cutout_length, 0.0\n images, labels = batch['image'], batch['label']\n num_channels = tf.shape(images)[3]\n image_height, image_width = tf.shape(images)[1], tf.shape(images)[2]\n\n cutout_center_height = tf.random.uniform(\n shape=[], minval=0, maxval=image_height,\n dtype=tf.int32)\n cutout_center_width = tf.random.uniform(\n shape=[], minval=0, maxval=image_width,\n dtype=tf.int32)\n\n lower_pad = tf.maximum(0, 
cutout_center_height - length // 2)\n  upper_pad = tf.maximum(0, image_height - cutout_center_height - length // 2)\n  left_pad = tf.maximum(0, cutout_center_width - length // 2)\n  right_pad = tf.maximum(0, image_width - cutout_center_width - length // 2)\n\n  cutout_shape = [image_height - (lower_pad + upper_pad),\n                  image_width - (left_pad + right_pad)]\n\n  padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]\n\n  mask = tf.pad(\n      tf.zeros(cutout_shape, dtype=images.dtype),\n      padding_dims, constant_values=1)\n\n  # A stray trailing comma here previously wrapped `patch` in a tuple, adding\n  # a leading dimension that then had to be squeezed away after tf.where;\n  # keeping `patch` a plain tensor lets tf.where broadcast directly.\n  patch = tf.ones_like(images, dtype=images.dtype) * replace\n\n  mask = tf.expand_dims(mask, -1)\n  mask = tf.tile(mask, [1, 1, num_channels])\n\n  images = tf.where(\n      tf.equal(mask, 0),\n      patch,\n      images)\n\n  return {'image': images, 'label': labels}\n",
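A sketch of where these augmentations fit in a tf.data pipeline (the dataset construction is assumed): cutout expects batched, already-normalized examples, so it is mapped after batch(), while the per-example augmentations run before it. Note that as written, cutout samples one patch location shared by the whole batch.

import tensorflow as tf

def make_train_pipeline(dataset, batch_size):
  # dataset: tf.data.Dataset of {'image': ..., 'label': ...} examples.
  dataset = dataset.map(weak_image_augmentation,
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.batch(batch_size, drop_remainder=True)
  dataset = dataset.map(cutout)  # operates on whole batches
  return dataset.prefetch(tf.data.experimental.AUTOTUNE)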
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for data processing in struct2depth readers.\"\"\"\n\n\n\nfrom typing import Text\n\nimport tensorflow.compat.v1 as tf\n\n\ndef read_image_as_float_tensor(image_filepath):\n \"\"\"Returns a 3-channel float Tensor of the specified image.\n\n Args:\n image_filepath: A local valid filepath. Supported types BMP, GIF (only\n first image is taken from an animated GIF), JPEG, and PNG,\n \"\"\"\n # Read, decode, and normalize images.\n encoded_image = tf.io.read_file(image_filepath)\n # decode_image supports BMP, GIF, JPEG, or PNG by calling the appropriate\n # format decoding method. All decode_bmp, decode_jpeg and decode_png return\n # 3-D arrays [height, width, num_channels].\n # By default, decode_gif returns a 4-D array [num_frames, height, width, 3],\n # expand_animations=False will truncate animated GIF files to the first frame.\n # Thereby enforces a tensor rank=3.\n # Channels=3 deals with 'RGBA' format PNG by dropping the transparency mask.\n decoded_image = tf.image.decode_image(encoded_image, channels=3,\n expand_animations=False)\n # Help 'tf.keras.initializers.*' to infer shape.\n decoded_image.set_shape([None, None, 3])\n # Scaling is performed appropriately before casting.\n decoded_image = tf.image.convert_image_dtype(decoded_image, dtype=tf.float32)\n # Decoded_image range is [0.0, 1.0].\n return decoded_image\n\n\ndef read_image_grayscale(image_filepath):\n \"\"\"Returns a 1-channel uint8 Tensor of the specified image.\n\n Args:\n image_filepath: A local valid filepath.\n \"\"\"\n # Read, decode, and normalize images.\n image_string = tf.io.read_file(image_filepath)\n # Decode_image might return a 4-dimensional shape.\n image_grayscale = tf.image.decode_image(image_string, channels=1)\n # Enforces a 3-dimensional shape.\n image_grayscale.set_shape([None, None, 1])\n return image_grayscale\n\n\ndef read_image_validity_mask(image_filepath):\n \"\"\"Returns a 1-channel binary Tensor(int32) of the specified image.\n\n Args:\n image_filepath: A local valid filepath.\n \"\"\"\n validity_mask_uint8 = read_image_grayscale(image_filepath)\n # TPU does not support uint8-images, thus validity_mask is re-encoded as\n # int32-image.\n validity_mask_int = tf.cast(validity_mask_uint8, dtype=tf.int32)\n # validity_mask are used to compute the loss in valid pixels only.\n # validity_mask is converted to binary {0, 1} values to allow:\n # valid_loss_per_pixel = loss_per_pixel * validity_mask.\n validity_mask_int = tf.math.minimum(validity_mask_int, 1)\n return validity_mask_int\n\n\ndef crop_egomotion(egomotion, offset_height, offset_width, target_height,\n target_width):\n \"\"\"Transforms camera egomotion when the image is cropped.\n\n Args:\n egomotion: a 2-d transformation matrix.\n offset_height: amount of offset in y direction.\n offset_width: amount of offset in x direction.\n target_height: target height of images.\n target_width: target width of 
images.\n\n Returns:\n A 2-d transformation matrix.\n \"\"\"\n del offset_height, offset_width, target_height, target_width # unused\n return egomotion\n\n\ndef crop_intrinsics(intrinsics, offset_height, offset_width, target_height,\n target_width):\n \"\"\"Crops camera intrinsics based on target image dimensions and offset.\n\n Args:\n intrinsics: 1-d array containing w, h, fx, fy, x0, y0.\n offset_height: amount of offset in y direction.\n offset_width: amount of offset in x direction.\n target_height: target height of images.\n target_width: target width of images.\n\n Returns:\n A 1-d tensor containing the adjusted camera intrinsics.\n \"\"\"\n with tf.name_scope('crop_intrinsics'):\n w, h, fx, fy, x0, y0 = tf.unstack(intrinsics)\n\n x0 -= tf.cast(offset_width, tf.float32)\n y0 -= tf.cast(offset_height, tf.float32)\n\n w = tf.cast(target_width, tf.float32)\n h = tf.cast(target_height, tf.float32)\n\n return tf.stack((w, h, fx, fy, x0, y0))\n\n\ndef crop_image(image, offset_height, offset_width, target_height, target_width):\n \"\"\"Crops an image represented as a tensor.\n\n Args:\n image: an image represented as a (height, wdith, channels)-tensor.\n offset_height: amount of offset in y direction.\n offset_width: amount of offset in x direction.\n target_height: target height of images.\n target_width: target width of images.\n\n Returns:\n A cropped image represented as a (height, width, channels)-tensor.\n\n Raises:\n ValueError: Image tensor has incorrect rank.\n \"\"\"\n with tf.name_scope('crop_image'):\n if image.shape.rank != 3:\n raise ValueError('Rank of endpoint is %d. Must be 3.' %\n (image.shape.rank))\n out_img = tf.image.crop_to_bounding_box(image, offset_height, offset_width,\n target_height, target_width)\n return out_img\n\n\ndef resize_egomotion(egomotion, target_size):\n \"\"\"Transforms camera egomotion when the image is resized.\n\n Args:\n egomotion: a 2-d transformation matrix.\n target_size: target size, a tuple of (height, width).\n\n Returns:\n A 2-d transformation matrix.\n \"\"\"\n del target_size # unused\n return egomotion\n\n\ndef resize_intrinsics(intrinsics, target_size):\n \"\"\"Transforms camera intrinsics when image is resized.\n\n Args:\n intrinsics: 1-d array containing w, h, fx, fy, x0, y0.\n target_size: target size, a tuple of (height, width).\n\n Returns:\n A 1-d tensor containing the adjusted camera intrinsics.\n \"\"\"\n with tf.name_scope('resize_intrinsics'):\n w, h, fx, fy, x0, y0 = tf.unstack(intrinsics)\n\n def float_div(a, b):\n return tf.cast(a, tf.float32) / tf.cast(b, tf.float32)\n\n xfactor = float_div(target_size[1], w)\n yfactor = float_div(target_size[0], h)\n fx *= xfactor\n fy *= yfactor\n x0 *= xfactor\n y0 *= yfactor\n w = target_size[1]\n h = target_size[0]\n\n return tf.stack((w, h, fx, fy, x0, y0))\n\n\ndef resize_area(image, size):\n \"\"\"Resizes an image represented as a tensor using the area method.\n\n Args:\n image: an image represented as a (height, width, channels)-tensor.\n size: A tuple ot two integers, the target (height, width).\n\n Returns:\n An image represented as a (height, wdith, channels)-tensor.\n \"\"\"\n return _apply_on_one_image(tf.image.resize_area, image, size)\n\n\ndef resize_nearest_neighbor(image, size):\n \"\"\"Resizes an image represented as a tensor using the nearest neighbor method.\n\n Args:\n image: an image represented as a (height, width, channels)-tensor.\n size: A tuple ot two integers, the target (height, width).\n\n Returns:\n An image represented as a (height, wdith, 
channels)-tensor.\n \"\"\"\n\n return _apply_on_one_image(tf.image.resize_nearest_neighbor, image, size)\n\n\ndef flip_egomotion(egomotion):\n \"\"\"Transforms camera egomotion when the image is flipped horizontally.\n\n The intrinsics matrix is ((fx, 0, x0), (0, fy, y0), (0, 0, 1)).\n Given a pixel (px, py, 1), the x coordinate is x = px * fx + 1.\n Now what if we flip the image along x? This maps px to w - 1 - px,\n where w is the image width. Therefore for the flipped image,\n we have x' = (w - px - 1) * fx + 1.\n Therefore x' = -x + (w - 1 - 2 * x0) / fx,\n if x0 = ((w - 1) / 2), that is, if the optical center is exactly at\n the center of the image, then indeed x' = -x, so we can just flip x.\n Otherwise there is a correction which is inrinsics-dependent:\n we'd have to add a small translation component to flip_mat, but we ignore\n this small correction for now.\n Args:\n egomotion: a 2-d transformation matrix.\n\n Returns:\n A 2-d transformation matrix.\n \"\"\"\n with tf.name_scope('flip_egomotion'):\n flip_mat = tf.constant(\n [[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],\n dtype=tf.float32)\n egomotion = tf.matmul(tf.matmul(flip_mat, egomotion), flip_mat)\n return egomotion\n\n\ndef flip_intrinsics(intrinsics):\n \"\"\"Flips camera intrinsics when the image is flipped horizontally.\n\n Args:\n intrinsics: 1-d array containing w, h, fx, fy, x0, y0.\n\n Returns:\n A 1-d tensor containing the adjusted camera intrinsics.\n \"\"\"\n with tf.name_scope('flip_intrinsics'):\n w, h, fx, fy, x0, y0 = tf.unstack(intrinsics)\n x0 = w - x0\n y0 = h - y0\n\n return tf.stack((w, h, fx, fy, x0, y0))\n\n\ndef flip_left_right(image):\n \"\"\"Horizontally flips an image (left/right) represented as a tensor.\n\n Args:\n image: an image represented as a (height, wdith, channels)-tensor.\n\n Returns:\n A flipped image represented as a (height, wdith, channels)-tensor.\n \"\"\"\n return _apply_on_one_image(tf.image.flip_left_right, image)\n\n\ndef _apply_on_one_image(fn, image, *args, **kwargs):\n \"\"\"Makes a function that acts on one image (out of one that acts on a batch).\n\n Args:\n fn: A function that receives a batch of images as a first argument (rank 4),\n and other args and kwargs.\n image: A tensor of rank 3 (height, width, channels) representing an image.\n *args: Arguments to pass to fn\n **kwargs: Keyword arguments to pass to fn\n\n Returns:\n The result of `fn` when applied on `image`, after adding a batch dimension\n to `image` and removing it from the result.\n \"\"\"\n\n with tf.name_scope('apply_on_one_image'):\n image = tf.convert_to_tensor(image)\n if image.shape.rank != 3:\n raise ValueError('Rank of endpoint is %d. Must be 3.' %\n image.shape.rank)\n\n out_image = tf.expand_dims(image, axis=0)\n out_image = fn(out_image, *args, **kwargs)\n return tf.squeeze(out_image, axis=0)\n",
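A worked example of the intrinsics adjustment above (the numbers are illustrative): resizing a 640x480 image to 320x240 halves fx, fy, x0, y0 and replaces (w, h) with the target size.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
# intrinsics = (w, h, fx, fy, x0, y0)
intrinsics = tf.constant([640., 480., 500., 500., 320., 240.])
resized = resize_intrinsics(intrinsics, target_size=(240, 320))
with tf.Session() as sess:
  print(sess.run(resized))  # [320. 240. 250. 250. 160. 120.]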
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Library for analysis of evacuation simulation results.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import flags\nimport matplotlib.pylab as pylab\nimport numpy as np\nimport sumolib\n\nfrom simulation_research.traffic import file_util\nfrom simulation_research.traffic import map_visualizer\nfrom simulation_research.traffic import random_traffic_generator\nfrom simulation_research.traffic import simulation_data_parser\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_integer('random_seed', '369',\n 'random seed for the simulation.')\n\n_DEMANDS = 'demands'\n\n\nclass EvacuationSimulationAnalyzer(object):\n \"\"\"A convenience class for analyzing results of evacuation simulation.\n\n This is a one-off for facilitating the analysis for a specific paper resulting\n from the internship work of albertyuchen@. If it is used more generally, it\n should be refactored and tested.\n \"\"\"\n\n def __init__(self, output_dir, sumo_net_file):\n self._output_dir = output_dir\n self._sumo_net_file = sumo_net_file\n\n def generate_evacuation_taz_demands(\n self, residential_car_density, serving_car_density,\n demand_mean_hours,\n demand_stddev_hours,\n population_portion):\n \"\"\"Generates evacuation TAZ demands.\"\"\"\n\n # TODO(yusef): Fix map + total number of cars.\n # To make the demands consistent, use the default map, paradise_type.net.xml\n # as the input map instead of the reversed. 
For Paradise map, an easy way to\n # check is that the total number of cars is 11072.\n net = sumolib.net.readNet(self._sumo_net_file)\n traffic_generator = random_traffic_generator.RandomTrafficGenerator(net)\n visualizer = map_visualizer.MapVisualizer(net)\n\n print(\n 'Generating TAZ demands with STD: ', demand_stddev_hours,\n ' Portion: ', population_portion)\n\n # Demands from residential roads.\n residential_edge_type = ['highway.residential']\n residential_edges = net.filterEdges(residential_edge_type)\n demand_mean_seconds = demand_mean_hours * 60 * 60\n demand_stddev_seconds = demand_stddev_hours * 60 * 60\n time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(\n demand_mean_seconds, demand_stddev_seconds)\n car_per_meter_residential = residential_car_density * population_portion\n\n np.random.seed(FLAGS.random_seed)\n residential = traffic_generator.create_evacuation_auto_routing_demands(\n residential_edges,\n time_sampler_parameters,\n car_per_meter_residential)\n\n # Demands from parking roads.\n parking_edge_type = ['highway.service']\n parking_edges = net.filterEdges(parking_edge_type)\n time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(\n demand_mean_seconds, demand_stddev_seconds)\n car_per_meter_parking = serving_car_density * population_portion\n\n parking = traffic_generator.create_evacuation_auto_routing_demands(\n parking_edges,\n time_sampler_parameters,\n car_per_meter_parking)\n\n all_demands = residential + parking\n departure_time_points = [x.time for x in all_demands]\n cars_per_time_point = [x.num_cars for x in all_demands]\n departure_time_points = np.array(departure_time_points) / 3600\n print('TAZ demands. Total vehicles: ', sum(cars_per_time_point))\n\n # TODO(yusef): reconcile.\n demands_dir = os.path.join(self._output_dir, _DEMANDS)\n file_util.f_makedirs(demands_dir)\n output_hist_figure_path = os.path.join(\n demands_dir,\n 'departure_time_histogram_taz_std_%s_portion_%s.pdf' %\n (demand_stddev_hours, population_portion))\n output_cumulative_figure_path = os.path.join(\n demands_dir,\n 'departure_time_cumulative_taz_std_%s_portion_%s.pdf' %\n (demand_stddev_hours, population_portion))\n pkl_file = os.path.join(\n demands_dir,\n 'demands_taz_tuple_std_%s_portion_%s.pkl' %\n (demand_stddev_hours, population_portion))\n routes_file = os.path.join(\n demands_dir,\n 'demands_taz_std_%s_portion_%s.rou.xml' %\n (demand_stddev_hours, population_portion))\n\n # Output the demand xml file.\n visualizer.plot_demands_departure_time(\n departure_time_points,\n cars_per_time_point,\n output_hist_figure_path=output_hist_figure_path,\n output_cumulative_figure_path=output_cumulative_figure_path)\n file_util.save_variable(pkl_file, all_demands)\n exit_taz = 'exit_taz'\n traffic_generator.write_evacuation_vehicle_auto_routing_demands(\n all_demands, exit_taz, routes_file)\n\n def compare_demands_difference(self):\n \"\"\"Compared the differences between demands and evacuations.\"\"\"\n x = file_util.load_variable(\n 'demands/demands_shortest_path_tuple_std_1.5.pkl')\n y = file_util.load_variable('demands/demands_taz_tuple_std_1.5.pkl')\n\n # x = [(a.origin, a.num_cars) for a in x]\n # y = [(a.origin, a.num_cars) for a in y]\n x_cars = [a.num_cars for a in x]\n y_cars = [a.num_cars for a in y]\n print(sum(x_cars), sum(y_cars))\n x = [(a.origin, a.num_cars, a.time) for a in x]\n y = [(a.origin, a.num_cars, a.time) for a in y]\n x = set(x)\n y = set(y)\n print(len(x), len(y))\n print(x.issubset(y))\n\n common = 
x.intersection(y)\n print(len(common))\n x_ = x.difference(common)\n y_ = y.difference(common)\n print(x_)\n print(y_)\n\n def generate_evacuation_shortest_path_demands(\n self, residential_car_density, serving_car_density,\n evacuation_edges, demand_mean_hours, demand_stddev_hours,\n population_portion):\n \"\"\"Generates evacuation demands.\"\"\"\n net = sumolib.net.readNet(self._sumo_net_file)\n traffic_generator = random_traffic_generator.RandomTrafficGenerator(net)\n visualizer = map_visualizer.MapVisualizer(net)\n\n print('Generating TAZ demands with STD: ', demand_stddev_hours,\n ' Portion: ', population_portion)\n\n # Calculate the distance to the evacuation exits.\n evacuation_path_trees = {}\n evacuation_path_length = {}\n for exit_edge in evacuation_edges:\n evacuation_path_trees[exit_edge], evacuation_path_length[exit_edge] = (\n net.getRestrictedShortestPathsTreeToEdge(exit_edge))\n\n # Demands from residential roads.\n residential_edge_type = ['highway.residential']\n residential_edges = net.filterEdges(residential_edge_type)\n demand_mean_seconds = demand_mean_hours * 60 * 60\n demand_stddev_seconds = demand_stddev_hours * 60 * 60\n time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(\n demand_mean_seconds, demand_stddev_seconds)\n car_per_meter_residential = residential_car_density * population_portion\n\n np.random.seed(FLAGS.random_seed)\n residential = traffic_generator.create_evacuation_shortest_path_demands(\n residential_edges,\n time_sampler_parameters,\n car_per_meter_residential,\n evacuation_edges,\n evacuation_path_trees,\n evacuation_path_length)\n\n # Demands from parking roads.\n parking_edge_type = ['highway.service']\n parking_edges = net.filterEdges(parking_edge_type)\n time_sampler_parameters = random_traffic_generator.TimeSamplerGammaMeanStd(\n demand_mean_seconds, demand_stddev_seconds)\n car_per_meter_parking = serving_car_density * population_portion\n\n parking = traffic_generator.create_evacuation_shortest_path_demands(\n parking_edges,\n time_sampler_parameters,\n car_per_meter_parking,\n evacuation_edges,\n evacuation_path_trees,\n evacuation_path_length)\n\n all_demands = residential + parking\n departure_time_points = [x.time for x in all_demands]\n cars_per_time_point = [x.num_cars for x in all_demands]\n departure_time_points = np.array(departure_time_points) / 3600\n print('Shortest path demands. 
Total vehicles: ', sum(cars_per_time_point))\n\n # Output the demand xml file.\n demands_dir = os.path.join(self._output_dir, _DEMANDS)\n file_util.f_makedirs(demands_dir)\n output_hist_figure_path = os.path.join(\n demands_dir,\n 'departure_time_histogram_shortest_path_std_%s_portion_%s.pdf' %\n (demand_stddev_hours, population_portion))\n output_cumulative_figure_path = os.path.join(\n demands_dir,\n 'departure_time_cumulative_shortest_path_std_%s_portion_%s.pdf' %\n (demand_stddev_hours, population_portion))\n pkl_file = os.path.join(\n demands_dir,\n 'demands_shortest_path_tuple_std_%s_portion_%s.pkl' %\n (demand_stddev_hours, population_portion))\n routes_file = os.path.join(\n demands_dir,\n 'demands_shortest_path_std_%s_portion_%s.rou.xml' %\n (demand_stddev_hours, population_portion))\n\n visualizer.plot_demands_departure_time(\n departure_time_points,\n cars_per_time_point,\n output_hist_figure_path=output_hist_figure_path,\n output_cumulative_figure_path=output_cumulative_figure_path)\n file_util.save_variable(pkl_file, all_demands)\n traffic_generator.write_evacuation_vehicle_path_demands(\n all_demands, routes_file)\n\n def parse_fcd_results_single_file(self, hours):\n \"\"\"Extract the data then save to file.\"\"\"\n net = sumolib.net.readNet(self._sumo_net_file)\n data_parser = simulation_data_parser.SimulationDataParser()\n plot_edges = net.getEdges()\n\n fcd_file = os.path.join(self._output_dir, 'traffic.fcd.xml')\n output_folder = os.path.join(self._output_dir, 'trajectory/')\n if not file_util.f_exists(output_folder):\n file_util.f_mkdir(output_folder)\n\n time_segment_length_seconds = hours * 3600\n time_range_seconds = [0, 3600 * 12]\n data_parser.save_batch_edge_id_to_trajectory(\n fcd_file, plot_edges,\n time_range=time_range_seconds,\n time_segment_length=time_segment_length_seconds,\n parse_time_step=10, output_folder=output_folder)\n\n def parse_fcd_results_multiple_files(self):\n \"\"\"Extract the data then save to file.\"\"\"\n net = sumolib.net.readNet(self._sumo_net_file)\n data_parser = simulation_data_parser.SimulationDataParser()\n plot_edges = net.getEdges()\n\n fcd_file_folder = 'output/fcd_segments/'\n # fcd_file_list = os.listdir(fcd_file_folder)\n fcd_file_list = ['traffic.segment_2.fcd.xml']\n\n output_folder = os.path.join(self._output_dir, 'trajectory/')\n for fcd_file in fcd_file_list:\n print('Analyzing file: ', fcd_file)\n # time_segment_length = 0.5 * 3600\n time_segment_length = None\n # time_range = [0, 3600*12]\n time_range = None\n\n data_parser.save_batch_edge_id_to_trajectory(\n os.path.join(fcd_file_folder, fcd_file),\n plot_edges,\n time_range=time_range, time_segment_length=time_segment_length,\n parse_time_step=10, output_folder=output_folder)\n\n def visualize_fcd_on_map(self):\n \"\"\"Plot metric maps.\n\n Pay attention to the map.\n \"\"\"\n net = sumolib.net.readNet(self._sumo_net_file)\n visualizer = map_visualizer.MapVisualizer(net)\n plot_edges = net.getEdges()\n\n trajectory_folder = os.path.join(self._output_dir, 'trajectory/')\n output_folder = os.path.join(trajectory_folder, 'trajectory_fig/')\n if not file_util.f_exists(output_folder):\n file_util.f_mkdir(output_folder)\n\n trajectory_file_list = os.listdir(trajectory_folder)\n # trajectory_file_list = [\n # 'edge_id_to_trajectory_9000_10800.pkl']\n\n for trajectory_file in trajectory_file_list:\n if not trajectory_file.endswith('.pkl'):\n continue\n trajectory_pkl_file = os.path.join(trajectory_folder, trajectory_file)\n print('Loading file: ', trajectory_pkl_file)\n 
edge_id_to_trajectory = file_util.load_variable(trajectory_pkl_file)\n print('Time range: ', edge_id_to_trajectory['time_interval'])\n output_figure_path = (output_folder + 'speed_map_%s_%s.pdf' %\n (int(edge_id_to_trajectory['time_interval'][0]),\n int(edge_id_to_trajectory['time_interval'][1])))\n\n visualizer.plot_edge_trajectory_histogram_on_map(\n plot_edges,\n edge_id_to_trajectory,\n output_figure_path=output_figure_path,\n plot_max_speed=13.4112)\n\n def _extract_detector_data(self):\n \"\"\"Extracts detector data form xml files.\"\"\"\n data_parser = simulation_data_parser.SimulationDataParser()\n visualizer = map_visualizer.MapVisualizer()\n\n detector_folder = os.path.join(self._output_dir, 'detector/')\n detector_trajectory_folder = os.path.join(\n detector_folder, 'detector_trajectory/')\n\n if not file_util.exists(detector_trajectory_folder):\n file_util.mkdir(detector_trajectory_folder)\n\n detector_files = os.listdir(detector_folder)\n for detector_file in detector_files:\n if not detector_file.endswith('.xml'):\n continue\n # print('Extract file: ', detector_file)\n output_file = os.path.splitext(detector_file)[0]+'.pkl'\n output_file = os.path.join(detector_trajectory_folder, output_file)\n detector_file = os.path.join(detector_folder, detector_file)\n print('Save file: ', output_file)\n data_parser.get_and_save_detector_data(detector_file, output_file)\n\n # Creates figures for individual detector.\n output_figure_folder = os.path.join(detector_folder, 'detector_fig/')\n if not file_util.f_exists(output_figure_folder):\n file_util.f_mkdir(output_figure_folder)\n visualizer.plot_individual_detector(\n detector_trajectory_folder, output_figure_folder)\n\n # NB: This method and the following seem hard-coded for Paradise.\n def plot_save_detector_data_normal(self):\n \"\"\"Plots detector data.\n\n Paradise evacuation edges:\n '27323694.1622', # Skyway Rd.\n '10293408#4', # Neal Rd.\n '-184839999#0', # Clark Rd.\n '-538864403#0' # Pentz Rd.\n \"\"\"\n self._extract_detector_data()\n\n detector_trajectory_folder = os.path.join(\n self._output_dir, 'detector/detector_trajectory/')\n output_figure_folder = os.path.join(\n self._output_dir, 'detector/detector_fig/')\n if not file_util.exists(output_figure_folder):\n file_util.mkdir(output_figure_folder)\n visualizer = map_visualizer.MapVisualizer()\n\n detector_pkl_files_by_group = [\n [detector_trajectory_folder + 'e1Detector_27323694_0_0.pkl',\n detector_trajectory_folder + 'e1Detector_27323694_1_1.pkl'],\n [detector_trajectory_folder + 'e1Detector_10293408#4_0_2.pkl'],\n [detector_trajectory_folder + 'e1Detector_-184839999#0_0_3.pkl',\n detector_trajectory_folder + 'e1Detector_-184839999#0_1_4.pkl'],\n [detector_trajectory_folder + 'e1Detector_-538864403#0_0_5.pkl']]\n visualizer.plot_detector_flow_density_by_group(\n detector_pkl_files_by_group,\n ['Skyway', 'Neal_Rd', 'Clark_Rd', 'Pentz_Rd'],\n output_figure_folder=output_figure_folder)\n\n # Cumulative vehicle flow.\n detector_pkl_files = [\n detector_trajectory_folder + 'e1Detector_27323694_0_0.pkl',\n detector_trajectory_folder + 'e1Detector_27323694_1_1.pkl',\n detector_trajectory_folder + 'e1Detector_10293408#4_0_2.pkl',\n detector_trajectory_folder + 'e1Detector_-184839999#0_0_3.pkl',\n detector_trajectory_folder + 'e1Detector_-184839999#0_1_4.pkl',\n detector_trajectory_folder + 'e1Detector_-538864403#0_0_5.pkl']\n\n visualizer.plot_detector_arrival_time_by_group(\n detector_pkl_files,\n output_figure_folder)\n\n # NB: This method and the following seem hard-coded for 
Paradise.\n def plot_save_detector_data_reverse(self):\n \"\"\"Plots detector data.\n\n Paradise evacuation edges:\n '27323694.1622', # Skyway Rd.\n '37625137#0.49' # Skyway Rd reverse.\n '10293408#4', # Neal Rd.\n '-184839999#0', # Clark Rd.\n '-538864403#0' # Pentz Rd.\n \"\"\"\n self._extract_detector_data()\n\n detector_trajectory_folder = os.path.join(\n self._output_dir, 'detector/detector_trajectory/')\n output_figure_folder = os.path.join(\n self._output_dir, 'detector/detector_fig/')\n if not file_util.exists(output_figure_folder):\n file_util.mkdir(output_figure_folder)\n visualizer = map_visualizer.MapVisualizer()\n\n detector_pkl_files_by_group = [\n [detector_trajectory_folder + 'e1Detector_27323694_0_0.pkl',\n detector_trajectory_folder + 'e1Detector_27323694_1_1.pkl',\n detector_trajectory_folder + 'e1Detector_37625137#1_0_6.pkl',\n detector_trajectory_folder + 'e1Detector_37625137#1_1_7.pkl'],\n [detector_trajectory_folder + 'e1Detector_10293408#4_0_2.pkl'],\n [detector_trajectory_folder + 'e1Detector_-184839999#0_0_3.pkl',\n detector_trajectory_folder + 'e1Detector_-184839999#0_1_4.pkl',\n detector_trajectory_folder + 'e1Detector_-184839999#0_2_8.pkl',\n detector_trajectory_folder + 'e1Detector_-184839999#0_3_9.pkl'],\n [detector_trajectory_folder + 'e1Detector_-538864403#0_0_5.pkl',\n detector_trajectory_folder + 'e1Detector_-538864403#0_1_10.pkl']]\n visualizer.plot_detector_flow_density_by_group(\n detector_pkl_files_by_group,\n ['Skyway', 'Neal_Rd', 'Clark_Rd', 'Pentz_Rd'],\n output_figure_folder=output_figure_folder)\n\n # Cumulative vehicle flow.\n detector_pkl_files = [\n 'e1Detector_27323694_0_0.pkl',\n 'e1Detector_27323694_1_1.pkl',\n 'e1Detector_37625137#1_0_6.pkl',\n 'e1Detector_37625137#1_1_7.pkl',\n 'e1Detector_10293408#4_0_2.pkl',\n 'e1Detector_-184839999#0_0_3.pkl',\n 'e1Detector_-184839999#0_1_4.pkl',\n 'e1Detector_-184839999#0_2_8.pkl',\n 'e1Detector_-184839999#0_3_9.pkl',\n 'e1Detector_-538864403#0_0_5.pkl',\n 'e1Detector_-538864403#0_1_10.pkl']\n detector_pkl_files = [os.path.join(detector_trajectory_folder, filename) for\n filename in detector_pkl_files]\n\n visualizer.plot_detector_arrival_time_by_group(\n detector_pkl_files,\n output_figure_folder)\n\n def _analyze_summary_demands_vs_evacuation(self, demand_file,\n summary_file,\n output_dir=None):\n \"\"\"Plot summary vs demands.\"\"\"\n data_parser = simulation_data_parser.SimulationDataParser()\n visualizer = map_visualizer.MapVisualizer()\n\n demands = file_util.load_variable(demand_file)\n sorted_demands = sorted(demands, key=lambda x: x.time)\n demand_time_line = [x.time for x in sorted_demands]\n demand_time_line = np.array(demand_time_line) / 3600\n demand_car_count = [x.num_cars for x in sorted_demands]\n demand_cumulative_values = (\n np.cumsum(demand_car_count) / sum(demand_car_count))\n\n summary = data_parser.parse_summary_file(summary_file)\n summary_time_line = np.array(summary['time']) / 3600\n summary_cumulative_values = (\n np.array(summary['ended']) / sum(demand_car_count))\n\n # Calculate the gap between them.\n gap_area = visualizer.calculate_gap_area_between_cummulative_curves(\n demand_time_line, demand_cumulative_values,\n summary_time_line, summary_cumulative_values)\n\n if not output_dir:\n return (demand_time_line, demand_cumulative_values, summary_time_line,\n summary_cumulative_values, gap_area)\n\n # Plot demands v.s. 
evacuation.\n fig = pylab.figure(figsize=(8, 6))\n ax = fig.add_subplot(111)\n pylab.plt.plot(demand_time_line, demand_cumulative_values, label='Demands')\n pylab.plt.plot(\n summary_time_line, summary_cumulative_values, label='Evacuation')\n visualizer.add_pertentage_interception_lines(\n summary_time_line, summary_cumulative_values, [0.5, .9, .95])\n pylab.plt.xlabel('Time [h]')\n pylab.plt.ylabel('Cumulative percentage of total vehicles')\n pylab.plt.legend()\n ax.autoscale_view(True, True, True)\n output_figure_path = os.path.join(output_dir, 'evacuation_curve.pdf')\n pylab.savefig(output_figure_path)\n\n return (demand_time_line, demand_cumulative_values, summary_time_line,\n summary_cumulative_values, gap_area)\n\n def plot_summary_demands_vs_evacuation_group(\n self, demand_files, output_dir_names, labels):\n \"\"\"Plots demands-evacuation curves.\"\"\"\n gap_areas = []\n\n fig = pylab.figure(figsize=(8, 6))\n fig.add_subplot(111)\n\n for i, (demand_file, output_dir_name) in enumerate(\n zip(demand_files, output_dir_names)):\n print('Processing: ', demand_file, 'Summary: ', output_dir_name)\n (demand_time_line, demand_cumulative_values,\n summary_time_line, summary_cumulative_values,\n gap_area) = self._analyze_summary_demands_vs_evacuation(\n demand_file, os.path.join(output_dir_name, 'summary.xml'), None)\n gap_areas.append(gap_area)\n print('Gap: %.3f' % gap_area)\n\n pylab.plt.plot(demand_time_line, demand_cumulative_values, '--',\n color=pylab.plt.cm.jet(i/3), label=labels[i])\n pylab.plt.plot(summary_time_line, summary_cumulative_values,\n color=pylab.plt.cm.jet(i/3))\n pylab.plt.xlim(0, 8)\n pylab.plt.legend(loc='lower right')\n pylab.plt.xlabel('Time [h]')\n pylab.plt.ylabel('Vehicles cumulative percentage')\n pylab.savefig(os.path.join(self._output_dir, 'test.pdf'))\n\n def plot_traveling_time(self):\n \"\"\"Plot tripinfo data.\"\"\"\n visualizer = map_visualizer.MapVisualizer()\n data_parser = simulation_data_parser.SimulationDataParser()\n tripinfo_file = 'output/tripinfo.xml'\n # output_folder = 'output/'\n # output_file = 'tripinfo.pkl'\n\n tripinfo = data_parser.get_tripinfo_attribute_to_trips(tripinfo_file)\n\n bins = np.linspace(0, 12, 49)\n positions_on_edge = (np.array(tripinfo['depart']) -\n np.array(tripinfo['departDelay'])) / 3600\n values_on_edge = np.array(tripinfo['duration'])\n print(len(positions_on_edge), len(values_on_edge))\n\n # print(positions_on_edge)\n bin_mean, bin_boundary = visualizer._histogram_along_edge(\n values_on_edge, positions_on_edge, bins=bins)\n\n # print(bin_mean, bin_boundary)\n fig = pylab.figure(figsize=(8, 6))\n fig.add_subplot(111)\n pylab.plt.plot(bin_boundary[:-1], bin_mean)\n pylab.plt.xlabel('Time [h]')\n pylab.plt.xlim(0, 10)\n pylab.plt.ylim(0, 10000)\n pylab.plt.ylabel('Average traveling time.')\n pylab.savefig(os.path.join(self._output_dir, 'traveling_time_hist.pdf'))\n\n def plot_map(self, output_file_name):\n \"\"\"Plot the edges by types.\"\"\"\n net = sumolib.net.readNet(self._sumo_net_file)\n visualizer = map_visualizer.MapVisualizer(net)\n residential_edge_type = ['highway.residential']\n parking_edge_type = ['highway.service']\n residential_edges = net.filterEdges(residential_edge_type)\n parking_edges = net.filterEdges(parking_edge_type)\n\n visualizer.plot_edges(\n [(residential_edges, 'lime', 0.2),\n (parking_edges, 'darkgreen', 0.2)],\n output_figure_path=os.path.join(self._output_dir, output_file_name))\n\n def data_explore(self):\n \"\"\"Generate cars from residential roads.\"\"\"\n net = 
sumolib.net.readNet(self._sumo_net_file)\n residential_edge_type = ['highway.residential']\n residential_edges = net.filterEdges(residential_edge_type)\n service_edge_type = ['highway.service']\n service_edges = net.filterEdges(service_edge_type)\n\n residential_road_lengths = []\n for e in residential_edges:\n residential_road_lengths.append(e.getLength())\n\n service_road_lengths = []\n for e in service_edges:\n service_road_lengths.append(e.getLength())\n\n print('Total number of all edges:', len(net.getEdges()))\n\n print('Residential road stats')\n print('Sum of lengths: ', np.sum(residential_road_lengths))\n print('Max length: ', np.max(residential_road_lengths))\n print('Min length: ', np.min(residential_road_lengths))\n print('# edges < 1 / 0.0415: ',\n np.sum(np.array(residential_road_lengths) < 1/0.0415))\n print('Number of residential roads: ', len(residential_road_lengths))\n print('')\n print('Service road stats')\n print('Sum of lengths: ', np.sum(service_road_lengths))\n print('Max length: ', np.max(service_road_lengths))\n print('Min length: ', np.min(service_road_lengths))\n print('# edges < 1 / 0.0415: ',\n np.sum(np.array(service_road_lengths) < 1/0.0415))\n print('Number of service roads: ', len(service_road_lengths))\n",
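The analyzer above converts the demand mean/std from hours to seconds and hands them to random_traffic_generator.TimeSamplerGammaMeanStd, whose internals are not shown here. A minimal stand-alone sketch of one plausible gamma parameterization by mean and standard deviation (the function name and defaults below are illustrative assumptions, not the library's API):

import numpy as np

def sample_departure_times_seconds(mean_hours, stddev_hours, num_cars, seed=0):
  # For a gamma distribution: mean = shape * scale and var = shape * scale**2,
  # so shape = (mean / std)**2 and scale = std**2 / mean.
  mean_s, std_s = mean_hours * 3600.0, stddev_hours * 3600.0
  shape = (mean_s / std_s) ** 2
  scale = std_s ** 2 / mean_s
  rng = np.random.RandomState(seed)
  return rng.gamma(shape, scale, size=num_cars)

times = sample_departure_times_seconds(3.0, 1.5, num_cars=11072)
print(times.mean() / 3600.0)  # Close to the 3-hour demand mean.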
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom smith import loss_fns\n\n\nclass LossFnsTest(tf.test.TestCase):\n\n def test_get_prediction_loss_cosine(self):\n input_tensor_1 = tf.constant(\n [[0.5, 0.7, 0.8, 0.9, 0.1, 0.1], [0.1, 0.3, 0.3, 0.3, 0.1, 0.1]],\n dtype=tf.float32)\n input_tensor_2 = tf.constant(\n [[0.1, 0.2, 0.2, 0.2, 0.2, 0.1], [0.1, 0.4, 0.4, 0.4, 0.1, 0.1]],\n dtype=tf.float32)\n labels = tf.constant([0, 1.0], dtype=tf.float32)\n neg_to_pos_example_ratio = 1.0\n similarity_score_amplifier = 6.0\n loss, per_example_loss, similarities = \\\n loss_fns.get_prediction_loss_cosine(\n input_tensor_1=input_tensor_1,\n input_tensor_2=input_tensor_2,\n labels=labels,\n similarity_score_amplifier=similarity_score_amplifier,\n neg_to_pos_example_ratio=neg_to_pos_example_ratio)\n with tf.Session() as sess:\n sess.run([tf.global_variables_initializer()])\n loss_numpy = sess.run(loss)\n per_example_loss_numpy = sess.run(per_example_loss)\n similarities_numpy = sess.run(similarities)\n self.assertEqual(loss_numpy.shape, ())\n self.assertDTypeEqual(loss_numpy, np.float32)\n\n self.assertEqual(per_example_loss_numpy.shape, (2,))\n self.assertDTypeEqual(per_example_loss_numpy, np.float32)\n\n self.assertEqual(similarities_numpy.shape, (2,))\n self.assertDTypeEqual(similarities_numpy, np.float32)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Reused modules for building actors/critics for grasping task.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import layers as contrib_layers\nfrom tensorflow.contrib import slim as contrib_slim\n\nslim = contrib_slim\n\n# Register normalization functions as configurables.\ngin.config.external_configurable(\n contrib_layers.layer_norm, module='tf.contrib.layers')\ngin.config.external_configurable(\n contrib_layers.batch_norm, module='tf.contrib.layers')\n\n\[email protected]\ndef argscope(is_training=None, normalizer_fn=slim.layer_norm):\n \"\"\"Default TF argscope used for convnet-based grasping models.\n\n Args:\n is_training: Whether this argscope is for training or inference.\n normalizer_fn: Which conv/fc normalizer to use.\n Returns:\n Dictionary of argument overrides.\n \"\"\"\n with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n weights_initializer=tf.truncated_normal_initializer(stddev=0.01),\n activation_fn=tf.nn.relu,\n normalizer_fn=normalizer_fn):\n with slim.arg_scope(\n [slim.conv2d, slim.max_pool2d], stride=2, padding='VALID') as scope:\n return scope\n\n\ndef tile_to_match_context(net, context):\n \"\"\"Tiles net along a new axis=1 to match context.\n\n Repeats minibatch elements of `net` tensor to match multiple corresponding\n minibatch elements from `context`.\n Args:\n net: Tensor of shape [num_batch_net, ....].\n context: Tensor of shape [num_batch_net, num_examples, context_size].\n Returns:\n Tensor of shape [num_batch_net, num_examples, ...], where each minibatch\n element of net has been tiled M times where M = num_batch_context /\n num_batch_net.\n \"\"\"\n with tf.name_scope('tile_to_context'):\n num_samples = tf.shape(context)[1]\n net_examples = tf.expand_dims(net, 1) # [batch_size, 1, ...]\n\n net_ndim = len(net_examples.get_shape().as_list())\n # Tile net by num_samples in axis=1.\n multiples = [1]*net_ndim\n multiples[1] = num_samples\n net_examples = tf.tile(net_examples, multiples)\n return net_examples\n\n\ndef add_context(net, context):\n \"\"\"Merges visual perception with context using elementwise addition.\n\n Actions are reshaped to match net dimension depth-wise, and are added to\n the conv layers by broadcasting element-wise across H, W extent.\n\n Args:\n net: Tensor of shape [batch_size, H, W, C].\n context: Tensor of shape [batch_size * num_examples, C].\n Returns:\n Tensor with shape [batch_size * num_examples, H, W, C]\n \"\"\"\n num_batch_net = tf.shape(net)[0]\n _, h, w, d1 = net.get_shape().as_list()\n _, d2 = context.get_shape().as_list()\n assert d1 == d2\n context = tf.reshape(context, [num_batch_net, -1, d2])\n net_examples = tile_to_match_context(net, context)\n # Flatten first two 
dimensions.\n net = tf.reshape(net_examples, [-1, h, w, d1])\n context = tf.reshape(context, [-1, 1, 1, d2])\n context = tf.tile(context, [1, h, w, 1])\n net = tf.add_n([net, context])\n return net\n",
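A plain-NumPy shape walkthrough of the tile-and-broadcast pattern that tile_to_match_context and add_context implement above (illustrative only; the modules do the same with tf ops):

import numpy as np

batch, num_examples, h, w, c = 2, 3, 4, 4, 8
net = np.ones((batch, h, w, c))               # visual features
context = np.ones((batch * num_examples, c))  # e.g. action embeddings

context = context.reshape(batch, num_examples, c)
# Tile net along a new axis=1 so each minibatch element matches its examples.
net_examples = np.tile(net[:, None], (1, num_examples, 1, 1, 1))
# Flatten the first two dimensions, then broadcast context across H, W.
merged = (net_examples.reshape(-1, h, w, c) +
          context.reshape(-1, 1, 1, c))
print(merged.shape)  # (batch * num_examples, h, w, c) == (6, 4, 4, 8)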
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main file for pre-training or fine-tuning BERT.\"\"\"\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport jax\nfrom ml_collections import config_flags\nimport tensorflow as tf\n\nfrom flax_models.bert import run_classifier # pylint: disable=fine-too-long CHANGEME\nfrom flax_models.bert import run_pretraining # pylint: disable=fine-too-long CHANGEME\n\n\nFLAGS = flags.FLAGS\n\nconfig_flags.DEFINE_config_file(\n \"config\", None, \"Training configuration.\", lock_config=True)\nflags.DEFINE_string(\"workdir\", None, \"Work unit directory.\")\nflags.mark_flags_as_required([\"config\", \"workdir\"])\n\n\n\ndef main(argv):\n del argv\n\n # Hide any GPUs form TensorFlow. Otherwise TF might reserve memory and make\n # it unavailable to JAX.\n tf.config.experimental.set_visible_devices([], \"GPU\")\n\n\n train_mode = FLAGS.config.mode\n train_lib = run_classifier\n if train_mode == \"pretraining\":\n train_lib = run_pretraining\n elif train_mode != \"classification\":\n logging.warning(\"Unknown mode: %s -- running classification\", train_mode)\n\n train_lib.train_and_evaluate(FLAGS.config, FLAGS.workdir)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trains a keras model on a tf.Dataset.\"\"\"\n\nimport os.path\nimport time\n\nfrom absl import app\nfrom absl import flags\n\nimport gin\nimport tensorflow.compat.v2 as tf\n\nfrom soft_sort.matrix_factorization import training\n\n\nflags.DEFINE_string('base_dir', None, 'Directory to save trained model in.')\nflags.DEFINE_integer('seed', None, 'A random seed.')\nflags.DEFINE_multi_string('gin_config', [], 'List of config files paths.')\nflags.DEFINE_multi_string(\n 'gin_bindings', [], 'Multi string of gin parameter bindings.')\nflags.DEFINE_string(\n 'config_path', 'matrix_factorization/configs/',\n 'Where to find the gin config files.')\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n tf.enable_v2_behavior()\n filenames = [os.path.join(FLAGS.config_path, p) for p in FLAGS.gin_config]\n gin.parse_config_files_and_bindings(filenames, FLAGS.gin_bindings)\n\n seed = FLAGS.seed if FLAGS.seed is not None else int(time.time())\n tf.random.set_seed(seed)\n training.TrainingLoop(workdir=FLAGS.base_dir).run()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Analyzes the linked QA data with indexed corpus.\"\"\"\n\nimport collections\nimport itertools\nimport json\nimport os\nimport pickle\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom language.google.drfact import index_corpus\nfrom language.labs.drkit import search_utils\nimport networkx as nx\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tqdm import tqdm\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_boolean(\"do_init_concept2freq\", None,\n \"Whether to run mention counting.\")\nflags.DEFINE_boolean(\"do_meantion_counting\", None,\n \"Whether to run paragraph preprocessing.\")\nflags.DEFINE_boolean(\"do_generate_entity_networks\", None,\n \"Whether to run networkx graph of entities.\")\nflags.DEFINE_boolean(\"do_qa_hop_analysis\", None,\n \"Whether to analyze the hops between q and a concepts.\")\n\nflags.DEFINE_string(\"index_data_dir\", \"\",\n \"The path to the folder of indexed files.\")\nflags.DEFINE_string(\"linked_qa_file\", \"\",\n \"The path to the folder of linked qa files.\")\nflags.DEFINE_string(\"analysis_linked_qa_file\", \"\",\n \"The path to the folder of linked qa files.\")\n\nflags.DEFINE_string(\"drkit_format_corpus_path\", \"\",\n \"The path to the drkit-format processed corpus.\")\nflags.DEFINE_string(\"concept_frequency_dict_path\", \"\",\n \"The path to save the concept2freq.\")\nflags.DEFINE_string(\"corpus_concept_vocab\", \"\",\n \"The path to save the concept2freq.\")\n\n\ndef do_init_concept2freq():\n \"\"\"Save the number of mentions of each concept.\"\"\"\n with tf.gfile.Open(FLAGS.drkit_format_corpus_path) as f:\n logging.info(\"Reading the corpus from %s ...\", f.name)\n jsonlines = f.read().split(\"\\n\")\n data = [json.loads(jsonline) for jsonline in jsonlines if jsonline]\n concept2freq = collections.defaultdict(lambda: 0)\n for instance in tqdm(data[:], desc=\"Computing concept2freq\"):\n for mention in instance[\"mentions\"]:\n concept2freq[mention[\"kb_id\"]] += 1\n with tf.gfile.Open(FLAGS.concept_frequency_dict_path, \"w\") as f:\n logging.info(\"Saving the concept2freq to %s ...\", f.name)\n json.dump(concept2freq, f)\n logging.info(\"# of non-empty concepts: %d. 
\", len(concept2freq))\n concepts = [\n k for k, _ in sorted(\n concept2freq.items(), key=lambda item: item[1], reverse=True)\n ]\n with tf.gfile.Open(FLAGS.corpus_concept_vocab, \"w\") as f:\n logging.info(\"Saving the concept2freq to %s ...\", f.name)\n f.write(\"\\n\".join(concepts))\n\n\ndef load_entity2mention():\n \"\"\"Loads the entity2mention data.\"\"\"\n e2m_checkpoint = os.path.join(FLAGS.index_data_dir, \"ent2ment.npz\")\n with tf.device(\"/cpu:0\"):\n logging.info(\"Reading %s\", e2m_checkpoint)\n tf_e2m_data, tf_e2m_indices, tf_e2m_rowsplits = (\n search_utils.load_ragged_matrix(\"ent2ment\", e2m_checkpoint))\n with tf.name_scope(\"RaggedConstruction\"):\n e2m_ragged_ind = tf.RaggedTensor.from_row_splits(\n values=tf_e2m_indices, row_splits=tf_e2m_rowsplits, validate=False)\n e2m_ragged_val = tf.RaggedTensor.from_row_splits(\n values=tf_e2m_data, row_splits=tf_e2m_rowsplits, validate=False)\n return e2m_ragged_ind, e2m_ragged_val\n\n\ndef do_meantion_counting(concept2freq):\n \"\"\"Executes the mention counting process for a linked QA file.\"\"\"\n with tf.gfile.Open(FLAGS.linked_qa_file) as f:\n logging.info(\"Reading linked QA data from %s ...\", f.name)\n jsonlines = f.read().split(\"\\n\")\n data = [json.loads(jsonline) for jsonline in jsonlines if jsonline]\n data_analysis = []\n for instance in tqdm(data, desc=\"Mention Counting\"):\n question_concepts = [e[\"kb_id\"] for e in instance[\"entities\"]]\n answer_concepts = [sf[\"kb_id\"] for sf in instance[\"supporting_facts\"]]\n question_concepts_num_mentions = [\n (concept, concept2freq.get(concept, 0)) for concept in question_concepts\n ]\n answer_concepts_num_mentions = [\n (concept, concept2freq.get(concept, 0)) for concept in answer_concepts\n ]\n analysis = dict()\n analysis[\"id\"] = instance[\"_id\"]\n analysis[\"q\"] = instance[\"question\"]\n analysis[\"a\"] = instance[\"answer\"]\n analysis[\"question_concepts_analysis\"] = question_concepts_num_mentions\n analysis[\"answer_concepts_analysis\"] = answer_concepts_num_mentions\n analysis[\"avg_num_mentions_question_concepts\"] = float(\n np.mean([c[1] for c in question_concepts_num_mentions]))\n analysis[\"avg_num_mentions_answer_concepts\"] = float(\n np.mean([c[1] for c in answer_concepts_num_mentions]))\n data_analysis.append(analysis)\n with tf.gfile.Open(FLAGS.analysis_linked_qa_file, \"w\") as f_out:\n logging.info(\"Writing analysis to output file...%s\", f_out.name)\n f_out.write(\"\\n\".join(json.dumps(q) for q in data_analysis))\n\n\ndef do_generate_entity_networks(entity2id, e2m_ragged_ind, mentions):\n \"\"\"Generates and saves the networkx graph object to file.\"\"\"\n entity_connection_nxgraph = nx.Graph()\n for concept_i_id in tqdm(range(len(entity2id))):\n cooccured_mentions = e2m_ragged_ind[concept_i_id]\n for mid in cooccured_mentions.numpy():\n concept_j_id = mentions[mid][0]\n if concept_i_id != concept_j_id:\n entity_connection_nxgraph.add_edge(concept_i_id, concept_j_id)\n with tf.gfile.Open(\n os.path.join(FLAGS.index_data_dir, \"entity_network.gpickle\"), \"wb\") as f:\n logging.info(\"Reading %s\", f.name)\n pickle.dump(f)\n\n\ndef find_shortest_path(entity_networks,\n entity2id,\n concept_i,\n concept_j,\n max_hops=3,\n only_length=True):\n \"\"\"Finds shortest paths between two nodes.\"\"\"\n assert max_hops >= 1\n concept_i_id, concept_j_id = entity2id[concept_i], entity2id[concept_j]\n print(\"concept_ids:\", concept_i_id, \",\", concept_j_id)\n\n k = nx.shortest_path_length(\n entity_networks, source=concept_i_id, 
target=concept_j_id)\n if only_length:\n return k\n else:\n max_hops = min(max_hops, k)\n print(\"shortest_path_length: \", k)\n paths = nx.all_simple_paths(\n entity_networks,\n source=concept_i_id,\n target=concept_j_id,\n cutoff=max_hops)\n return k, list(paths)\n\n\ndef do_qa_hop_analysis(entity2id):\n \"\"\"Analyzes the hops between question concepts and answer concepts.\"\"\"\n with tf.gfile.Open(\n os.path.join(FLAGS.index_data_dir, \"entity_network.gpickle\"), \"rb\") as f:\n entity_networks = pickle.load(f)\n\n with tf.gfile.Open(FLAGS.analysis_linked_qa_file) as f:\n logging.info(\"Reading analysis linked QA data from %s ...\", f.name)\n jsonlines = f.read().split(\"\\n\")\n data = [json.loads(jsonline) for jsonline in jsonlines if jsonline]\n data_analysis = []\n for instance in tqdm(data, desc=\"Hop analysis\"):\n question_concepts = [e[0] for e in instance[\"question_concepts_analysis\"]]\n answer_concepts = [e[0] for e in instance[\"answer_concepts_analysis\"]]\n hops = []\n for qc, ac in itertools.product(question_concepts, answer_concepts):\n print(qc, ac)\n k = find_shortest_path(entity_networks, entity2id, qc, ac)\n if k >= 1:\n hops.append(k)\n instance[\"qa_hop_min\"] = min(hops)\n instance[\"qa_hop_max\"] = max(hops)\n instance[\"qa_hop_avg\"] = np.mean(hops)\n data_analysis.append(instance)\n with tf.gfile.Open(FLAGS.analysis_linked_qa_file, \"w\") as f_out:\n logging.info(\"Writing hop analysis to output file...%s\", f_out.name)\n f_out.write(\"\\n\".join(json.dumps(q) for q in data_analysis))\n\n\ndef main(_):\n # Load all basic data.\n\n if FLAGS.do_init_concept2freq:\n do_init_concept2freq()\n return\n\n entity2id = index_corpus.load_concept_vocab(FLAGS.corpus_concept_vocab)\n if FLAGS.do_meantion_counting:\n with tf.gfile.Open(FLAGS.concept_frequency_dict_path) as f:\n logging.info(\"Reading %s\", f.name)\n concept2freq = json.load(f)\n do_meantion_counting(concept2freq)\n if FLAGS.do_generate_entity_networks:\n\n with tf.gfile.Open(\n os.path.join(FLAGS.index_data_dir, \"mentions.npy\"), \"rb\") as f:\n logging.info(\"Reading %s\", f.name)\n mentions = np.load(f)\n e2m_ragged_ind, _ = load_entity2mention()\n do_generate_entity_networks(entity2id, e2m_ragged_ind, mentions)\n if FLAGS.do_qa_hop_analysis:\n do_qa_hop_analysis(entity2id)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
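A tiny self-contained example of the two networkx calls that find_shortest_path above relies on:

import networkx as nx

g = nx.Graph()
g.add_edges_from([(0, 1), (1, 2), (2, 3), (0, 3)])
print(nx.shortest_path_length(g, source=0, target=2))  # 2
print(list(nx.all_simple_paths(g, source=0, target=2, cutoff=2)))
# [[0, 1, 2], [0, 3, 2]]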
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Specifications for different types of input/output domains.\"\"\"\n\nimport abc\nimport collections\n\nimport gin\nimport numpy as np\nimport six\nfrom six.moves import range\n\nfrom amortized_bo import utils\n\nBOS_TOKEN = '<' # Beginning of sequence token.\nEOS_TOKEN = '>' # End of sequence token.\nPAD_TOKEN = '_' # End of sequence token.\nMASK_TOKEN = '*' # End of sequence token.\nSEP_TOKEN = '|' # A special token for separating tokens for serialization.\n\n\[email protected]\nclass Vocabulary(object):\n \"\"\"Basic vocabulary used to represent output tokens for domains.\"\"\"\n\n def __init__(self,\n tokens,\n include_bos=False,\n include_eos=False,\n include_pad=False,\n include_mask=False,\n bos_token=BOS_TOKEN,\n eos_token=EOS_TOKEN,\n pad_token=PAD_TOKEN,\n mask_token=MASK_TOKEN):\n \"\"\"A token vocabulary.\n\n Args:\n tokens: An list of tokens to put in the vocab. If an int, will be\n interpreted as the number of tokens and '0', ..., 'tokens-1' will be\n used as tokens.\n include_bos: Whether to append `bos_token` to `tokens` that marks the\n beginning of a sequence.\n include_eos: Whether to append `eos_token` to `tokens` that marks the\n end of a sequence.\n include_pad: Whether to append `pad_token` to `tokens` to marks past end\n of sequence.\n include_mask: Whether to append `mask_token` to `tokens` to mark masked\n positions.\n bos_token: A special token than marks the beginning of sequence.\n Ignored if `include_bos == False`.\n eos_token: A special token than marks the end of sequence.\n Ignored if `include_eos == False`.\n pad_token: A special token than marks past the end of sequence.\n Ignored if `include_pad == False`.\n mask_token: A special token than marks MASKED positions for e.g. 
BERT.\n Ignored if `include_mask == False`.\n \"\"\"\n if not isinstance(tokens, collections.Iterable):\n tokens = range(tokens)\n tokens = [str(token) for token in tokens]\n if include_bos:\n tokens.append(bos_token)\n if include_eos:\n tokens.append(eos_token)\n if include_pad:\n tokens.append(pad_token)\n if include_mask:\n tokens.append(mask_token)\n if len(set(tokens)) != len(tokens):\n raise ValueError('tokens not unique!')\n special_tokens = sorted(set(tokens) & set([SEP_TOKEN]))\n if special_tokens:\n raise ValueError(\n f'tokens contains reserved special tokens: {special_tokens}!')\n\n self._tokens = tokens\n self._token_ids = list(range(len(self._tokens)))\n self._id_to_token = collections.OrderedDict(\n zip(self._token_ids, self._tokens))\n self._token_to_id = collections.OrderedDict(\n zip(self._tokens, self._token_ids))\n self._bos_token = bos_token if include_bos else None\n self._eos_token = eos_token if include_eos else None\n self._mask_token = mask_token if include_mask else None\n self._pad_token = pad_token if include_pad else None\n\n def __len__(self):\n return len(self._tokens)\n\n @property\n def tokens(self):\n \"\"\"Return the tokens of the vocabulary.\"\"\"\n return list(self._tokens)\n\n @property\n def token_ids(self):\n \"\"\"Return the tokens ids of the vocabulary.\"\"\"\n return list(self._token_ids)\n\n @property\n def bos(self):\n \"\"\"Returns the index of the BOS token or None if unspecified.\"\"\"\n return (None if self._bos_token is None else\n self._token_to_id[self._bos_token])\n\n @property\n def eos(self):\n \"\"\"Returns the index of the EOS token or None if unspecified.\"\"\"\n return (None if self._eos_token is None else\n self._token_to_id[self._eos_token])\n\n @property\n def mask(self):\n \"\"\"Returns the index of the MASK token or None if unspecified.\"\"\"\n return (None if self._mask_token is None else\n self._token_to_id[self._mask_token])\n\n @property\n def pad(self):\n \"\"\"Returns the index of the PAD token or None if unspecified.\"\"\"\n return (None\n if self._pad_token is None else self._token_to_id[self._pad_token])\n\n def is_valid(self, value):\n \"\"\"Tests if a value is a valid token id and returns a bool.\"\"\"\n return value in self._token_ids\n\n def are_valid(self, values):\n \"\"\"Tests if values are valid token ids and returns an array of bools.\"\"\"\n return np.array([self.is_valid(value) for value in values])\n\n def encode(self, tokens):\n \"\"\"Maps an iterable of string tokens to a list of integer token ids.\"\"\"\n if six.PY3 and isinstance(tokens, bytes):\n # Always use Unicode in Python 3.\n tokens = tokens.decode('utf-8')\n return [self._token_to_id[token] for token in tokens]\n\n def decode(self, values, stop_at_eos=False, as_str=True):\n \"\"\"Maps an iterable of integer token ids to string tokens.\n\n Args:\n values: An iterable of token ids.\n stop_at_eos: Whether to ignore all values after the first EOS token id.\n as_str: Whether to return a list of tokens or a concatenated string.\n\n Returns:\n A string of tokens or a list of tokens if `as_str == False`.\n \"\"\"\n if stop_at_eos and self.eos is None:\n raise ValueError('EOS unspecified!')\n tokens = []\n for value in values:\n value = int(value) # Requires if value is a scalar tensor.\n if stop_at_eos and value == self.eos:\n break\n tokens.append(self._id_to_token[value])\n return ''.join(tokens) if as_str else tokens\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Domain(object):\n \"\"\"Base class of problem domains, which specifies the set of 
valid objects.\"\"\"\n\n @property\n def mask_fn(self):\n \"\"\"Returns a masking function or None.\"\"\"\n\n @abc.abstractmethod\n def is_valid(self, sample):\n \"\"\"Tests if the given sample is valid for this domain.\"\"\"\n\n def are_valid(self, samples):\n \"\"\"Tests if the given samples are valid for this domain.\"\"\"\n return np.array([self.is_valid(sample) for sample in samples])\n\n\nclass DiscreteDomain(Domain):\n \"\"\"Base class for discrete domains: sequences of categorical variables.\"\"\"\n\n def __init__(self, vocab):\n self._vocab = vocab\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n @property\n def vocab(self):\n return self._vocab\n\n def encode(self, samples, **kwargs):\n \"\"\"Maps a list of string tokens to a list of lists of integer token ids.\"\"\"\n return [self.vocab.encode(sample, **kwargs) for sample in samples]\n\n def decode(self, samples, **kwargs):\n \"\"\"Maps list of lists of integer token ids to list of strings.\"\"\"\n return [self.vocab.decode(sample, **kwargs) for sample in samples]\n\n\[email protected]\nclass FixedLengthDiscreteDomain(DiscreteDomain):\n \"\"\"Output is a fixed length discrete sequence.\"\"\"\n\n def __init__(self, vocab_size=None, length=None, vocab=None):\n \"\"\"Creates an instance of this class.\n\n Args:\n vocab_size: An optional integer for constructing a vocab of this size.\n If provided, `vocab` must be `None`.\n length: The length of the domain (required).\n vocab: The `Vocabulary` of the domain. If provided, `vocab_size` must be\n `None`.\n\n Raises:\n ValueError: If neither `vocab_size` nor `vocab` is provided.\n ValueError: If `length` if not provided.\n \"\"\"\n if length is None:\n raise ValueError('length must be provided!')\n if not (vocab_size is None) ^ (vocab is None):\n raise ValueError('Exactly one of vocab_size of vocab must be specified!')\n self._length = length\n if vocab is None:\n vocab = Vocabulary(vocab_size)\n super(FixedLengthDiscreteDomain, self).__init__(vocab)\n\n @property\n def length(self):\n return self._length\n\n @property\n def size(self):\n \"\"\"The number of structures in the Domain.\"\"\"\n return self.vocab_size**self.length\n\n def is_valid(self, sequence):\n return len(sequence) == self.length and self.vocab.are_valid(sequence).all()\n\n def sample_uniformly(self, num_samples, seed=None):\n random_state = utils.get_random_state(seed)\n return np.int32(\n random_state.randint(\n size=[num_samples, self.length], low=0, high=self.vocab_size))\n\n def index_to_structure(self, index):\n \"\"\"Given an integer and target length, encode into structure.\"\"\"\n structure = np.zeros(self.length, dtype=np.int32)\n tokens = [int(token, base=len(self.vocab))\n for token in np.base_repr(index, base=len(self.vocab))]\n structure[-len(tokens):] = tokens\n return structure\n\n def structure_to_index(self, structure):\n \"\"\"Returns the index of a sequence over a vocabulary of size `vocab_size`.\"\"\"\n structure = np.asarray(structure)[::-1]\n return np.sum(structure * np.power(len(self.vocab), range(len(structure))))\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Loss helper functions.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\n\ndef softmax_cross_entropy(pos, neg):\n \"\"\"softmax cross entropy loss.\n\n Let d_p = pos, d_n = neg.\n we minimize:\n log(1+exp(d_p)) + log(1+exp(-d_n))\n for stability, is it equivalent to\n d_p + log(1+exp(-d_p)) + log(1+exp(-d_n))\n\n Args:\n pos: Tensor.\n neg: Tensor of the same shape of pos.\n\n Returns:\n Tensor holding pointwise loss of the same shape as pos.\n \"\"\"\n log_exp_pos = tf.math.log1p(tf.math.exp(-pos))\n log_exp_neg = tf.math.log1p(tf.math.exp(-neg))\n return pos + log_exp_pos + log_exp_neg\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition of the proposed Coupled Deep Cox VAE.\n\nThis module has the tensorflow definitions of the proposed Coupled Deep Cox VAE\nmodel and utility functions to train and evaluate the model.\n\nThe module depends on tensorflow 2.\n\nNot designed to be called directly, would be called when running a function from\nfair_survival_analysis.fair_survival_analysis\n\n\"\"\"\nfrom coupled_deep_cph_utils import partial_ll_loss\nfrom coupled_deep_cph_utils import train_breslow\n\nimport lifelines\nimport numpy as np\n\nfrom sklearn.utils import shuffle\n\nimport tensorflow as tf\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense\n\ndtype = tf.float32\n\nFLOAT_64 = False\n\nif FLOAT_64:\n tf.keras.backend.set_floatx('float64')\n dtype = tf.float64\n\n\nclass CoupledDeepCPHVAE(Model):\n \"\"\"Tensorflow model definition of the Coupled Deep CPH VAE Survival Model.\n\n The Coupled Deep CPH VAE model involves learning shared representations for\n the each demographic in the dataset, which are modelled as a latent variable\n using a VAE. The representation then interacts with multiple output heads to\n determine the log-partial hazard for an individual in each group.\n\n \"\"\"\n\n def __init__(self, hidden, output_size):\n super(CoupledDeepCPHVAE, self).__init__()\n\n self.hidden = hidden\n self.encoder1 = Dense(hidden, activation='relu', use_bias=False)\n self.encoder2 = Dense(hidden + hidden, use_bias=False)\n\n self.decoder1 = Dense(hidden, activation='relu', use_bias=False)\n self.decoder2 = Dense(output_size, use_bias=False)\n\n self.prot = Dense(1, use_bias=False, kernel_initializer='zeros')\n self.nprot = Dense(1, use_bias=False, kernel_initializer='zeros')\n\n @tf.function\n def sample(self, eps=None):\n if eps is None:\n eps = tf.random.normal(shape=(100, self.hidden), dtype=dtype)\n return self.decode(eps)\n\n def encode(self, x):\n mean, logvar = tf.split(\n self.encoder2(self.encoder1(x)), num_or_size_splits=2, axis=1)\n return mean, logvar\n\n def decode(self, z):\n logits = self.decoder2(self.decoder1(z))\n return logits\n\n def reparameterize(self, mean, logvar):\n eps = tf.random.normal(shape=mean.shape, dtype=dtype)\n\n return eps * tf.exp(logvar * .5) + mean\n\n def call(self, x):\n\n x = tf.keras.activations.sigmoid(self.encode(x)[0])\n return self.prot(x), self.nprot(x)\n\n\ndef log_normal_pdf(sample, mean, logvar, raxis=1):\n\n log2pi = tf.cast(tf.math.log(2. * np.pi), dtype=dtype)\n return tf.reduce_mean(\n -.5 * ((sample - mean) ** 2. 
* tf.exp(-logvar) + logvar + log2pi),\n axis=raxis)\n\n\ndef vae_loss(model, x):\n \"\"\"The Loss from the VAE component of the Coupled Deep CPH VAE.\n\n Args:\n model:\n instance of CoupledDeepCPHVAE class.\n x:\n a numpy array of input features (Training Data).\n\n Returns:\n a differentiable tensorflow variable with the mean loss over the batch.\n\n \"\"\"\n\n mean, logvar = model.encode(x)\n z = model.reparameterize(mean, logvar)\n x_logit = model.decode(z)\n mse = tf.keras.losses.MSE(x, x_logit)\n\n logpx_z = -mse\n logpz = log_normal_pdf(z, 0., 0.)\n logqz_x = log_normal_pdf(z, mean, logvar)\n\n return -tf.reduce_mean(logpx_z + logpz - logqz_x)\n\n\ndef train_step(model, x, t, e, a, optimizer, bs=256, lambd=1., seed=0):\n\n \"\"\"Optimizes the model for one epoch.\n\n Args:\n model:\n instance of CoupledDeepCPHVAE class.\n x:\n a numpy array of input features (Training Data).\n t:\n a numpy vector of event times (Training Data).\n e:\n a numpy vector of event indicators (1 if event occurred, 0 otherwise)\n (Training Data).\n a:\n a numpy vector of the protected group membership (Training Data).\n optimizer:\n instance of tf.keras.optimizers (default is Adam)\n bs:\n int minibatch size.\n lambd:\n float Strength of the VAE loss term.\n seed:\n random seed.\n\n Returns:\n None. Trains the model in place.\n\n \"\"\"\n\n x, t, e, a = shuffle(x, t, e, a, random_state=seed)\n\n n = x.shape[0]\n\n batches = (n // bs) + 1\n\n for i in range(batches):\n\n xb = x[i * bs:(i + 1) * bs]\n tb = t[i * bs:(i + 1) * bs]\n eb = e[i * bs:(i + 1) * bs]\n ab = a[i * bs:(i + 1) * bs]\n\n with tf.GradientTape() as tape:\n\n pll = partial_ll_loss(model, xb, tb, eb, ab, l2=0.001)\n vaeloss = vae_loss(model, xb)\n loss = pll + lambd*vaeloss\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n\ndef test_step(model, x, t, e, a, loss='concordance', lambd=1.):\n\n \"\"\"Tests the model and computes the validation metric.\n\n Args:\n model:\n instance of CoupledDeepCPHVAE class.\n x:\n a numpy array of input features (Val/Test Data).\n t:\n a numpy vector of event times (Val/Test Data).\n e:\n a numpy vector of event indicators (1 if event occurred, 0 otherwise)\n (Val/Test Data).\n a:\n a numpy vector of the protected group membership (Val/Test Data).\n loss (str):\n string the loss metric to compute. 
one of 'concordance' or 'pll'.\n lambd (float):\n Strength of the VAE loss term.\n\n Returns:\n a float loss.\n\n \"\"\"\n\n if loss == 'concordance':\n\n risks = np.zeros_like(a)\n\n lrisksp, lrisksn = model(x)\n\n lrisksp, lrisksn = lrisksp[:, 0], lrisksn[:, 0]\n\n risks[a == 1] = lrisksp[a == 1]\n risks[a == 0] = lrisksn[a == 0]\n\n pci = lifelines.utils.concordance_index(t[a == 1], -risks[a == 1],\n e[a == 1])\n nci = lifelines.utils.concordance_index(t[a == 0], -risks[a == 0],\n e[a == 0])\n return 0.5 * (nci + pci)\n\n if loss == 'pll':\n\n pll = partial_ll_loss(model, x, t, e, a, l2=0.001)\n vaeloss = vae_loss(model, x)\n loss = pll + lambd*vaeloss\n return float(loss)\n\n\ndef train(model,\n xt,\n tt,\n et,\n at,\n xv,\n tv,\n ev,\n av,\n groups,\n lambd=1e-5,\n epochs=200,\n patience=2,\n vloss='pll'):\n\n \"\"\"The function used to train the Coupled Deep CPH VAE.\n\n Trains the model and corresponding Breslow's estimator given some training and\n validation examples for a fixed number of epochs and learning rate.\n\n Args:\n model:\n instance of CoupledDeepCPHVAE class.\n xt:\n a numpy array of input features (Training Data).\n tt:\n a numpy vector of event times (Training Data).\n et:\n a numpy vector of event indicators (1 if event occurred, 0 otherwise)\n (Training Data).\n at:\n a numpy vector of the protected group membership (Training Data).\n xv:\n a numpy array of input features (Validation Data).\n tv:\n a numpy vector of event times (Validation Data).\n ev:\n a numpy vector of event indicators (1 if event occurred, 0 otherwise)\n (Validation Data).\n av:\n a numpy vector of the protected group membership (Validation Data).\n groups:\n List of the demographics to adjust for.\n lambd:\n float Strength of the VAE loss term.\n epochs:\n int Number of Training epochs to run.\n patience:\n number of training epochs to wait before stopping optimization.\n vloss:\n validation metric to optimize for. 
One of \"pll\" or \"concordance\".\n\n Returns:\n a trained survival analysis model and a breslow estimator.\n\n \"\"\"\n\n prot, nprot = groups[0], groups[1]\n\n optimizer = tf.keras.optimizers.Adam(lr=0.001)\n\n valc = 0\n patience_ = 0\n\n at_ = at.copy()\n at_[at_ == prot] = 1\n at_[at_ == nprot] = 0\n\n av_ = av.copy()\n av_[av_ == prot] = 1\n av_[av_ == nprot] = 0\n\n for epoch in range(epochs):\n\n train_step(model, xt, tt, et, at_, optimizer, lambd=lambd, seed=epoch)\n valcn = test_step(model, xv, tv, ev, av_, loss=vloss, lambd=lambd)\n\n if epoch % 1 == 0:\n print(patience_, epoch, valcn)\n\n if valcn < valc:\n patience_ += 1\n\n if patience_ == patience:\n return (model, train_breslow(model, xt, tt, et, at, xv, tv, ev, av,\n groups))\n\n valc = valcn\n\n return (model, train_breslow(model, xt, tt, et, at, xv, tv, ev, av, groups))\n\n\ndef predict_survival(trained_model, x, t, a, groups):\n\n \"\"\"Returns the survival probability given a model and the breslow's estimator.\n\n Args:\n trained_model:\n tuple consisting of an instance of a \"trained\" CoupledDeepCPHVAE class\n and the corresponding breslow's estimator.\n x:\n a numpy array of input features (Test Data).\n t:\n a numpy vector of event times (Test Data).\n a:\n a numpy vector of the protected group membership (Test Data).\n groups:\n List of the demographics to adjust for.\n\n Returns:\n a numpy vector of the survival probabilities.\n\n \"\"\"\n\n prot, nprot = groups[0], groups[1]\n\n model, blsurvival = trained_model\n\n blsurvivalp, blsurvivaln = blsurvival\n\n survivals = -np.ones_like(a)\n\n lrisksp, lrisksn = model(x)\n\n risksp, risksn = np.exp(lrisksp), np.exp(lrisksn)\n\n s0p = np.log(float(blsurvivalp.T(t)))\n s0n = np.log(float(blsurvivaln.T(t)))\n\n stp = np.exp(risksp * s0p)[:, 0]\n stn = np.exp(risksn * s0n)[:, 0]\n\n survivals[a == prot] = stp[a == prot]\n survivals[a == nprot] = stn[a == nprot]\n\n return survivals\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python3\n\"\"\"Fixed tasks containing text classification using RNN.\"\"\"\nimport sonnet as snt\n\nfrom task_set import datasets\nfrom task_set import registry\nfrom task_set.tasks import base\nimport tensorflow.compat.v1 as tf\n\n\ndef imdb_subword(batch_size, patch_length):\n return datasets.random_slice_text_data(\n dataset_name=\"imdb_reviews/subwords8k\",\n batch_size=batch_size,\n cache_dataset=True,\n patch_length=patch_length)\n\n\ndef rnn_classification(core_fn,\n vocab_size=10000,\n embed_dim=64,\n aggregate_method=\"last\"):\n \"\"\"Helper for RNN based text classification tasks.\n\n Args:\n core_fn: callable callable that returns a sonnet RNN core\n vocab_size: int number of words to use for the embedding table. All index\n higher than this will be clipped\n embed_dim: int size of the embedding dim\n aggregate_method: str how to aggregate the sequence of features. If 'last'\n grab the last hidden features. If 'avg' compute the average over the full\n sequence.\n\n Returns:\n a callable that returns a sonnet module representing the loss.\n \"\"\"\n\n def _build(batch):\n \"\"\"Build the loss sonnet module.\"\"\"\n # TODO(lmetz) these are dense updates.... 
so keeping this small for now.\n    tokens = tf.minimum(batch[\"text\"],\n                        tf.to_int64(tf.reshape(vocab_size - 1, [1, 1])))\n    embed = snt.Embed(vocab_size=vocab_size, embed_dim=embed_dim)\n    embedded_tokens = embed(tokens)\n    rnn = core_fn()\n    bs = tokens.shape.as_list()[0]\n\n    state = rnn.initial_state(bs, trainable=True)\n\n    outputs, state = tf.nn.dynamic_rnn(\n        rnn, embedded_tokens, initial_state=state)\n    if aggregate_method == \"last\":\n      last_output = outputs[:, -1]  # grab the last output\n    elif aggregate_method == \"avg\":\n      last_output = tf.reduce_mean(outputs, [1])  # average over length\n    else:\n      raise ValueError(\"unsupported aggregate_method\")\n\n    logits = snt.Linear(2)(last_output)\n\n    loss_vec = tf.nn.softmax_cross_entropy_with_logits_v2(\n        labels=batch[\"label_onehot\"], logits=logits)\n\n    return tf.reduce_mean(loss_vec)\n\n  return lambda: snt.Module(_build)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch32_LSTM128_bs128\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.LSTM(128), embed_dim=64, aggregate_method=\"last\")\n  dataset = imdb_subword(128, 32)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch128_LSTM128_bs64\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.LSTM(128), embed_dim=64, aggregate_method=\"last\")\n  dataset = imdb_subword(64, 128)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch32_LSTM128_E128_bs128\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.LSTM(128), embed_dim=128, aggregate_method=\"last\")\n  dataset = imdb_subword(128, 32)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch128_LSTM128_embed128_bs64\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.LSTM(128), embed_dim=128, aggregate_method=\"last\")\n  dataset = imdb_subword(64, 128)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch128_LSTM128_avg_bs64\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.LSTM(128), embed_dim=64, aggregate_method=\"avg\")\n  dataset = imdb_subword(64, 128)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch32_GRU128_bs128\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.GRU(128), embed_dim=64, aggregate_method=\"last\")\n  dataset = imdb_subword(128, 32)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch32_GRU64_avg_bs128\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.GRU(64), embed_dim=64, aggregate_method=\"avg\")\n  dataset = imdb_subword(128, 32)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch32_VRNN128_tanh_bs128\")\ndef _():\n  base_model_fn = rnn_classification(\n      lambda: snt.VanillaRNN(128), embed_dim=64, aggregate_method=\"last\")\n  dataset = imdb_subword(128, 32)\n  return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n    \"FixedTextRNNClassification_imdb_patch32_VRNN64_tanh_avg_bs128\")\ndef _():\n  base_model_fn = 
rnn_classification(\n lambda: snt.VanillaRNN(64), embed_dim=64, aggregate_method=\"avg\")\n dataset = imdb_subword(128, 32)\n return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n \"FixedTextRNNClassification_imdb_patch32_VRNN64_relu_avg_bs128\")\ndef _():\n base_model_fn = rnn_classification(\n lambda: snt.VanillaRNN(64, activation=tf.nn.relu),\n embed_dim=64,\n aggregate_method=\"avg\")\n dataset = imdb_subword(128, 32)\n return base.DatasetModelTask(base_model_fn, dataset)\n\n\ndef _get_irnn_cell_fn(num_unit):\n init = {snt.VanillaRNN.HIDDEN_TO_HIDDEN: {\"w\": tf.initializers.identity(1.0)}}\n\n def rnn():\n return snt.VanillaRNN(num_unit, activation=tf.nn.relu, initializers=init)\n\n return rnn\n\n\[email protected]_registry.register_fixed(\n \"FixedTextRNNClassification_imdb_patch32_IRNN64_relu_avg_bs128\")\ndef _():\n base_model_fn = rnn_classification(\n _get_irnn_cell_fn(64), embed_dim=64, aggregate_method=\"avg\")\n dataset = imdb_subword(128, 32)\n return base.DatasetModelTask(base_model_fn, dataset)\n\n\[email protected]_registry.register_fixed(\n \"FixedTextRNNClassification_imdb_patch32_IRNN64_relu_last_bs128\")\ndef _():\n base_model_fn = rnn_classification(\n _get_irnn_cell_fn(64), embed_dim=64, aggregate_method=\"last\")\n dataset = imdb_subword(128, 32)\n return base.DatasetModelTask(base_model_fn, dataset)\n",
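The aggregate_method switch in rnn_classification above is the only nontrivial reshaping step in these tasks; a small numpy sketch of the two aggregation modes over a batch of RNN outputs, assuming the same [batch, time, features] layout tf.nn.dynamic_rnn produces:

import numpy as np

def aggregate(outputs, method="last"):
    # outputs: [batch, time, features] sequence of RNN hidden states
    if method == "last":
        return outputs[:, -1]          # final hidden state per sequence
    elif method == "avg":
        return outputs.mean(axis=1)    # mean over the time axis
    raise ValueError("unsupported aggregate_method: %s" % method)

outputs = np.random.randn(2, 5, 8)     # batch of 2, length 5, 8 features
print(aggregate(outputs, "last").shape, aggregate(outputs, "avg").shape)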
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A layer which applies windowing on input data.\"\"\"\nimport numpy as np\nfrom kws_streaming.layers.compat import tf\n\n\ndef _hann_offset_window_generator(window_length, dtype):\n \"\"\"Computes a hanning window with offset.\n\n Args:\n window_length: The length of the window (typically frame size).\n dtype: TF data type\n\n Returns:\n Tensor of size frame_size with the window to apply.\n \"\"\"\n arg = np.pi * 2.0 / (window_length)\n hann = 0.5 - (0.5 * np.cos(arg * (np.arange(window_length) + 0.5)))\n return hann.astype(dtype)\n\n\ndef _hann_window_generator(window_length, dtype):\n \"\"\"Computes a standard version of Hann window.\n\n More details at https://en.wikipedia.org/wiki/Hann_function\n Args:\n window_length: The length of the window (typically frame size).\n dtype: TF data type\n\n Returns:\n Tensor of size frame_size with the window to apply.\n \"\"\"\n arg = 2 * np.pi / window_length\n hann = 0.5 - 0.5 * np.cos(arg * np.arange(window_length))\n return hann.astype(dtype)\n\n\nclass Windowing(tf.keras.layers.Layer):\n \"\"\"Apply window function on input data.\n\n This is useful to enhance the ability of an FFT to extract spectral data\n from signal. It is applied on the last dim of input data\n \"\"\"\n\n def __init__(self, window_size=400, window_type='hann', **kwargs):\n super(Windowing, self).__init__(**kwargs)\n self.window_size = window_size\n self.window_type = window_type\n\n def build(self, input_shape):\n super(Windowing, self).build(input_shape)\n self.window_size = int(input_shape[-1])\n if self.window_type == 'hann_offest':\n self.window = _hann_offset_window_generator(self.window_size, np.float32)\n elif self.window_type == 'hann':\n self.window = _hann_window_generator(self.window_size, np.float32)\n else:\n raise ValueError('unsupported window_type:%s' % self.window_type)\n\n def call(self, inputs):\n # last dim has to be the same with window_size\n if inputs.shape[-1] != self.window_size:\n raise ValueError('inputs.shape[-1]:%d must = self.window_size:%d' %\n (inputs.shape[-1], self.window_size))\n\n return inputs * self.window\n\n def get_config(self):\n config = {'window_size': self.window_size, 'window_type': self.window_type}\n base_config = super(Windowing, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The main BERT model and related functions.\n\nSource: https://github.com/google-research/bert\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport json\nimport math\nimport re\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tf_slim.layers import layers\n\n\nclass BertConfig(object):\n \"\"\"Configuration for `BertModel`.\"\"\"\n\n def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass BertModel(object):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n model = modeling.BertModel(config=config, is_training=True,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)\n label_embeddings = tf.get_variable(...)\n pooled_output = model.get_pooled_output()\n logits = tf.matmul(pooled_output, label_embeddings)\n ...\n ```\n \"\"\"\n\n def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.word_embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.word_embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))\n\n def get_pooled_output(self):\n return self.pooled_output\n\n def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output\n\n def get_all_encoder_layers(self):\n return self.all_encoder_layers\n\n def get_word_embedding_output(self):\n \"\"\"Get output of the word(piece) embedding lookup.\n\n This is BEFORE positional embeddings and token type embeddings have been\n added.\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the word(piece) embedding layer.\n \"\"\"\n return self.word_embedding_output\n\n def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output\n\n def get_embedding_table(self):\n return self.embedding_table\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. 
If\n  `activation_string` is None, empty, or \"linear\", this will return None.\n  If `activation_string` is not a string, it will return `activation_string`.\n  Raises:\n    ValueError: The `activation_string` does not correspond to a known\n      activation.\n  \"\"\"\n\n  # We assume that anything that's not a string is already an activation\n  # function, so we just return it.\n  if not isinstance(activation_string, six.string_types):\n    return activation_string\n\n  if not activation_string:\n    return None\n\n  act = activation_string.lower()\n  if act == \"linear\":\n    return None\n  elif act == \"relu\":\n    return tf.nn.relu\n  elif act == \"gelu\":\n    return gelu\n  elif act == \"tanh\":\n    return tf.tanh\n  else:\n    raise ValueError(\"Unsupported activation: %s\" % act)\n\n\ndef get_assignment_map_from_checkpoint(tvars, init_checkpoint,\n                                       transfer_learning):\n  \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n  assignment_map = {}\n  initialized_variable_names = {}\n\n  name_to_variable = collections.OrderedDict()\n  for var in tvars:\n    name = var.name\n    m = re.match(\"^(.*):\\\\d+$\", name)\n    if m is not None:\n      name = m.group(1)\n    name_to_variable[name] = var\n\n  init_vars = tf.train.list_variables(init_checkpoint)\n\n  assignment_map = collections.OrderedDict()\n  for x in init_vars:\n    (name, var) = (x[0], x[1])\n    if name not in name_to_variable:\n      continue\n    if transfer_learning and ((\"output_weights\" in name) or\n                              (\"output_bias\" in name)):\n      continue\n    assignment_map[name] = name\n    initialized_variable_names[name] = 1\n    initialized_variable_names[name + \":0\"] = 1\n\n  return (assignment_map, initialized_variable_names)\n\n\ndef dropout(input_tensor, dropout_prob):\n  \"\"\"Perform dropout.\n\n  Args:\n    input_tensor: float Tensor.\n    dropout_prob: Python float. The probability of dropping out a value (NOT of\n      *keeping* a dimension as in `tf.nn.dropout`).\n\n  Returns:\n    A version of `input_tensor` with dropout applied.\n  \"\"\"\n  if dropout_prob is None or dropout_prob == 0.0:\n    return input_tensor\n\n  output = tf.nn.dropout(input_tensor, keep_prob=(1 - dropout_prob))\n  return output\n\n\ndef layer_norm(input_tensor, name=None):\n  \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n  return layers.layer_norm(\n      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)\n\n\ndef layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n  \"\"\"Runs layer normalization followed by dropout.\"\"\"\n  output_tensor = layer_norm(input_tensor, name)\n  output_tensor = dropout(output_tensor, dropout_prob)\n  return output_tensor\n\n\ndef create_initializer(initializer_range=0.02):\n  \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n  return tf.truncated_normal_initializer(stddev=initializer_range)\n\n\ndef embedding_lookup(input_ids,\n                     vocab_size,\n                     embedding_size=128,\n                     initializer_range=0.02,\n                     word_embedding_name=\"word_embeddings\",\n                     use_one_hot_embeddings=False):\n  \"\"\"Looks up word embeddings for an id tensor.\n\n  Args:\n    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n      ids.\n    vocab_size: int. Size of the embedding vocabulary.\n    embedding_size: int. Width of the word embeddings.\n    initializer_range: float. Embedding initialization range.\n    word_embedding_name: string. Name of the embedding table.\n    use_one_hot_embeddings: bool. If True, use one-hot method for word\n      embeddings. 
If False, use `tf.nn.embedding_lookup()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable(\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n if use_one_hot_embeddings:\n flat_input_ids = tf.reshape(input_ids, [-1])\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.nn.embedding_lookup(embedding_table, input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)\n\n\ndef embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n  Returns:\n    float tensor with same shape as `input_tensor`.\n  Raises:\n    ValueError: One of the tensor shapes or input values is invalid.\n  \"\"\"\n  input_shape = get_shape_list(input_tensor, expected_rank=3)\n  batch_size = input_shape[0]\n  seq_length = input_shape[1]\n  width = input_shape[2]\n\n  output = input_tensor\n\n  if use_token_type:\n    if token_type_ids is None:\n      raise ValueError(\"`token_type_ids` must be specified if \"\n                       \"`use_token_type` is True.\")\n    token_type_table = tf.get_variable(\n        name=token_type_embedding_name,\n        shape=[token_type_vocab_size, width],\n        initializer=create_initializer(initializer_range))\n    # This vocab will be small so we always do one-hot here, since it is always\n    # faster for a small vocabulary.\n    flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n    token_type_embeddings = tf.reshape(token_type_embeddings,\n                                       [batch_size, seq_length, width])\n    output += token_type_embeddings\n\n  if use_position_embeddings:\n    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n    with tf.control_dependencies([assert_op]):\n      full_position_embeddings = tf.get_variable(\n          name=position_embedding_name,\n          shape=[max_position_embeddings, width],\n          initializer=create_initializer(initializer_range))\n      # Since the position embedding table is a learned variable, we create it\n      # using a (long) sequence length `max_position_embeddings`. The actual\n      # sequence length might be shorter than this, for faster training of\n      # tasks that do not have long sequences.\n      #\n      # So `full_position_embeddings` is effectively an embedding table\n      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just\n      # perform a slice.\n      position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n                                     [seq_length, -1])\n      num_dims = len(output.shape.as_list())\n\n      # Only the last two dimensions are relevant (`seq_length` and `width`), so\n      # we broadcast among the first dimensions, which is typically just\n      # the batch size.\n      position_broadcast_shape = []\n      for _ in range(num_dims - 2):\n        position_broadcast_shape.append(1)\n      position_broadcast_shape.extend([seq_length, width])\n      position_embeddings = tf.reshape(position_embeddings,\n                                       position_broadcast_shape)\n      output += position_embeddings\n\n  output = layer_norm_and_dropout(output, dropout_prob)\n  return output\n\n\ndef create_attention_mask_from_input_mask(from_tensor, to_mask):\n  \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n  Args:\n    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n    to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n  Returns:\n    float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n  \"\"\"\n  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n  batch_size = from_shape[0]\n  from_seq_length = from_shape[1]\n\n  to_shape = get_shape_list(to_mask, expected_rank=2)\n  to_seq_length = to_shape[1]\n\n  to_mask = tf.cast(\n      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n  # We don't assume that `from_tensor` is a mask (although it could be). 
We\n  # don't actually care if we attend *from* padding tokens (only *to* padding\n  # tokens), so we create a tensor of all ones.\n  #\n  # `broadcast_ones` = [batch_size, from_seq_length, 1]\n  broadcast_ones = tf.ones(\n      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n  # Here we broadcast along two dimensions to create the mask.\n  mask = broadcast_ones * to_mask\n\n  return mask\n\n\ndef dense_layer_3d(input_tensor,\n                   num_attention_heads,\n                   size_per_head,\n                   initializer,\n                   activation,\n                   name=None):\n  \"\"\"A dense layer with 3D kernel.\n\n  Args:\n    input_tensor: float Tensor of shape [batch, seq_length, hidden_size].\n    num_attention_heads: Number of attention heads.\n    size_per_head: The size per attention head.\n    initializer: Kernel initializer.\n    activation: Activation function.\n    name: The name scope of this layer.\n\n  Returns:\n    float logits Tensor.\n  \"\"\"\n\n  last_dim = get_shape_list(input_tensor)[-1]\n\n  with tf.variable_scope(name):\n    w = tf.get_variable(\n        name=\"kernel\",\n        shape=[last_dim, num_attention_heads * size_per_head],\n        initializer=initializer)\n    w = tf.reshape(w, [last_dim, num_attention_heads, size_per_head])\n    b = tf.get_variable(\n        name=\"bias\",\n        shape=[num_attention_heads * size_per_head],\n        initializer=tf.zeros_initializer)\n    b = tf.reshape(b, [num_attention_heads, size_per_head])\n    ret = tf.einsum(\"abc,cde->abde\", input_tensor, w)\n    ret += b\n    if activation is not None:\n      return activation(ret)\n    else:\n      return ret\n\n\ndef dense_layer_3d_proj(input_tensor,\n                        hidden_size,\n                        num_attention_heads,\n                        head_size,\n                        initializer,\n                        activation,\n                        name=None):\n  \"\"\"A dense layer with 3D kernel for projection.\n\n  Args:\n    input_tensor: float Tensor of shape [batch,from_seq_length,\n      num_attention_heads, size_per_head].\n    hidden_size: The size of hidden layer.\n    num_attention_heads: The number of attention heads.\n    head_size: The size of each attention head.\n    initializer: Kernel initializer.\n    activation: Activation function.\n    name: The name scope of this layer.\n\n  Returns:\n    float logits Tensor.\n  \"\"\"\n  head_size = hidden_size // num_attention_heads\n  with tf.variable_scope(name):\n    w = tf.get_variable(\n        name=\"kernel\",\n        shape=[hidden_size, hidden_size],\n        initializer=initializer)\n    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])\n    b = tf.get_variable(\n        name=\"bias\", shape=[hidden_size], initializer=tf.zeros_initializer)\n\n    ret = tf.einsum(\"BFNH,NHD->BFD\", input_tensor, w)\n    ret += b\n    if activation is not None:\n      return activation(ret)\n    else:\n      return ret\n\n\ndef dense_layer_2d(input_tensor,\n                   output_size,\n                   initializer,\n                   activation,\n                   name=None):\n  \"\"\"A dense layer with 2D kernel.\n\n  Args:\n    input_tensor: Float tensor with rank 3.\n    output_size: The size of output dimension.\n    initializer: Kernel initializer.\n    activation: Activation function.\n    name: The name scope of this layer.\n\n  Returns:\n    float logits Tensor.\n  \"\"\"\n  last_dim = get_shape_list(input_tensor)[-1]\n  with tf.variable_scope(name):\n    w = tf.get_variable(\n        name=\"kernel\", shape=[last_dim, output_size], initializer=initializer)\n    b = tf.get_variable(\n        name=\"bias\", shape=[output_size], initializer=tf.zeros_initializer)\n\n    ret = tf.einsum(\"abc,cd->abd\", input_tensor, w)\n    ret += b\n    if activation is not None:\n      return activation(ret)\n    else:\n      return ret\n\n\ndef attention_layer(from_tensor,\n                    to_tensor,\n                    attention_mask=None,\n                    num_attention_heads=1,\n                    size_per_head=512,\n                    query_act=None,\n                    key_act=None,\n                    value_act=None,\n                    attention_probs_dropout_prob=0.0,\n                    
initializer_range=0.02,\n                    batch_size=None,\n                    from_seq_length=None,\n                    to_seq_length=None):\n  \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n  This is an implementation of multi-headed attention based on \"Attention\n  Is All You Need\". If `from_tensor` and `to_tensor` are the same, then\n  this is self-attention. Each timestep in `from_tensor` attends to the\n  corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n  This function first projects `from_tensor` into a \"query\" tensor and\n  `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n  of tensors of length `num_attention_heads`, where each tensor is of shape\n  [batch_size, seq_length, size_per_head].\n  Then, the query and key tensors are dot-producted and scaled. These are\n  softmaxed to obtain attention probabilities. The value tensors are then\n  interpolated by these probabilities, then concatenated back to a single\n  tensor and returned.\n  In practice, the multi-headed attention is done with tf.einsum as follows:\n    Input_tensor: [BFD]\n    Wq, Wk, Wv: [DNH]\n    Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq)\n    K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk)\n    V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv)\n    attention_scores:[BNFT] = einsum('BFNH,BTNH->BNFT', Q, K) / sqrt(H)\n    attention_probs:[BNFT] = softmax(attention_scores)\n    context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V)\n    Wout:[DNH]\n    Output:[BFD] = einsum('BFNH,DNH->BFD', context_layer, Wout)\n  Args:\n    from_tensor: float Tensor of shape [batch_size, from_seq_length,\n      from_width].\n    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n    attention_mask: (optional) int32 Tensor of shape [batch_size,\n      from_seq_length, to_seq_length]. The values should be 1 or 0. The\n      attention scores will effectively be set to -infinity for any positions in\n      the mask that are 0, and will be unchanged for positions that are 1.\n    num_attention_heads: int. Number of attention heads.\n    size_per_head: int. Size of each attention head.\n    query_act: (optional) Activation function for the query transform.\n    key_act: (optional) Activation function for the key transform.\n    value_act: (optional) Activation function for the value transform.\n    attention_probs_dropout_prob: (optional) float. Dropout probability of the\n      attention probabilities.\n    initializer_range: float. Range of the weight initializer.\n    batch_size: (Optional) int. 
If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, num_attention_heads,\n size_per_head].\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n # `query_layer` = [B, F, N, H]\n query_layer = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,\n create_initializer(initializer_range), query_act,\n \"query\")\n\n # `key_layer` = [B, T, N, H]\n key_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,\n create_initializer(initializer_range), key_act,\n \"key\")\n\n # `value_layer` = [B, T, N, H]\n value_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,\n create_initializer(initializer_range), value_act,\n \"value\")\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum(\"BTNH,BFNH->BNFT\", key_layer, query_layer)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.einsum(\"BNFT,BTNH->BFNH\", attention_probs, value_layer)\n\n return context_layer\n\n\ndef transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n 
initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n See the original paper:\n https://arxiv.org/abs/1706.03762\n Also see:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size / num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n prev_output = input_tensor\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n with tf.variable_scope(\"self\"):\n attention_output = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = dense_layer_3d_proj(\n attention_output, hidden_size,\n num_attention_heads, attention_head_size,\n create_initializer(initializer_range), None, \"dense\")\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = dense_layer_2d(\n attention_output, intermediate_size,\n create_initializer(initializer_range), intermediate_act_fn, \"dense\")\n\n # Down-project back to `hidden_size` then add the residual.\n with 
tf.variable_scope(\"output\"):\n layer_output = dense_layer_2d(intermediate_output, hidden_size,\n create_initializer(initializer_range),\n None, \"dense\")\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n return all_layer_outputs\n else:\n return all_layer_outputs[-1]\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor\n\n\ndef reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))\n"
] | [
[
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.math.multiply_no_nan",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.nn.avg_pool",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.stop_gradient",
"tensorflow.compat.v1.squeeze"
],
[
"numpy.ceil",
"numpy.array",
"numpy.ones"
],
[
"tensorflow.compat.v1.feature_column.numeric_column",
"tensorflow.compat.v1.equal",
"tensorflow.contrib.metrics.streaming_false_negative_rate",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.nn.sigmoid",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.metrics.false_negatives",
"tensorflow.contrib.metrics.streaming_false_positive_rate",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.layers.Dense",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.metrics.precision",
"tensorflow.compat.v1.metrics.true_negatives",
"tensorflow.compat.v1.metrics.recall",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.feature_column.input_layer",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.estimator.EstimatorSpec",
"tensorflow.compat.v1.metrics.accuracy",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.metrics.true_positives",
"tensorflow.compat.v1.metrics.false_positives",
"tensorflow.compat.v1.losses.hinge_loss",
"tensorflow.compat.v1.metrics.auc",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v1.summary.histogram",
"tensorflow.compat.v1.greater"
],
[
"tensorflow.compat.v1.sparse_placeholder",
"numpy.mean",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.placeholder_with_default",
"tensorflow.compat.v1.app.run",
"numpy.hstack",
"numpy.arange",
"tensorflow.compat.v1.set_random_seed",
"numpy.zeros",
"numpy.array",
"numpy.sum",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.device",
"numpy.random.seed",
"numpy.random.shuffle",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.logging.info",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.random.normal",
"numpy.vectorize",
"numpy.random.rand",
"numpy.mod",
"numpy.repeat"
],
[
"tensorflow.clip_by_value",
"tensorflow.image.central_crop",
"tensorflow.image.random_flip_left_right",
"numpy.logical_and",
"numpy.abs",
"numpy.asarray",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.cast",
"numpy.genfromtxt",
"numpy.concatenate",
"tensorflow.image.random_crop",
"numpy.ceil",
"tensorflow.image.resize",
"tensorflow.io.read_file",
"numpy.array",
"numpy.sum",
"tensorflow.image.decode_jpeg"
],
[
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.logging.set_verbosity"
],
[
"tensorflow.image.random_flip_left_right",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.maximum",
"tensorflow.random.uniform",
"tensorflow.equal",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.ones_like",
"tensorflow.image.random_crop",
"tensorflow.pad",
"tensorflow.tile"
],
[
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.math.minimum",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.image.crop_to_bounding_box",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.image.convert_image_dtype",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.image.decode_image",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.io.read_file",
"tensorflow.compat.v1.name_scope"
],
[
"matplotlib.pylab.plt.legend",
"matplotlib.pylab.plt.ylim",
"numpy.random.seed",
"numpy.linspace",
"numpy.min",
"numpy.cumsum",
"matplotlib.pylab.plt.xlim",
"matplotlib.pylab.plt.ylabel",
"numpy.max",
"matplotlib.pylab.figure",
"matplotlib.pylab.plt.xlabel",
"matplotlib.pylab.savefig",
"numpy.array",
"numpy.sum",
"matplotlib.pylab.plt.cm.jet",
"matplotlib.pylab.plt.plot"
],
[
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.compat.v1.truncated_normal_initializer",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.name_scope"
],
[
"tensorflow.config.experimental.set_visible_devices"
],
[
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.random.set_seed"
],
[
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.gfile.Open",
"numpy.mean",
"numpy.load",
"tensorflow.compat.v1.RaggedTensor.from_row_splits",
"tensorflow.compat.v1.name_scope"
],
[
"numpy.asarray",
"numpy.zeros"
],
[
"tensorflow.compat.v2.math.exp"
],
[
"numpy.ones_like",
"tensorflow.reduce_mean",
"tensorflow.keras.backend.set_floatx",
"tensorflow.keras.losses.MSE",
"tensorflow.keras.layers.Dense",
"sklearn.utils.shuffle",
"tensorflow.exp",
"tensorflow.math.log",
"tensorflow.keras.optimizers.Adam",
"numpy.zeros_like",
"numpy.exp",
"tensorflow.random.normal",
"tensorflow.GradientTape"
],
[
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.initializers.identity",
"tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.compat.v1.nn.dynamic_rnn"
],
[
"numpy.arange"
],
[
"tensorflow.get_variable",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.gfile.GFile",
"tensorflow.cast",
"tensorflow.assert_less_equal",
"tensorflow.truncated_normal_initializer",
"tensorflow.squeeze",
"tensorflow.train.list_variables",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.pow",
"tensorflow.one_hot",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.einsum",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
shanky1947/Air-Draw | [
"ab370f96384414ba5c4e369f5465cd8e28b4f3f0",
"ab370f96384414ba5c4e369f5465cd8e28b4f3f0"
] | [
"Air Canvas (only)/Different Canvas Codes/detect1.py",
"Air Canvas (only)/Different Canvas Codes/detect6.py"
] | [
"# get hsv values using trackbar\nimport cv2\nimport numpy as np\nimport time\n\n# A required callback method that goes into the trackbar function.\ndef nothing(x):\n pass\n\n# Initializing the webcam feed.\ncap = cv2.VideoCapture(0)\ncap.set(3,1280)\ncap.set(4,720)\n\n# Create a window named trackbars.\ncv2.namedWindow(\"Trackbars\")\n\n# Now create 6 trackbars that will control the lower and upper range of \n# H,S and V channels. The Arguments are like this: Name of trackbar, \n# window name, range,callback function. For Hue the range is 0-179 and\n# for S,V its 0-255.\ncv2.createTrackbar(\"L - H\", \"Trackbars\", 0, 179, nothing)\ncv2.createTrackbar(\"L - S\", \"Trackbars\", 0, 255, nothing)\ncv2.createTrackbar(\"L - V\", \"Trackbars\", 0, 255, nothing)\ncv2.createTrackbar(\"U - H\", \"Trackbars\", 179, 179, nothing)\ncv2.createTrackbar(\"U - S\", \"Trackbars\", 255, 255, nothing)\ncv2.createTrackbar(\"U - V\", \"Trackbars\", 255, 255, nothing)\n \n \nwhile True:\n \n # Start reading the webcam feed frame by frame.\n ret, frame = cap.read()\n if not ret:\n break\n # Flip the frame horizontally (Not required)\n frame = cv2.flip( frame, 1 ) \n \n # Convert the BGR image to HSV image.\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \n # Get the new values of the trackbar in real time as the user changes \n # them\n l_h = cv2.getTrackbarPos(\"L - H\", \"Trackbars\")\n l_s = cv2.getTrackbarPos(\"L - S\", \"Trackbars\")\n l_v = cv2.getTrackbarPos(\"L - V\", \"Trackbars\")\n u_h = cv2.getTrackbarPos(\"U - H\", \"Trackbars\")\n u_s = cv2.getTrackbarPos(\"U - S\", \"Trackbars\")\n u_v = cv2.getTrackbarPos(\"U - V\", \"Trackbars\")\n \n # Set the lower and upper HSV range according to the value selected\n # by the trackbar\n lower_range = np.array([l_h, l_s, l_v])\n upper_range = np.array([u_h, u_s, u_v])\n \n # Filter the image and get the binary mask, where white represents \n # your target color\n mask = cv2.inRange(hsv, lower_range, upper_range)\n \n # You can also visualize the real part of the target color (Optional)\n res = cv2.bitwise_and(frame, frame, mask=mask)\n \n # Converting the binary mask to 3 channel image, this is just so \n # we can stack it with the others\n mask_3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\n \n # stack the mask, orginal frame and the filtered result\n stacked = np.hstack((mask_3,frame,res))\n \n # Show this stacked frame at 40% of the size.\n cv2.imshow('Trackbars',cv2.resize(stacked,None,fx=0.4,fy=0.4))\n \n # If the user presses ESC then exit the program\n key = cv2.waitKey(1)\n if key == 27:\n break\n \n # If the user presses `s` then print this array.\n if key == ord('s'):\n \n thearray = [[l_h,l_s,l_v],[u_h, u_s, u_v]]\n print(thearray)\n \n # Also save this array as penval.npy\n np.save('penval',thearray)\n break\n \n# Release the camera & destroy the windows. \ncap.release()\ncv2.destroyAllWindows()",
"import cv2\nimport numpy as np\nimport time\n\nload_from_disk = True\nif load_from_disk:\n penval = np.load('penval.npy')\n\ncap = cv2.VideoCapture(0)\n\n# Load these 2 images and resize them to the same size.\npen_img = cv2.resize(cv2.imread('pen.png',1), (50, 50))\neraser_img = cv2.resize(cv2.imread('eraser.jpg',1), (50, 50))\n\nkernel = np.ones((5,5),np.uint8)\n\n# Making window size adjustable\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\n\n# This is the canvas on which we will draw upon\ncanvas = None\n\n# Create a background subtractor Object\nbackgroundobject = cv2.createBackgroundSubtractorMOG2(detectShadows = False)\n\n# This threshold determines the amount of disruption in the background.\nbackground_threshold = 600\n\n# A variable which tells you if you're using a pen or an eraser.\nswitch = 'Pen'\n\n# With this variable we will monitor the time between previous switch.\nlast_switch = time.time()\n\n# Initilize x1,y1 points\nx1,y1=0,0\n\n# Threshold for noise\nnoiseth = 800\n\n# Threshold for wiper, the size of the contour must be bigger than this for # us to clear the canvas\nwiper_thresh = 40000\n\n# A variable which tells when to clear canvas\nclear = False\n\nwhile(1):\n _, frame = cap.read()\n frame = cv2.flip( frame, 1 )\n \n # Initilize the canvas as a black image\n if canvas is None:\n canvas = np.zeros_like(frame)\n \n # Take the top left of the frame and apply the background subtractor\n # there \n top_left = frame[0: 50, 0: 50]\n fgmask = backgroundobject.apply(top_left)\n \n # Note the number of pixels that are white, this is the level of \n # disruption.\n switch_thresh = np.sum(fgmask==255)\n \n # If the disruption is greater than background threshold and there has \n # been some time after the previous switch then you. can change the \n # object type.\n if switch_thresh>background_threshold and (time.time()-last_switch) > 1:\n\n # Save the time of the switch. \n last_switch = time.time()\n \n if switch == 'Pen':\n switch = 'Eraser'\n else:\n switch = 'Pen'\n\n # Convert BGR to HSV\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \n # If you're reading from memory then load the upper and lower ranges \n # from there\n if load_from_disk:\n lower_range = penval[0]\n upper_range = penval[1]\n \n # Otherwise define your own custom values for upper and lower range.\n else: \n lower_range = np.array([26,80,147])\n upper_range = np.array([81,255,255])\n \n mask = cv2.inRange(hsv, lower_range, upper_range)\n \n # Perform morphological operations to get rid of the noise\n mask = cv2.erode(mask,kernel,iterations = 1)\n mask = cv2.dilate(mask,kernel,iterations = 2)\n \n # Find Contours\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n \n # Make sure there is a contour present and also it size is bigger than \n # noise threshold.\n if contours and cv2.contourArea(max(contours,\n key = cv2.contourArea)) > noiseth:\n \n c = max(contours, key = cv2.contourArea) \n x2,y2,w,h = cv2.boundingRect(c)\n \n # Get the area of the contour\n area = cv2.contourArea(c)\n \n # If there were no previous points then save the detected x2,y2 \n # coordinates as x1,y1. 
\n if x1 == 0 and y1 == 0:\n x1,y1= x2,y2\n \n else:\n if switch == 'Pen':\n # Draw the line on the canvas\n canvas = cv2.line(canvas, (x1,y1),\n (x2,y2), [255,0,0], 5)\n \n else:\n cv2.circle(canvas, (x2, y2), 20,\n (0,0,0), -1)\n \n \n \n # After the line is drawn the new points become the previous points.\n x1,y1= x2,y2\n \n # Now if the area is greater than the wiper threshold then set the \n # clear variable to True\n if area > wiper_thresh:\n cv2.putText(canvas,'Clearing Canvas',(0,200), \n cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 1, cv2.LINE_AA)\n clear = True \n\n else:\n # If there were no contours detected then make x1,y1 = 0\n x1,y1 =0,0\n \n \n # Now this piece of code is just for smooth drawing. (Optional)\n _ , mask = cv2.threshold(cv2.cvtColor (canvas, cv2.COLOR_BGR2GRAY), 20, \n 255, cv2.THRESH_BINARY)\n foreground = cv2.bitwise_and(canvas, canvas, mask = mask)\n background = cv2.bitwise_and(frame, frame,\n mask = cv2.bitwise_not(mask))\n frame = cv2.add(foreground,background)\n\n # Switch the images depending upon what we're using, pen or eraser.\n if switch != 'Pen':\n cv2.circle(frame, (x1, y1), 20, (255,255,255), -1)\n frame[0: 50, 0: 50] = eraser_img\n else:\n frame[0: 50, 0: 50] = pen_img\n\n cv2.imshow('image',frame)\n\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n \n # Clear the canvas after 1 second, if the clear variable is true\n if clear == True: \n time.sleep(1)\n canvas = None\n \n # And then set clear to false\n clear = False\n \ncv2.destroyAllWindows()\ncap.release()"
] | [
[
"numpy.hstack",
"numpy.array",
"numpy.save"
],
[
"numpy.ones",
"numpy.zeros_like",
"numpy.load",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
123972/PCA-nutricion | [
"aff3c51a71c887c3fa367dbf9d599be5915c80cc",
"aff3c51a71c887c3fa367dbf9d599be5915c80cc",
"aff3c51a71c887c3fa367dbf9d599be5915c80cc",
"aff3c51a71c887c3fa367dbf9d599be5915c80cc"
] | [
"src/pca/todoJunto.py",
"environment/lib/python3.8/site-packages/sklearn/decomposition/tests/test_dict_learning.py",
"environment/lib/python3.8/site-packages/numba/tests/test_ir_inlining.py",
"environment/lib/python3.8/site-packages/seaborn/utils.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\nimport codecs\nimport sys\n\nimport sklearn as sk\nimport pandas as pd\nimport numpy as np \nimport math\n\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\n\nfrom src.pca.algoritmo_QR import eigenvectores_eigenvalores_QR_vf\nfrom src.pca.metodo_potencia_deflation import power_iteration\nfrom src.pca.metodo_potencia_deflation import power_deflation\n\n\ndef PCA_from_sklearn(X):\n \"\"\"\n componentes_principales(X): Función que devuelve las componentes principales.\n \n Parámetros\n ----------\n n_components: número de componentes. \n svd_solver: str {‘auto’, ‘full’, ‘arpack’, ‘randomized’}\n Se elige 'full', lo que significa que se ejecuta completamente SVD llamando al \n solucionador estándar LAPACK a través de scipy.linalg.svd y se seleccionan los componentes mediante postprocessing.\n \n Atributos\n ---------\n varianza_explicada: porcentaje de varianza explicada por cada componente.\n valores_singulares: valores singulares correspondientes a cada componente.\n pca.components_: ejes principales que representan las direcciones de máxima varianza en los datos.\n eigenvalues: son los valores propios utilizando la matriz de covarianza.\n \n Método\n ---------\n fit_transform: ajusta el modelo a los datos y aplica la reducción de dimensionalidad en los datos.\n \"\"\"\n X = pd.DataFrame(X)\n n_components = len(X.columns)\n pca_1 = PCA(n_components, svd_solver='full')\n componentesprincipales_1 = pca_1.fit_transform(X)\n pca_1.components_\n var_exp = pca_1.explained_variance_ratio_\n \n ##Se obtiene el número de componentes a través de la varianza explicada acumulada de los componentes, la cual debe sumar 60%.\n var_acumulada = var_exp.cumsum()\n conteo = (var_acumulada) < 0.8\n n_componentes = conteo.sum() + 1\n pca = PCA(n_componentes, svd_solver='full')\n componentesprincipales = pca.fit_transform(X)\n pca.components_\n varianza_explicada = pca.explained_variance_ratio_\n eigenvalues = pca.explained_variance_\n val_sing = pca.singular_values_\n \n return pca, varianza_explicada, componentesprincipales, val_sing, pca.components_, eigenvalues\n\n\ndef PCA_from_SVD(A):\n \"\"\"\n Función para PCA a partir de la SVD de numpy \n params: A\t\t\tmatriz de datos\n num_componentes \tnúmero de componentes deseados\n\n return: valores_singulares\tLos valores singulares de la descomposición SVD\n\t componentes\t\tLos coeficientes para calcular los componentes principales\n\t Z\t\t\tLos datos transformados (componentes principales)\n\t varianza_explicada\tLa varianza explicada por cada componente principal\n \"\"\"\n \n # Centrar los datos\n A = np.array(A) # convertir los datos a un numpy array por si vienen de un DataFrame\n A_centered = A - A.mean(axis=0)\n \n # Calcular SVD\n U, S, Vt = np.linalg.svd(A_centered, full_matrices=False)\n \n # Los valores singulares\n valores_singulares = S\n \n # Los componentes (coeficientes)\n componentes = ((Vt))\n \n # Los datos transformados (componentes principales)\n Z = [email protected](Vt)\n \n # La varianza explicada\n varianza_explicada = S**2/np.sum(S**2)\n \n # Calcula número de componentes de manera automatica de acuerdo a la variana explicada\n # Threshold de 60%\n n = A.shape[1] #numero de columnas\n varianza_acumulada = varianza_explicada.cumsum()\n conteo = (varianza_acumulada) < 0.8\n num_componentes = conteo.sum() + 1\n \n # regresar 4 objetos\n return valores_singulares[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], 
varianza_explicada[:num_componentes]\n\n\ndef PCA_from_SVD_jacobi(A):\n \"\"\"\n Función para PCA a partir de la SVD \n params: A\t\t\tmatriz de datos\n num_componentes \tnúmero de componentes deseados\n return: valores_singulares\tLos valores singulares de la descomposición SVD\n\t componentes\t\tLos coeficientes para calcular los componentes principales\n\t Z\t\t\tLos datos transformados (componentes principales)\n\t varianza_explicada\tLa varianza explicada por cada componente principal\n \"\"\"\n \n # Centrar los datos\n A = np.array(A) # convertir los datos a un numpy array por si vienen de un DataFrame\n A_centered = A - A.mean(axis=0)\n \n # Modificar esta línea de código, mandar a llamar la función creada por el equipo \n # Calcular SVD\n U, S, Vt = svd_jacobi_aprox(A_centered,1e-12,500)\n \n # Los valores singulares\n valores_singulares = S\n \n # Los componentes (coeficientes)\n componentes = ((Vt))\n \n # Los datos transformados (componentes principales)\n Z = [email protected](Vt)\n \n # La varianza explicada\n varianza_explicada = S**2/np.sum(S**2)\n \n # Calcula número de componentes de manera automatica de acuerdo a la variana explicada\n # Threshold de 60%\n n = A.shape[1] #numero de columnas\n varianza_acumulada = varianza_explicada.cumsum()\n conteo = (varianza_acumulada) < 0.8\n num_componentes = conteo.sum() + 1 \n \n # regresar 4 objetos\n return valores_singulares[:(num_componentes)], componentes[:(num_componentes)], Z[:,:(num_componentes)], varianza_explicada[:(num_componentes)]\n\n\ndef PCA_from_QR_vf(data,niter = 450):\n \"\"\"\n Función para PCA a partir de los eigenvectores \n params: data:\t\t\tmatriz de datos\n niter: número de iteraciones máximas \n \n \n return: componentes\t\tLos coeficientes para calcular los componentes principales (eigenvectores de la matriz de covarianzas)\n Z\t\t\tLos datos transformados (componentes principales)\n varianza_explicada\tLa varianza explicada por cada componente principal\n \n Depende de la función: eigenvectores_QR\n \"\"\"\n \n # convertir a array\n A = np.array(data)\n \n # Centrar los datos\n mean_vec = np.mean(A, axis=0)\n datos_centrados = (A - mean_vec)\n\n # Matriz de Covarianzas\n #C = (datos_centrados.T@datos_centrados)/(datos_centrados.shape[0]-1)\n C = (A - mean_vec).T.dot((A - mean_vec)) / (A.shape[0]-1)\n \n # Calcular algoritmo QR\n E, Q = eigenvectores_eigenvalores_QR_vf(C,niter)\n \n \n # Los componentes (coeficientes)\n componentes = Q.T\n \n # Los datos transformados (componentes principales)\n # Aquí marcaba error al filtrar porque no se reconocia a Z como numpy array\n Z = datos_centrados@Q\n \n # La varianza explicada\n varianza_explicada = E/np.sum(E)\n \n # Calcula número de componentes de manera automatica de acuerdo a la variana explicada\n # Threshold de 60%\n n = data.shape[1] #numero de columnas\n varianza_acumulada = varianza_explicada.cumsum()\n conteo = (varianza_acumulada) < 0.8\n num_componentes = conteo.sum() + 1\n \n # regresar 4 objetos\n return E[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes] #, varianza_acumulada, num_componentes\n\ndef PCA_from_potencia(X):\n \"\"\"\n Función que calcula PCA a partir del método de la potencia y deflation de Hotteling \n params: A:\t\t\tmatriz de datos\n \n \n return: eigenvalues\t\tNumpy array con los eigenvectores de A\n eigenvectors\tNumpy array con los correspondientes eigenvectores de A \n \n \"\"\"\n \n prop = 0 # Proporción de varianza explicada\n comp = 1 \n cur_var = 0\n comp_vecs 
= np.zeros([X.shape[1], X.shape[1]])\n \n # convertir a array\n A = np.array(X)\n \n # Centrar los datos\n mean_vec = np.mean(A, axis=0)\n datos_centrados = (A - mean_vec)\n \n #Calculamos la matriz de covarianzas\n cov = np.dot(X.T, X)/X.shape[0]\n \n #Aplicamos el método de la potencia\n evalues_pow, evectors_pow = power_deflation(cov,2000)\n \n # La varianza explicada\n varianza_explicada = evalues_pow/np.sum(evalues_pow)\n \n # Los datos transformados (componentes principales)\n Z = datos_centrados@evectors_pow\n \n \n # Calcula número de componentes de manera automatica de acuerdo a la variana explicada\n # Threshold de 80%\n n = X.shape[1] #numero de columnas\n varianza_acumulada = varianza_explicada.cumsum()\n conteo = (varianza_acumulada) < 0.8\n num_componentes = conteo.sum() + 1\n \n return evalues_pow[:num_componentes], evectors_pow.T[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]",
"import pytest\n\nimport numpy as np\nimport itertools\n\nfrom sklearn.exceptions import ConvergenceWarning\n\nfrom sklearn.utils import check_array\n\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import ignore_warnings\nfrom sklearn.utils._testing import TempMemmap\n\nfrom sklearn.decomposition import DictionaryLearning\nfrom sklearn.decomposition import MiniBatchDictionaryLearning\nfrom sklearn.decomposition import SparseCoder\nfrom sklearn.decomposition import dict_learning\nfrom sklearn.decomposition import dict_learning_online\nfrom sklearn.decomposition import sparse_encode\n\n\nrng_global = np.random.RandomState(0)\nn_samples, n_features = 10, 8\nX = rng_global.randn(n_samples, n_features)\n\n\ndef test_sparse_encode_shapes_omp():\n rng = np.random.RandomState(0)\n algorithms = ['omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold']\n for n_components, n_samples in itertools.product([1, 5], [1, 9]):\n X_ = rng.randn(n_samples, n_features)\n dictionary = rng.randn(n_components, n_features)\n for algorithm, n_jobs in itertools.product(algorithms, [1, 3]):\n code = sparse_encode(X_, dictionary, algorithm=algorithm,\n n_jobs=n_jobs)\n assert code.shape == (n_samples, n_components)\n\n\ndef test_dict_learning_shapes():\n n_components = 5\n dico = DictionaryLearning(n_components, random_state=0).fit(X)\n assert dico.components_.shape == (n_components, n_features)\n\n n_components = 1\n dico = DictionaryLearning(n_components, random_state=0).fit(X)\n assert dico.components_.shape == (n_components, n_features)\n assert dico.transform(X).shape == (X.shape[0], n_components)\n\n\ndef test_dict_learning_overcomplete():\n n_components = 12\n dico = DictionaryLearning(n_components, random_state=0).fit(X)\n assert dico.components_.shape == (n_components, n_features)\n\n\ndef test_max_iter():\n def ricker_function(resolution, center, width):\n \"\"\"Discrete sub-sampled Ricker (Mexican hat) wavelet\"\"\"\n x = np.linspace(0, resolution - 1, resolution)\n x = ((2 / (np.sqrt(3 * width) * np.pi ** .25))\n * (1 - (x - center) ** 2 / width ** 2)\n * np.exp(-(x - center) ** 2 / (2 * width ** 2)))\n return x\n\n def ricker_matrix(width, resolution, n_components):\n \"\"\"Dictionary of Ricker (Mexican hat) wavelets\"\"\"\n centers = np.linspace(0, resolution - 1, n_components)\n D = np.empty((n_components, resolution))\n for i, center in enumerate(centers):\n D[i] = ricker_function(resolution, center, width)\n D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]\n return D\n\n transform_algorithm = 'lasso_cd'\n resolution = 1024\n subsampling = 3 # subsampling factor\n n_components = resolution // subsampling\n\n # Compute a wavelet dictionary\n D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,\n n_components=n_components // 5)\n for w in (10, 50, 100, 500, 1000))]\n\n X = np.linspace(0, resolution - 1, resolution)\n first_quarter = X < resolution / 4\n X[first_quarter] = 3.\n X[np.logical_not(first_quarter)] = -1.\n X = X.reshape(1, -1)\n\n # check that the underlying model fails to converge\n with pytest.warns(ConvergenceWarning):\n model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,\n transform_max_iter=1)\n model.fit_transform(X)\n\n # check that the underlying model converges w/o warnings\n with pytest.warns(None) as record:\n model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,\n transform_max_iter=2000)\n model.fit_transform(X)\n assert not 
record.list\n\n\ndef test_dict_learning_lars_positive_parameter():\n n_components = 5\n alpha = 1\n err_msg = \"Positive constraint not supported for 'lars' coding method.\"\n with pytest.raises(ValueError, match=err_msg):\n dict_learning(X, n_components, alpha=alpha, positive_code=True)\n\n\[email protected](\"transform_algorithm\", [\n \"lasso_lars\",\n \"lasso_cd\",\n \"threshold\",\n])\[email protected](\"positive_code\", [False, True])\[email protected](\"positive_dict\", [False, True])\ndef test_dict_learning_positivity(transform_algorithm,\n positive_code,\n positive_dict):\n n_components = 5\n dico = DictionaryLearning(\n n_components, transform_algorithm=transform_algorithm, random_state=0,\n positive_code=positive_code, positive_dict=positive_dict,\n fit_algorithm=\"cd\").fit(X)\n\n code = dico.transform(X)\n if positive_dict:\n assert (dico.components_ >= 0).all()\n else:\n assert (dico.components_ < 0).any()\n if positive_code:\n assert (code >= 0).all()\n else:\n assert (code < 0).any()\n\n\[email protected](\"positive_dict\", [False, True])\ndef test_dict_learning_lars_dict_positivity(positive_dict):\n n_components = 5\n dico = DictionaryLearning(\n n_components, transform_algorithm=\"lars\", random_state=0,\n positive_dict=positive_dict, fit_algorithm=\"cd\").fit(X)\n\n if positive_dict:\n assert (dico.components_ >= 0).all()\n else:\n assert (dico.components_ < 0).any()\n\n\ndef test_dict_learning_lars_code_positivity():\n n_components = 5\n dico = DictionaryLearning(\n n_components, transform_algorithm=\"lars\", random_state=0,\n positive_code=True, fit_algorithm=\"cd\").fit(X)\n\n err_msg = \"Positive constraint not supported for '{}' coding method.\"\n err_msg = err_msg.format(\"lars\")\n with pytest.raises(ValueError, match=err_msg):\n dico.transform(X)\n\n\ndef test_dict_learning_reconstruction():\n n_components = 12\n dico = DictionaryLearning(n_components, transform_algorithm='omp',\n transform_alpha=0.001, random_state=0)\n code = dico.fit(X).transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X)\n\n dico.set_params(transform_algorithm='lasso_lars')\n code = dico.transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)\n\n # used to test lars here too, but there's no guarantee the number of\n # nonzero atoms is right.\n\n\ndef test_dict_learning_reconstruction_parallel():\n # regression test that parallel reconstruction works with n_jobs>1\n n_components = 12\n dico = DictionaryLearning(n_components, transform_algorithm='omp',\n transform_alpha=0.001, random_state=0, n_jobs=4)\n code = dico.fit(X).transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X)\n\n dico.set_params(transform_algorithm='lasso_lars')\n code = dico.transform(X)\n assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)\n\n\ndef test_dict_learning_lassocd_readonly_data():\n n_components = 12\n with TempMemmap(X) as X_read_only:\n dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',\n transform_alpha=0.001, random_state=0,\n n_jobs=4)\n with ignore_warnings(category=ConvergenceWarning):\n code = dico.fit(X_read_only).transform(X_read_only)\n assert_array_almost_equal(np.dot(code, dico.components_), X_read_only,\n decimal=2)\n\n\ndef test_dict_learning_nonzero_coefs():\n n_components = 4\n dico = DictionaryLearning(n_components, transform_algorithm='lars',\n transform_n_nonzero_coefs=3, random_state=0)\n code = dico.fit(X).transform(X[np.newaxis, 1])\n assert len(np.flatnonzero(code)) == 3\n\n 
dico.set_params(transform_algorithm='omp')\n code = dico.transform(X[np.newaxis, 1])\n assert len(np.flatnonzero(code)) == 3\n\n\ndef test_dict_learning_unknown_fit_algorithm():\n n_components = 5\n dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')\n with pytest.raises(ValueError):\n dico.fit(X)\n\n\ndef test_dict_learning_split():\n n_components = 5\n dico = DictionaryLearning(n_components, transform_algorithm='threshold',\n random_state=0)\n code = dico.fit(X).transform(X)\n dico.split_sign = True\n split_code = dico.transform(X)\n\n assert_array_almost_equal(split_code[:, :n_components] -\n split_code[:, n_components:], code)\n\n\ndef test_dict_learning_online_shapes():\n rng = np.random.RandomState(0)\n n_components = 8\n code, dictionary = dict_learning_online(X, n_components=n_components,\n alpha=1, random_state=rng)\n assert code.shape == (n_samples, n_components)\n assert dictionary.shape == (n_components, n_features)\n assert np.dot(code, dictionary).shape == X.shape\n\n\ndef test_dict_learning_online_lars_positive_parameter():\n alpha = 1\n err_msg = \"Positive constraint not supported for 'lars' coding method.\"\n with pytest.raises(ValueError, match=err_msg):\n dict_learning_online(X, alpha=alpha, positive_code=True)\n\n\[email protected](\"transform_algorithm\", [\n \"lasso_lars\",\n \"lasso_cd\",\n \"threshold\",\n])\[email protected](\"positive_code\", [False, True])\[email protected](\"positive_dict\", [False, True])\ndef test_minibatch_dictionary_learning_positivity(transform_algorithm,\n positive_code,\n positive_dict):\n n_components = 8\n dico = MiniBatchDictionaryLearning(\n n_components, transform_algorithm=transform_algorithm, random_state=0,\n positive_code=positive_code, positive_dict=positive_dict,\n fit_algorithm='cd').fit(X)\n\n code = dico.transform(X)\n if positive_dict:\n assert (dico.components_ >= 0).all()\n else:\n assert (dico.components_ < 0).any()\n if positive_code:\n assert (code >= 0).all()\n else:\n assert (code < 0).any()\n\n\[email protected](\"positive_dict\", [False, True])\ndef test_minibatch_dictionary_learning_lars(positive_dict):\n n_components = 8\n\n dico = MiniBatchDictionaryLearning(\n n_components, transform_algorithm=\"lars\", random_state=0,\n positive_dict=positive_dict, fit_algorithm='cd').fit(X)\n\n if positive_dict:\n assert (dico.components_ >= 0).all()\n else:\n assert (dico.components_ < 0).any()\n\n\[email protected](\"positive_code\", [False, True])\[email protected](\"positive_dict\", [False, True])\ndef test_dict_learning_online_positivity(positive_code,\n positive_dict):\n rng = np.random.RandomState(0)\n n_components = 8\n\n code, dictionary = dict_learning_online(X, n_components=n_components,\n method=\"cd\",\n alpha=1, random_state=rng,\n positive_dict=positive_dict,\n positive_code=positive_code)\n if positive_dict:\n assert (dictionary >= 0).all()\n else:\n assert (dictionary < 0).any()\n if positive_code:\n assert (code >= 0).all()\n else:\n assert (code < 0).any()\n\n\ndef test_dict_learning_online_verbosity():\n n_components = 5\n # test verbosity\n from io import StringIO\n import sys\n\n old_stdout = sys.stdout\n try:\n sys.stdout = StringIO()\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,\n random_state=0)\n dico.fit(X)\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,\n random_state=0)\n dico.fit(X)\n dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,\n random_state=0)\n dict_learning_online(X, n_components=n_components, 
alpha=1, verbose=2,\n random_state=0)\n finally:\n sys.stdout = old_stdout\n\n assert dico.components_.shape == (n_components, n_features)\n\n\ndef test_dict_learning_online_estimator_shapes():\n n_components = 5\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)\n dico.fit(X)\n assert dico.components_.shape == (n_components, n_features)\n\n\ndef test_dict_learning_online_overcomplete():\n n_components = 12\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20,\n random_state=0).fit(X)\n assert dico.components_.shape == (n_components, n_features)\n\n\ndef test_dict_learning_online_initialization():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features)\n dico = MiniBatchDictionaryLearning(n_components, n_iter=0,\n dict_init=V, random_state=0).fit(X)\n assert_array_equal(dico.components_, V)\n\n\ndef test_dict_learning_online_readonly_initialization():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features)\n V.setflags(write=False)\n MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,\n random_state=0, shuffle=False).fit(X)\n\n\ndef test_dict_learning_online_partial_fit():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\n dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),\n batch_size=1,\n alpha=1, shuffle=False, dict_init=V,\n random_state=0).fit(X)\n dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,\n n_iter=1, dict_init=V,\n random_state=0)\n for i in range(10):\n for sample in X:\n dict2.partial_fit(sample[np.newaxis, :])\n\n assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)\n assert_array_almost_equal(dict1.components_, dict2.components_,\n decimal=2)\n\n\ndef test_sparse_encode_shapes():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\n for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):\n code = sparse_encode(X, V, algorithm=algo)\n assert code.shape == (n_samples, n_components)\n\n\[email protected](\"algo\", [\n 'lasso_lars',\n 'lasso_cd',\n 'threshold'\n])\[email protected](\"positive\", [False, True])\ndef test_sparse_encode_positivity(algo, positive):\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\n code = sparse_encode(X, V, algorithm=algo, positive=positive)\n if positive:\n assert (code >= 0).all()\n else:\n assert (code < 0).any()\n\n\[email protected](\"algo\", ['lars', 'omp'])\ndef test_sparse_encode_unavailable_positivity(algo):\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\n err_msg = \"Positive constraint not supported for '{}' coding method.\"\n err_msg = err_msg.format(algo)\n with pytest.raises(ValueError, match=err_msg):\n sparse_encode(X, V, algorithm=algo, positive=True)\n\n\ndef test_sparse_encode_input():\n n_components = 100\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\n Xf = check_array(X, order='F')\n for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):\n a = sparse_encode(X, V, algorithm=algo)\n b = sparse_encode(Xf, V, algorithm=algo)\n 
assert_array_almost_equal(a, b)\n\n\ndef test_sparse_encode_error():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\n code = sparse_encode(X, V, alpha=0.001)\n assert not np.all(code == 0)\n assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1\n\n\ndef test_sparse_encode_error_default_sparsity():\n rng = np.random.RandomState(0)\n X = rng.randn(100, 64)\n D = rng.randn(2, 64)\n code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',\n n_nonzero_coefs=None)\n assert code.shape == (100, 2)\n\n\ndef test_unknown_method():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n with pytest.raises(ValueError):\n sparse_encode(X, V, algorithm=\"<unknown>\")\n\n\ndef test_sparse_coder_estimator():\n n_components = 12\n rng = np.random.RandomState(0)\n V = rng.randn(n_components, n_features) # random init\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\n code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',\n transform_alpha=0.001).transform(X)\n assert not np.all(code == 0)\n assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1\n\n\ndef test_sparse_coder_parallel_mmap():\n # Non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/5956\n # Test that SparseCoder does not error by passing reading only\n # arrays to child processes\n\n rng = np.random.RandomState(777)\n n_components, n_features = 40, 64\n init_dict = rng.rand(n_components, n_features)\n # Ensure that `data` is >2M. Joblib memory maps arrays\n # if they are larger than 1MB. The 4 accounts for float32\n # data type\n n_samples = int(2e6) // (4 * n_features)\n data = np.random.rand(n_samples, n_features).astype(np.float32)\n\n sc = SparseCoder(init_dict, transform_algorithm='omp', n_jobs=2)\n sc.fit_transform(data)\n\n\ndef test_sparse_coder_n_features_in():\n d = np.array([[1, 2, 3], [1, 2, 3]])\n sc = SparseCoder(d)\n assert sc.n_features_in_ == d.shape[1]\n",
"\"\"\"\nThis tests the inline kwarg to @jit and @overload etc, it has nothing to do with\nLLVM or low level inlining.\n\"\"\"\n\n\nimport numpy as np\n\nfrom numba import njit, typeof\nfrom numba.core import types, ir, ir_utils\nfrom numba.core.extending import (\n overload,\n overload_method,\n overload_attribute,\n register_model,\n typeof_impl,\n unbox,\n NativeValue,\n register_jitable,\n)\nfrom numba.core.datamodel.models import OpaqueModel\nfrom numba.core.cpu import InlineOptions\nfrom numba.core.compiler import DefaultPassBuilder, CompilerBase\nfrom numba.core.typed_passes import IRLegalization\nfrom numba.core.untyped_passes import PreserveIR\nfrom itertools import product\nfrom numba.tests.support import (TestCase, unittest, skip_py38_or_later,\n MemoryLeakMixin)\n\n\nclass InlineTestPipeline(CompilerBase):\n \"\"\" Same as the standard pipeline, but preserves the func_ir into the\n metadata store\"\"\"\n\n def define_pipelines(self):\n pipeline = DefaultPassBuilder.define_nopython_pipeline(\n self.state, \"inliner_custom_pipe\")\n # mangle the default pipeline and inject DCE and IR preservation ahead\n # of legalisation\n\n # TODO: add a way to not do this! un-finalizing is not a good idea\n pipeline._finalized = False\n pipeline.add_pass_after(PreserveIR, IRLegalization)\n\n pipeline.finalize()\n return [pipeline]\n\n\n# this global has the same name as the global in inlining_usecases.py, it\n# is here to check that inlined functions bind to their own globals\n_GLOBAL1 = -50\n\n\n@njit(inline='always')\ndef _global_func(x):\n return x + 1\n\n\n# to be overloaded\ndef _global_defn(x):\n return x + 1\n\n\n@overload(_global_defn, inline='always')\ndef _global_overload(x):\n return _global_defn\n\n\nclass InliningBase(TestCase):\n\n _DEBUG = False\n\n inline_opt_as_bool = {'always': True, 'never': False}\n\n # --------------------------------------------------------------------------\n # Example cost model\n\n def sentinel_17_cost_model(self, func_ir):\n # sentinel 17 cost model, this is a fake cost model that will return\n # True (i.e. 
inline) if the ir.FreeVar(17) is found in the func_ir,\n for blk in func_ir.blocks.values():\n for stmt in blk.body:\n if isinstance(stmt, ir.Assign):\n if isinstance(stmt.value, ir.FreeVar):\n if stmt.value.value == 17:\n return True\n return False\n\n # --------------------------------------------------------------------------\n\n def check(self, test_impl, *args, **kwargs):\n inline_expect = kwargs.pop('inline_expect', None)\n assert inline_expect\n block_count = kwargs.pop('block_count', 1)\n assert not kwargs\n for k, v in inline_expect.items():\n assert isinstance(k, str)\n assert isinstance(v, bool)\n\n j_func = njit(pipeline_class=InlineTestPipeline)(test_impl)\n\n # check they produce the same answer first!\n self.assertEqual(test_impl(*args), j_func(*args))\n\n # make sure IR doesn't have branches\n fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir']\n fir.blocks = ir_utils.simplify_CFG(fir.blocks)\n if self._DEBUG:\n print(\"FIR\".center(80, \"-\"))\n fir.dump()\n if block_count != 'SKIP':\n self.assertEqual(len(fir.blocks), block_count)\n block = next(iter(fir.blocks.values()))\n\n # if we don't expect the function to be inlined then make sure there is\n # 'call' present still\n exprs = [x for x in block.find_exprs()]\n assert exprs\n for k, v in inline_expect.items():\n found = False\n for expr in exprs:\n if getattr(expr, 'op', False) == 'call':\n func_defn = fir.get_definition(expr.func)\n found |= func_defn.name == k\n self.assertFalse(found == v)\n\n return fir # for use in further analysis\n\n\n# used in _gen_involved\n_GLOBAL = 1234\n\n\ndef _gen_involved():\n _FREEVAR = 0xCAFE\n\n def foo(a, b, c=12, d=1j, e=None):\n f = a + b\n a += _FREEVAR\n g = np.zeros(c, dtype=np.complex64)\n h = f + g\n i = 1j / d\n # For SSA, zero init, n and t\n n = 0\n t = 0\n if np.abs(i) > 0:\n k = h / i\n l = np.arange(1, c + 1)\n m = np.sqrt(l - g) + e * k\n if np.abs(m[0]) < 1:\n for o in range(a):\n n += 0\n if np.abs(n) < 3:\n break\n n += m[2]\n p = g / l\n q = []\n for r in range(len(p)):\n q.append(p[r])\n if r > 4 + 1:\n s = 123\n t = 5\n if s > 122 - c:\n t += s\n t += q[0] + _GLOBAL\n\n return f + o + r + t + r + a + n\n\n return foo\n\n\nclass TestFunctionInlining(MemoryLeakMixin, InliningBase):\n\n def test_basic_inline_never(self):\n @njit(inline='never')\n def foo():\n return\n\n def impl():\n return foo()\n self.check(impl, inline_expect={'foo': False})\n\n def test_basic_inline_always(self):\n @njit(inline='always')\n def foo():\n return\n\n def impl():\n return foo()\n self.check(impl, inline_expect={'foo': True})\n\n def test_basic_inline_combos(self):\n\n def impl():\n x = foo()\n y = bar()\n z = baz()\n return x, y, z\n\n opts = (('always'), ('never'))\n\n for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):\n\n @njit(inline=inline_foo)\n def foo():\n return\n\n @njit(inline=inline_bar)\n def bar():\n return\n\n @njit(inline=inline_baz)\n def baz():\n return\n\n inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],\n 'bar': self.inline_opt_as_bool[inline_bar],\n 'baz': self.inline_opt_as_bool[inline_baz]}\n self.check(impl, inline_expect=inline_expect)\n\n @unittest.skip(\"Need to work out how to prevent this\")\n def test_recursive_inline(self):\n\n @njit(inline='always')\n def foo(x):\n if x == 0:\n return 12\n else:\n foo(x - 1)\n\n a = 3\n\n def impl():\n b = 0\n if a > 1:\n b += 1\n foo(5)\n if b < a:\n b -= 1\n\n self.check(impl, inline_expect={'foo': True})\n\n def test_freevar_bindings(self):\n\n def factory(inline, x, 
y):\n z = x + 12\n @njit(inline=inline)\n def func():\n return (x, y + 3, z)\n return func\n\n def impl():\n x = foo()\n y = bar()\n z = baz()\n return x, y, z\n\n opts = (('always'), ('never'))\n\n for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):\n\n foo = factory(inline_foo, 10, 20)\n bar = factory(inline_bar, 30, 40)\n baz = factory(inline_baz, 50, 60)\n\n inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],\n 'bar': self.inline_opt_as_bool[inline_bar],\n 'baz': self.inline_opt_as_bool[inline_baz]}\n self.check(impl, inline_expect=inline_expect)\n\n def test_global_binding(self):\n\n def impl():\n x = 19\n return _global_func(x)\n\n self.check(impl, inline_expect={'_global_func': True})\n\n def test_inline_from_another_module(self):\n\n from .inlining_usecases import bar\n\n def impl():\n z = _GLOBAL1 + 2\n return bar(), z\n\n self.check(impl, inline_expect={'bar': True})\n\n def test_inline_from_another_module_w_getattr(self):\n\n import numba.tests.inlining_usecases as iuc\n\n def impl():\n z = _GLOBAL1 + 2\n return iuc.bar(), z\n\n self.check(impl, inline_expect={'bar': True})\n\n def test_inline_from_another_module_w_2_getattr(self):\n\n import numba.tests.inlining_usecases # noqa forces registration\n import numba.tests as nt\n\n def impl():\n z = _GLOBAL1 + 2\n return nt.inlining_usecases.bar(), z\n\n self.check(impl, inline_expect={'bar': True})\n\n def test_inline_from_another_module_as_freevar(self):\n\n def factory():\n from .inlining_usecases import bar\n @njit(inline='always')\n def tmp():\n return bar()\n return tmp\n\n baz = factory()\n\n def impl():\n z = _GLOBAL1 + 2\n return baz(), z\n\n self.check(impl, inline_expect={'bar': True})\n\n def test_inline_w_freevar_from_another_module(self):\n\n from .inlining_usecases import baz_factory\n\n def gen(a, b):\n bar = baz_factory(a)\n\n def impl():\n z = _GLOBAL1 + a * b\n return bar(), z, a\n return impl\n\n impl = gen(10, 20)\n self.check(impl, inline_expect={'bar': True})\n\n def test_inlining_models(self):\n\n def s17_caller_model(expr, caller_info, callee_info):\n self.assertIsInstance(expr, ir.Expr)\n self.assertEqual(expr.op, \"call\")\n return self.sentinel_17_cost_model(caller_info)\n\n def s17_callee_model(expr, caller_info, callee_info):\n self.assertIsInstance(expr, ir.Expr)\n self.assertEqual(expr.op, \"call\")\n return self.sentinel_17_cost_model(callee_info)\n\n # caller has sentinel\n for caller, callee in ((11, 17), (17, 11)):\n\n @njit(inline=s17_caller_model)\n def foo():\n return callee\n\n def impl(z):\n x = z + caller\n y = foo()\n return y + 3, x\n\n self.check(impl, 10, inline_expect={'foo': caller == 17})\n\n # callee has sentinel\n for caller, callee in ((11, 17), (17, 11)):\n\n @njit(inline=s17_callee_model)\n def bar():\n return callee\n\n def impl(z):\n x = z + caller\n y = bar()\n return y + 3, x\n\n self.check(impl, 10, inline_expect={'bar': callee == 17})\n\n def test_inline_inside_loop(self):\n @njit(inline='always')\n def foo():\n return 12\n\n def impl():\n acc = 0.0\n for i in range(5):\n acc += foo()\n return acc\n\n self.check(impl, inline_expect={'foo': True}, block_count=4)\n\n def test_inline_inside_closure_inside_loop(self):\n @njit(inline='always')\n def foo():\n return 12\n\n def impl():\n acc = 0.0\n for i in range(5):\n def bar():\n return foo() + 7\n acc += bar()\n return acc\n\n self.check(impl, inline_expect={'foo': True}, block_count=4)\n\n def test_inline_closure_inside_inlinable_inside_closure(self):\n @njit(inline='always')\n def foo(a):\n def 
baz():\n return 12 + a\n return baz() + 8\n\n def impl():\n z = 9\n\n def bar(x):\n return foo(z) + 7 + x\n return bar(z + 2)\n\n self.check(impl, inline_expect={'foo': True}, block_count=1)\n\n @skip_py38_or_later\n def test_inline_involved(self):\n\n fortran = njit(inline='always')(_gen_involved())\n\n @njit(inline='always')\n def boz(j):\n acc = 0\n\n def biz(t):\n return t + acc\n for x in range(j):\n acc += biz(8 + acc) + fortran(2., acc, 1, 12j, biz(acc))\n return acc\n\n @njit(inline='always')\n def foo(a):\n acc = 0\n for p in range(12):\n tmp = fortran(1, 1, 1, 1, 1)\n\n def baz(x):\n return 12 + a + x + tmp\n acc += baz(p) + 8 + boz(p) + tmp\n return acc + baz(2)\n\n def impl():\n z = 9\n\n def bar(x):\n return foo(z) + 7 + x\n return bar(z + 2)\n\n self.check(impl, inline_expect={'foo': True, 'boz': True,\n 'fortran': True}, block_count=37)\n\n\nclass TestRegisterJitableInlining(MemoryLeakMixin, InliningBase):\n\n def test_register_jitable_inlines(self):\n\n @register_jitable(inline='always')\n def foo():\n return 1\n\n def impl():\n foo()\n\n self.check(impl, inline_expect={'foo': True})\n\n\nclass TestOverloadInlining(MemoryLeakMixin, InliningBase):\n\n def test_basic_inline_never(self):\n def foo():\n pass\n\n @overload(foo, inline='never')\n def foo_overload():\n def foo_impl():\n pass\n return foo_impl\n\n def impl():\n return foo()\n\n self.check(impl, inline_expect={'foo': False})\n\n def test_basic_inline_always(self):\n\n def foo():\n pass\n\n @overload(foo, inline='always')\n def foo_overload():\n def impl():\n pass\n return impl\n\n def impl():\n return foo()\n\n self.check(impl, inline_expect={'foo': True})\n\n def test_inline_always_kw_no_default(self):\n # pass call arg by name that doesn't have default value\n def foo(a, b):\n return a + b\n\n @overload(foo, inline='always')\n def overload_foo(a, b):\n return lambda a, b: a + b\n\n def impl():\n return foo(3, b=4)\n\n self.check(impl, inline_expect={'foo': True})\n\n def test_inline_stararg_error(self):\n def foo(a, *b):\n return a + b[0]\n\n @overload(foo, inline='always')\n def overload_foo(a, *b):\n return lambda a, *b: a + b[0]\n\n def impl():\n return foo(3, 3, 5)\n\n with self.assertRaises(NotImplementedError) as e:\n self.check(impl, inline_expect={'foo': True})\n\n self.assertIn(\"Stararg not supported in inliner for arg 1 *b\",\n str(e.exception))\n\n def test_basic_inline_combos(self):\n\n def impl():\n x = foo()\n y = bar()\n z = baz()\n return x, y, z\n\n opts = (('always'), ('never'))\n\n for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):\n\n def foo():\n pass\n\n def bar():\n pass\n\n def baz():\n pass\n\n @overload(foo, inline=inline_foo)\n def foo_overload():\n def impl():\n return\n return impl\n\n @overload(bar, inline=inline_bar)\n def bar_overload():\n def impl():\n return\n return impl\n\n @overload(baz, inline=inline_baz)\n def baz_overload():\n def impl():\n return\n return impl\n\n inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],\n 'bar': self.inline_opt_as_bool[inline_bar],\n 'baz': self.inline_opt_as_bool[inline_baz]}\n self.check(impl, inline_expect=inline_expect)\n\n def test_freevar_bindings(self):\n\n def impl():\n x = foo()\n y = bar()\n z = baz()\n return x, y, z\n\n opts = (('always'), ('never'))\n\n for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):\n # need to repeatedly clobber definitions of foo, bar, baz so\n # @overload binds to the right instance WRT inlining\n\n def foo():\n x = 10\n y = 20\n z = x + 12\n return (x, y + 3, 
z)\n\n def bar():\n x = 30\n y = 40\n z = x + 12\n return (x, y + 3, z)\n\n def baz():\n x = 60\n y = 80\n z = x + 12\n return (x, y + 3, z)\n\n def factory(target, x, y, inline=None):\n z = x + 12\n @overload(target, inline=inline)\n def func():\n def impl():\n return (x, y + 3, z)\n return impl\n\n factory(foo, 10, 20, inline=inline_foo)\n factory(bar, 30, 40, inline=inline_bar)\n factory(baz, 60, 80, inline=inline_baz)\n\n inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],\n 'bar': self.inline_opt_as_bool[inline_bar],\n 'baz': self.inline_opt_as_bool[inline_baz]}\n\n self.check(impl, inline_expect=inline_expect)\n\n def test_global_overload_binding(self):\n\n def impl():\n z = 19\n return _global_defn(z)\n\n self.check(impl, inline_expect={'_global_defn': True})\n\n def test_inline_from_another_module(self):\n\n from .inlining_usecases import baz\n\n def impl():\n z = _GLOBAL1 + 2\n return baz(), z\n\n self.check(impl, inline_expect={'baz': True})\n\n def test_inline_from_another_module_w_getattr(self):\n\n import numba.tests.inlining_usecases as iuc\n\n def impl():\n z = _GLOBAL1 + 2\n return iuc.baz(), z\n\n self.check(impl, inline_expect={'baz': True})\n\n def test_inline_from_another_module_w_2_getattr(self):\n\n import numba.tests.inlining_usecases # noqa forces registration\n import numba.tests as nt\n\n def impl():\n z = _GLOBAL1 + 2\n return nt.inlining_usecases.baz(), z\n\n self.check(impl, inline_expect={'baz': True})\n\n def test_inline_from_another_module_as_freevar(self):\n\n def factory():\n from .inlining_usecases import baz\n @njit(inline='always')\n def tmp():\n return baz()\n return tmp\n\n bop = factory()\n\n def impl():\n z = _GLOBAL1 + 2\n return bop(), z\n\n self.check(impl, inline_expect={'baz': True})\n\n def test_inline_w_freevar_from_another_module(self):\n\n from .inlining_usecases import bop_factory\n\n def gen(a, b):\n bar = bop_factory(a)\n\n def impl():\n z = _GLOBAL1 + a * b\n return bar(), z, a\n return impl\n\n impl = gen(10, 20)\n self.check(impl, inline_expect={'bar': True})\n\n def test_inlining_models(self):\n\n def s17_caller_model(expr, caller_info, callee_info):\n self.assertIsInstance(expr, ir.Expr)\n self.assertEqual(expr.op, \"call\")\n return self.sentinel_17_cost_model(caller_info.func_ir)\n\n def s17_callee_model(expr, caller_info, callee_info):\n self.assertIsInstance(expr, ir.Expr)\n self.assertEqual(expr.op, \"call\")\n return self.sentinel_17_cost_model(callee_info.func_ir)\n\n # caller has sentinel\n for caller, callee in ((10, 11), (17, 11)):\n\n def foo():\n return callee\n\n @overload(foo, inline=s17_caller_model)\n def foo_ol():\n def impl():\n return callee\n return impl\n\n def impl(z):\n x = z + caller\n y = foo()\n return y + 3, x\n\n self.check(impl, 10, inline_expect={'foo': caller == 17})\n\n # callee has sentinel\n for caller, callee in ((11, 17), (11, 10)):\n\n def bar():\n return callee\n\n @overload(bar, inline=s17_callee_model)\n def bar_ol():\n def impl():\n return callee\n return impl\n\n def impl(z):\n x = z + caller\n y = bar()\n return y + 3, x\n\n self.check(impl, 10, inline_expect={'bar': callee == 17})\n\n def test_multiple_overloads_with_different_inline_characteristics(self):\n # check that having different inlining options for different overloads\n # of the same function works ok\n\n # this is the Python equiv of the overloads below\n def bar(x):\n if isinstance(typeof(x), types.Float):\n return x + 1234\n else:\n return x + 1\n\n @overload(bar, inline='always')\n def bar_int_ol(x):\n if 
isinstance(x, types.Integer):\n def impl(x):\n return x + 1\n return impl\n\n @overload(bar, inline='never')\n def bar_float_ol(x):\n if isinstance(x, types.Float):\n def impl(x):\n return x + 1234\n return impl\n\n def always_inline_cost_model(*args):\n return True\n\n @overload(bar, inline=always_inline_cost_model)\n def bar_complex_ol(x):\n if isinstance(x, types.Complex):\n def impl(x):\n return x + 1\n return impl\n\n def impl():\n a = bar(1) # integer literal, should inline\n b = bar(2.3) # float literal, should not inline\n # complex literal, should inline by virtue of cost model\n c = bar(3j)\n return a + b + c\n\n # there should still be a `bar` not inlined\n fir = self.check(impl, inline_expect={'bar': False}, block_count=1)\n\n # check there is one call left in the IR\n block = next(iter(fir.blocks.items()))[1]\n calls = [x for x in block.find_exprs(op='call')]\n self.assertTrue(len(calls) == 1)\n\n # check that the constant \"1234\" is not in the IR\n consts = [x.value for x in block.find_insts(ir.Assign)\n if isinstance(getattr(x, 'value', None), ir.Const)]\n for val in consts:\n self.assertNotEqual(val.value, 1234)\n\n\nclass TestOverloadMethsAttrsInlining(InliningBase):\n def setUp(self):\n # Use test_id to makesure no collision is possible.\n test_id = self.id()\n DummyType = type('DummyTypeFor{}'.format(test_id), (types.Opaque,), {})\n\n dummy_type = DummyType(\"my_dummy\")\n register_model(DummyType)(OpaqueModel)\n\n class Dummy(object):\n pass\n\n @typeof_impl.register(Dummy)\n def typeof_Dummy(val, c):\n return dummy_type\n\n @unbox(DummyType)\n def unbox_index(typ, obj, c):\n return NativeValue(c.context.get_dummy_value())\n\n self.Dummy = Dummy\n self.DummyType = DummyType\n\n def check_method(self, test_impl, args, expected, block_count,\n expects_inlined=True):\n j_func = njit(pipeline_class=InlineTestPipeline)(test_impl)\n # check they produce the same answer first!\n self.assertEqual(j_func(*args), expected)\n\n # make sure IR doesn't have branches\n fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir']\n fir.blocks = fir.blocks\n self.assertEqual(len(fir.blocks), block_count)\n if expects_inlined:\n # assert no calls\n for block in fir.blocks.values():\n calls = list(block.find_exprs('call'))\n self.assertFalse(calls)\n else:\n # assert has call\n allcalls = []\n for block in fir.blocks.values():\n allcalls += list(block.find_exprs('call'))\n self.assertTrue(allcalls)\n\n def check_getattr(self, test_impl, args, expected, block_count,\n expects_inlined=True):\n j_func = njit(pipeline_class=InlineTestPipeline)(test_impl)\n # check they produce the same answer first!\n self.assertEqual(j_func(*args), expected)\n\n # make sure IR doesn't have branches\n fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir']\n fir.blocks = fir.blocks\n self.assertEqual(len(fir.blocks), block_count)\n if expects_inlined:\n # assert no getattr\n for block in fir.blocks.values():\n getattrs = list(block.find_exprs('getattr'))\n self.assertFalse(getattrs)\n else:\n # assert has getattr\n allgetattrs = []\n for block in fir.blocks.values():\n allgetattrs += list(block.find_exprs('getattr'))\n self.assertTrue(allgetattrs)\n\n def test_overload_method_default_args_always(self):\n @overload_method(self.DummyType, \"inline_method\", inline='always')\n def _get_inlined_method(obj, val=None, val2=None):\n def get(obj, val=None, val2=None):\n return (\"THIS IS INLINED\", val, val2)\n return get\n\n def foo(obj):\n return obj.inline_method(123), 
obj.inline_method(val2=321)\n\n self.check_method(\n test_impl=foo,\n args=[self.Dummy()],\n expected=((\"THIS IS INLINED\", 123, None),\n (\"THIS IS INLINED\", None, 321)),\n block_count=1,\n )\n\n def make_overload_method_test(self, costmodel, should_inline):\n def costmodel(*args):\n return should_inline\n\n @overload_method(self.DummyType, \"inline_method\", inline=costmodel)\n def _get_inlined_method(obj, val):\n def get(obj, val):\n return (\"THIS IS INLINED!!!\", val)\n return get\n\n def foo(obj):\n return obj.inline_method(123)\n\n self.check_method(\n test_impl=foo,\n args=[self.Dummy()],\n expected=(\"THIS IS INLINED!!!\", 123),\n block_count=1,\n expects_inlined=should_inline,\n )\n\n def test_overload_method_cost_driven_always(self):\n self.make_overload_method_test(\n costmodel='always',\n should_inline=True,\n )\n\n def test_overload_method_cost_driven_never(self):\n self.make_overload_method_test(\n costmodel='never',\n should_inline=False,\n )\n\n def test_overload_method_cost_driven_must_inline(self):\n self.make_overload_method_test(\n costmodel=lambda *args: True,\n should_inline=True,\n )\n\n def test_overload_method_cost_driven_no_inline(self):\n self.make_overload_method_test(\n costmodel=lambda *args: False,\n should_inline=False,\n )\n\n def make_overload_attribute_test(self, costmodel, should_inline):\n @overload_attribute(self.DummyType, \"inlineme\", inline=costmodel)\n def _get_inlineme(obj):\n def get(obj):\n return \"MY INLINED ATTRS\"\n return get\n\n def foo(obj):\n return obj.inlineme\n\n self.check_getattr(\n test_impl=foo,\n args=[self.Dummy()],\n expected=\"MY INLINED ATTRS\",\n block_count=1,\n expects_inlined=should_inline,\n )\n\n def test_overload_attribute_always(self):\n self.make_overload_attribute_test(\n costmodel='always',\n should_inline=True,\n )\n\n def test_overload_attribute_never(self):\n self.make_overload_attribute_test(\n costmodel='never',\n should_inline=False,\n )\n\n def test_overload_attribute_costmodel_must_inline(self):\n self.make_overload_attribute_test(\n costmodel=lambda *args: True,\n should_inline=True,\n )\n\n def test_overload_attribute_costmodel_no_inline(self):\n self.make_overload_attribute_test(\n costmodel=lambda *args: False,\n should_inline=False,\n )\n\n\nclass TestGeneralInlining(MemoryLeakMixin, InliningBase):\n\n def test_with_inlined_and_noninlined_variants(self):\n # This test is contrived and was to demonstrate fixing a bug in the\n # template walking logic where inlinable and non-inlinable definitions\n # would not mix.\n\n @overload(len, inline='always')\n def overload_len(A):\n if False:\n return lambda A: 10\n\n def impl():\n return len([2, 3, 4])\n\n # len(list) won't be inlined because the overload above doesn't apply\n self.check(impl, inline_expect={'len': False})\n\n def test_with_kwargs(self):\n\n def foo(a, b=3, c=5):\n return a + b + c\n\n @overload(foo, inline='always')\n def overload_foo(a, b=3, c=5):\n def impl(a, b=3, c=5):\n return a + b + c\n return impl\n\n def impl():\n return foo(3, c=10)\n\n self.check(impl, inline_expect={'foo': True})\n\n def test_with_kwargs2(self):\n\n @njit(inline='always')\n def bar(a, b=12, c=9):\n return a + b\n\n def impl(a, b=7, c=5):\n return bar(a + b, c=19)\n\n self.check(impl, 3, 4, inline_expect={'bar': True})\n\n def test_inlining_optional_constant(self):\n # This testcase causes `b` to be a Optional(bool) constant once it is\n # inlined into foo().\n @njit(inline='always')\n def bar(a=None, b=None):\n if b is None:\n b = 123 # this changes the type of 
`b` due to lack of SSA\n return (a, b)\n\n def impl():\n return bar(), bar(123), bar(b=321)\n\n self.check(impl, block_count='SKIP', inline_expect={'bar': True})\n\n\nclass TestInlineOptions(TestCase):\n\n def test_basic(self):\n always = InlineOptions('always')\n self.assertTrue(always.is_always_inline)\n self.assertFalse(always.is_never_inline)\n self.assertFalse(always.has_cost_model)\n self.assertEqual(always.value, 'always')\n\n never = InlineOptions('never')\n self.assertFalse(never.is_always_inline)\n self.assertTrue(never.is_never_inline)\n self.assertFalse(never.has_cost_model)\n self.assertEqual(never.value, 'never')\n\n def cost_model(x):\n return x\n model = InlineOptions(cost_model)\n self.assertFalse(model.is_always_inline)\n self.assertFalse(model.is_never_inline)\n self.assertTrue(model.has_cost_model)\n self.assertIs(model.value, cost_model)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Small plotting-related utility functions.\"\"\"\nimport colorsys\nimport os\n\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.colors as mplcol\nimport matplotlib.pyplot as plt\n\nimport warnings\nfrom urllib.request import urlopen, urlretrieve\nfrom http.client import HTTPException\n\n\n__all__ = [\"desaturate\", \"saturate\", \"set_hls_values\",\n \"despine\", \"get_dataset_names\", \"get_data_home\", \"load_dataset\"]\n\n\ndef remove_na(arr):\n \"\"\"Helper method for removing NA values from array-like.\n\n Parameters\n ----------\n arr : array-like\n The array-like from which to remove NA values.\n\n Returns\n -------\n clean_arr : array-like\n The original array with NA values removed.\n\n \"\"\"\n return arr[pd.notnull(arr)]\n\n\ndef sort_df(df, *args, **kwargs):\n \"\"\"Wrapper to handle different pandas sorting API pre/post 0.17.\"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg)\n try:\n return df.sort_values(*args, **kwargs)\n except AttributeError:\n return df.sort(*args, **kwargs)\n\n\ndef ci_to_errsize(cis, heights):\n \"\"\"Convert intervals to error arguments relative to plot heights.\n\n Parameters\n ----------\n cis: 2 x n sequence\n sequence of confidence interval limits\n heights : n sequence\n sequence of plot heights\n\n Returns\n -------\n errsize : 2 x n array\n sequence of error size relative to height values in correct\n format as argument for plt.bar\n\n \"\"\"\n cis = np.atleast_2d(cis).reshape(2, -1)\n heights = np.atleast_1d(heights)\n errsize = []\n for i, (low, high) in enumerate(np.transpose(cis)):\n h = heights[i]\n elow = h - low\n ehigh = high - h\n errsize.append([elow, ehigh])\n\n errsize = np.asarray(errsize).T\n return errsize\n\n\ndef pmf_hist(a, bins=10):\n \"\"\"Return arguments to plt.bar for pmf-like histogram of an array.\n\n DEPRECATED: will be removed in a future version.\n\n Parameters\n ----------\n a: array-like\n array to make histogram of\n bins: int\n number of bins\n\n Returns\n -------\n x: array\n left x position of bars\n h: array\n height of bars\n w: float\n width of bars\n\n \"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg)\n n, x = np.histogram(a, bins)\n h = n / n.sum()\n w = x[1] - x[0]\n return x[:-1], h, w\n\n\ndef desaturate(color, prop):\n \"\"\"Decrease the saturation channel of a color by some percent.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n prop : float\n saturation channel of color will be multiplied by this value\n\n Returns\n -------\n new_color : rgb tuple\n desaturated color code in RGB tuple representation\n\n \"\"\"\n # Check inputs\n if not 0 <= prop <= 1:\n raise ValueError(\"prop must be between 0 and 1\")\n\n # Get rgb tuple rep\n rgb = mplcol.colorConverter.to_rgb(color)\n\n # Convert to hls\n h, l, s = colorsys.rgb_to_hls(*rgb)\n\n # Desaturate the saturation channel\n s *= prop\n\n # Convert back to rgb\n new_color = colorsys.hls_to_rgb(h, l, s)\n\n return new_color\n\n\ndef saturate(color):\n \"\"\"Return a fully saturated color with the same hue.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n\n Returns\n -------\n new_color : rgb tuple\n saturated color code in RGB tuple representation\n\n \"\"\"\n return set_hls_values(color, s=1)\n\n\ndef set_hls_values(color, h=None, l=None, s=None): # noqa\n \"\"\"Independently manipulate 
the h, l, or s channels of a color.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n h, l, s : floats between 0 and 1, or None\n new values for each channel in hls space\n\n Returns\n -------\n new_color : rgb tuple\n new color code in RGB tuple representation\n\n \"\"\"\n # Get an RGB tuple representation\n rgb = mplcol.colorConverter.to_rgb(color)\n vals = list(colorsys.rgb_to_hls(*rgb))\n for i, val in enumerate([h, l, s]):\n if val is not None:\n vals[i] = val\n\n rgb = colorsys.hls_to_rgb(*vals)\n return rgb\n\n\ndef axlabel(xlabel, ylabel, **kwargs):\n \"\"\"Grab current axis and label it.\"\"\"\n ax = plt.gca()\n ax.set_xlabel(xlabel, **kwargs)\n ax.set_ylabel(ylabel, **kwargs)\n\n\ndef despine(fig=None, ax=None, top=True, right=True, left=False,\n bottom=False, offset=None, trim=False):\n \"\"\"Remove the top and right spines from plot(s).\n\n fig : matplotlib figure, optional\n Figure to despine all axes of, default uses current figure.\n ax : matplotlib axes, optional\n Specific axes object to despine.\n top, right, left, bottom : boolean, optional\n If True, remove that spine.\n offset : int or dict, optional\n Absolute distance, in points, spines should be moved away\n from the axes (negative values move spines inward). A single value\n applies to all spines; a dict can be used to set offset values per\n side.\n trim : bool, optional\n If True, limit spines to the smallest and largest major tick\n on each non-despined axis.\n\n Returns\n -------\n None\n\n \"\"\"\n # Get references to the axes we want\n if fig is None and ax is None:\n axes = plt.gcf().axes\n elif fig is not None:\n axes = fig.axes\n elif ax is not None:\n axes = [ax]\n\n for ax_i in axes:\n for side in [\"top\", \"right\", \"left\", \"bottom\"]:\n # Toggle the spine objects\n is_visible = not locals()[side]\n ax_i.spines[side].set_visible(is_visible)\n if offset is not None and is_visible:\n try:\n val = offset.get(side, 0)\n except AttributeError:\n val = offset\n ax_i.spines[side].set_position(('outward', val))\n\n # Potentially move the ticks\n if left and not right:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.minorTicks\n )\n ax_i.yaxis.set_ticks_position(\"right\")\n for t in ax_i.yaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.yaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if bottom and not top:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.minorTicks\n )\n ax_i.xaxis.set_ticks_position(\"top\")\n for t in ax_i.xaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.xaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if trim:\n # clip off the parts of the spines that extend past major ticks\n xticks = np.asarray(ax_i.get_xticks())\n if xticks.size:\n firsttick = np.compress(xticks >= min(ax_i.get_xlim()),\n xticks)[0]\n lasttick = np.compress(xticks <= max(ax_i.get_xlim()),\n xticks)[-1]\n ax_i.spines['bottom'].set_bounds(firsttick, lasttick)\n ax_i.spines['top'].set_bounds(firsttick, lasttick)\n newticks = xticks.compress(xticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_xticks(newticks)\n\n yticks = np.asarray(ax_i.get_yticks())\n if yticks.size:\n firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n yticks)[0]\n lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n 
yticks)[-1]\n ax_i.spines['left'].set_bounds(firsttick, lasttick)\n ax_i.spines['right'].set_bounds(firsttick, lasttick)\n newticks = yticks.compress(yticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_yticks(newticks)\n\n\ndef _kde_support(data, bw, gridsize, cut, clip):\n \"\"\"Establish support for a kernel density estimate.\"\"\"\n support_min = max(data.min() - bw * cut, clip[0])\n support_max = min(data.max() + bw * cut, clip[1])\n return np.linspace(support_min, support_max, gridsize)\n\n\ndef percentiles(a, pcts, axis=None):\n \"\"\"Like scoreatpercentile but can take and return array of percentiles.\n\n DEPRECATED: will be removed in a future version.\n\n Parameters\n ----------\n a : array\n data\n pcts : sequence of percentile values\n percentile or percentiles to find score at\n axis : int or None\n if not None, computes scores over this axis\n\n Returns\n -------\n scores: array\n array of scores at requested percentiles\n first dimension is length of object passed to ``pcts``\n\n \"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg)\n\n scores = []\n try:\n n = len(pcts)\n except TypeError:\n pcts = [pcts]\n n = 0\n for i, p in enumerate(pcts):\n if axis is None:\n score = stats.scoreatpercentile(a.ravel(), p)\n else:\n score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)\n scores.append(score)\n scores = np.asarray(scores)\n if not n:\n scores = scores.squeeze()\n return scores\n\n\ndef ci(a, which=95, axis=None):\n \"\"\"Return a percentile range from an array of values.\"\"\"\n p = 50 - which / 2, 50 + which / 2\n return np.percentile(a, p, axis)\n\n\ndef sig_stars(p):\n \"\"\"Return a R-style significance string corresponding to p values.\n\n DEPRECATED: will be removed in a future version.\n\n \"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg)\n\n if p < 0.001:\n return \"***\"\n elif p < 0.01:\n return \"**\"\n elif p < 0.05:\n return \"*\"\n elif p < 0.1:\n return \".\"\n return \"\"\n\n\ndef iqr(a):\n \"\"\"Calculate the IQR for an array of numbers.\"\"\"\n a = np.asarray(a)\n q1 = stats.scoreatpercentile(a, 25)\n q3 = stats.scoreatpercentile(a, 75)\n return q3 - q1\n\n\ndef get_dataset_names():\n \"\"\"Report available example datasets, useful for reporting issues.\"\"\"\n # delayed import to not demand bs4 unless this function is actually used\n from bs4 import BeautifulSoup\n http = urlopen('https://github.com/mwaskom/seaborn-data/')\n gh_list = BeautifulSoup(http)\n\n return [l.text.replace('.csv', '')\n for l in gh_list.find_all(\"a\", {\"class\": \"js-navigation-open\"})\n if l.text.endswith('.csv')]\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return a path to the cache directory for example datasets.\n\n This directory is then used by :func:`load_dataset`.\n\n If the ``data_home`` argument is not specified, it tries to read from the\n ``SEABORN_DATA`` environment variable and defaults to ``~/seaborn-data``.\n\n \"\"\"\n if data_home is None:\n data_home = os.environ.get('SEABORN_DATA',\n os.path.join('~', 'seaborn-data'))\n data_home = os.path.expanduser(data_home)\n if not os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home\n\n\ndef load_dataset(name, cache=True, data_home=None, **kws):\n \"\"\"Load an example dataset from the online repository (requires internet).\n\n This function provides quick access to a small number of example datasets\n that are useful for documenting seaborn 
or generating reproducible examples\n    for bug reports. It is not necessary for normal usage.\n\n    Note that some of the datasets have a small amount of preprocessing applied\n    to define a proper ordering for categorical variables.\n\n    Use :func:`get_dataset_names` to see a list of available datasets.\n\n    Parameters\n    ----------\n    name : str\n        Name of the dataset (``{name}.csv`` on\n        https://github.com/mwaskom/seaborn-data).\n    cache : boolean, optional\n        If True, try to load from the local cache first, and save to the cache\n        if a download is required.\n    data_home : string, optional\n        The directory in which to cache data; see :func:`get_data_home`.\n    kws : keys and values, optional\n        Additional keyword arguments are passed through to\n        :func:`pandas.read_csv`.\n\n    Returns\n    -------\n    df : :class:`pandas.DataFrame`\n        Tabular data, possibly with some preprocessing applied.\n\n    \"\"\"\n    path = (\"https://raw.githubusercontent.com/\"\n            \"mwaskom/seaborn-data/master/{}.csv\")\n    full_path = path.format(name)\n\n    if cache:\n        cache_path = os.path.join(get_data_home(data_home),\n                                  os.path.basename(full_path))\n        if not os.path.exists(cache_path):\n            urlretrieve(full_path, cache_path)\n        full_path = cache_path\n\n    df = pd.read_csv(full_path, **kws)\n    # A trailing all-null row is an artifact of a final newline in the CSV\n    if df.iloc[-1].isnull().all():\n        df = df.iloc[:-1]\n\n    # Set some columns as a categorical type with ordered levels\n\n    if name == \"tips\":\n        df[\"day\"] = pd.Categorical(df[\"day\"], [\"Thur\", \"Fri\", \"Sat\", \"Sun\"])\n        df[\"sex\"] = pd.Categorical(df[\"sex\"], [\"Male\", \"Female\"])\n        df[\"time\"] = pd.Categorical(df[\"time\"], [\"Lunch\", \"Dinner\"])\n        df[\"smoker\"] = pd.Categorical(df[\"smoker\"], [\"Yes\", \"No\"])\n\n    if name == \"flights\":\n        df[\"month\"] = pd.Categorical(df[\"month\"], df.month.unique())\n\n    if name == \"exercise\":\n        df[\"time\"] = pd.Categorical(df[\"time\"], [\"1 min\", \"15 min\", \"30 min\"])\n        df[\"kind\"] = pd.Categorical(df[\"kind\"], [\"rest\", \"walking\", \"running\"])\n        df[\"diet\"] = pd.Categorical(df[\"diet\"], [\"no fat\", \"low fat\"])\n\n    if name == \"titanic\":\n        df[\"class\"] = pd.Categorical(df[\"class\"], [\"First\", \"Second\", \"Third\"])\n        df[\"deck\"] = pd.Categorical(df[\"deck\"], list(\"ABCDEFG\"))\n\n    return df\n\n\ndef axis_ticklabels_overlap(labels):\n    \"\"\"Return a boolean for whether the list of ticklabels has overlaps.\n\n    Parameters\n    ----------\n    labels : list of matplotlib ticklabels\n\n    Returns\n    -------\n    overlap : boolean\n        True if any of the labels overlap.\n\n    \"\"\"\n    if not labels:\n        return False\n    try:\n        bboxes = [l.get_window_extent() for l in labels]\n        overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n        return max(overlaps) > 1\n    except RuntimeError:\n        # Issue on macos backend raises an error in the above code\n        return False\n\n\ndef axes_ticklabels_overlap(ax):\n    \"\"\"Return booleans for whether the x and y ticklabels on an Axes overlap.\n\n    Parameters\n    ----------\n    ax : matplotlib Axes\n\n    Returns\n    -------\n    x_overlap, y_overlap : booleans\n        True when the labels on that axis overlap.\n\n    \"\"\"\n    return (axis_ticklabels_overlap(ax.get_xticklabels()),\n            axis_ticklabels_overlap(ax.get_yticklabels()))\n\n\ndef categorical_order(values, order=None):\n    \"\"\"Return a list of unique data values.\n\n    Determine an ordered list of levels in ``values``.\n\n    Parameters\n    ----------\n    values : list, array, Categorical, or Series\n        Vector of \"categorical\" values\n    order : list-like, optional\n        Desired order of category levels to override the order determined\n        from the 
``values`` object.\n\n    Returns\n    -------\n    order : list\n        Ordered list of category levels not including null values.\n\n    \"\"\"\n    if order is None:\n        if hasattr(values, \"categories\"):\n            order = values.categories\n        else:\n            try:\n                order = values.cat.categories\n            except (TypeError, AttributeError):\n                try:\n                    order = values.unique()\n                except AttributeError:\n                    order = pd.unique(values)\n                try:\n                    # np.float was a deprecated alias of the builtin float\n                    np.asarray(values).astype(float)\n                    order = np.sort(order)\n                except (ValueError, TypeError):\n                    order = order\n        order = filter(pd.notnull, order)\n    return list(order)\n\n\ndef locator_to_legend_entries(locator, limits, dtype):\n    \"\"\"Return levels and formatted levels for brief numeric legends.\"\"\"\n    raw_levels = locator.tick_values(*limits).astype(dtype)\n\n    class dummy_axis:\n        def get_view_interval(self):\n            return limits\n\n    if isinstance(locator, mpl.ticker.LogLocator):\n        formatter = mpl.ticker.LogFormatter()\n    else:\n        formatter = mpl.ticker.ScalarFormatter()\n    formatter.axis = dummy_axis()\n\n    # TODO: The following two lines should be replaced\n    # once pinned matplotlib>=3.1.0 with:\n    # formatted_levels = formatter.format_ticks(raw_levels)\n    formatter.set_locs(raw_levels)\n    formatted_levels = [formatter(x) for x in raw_levels]\n\n    return raw_levels, formatted_levels\n\n\ndef get_color_cycle():\n    \"\"\"Return the list of colors in the current matplotlib color cycle\n\n    Parameters\n    ----------\n    None\n\n    Returns\n    -------\n    colors : list\n        List of matplotlib colors in the current cycle, or dark gray if\n        the current color cycle is empty.\n    \"\"\"\n    cycler = mpl.rcParams['axes.prop_cycle']\n    return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]\n\n\ndef relative_luminance(color):\n    \"\"\"Calculate the relative luminance of a color according to W3C standards\n\n    Parameters\n    ----------\n    color : matplotlib color or sequence of matplotlib colors\n        Hex code, rgb-tuple, or html color name.\n\n    Returns\n    -------\n    luminance : float(s) between 0 and 1\n\n    \"\"\"\n    rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n    rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)\n    lum = rgb.dot([.2126, .7152, .0722])\n    try:\n        return lum.item()\n    except ValueError:\n        return lum\n\n\ndef to_utf8(obj):\n    \"\"\"Return a string representing a Python object.\n\n    Strings (i.e. type ``str``) are returned unchanged.\n\n    Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.\n\n    For other objects, the method ``__str__()`` is called, and the result is\n    returned as a string.\n\n    Parameters\n    ----------\n    obj : object\n        Any Python object\n\n    Returns\n    -------\n    s : str\n        UTF-8-decoded string representation of ``obj``\n\n    \"\"\"\n    if isinstance(obj, str):\n        return obj\n    try:\n        return obj.decode(encoding=\"utf-8\")\n    except AttributeError:  # obj is not bytes-like\n        return str(obj)\n\n\ndef _network(t=None, url='https://google.com'):\n    \"\"\"\n    Decorator that will skip a test if `url` is unreachable.\n\n    Parameters\n    ----------\n    t : function, optional\n    url : str, optional\n\n    \"\"\"\n    import nose\n\n    if t is None:\n        return lambda x: _network(x, url=url)\n\n    def wrapper(*args, **kwargs):\n        # attempt to connect\n        try:\n            f = urlopen(url)\n        except (IOError, HTTPException):\n            raise nose.SkipTest()\n        else:\n            f.close()\n            return t(*args, **kwargs)\n    return wrapper\n"
] | [
[
"numpy.dot",
"numpy.linalg.svd",
"numpy.sum",
"pandas.DataFrame",
"numpy.mean",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"sklearn.decomposition.PCA"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"sklearn.utils._testing.ignore_warnings",
"sklearn.decomposition.sparse_encode",
"numpy.all",
"numpy.exp",
"sklearn.decomposition.dict_learning",
"numpy.flatnonzero",
"sklearn.utils._testing.assert_array_equal",
"numpy.logical_not",
"sklearn.utils._testing.TempMemmap",
"sklearn.decomposition.MiniBatchDictionaryLearning",
"sklearn.decomposition.SparseCoder",
"numpy.random.rand",
"sklearn.decomposition.dict_learning_online",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"sklearn.decomposition.DictionaryLearning",
"sklearn.utils.check_array",
"sklearn.utils._testing.assert_array_almost_equal",
"numpy.empty"
],
[
"numpy.arange",
"numpy.sqrt",
"numpy.zeros",
"numpy.abs"
],
[
"numpy.linspace",
"numpy.asarray",
"numpy.histogram",
"numpy.where",
"matplotlib.pyplot.gca",
"pandas.read_csv",
"matplotlib.pyplot.gcf",
"numpy.atleast_1d",
"numpy.apply_along_axis",
"matplotlib.ticker.LogFormatter",
"matplotlib.ticker.ScalarFormatter",
"pandas.notnull",
"pandas.Categorical",
"matplotlib.colors.colorConverter.to_rgb",
"numpy.atleast_2d",
"scipy.stats.scoreatpercentile",
"pandas.unique",
"numpy.transpose",
"matplotlib.colors.colorConverter.to_rgba_array",
"numpy.percentile",
"numpy.sort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
mnabavi84/dcamp-intro-python | [
"218b67106061d45cfa18a1b1d46487900f9aa539"
] | [
"11-Numpy Basic Statistics.py"
] | [
"# np_baseball is available\r\n\r\n# Import numpy\r\nimport numpy as np\r\n\r\n# Create np_height_in from np_baseball\r\nnp_height_in = np_baseball[:,0]\r\n\r\n# Print out the mean of np_height_in\r\nprint(np.mean(np_height_in))\r\n\r\n# Print out the median of np_height_in\r\nprint(np.median(np_height_in))\r\n\r\n\r\n# np_baseball is available\r\n\r\n# Import numpy\r\nimport numpy as np\r\n\r\n# Print mean height (first column)\r\navg = np.mean(np_baseball[:,0])\r\nprint(\"Average: \" + str(avg))\r\n\r\n# Print median height. Replace 'None'\r\nmed = np.median(np_baseball[:,0])\r\nprint(\"Median: \" + str(med))\r\n\r\n# Print out the standard deviation on height. Replace 'None'\r\nstddev = np.std(np_baseball[:,0])\r\nprint(\"Standard Deviation: \" + str(stddev))\r\n\r\n# Print out correlation between first and second column. Replace 'None'\r\ncorr = np.corrcoef(np_baseball[:,0], np_baseball[:,1])\r\nprint(\"Correlation: \" + str(corr))\r\n\r\n\r\n# heights and positions are available as lists\r\n\r\n# Import numpy\r\nimport numpy as np\r\n\r\n# Convert positions and heights to numpy arrays: np_positions, np_heights\r\nnp_positions = np.array(positions)\r\nnp_heights = np.array(heights)\r\n\r\n# Heights of the goalkeepers: gk_heights\r\ngk_heights = np_heights[np_positions == 'GK']\r\n\r\n# Heights of the other players: other_heights\r\nother_heights = np_heights[np_positions != 'GK']\r\n\r\n# Print out the median height of goalkeepers. Replace 'None'\r\nprint(\"Median height of goalkeepers: \" + str(np.median(gk_heights)))\r\n\r\n# Print out the median height of other players. Replace 'None'\r\nprint(\"Median height of other players: \" + str(np.median(other_heights)))\r\n"
] | [
[
"numpy.median",
"numpy.std",
"numpy.mean",
"numpy.corrcoef",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kili-technology/active-learning | [
"72dce7d91b988264dd7fa1a972d9af45e9648c4c",
"72dce7d91b988264dd7fa1a972d9af45e9648c4c",
"72dce7d91b988264dd7fa1a972d9af45e9648c4c",
"72dce7d91b988264dd7fa1a972d9af45e9648c4c",
"72dce7d91b988264dd7fa1a972d9af45e9648c4c"
] | [
"experiments/mnist_simple/class_imbalance.py",
"al/algorithms/uncertainty.py",
"al/dataset/pascal_voc.py",
"al/model/unet.py",
"al/model/model_zoo/nasnet.py"
] | [
"import os\nimport logging\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport al\nfrom al.dataset import mnist\nfrom al.model.model_zoo.simple_cnn import ConvModel\nfrom al.model.mnist import MnistLearner\nfrom al.dataset.mnist import MnistDataset\nfrom al.train.active_train import ActiveTrain\nfrom al.helpers.experiment import set_up_experiment, load_config\nfrom al.experiments import set_up_learner\n\n\nDATASET = 'mnist'\n\nFOLDER_PATH = os.path.dirname(__file__)\nOUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment(\n    __file__, FOLDER_PATH, logging_lvl=20)\n\nlogger.info('-------------------------')\nlogger.info('--LAUNCHING EXPERIMENTS--')\nlogger.info('-------------------------')\n\nconfig = load_config(FOLDER_PATH, DATASET)\nsetupper = set_up_learner(DATASET)\n\nconfig['active_learning']['output_dir'] = OUTPUT_DIR\nconfig['experiment']['logger_name'] = logger_name\nmodel_name = 'simple_cnn'\n\nstrategies = ['random_sampling', 'margin_sampling']\nrepeats = 1\nscore_data = {}\nconfig['active_learning']['assets_per_query'] = 20\nconfig['active_learning']['n_iter'] = 5\nconfig['active_learning']['init_size'] = 100\n\nconfig['train_parameters']['batch_size'] = 16\nconfig['train_parameters']['iterations'] = 100\n\nconfig['experiment']['n_classes'] = 2\n\nraw_dataset, _ = setupper(config, OUTPUT_DIR, logger,\n                          index_train=np.arange(60000))\nfull_train_dataset = raw_dataset.dataset\n\nfirst_class = 1\nsecond_class = 2\nfirst_classes = []\nsecond_classes = []\np = 0.1\n\nfor i in range(len(full_train_dataset)):\n    if full_train_dataset[i][1].numpy() == first_class:\n        first_classes.append(i)\n    elif full_train_dataset[i][1].numpy() == second_class and np.random.rand() < p:\n        second_classes.append(i)\n\ntrain_indices = np.array(first_classes + second_classes)\n# np.random.permutation returns a new array, so its result must be assigned\n# or the shuffle of the imbalanced index set is silently discarded\ntrain_indices = np.random.permutation(train_indices)\n\nfor i in range(repeats):\n    logger.info('---------------------------')\n    logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')\n    logger.info('---------------------------')\n    for strategy in strategies:\n        dataset, learner = setupper(\n            config, OUTPUT_DIR, logger, index_train=train_indices)\n        logger.info('---------------------------')\n        logger.info(f'----STRATEGY : {strategy}----')\n        logger.info('---------------------------')\n        trainer = ActiveTrain(learner, dataset, strategy, logger_name)\n        scores = trainer.train(\n            config['train_parameters'], **config['active_learning'])\n        score_data[(strategy, i)] = scores\n        logger.info(f'----DONE----\\n')\n    logger.info('---------------------------')\n    logger.info(f'--------DONE--------')\n    logger.info('---------------------------\\n\\n\\n')\n\n\n# data = []\n# for (strategy, experiment_number), scores_experiment in score_data.items():\n#     for step_result in scores_experiment:\n#         val_step_result = step_result['val']\n#         step = step_result['step']\n#         data.append(\n#             {'strategy': strategy,\n#              'experiment': experiment_number,\n#              'step': step,\n#              **val_step_result})\n# df = pd.DataFrame(data)\n\n# plot_dir = os.path.join(os.path.dirname(__file__), 'figures')\n\n# plt.figure(num=0, figsize=(12, 5))\n# sns.lineplot(x='step', y='accuracy', hue='strategy', data=df)\n# plt.ylabel('Accuracy')\n# plt.show()\n# plt.savefig(os.path.join(plot_dir, 'accuracy_imbalance.png'))\n",
"import numpy as np\n\nfrom .baseline import Strategy\nfrom ..helpers.time import timeit\n\n\nclass BaseUncertaintyStrategy(Strategy):\n\n @timeit\n def score_dataset(self, dataset, learner, log_time={}):\n raise NotImplementedError\n\n def return_top_indices(self, dataset, learner, top, log_time={}):\n scores = self.score_dataset(dataset, learner, log_time=log_time)\n sorted_idx = np.argsort(scores)\n return sorted_idx[-top:]\n\n\nclass UncertaintyStrategy(BaseUncertaintyStrategy):\n @timeit\n def score_dataset(self, dataset, learner, log_time={}):\n inference_result = learner.inference(dataset)\n probabilities = inference_result['class_probabilities']\n assert len(probabilities) == len(dataset)\n top_prediction = np.max(probabilities, axis=1)\n return 1 - top_prediction\n\n\nclass MarginStrategy(BaseUncertaintyStrategy):\n @timeit\n def score_dataset(self, dataset, learner, log_time={}):\n inference_result = learner.inference(dataset)\n probabilities = inference_result['class_probabilities']\n assert len(probabilities) == len(dataset)\n sorted_preds = np.argsort(probabilities, axis=1)\n top_preds = probabilities[np.arange(\n len(probabilities)), sorted_preds[:, -1]]\n second_preds = probabilities[np.arange(\n len(probabilities)), sorted_preds[:, -2]]\n difference = top_preds - second_preds\n return - difference\n\n\nclass EntropyStrategy(BaseUncertaintyStrategy):\n @timeit\n def score_dataset(self, dataset, learner, log_time={}):\n inference_result = learner.inference(dataset)\n probabilities = inference_result['class_probabilities']\n assert len(probabilities) == len(dataset)\n entropies = -np.sum(probabilities * np.log(probabilities), axis=1)\n return entropies\n\n\nclass SemanticEntropyStrategy(BaseUncertaintyStrategy):\n @timeit\n def score_dataset(self, dataset, learner, log_time={}):\n inference_result = learner.inference(dataset)\n probabilities = inference_result['class_probabilities']\n bs, _, _, _ = probabilities.shape\n probabilities = np.reshape(probabilities, (bs, -1))\n entropies = -np.sum(probabilities * np.log(probabilities), axis=1)\n return entropies\n\n\nclass SemanticMinEntropyStrategy(BaseUncertaintyStrategy):\n @timeit\n def score_dataset(self, dataset, learner, log_time={}):\n inference_result = learner.inference(dataset)\n probabilities = inference_result['class_probabilities']\n bs, _, _, _ = probabilities.shape\n entropies = -np.sum(probabilities * np.log(probabilities), axis=3)\n return np.mean(entropies, axis=(1, 2))\n",
"import os\nimport xml.etree.ElementTree as ET\nfrom PIL import Image\n\nimport numpy as np\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import TensorDataset, Dataset\nfrom ptsemseg.loader import get_loader\nfrom ptsemseg.augmentations import get_composed_augmentations\n\nfrom .active_dataset import ActiveDataset, MaskDataset\nfrom ..helpers.constants import DATA_ROOT\nfrom ..model.model_zoo.ssd import Container, get_transforms, get_transforms_semantic\n\n\nclass PascalVOCObjectDataset(ActiveDataset):\n class_names = ('__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n def __init__(self, indices, n_init=100, output_dir=None, train=True, year='2012', cfg=None, queries_name='queries.txt'):\n self.data_dir = os.path.join(DATA_ROOT, f'VOCdevkit/VOC{year}')\n self.cfg = cfg\n self.init_dataset = self._get_initial_dataset(train, year)\n super().__init__(self.get_dataset(indices), n_init=n_init,\n output_dir=output_dir, queries_name=queries_name)\n\n def _get_initial_dataset(self, train=True, year='2012'):\n transform, target_transform = get_transforms(self.cfg, train)\n if train:\n image_set = 'train'\n else:\n image_set = 'val'\n if not os.path.exists(self.data_dir):\n torchvision.datasets.VOCDetection(\n root=DATA_ROOT, year=year, image_set=image_set, download=True)\n split = 'train' if train else 'val'\n if train:\n return VOCDataset(self.data_dir, split, transform=transform, target_transform=target_transform, keep_difficult=not train)\n else:\n return VOCDataset(self.data_dir, split, transform=transform, target_transform=target_transform, keep_difficult=not train)\n\n def get_dataset(self, indices):\n return MaskDataset(self.init_dataset, indices)\n\n\nclass PascalVOCSemanticDataset(ActiveDataset):\n class_names = ('__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n def __init__(self, indices, n_init=100, output_dir=None, train=True, queries_name='queries.txt'):\n self.data_path = os.path.join(DATA_ROOT, f'VOCdevkit/VOC2012')\n self.sbd_path = os.path.join(DATA_ROOT, f'benchmark_RELEASE')\n self.data_aug = get_composed_augmentations(None)\n self.data_loader = get_loader('pascal')\n self.init_dataset = self._get_initial_dataset(train)\n super().__init__(self.get_dataset(indices), n_init=n_init,\n output_dir=output_dir, queries_name=queries_name)\n\n def _get_initial_dataset(self, train=True):\n if train:\n split = 'train'\n augmentations = self.data_aug\n else:\n split = 'val'\n augmentations = None\n return self.data_loader(\n self.data_path, is_transform=True, split=split, img_size=256,\n augmentations=augmentations, sbd_path=self.sbd_path)\n\n def get_dataset(self, indices):\n return MaskDataset(self.init_dataset, indices)\n\n\nclass VOCDataset(torch.utils.data.Dataset):\n class_names = ('__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n def __init__(self, data_dir, split, transform=None, target_transform=None, keep_difficult=False):\n \"\"\"Dataset for VOC data.\n Args:\n data_dir: the root of the VOC2007 or VOC2012 
dataset, the directory contains the following sub-directories:\n Annotations, ImageSets, JPEGImages, SegmentationClass, SegmentationObject.\n \"\"\"\n self.data_dir = data_dir\n self.split = split\n self.transform = transform\n self.target_transform = target_transform\n image_sets_file = os.path.join(\n self.data_dir, \"ImageSets\", \"Main\", \"%s.txt\" % self.split)\n self.ids = VOCDataset._read_image_ids(image_sets_file)\n self.keep_difficult = keep_difficult\n\n self.class_dict = {class_name: i for i,\n class_name in enumerate(self.class_names)}\n\n def __getitem__(self, index):\n image_id = self.ids[index]\n boxes, labels, is_difficult = self._get_annotation(image_id)\n if not self.keep_difficult:\n boxes = boxes[is_difficult == 0]\n labels = labels[is_difficult == 0]\n image = self._read_image(image_id)\n if self.transform:\n image, boxes, labels = self.transform(image, boxes, labels)\n if self.target_transform:\n boxes, labels = self.target_transform(boxes, labels)\n targets = Container(\n boxes=boxes,\n labels=labels,\n )\n return image, targets, index\n\n def get_annotation(self, index):\n image_id = self.ids[index]\n return image_id, self._get_annotation(image_id)\n\n def __len__(self):\n return len(self.ids)\n\n @staticmethod\n def _read_image_ids(image_sets_file):\n ids = []\n with open(image_sets_file) as f:\n for _, line in enumerate(f):\n ids.append(line.rstrip())\n return ids\n\n def _get_annotation(self, image_id):\n annotation_file = os.path.join(\n self.data_dir, \"Annotations\", \"%s.xml\" % image_id)\n objects = ET.parse(annotation_file).findall(\"object\")\n boxes = []\n labels = []\n is_difficult = []\n for obj in objects:\n class_name = obj.find('name').text.lower().strip()\n bbox = obj.find('bndbox')\n # VOC dataset format follows Matlab, in which indexes start from 0\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n boxes.append([x1, y1, x2, y2])\n labels.append(self.class_dict[class_name])\n is_difficult_str = obj.find('difficult').text\n is_difficult.append(int(is_difficult_str)\n if is_difficult_str else 0)\n\n return (np.array(boxes, dtype=np.float32),\n np.array(labels, dtype=np.int64),\n np.array(is_difficult, dtype=np.uint8))\n\n def get_img_info(self, index):\n img_id = self.ids[index]\n annotation_file = os.path.join(\n self.data_dir, \"Annotations\", \"%s.xml\" % img_id)\n anno = ET.parse(annotation_file).getroot()\n size = anno.find(\"size\")\n im_info = tuple(\n map(int, (size.find(\"height\").text, size.find(\"width\").text)))\n return {\"height\": im_info[0], \"width\": im_info[1]}\n\n def _read_image(self, image_id):\n image_file = os.path.join(\n self.data_dir, \"JPEGImages\", \"%s.jpg\" % image_id)\n image = Image.open(image_file).convert(\"RGB\")\n image = np.array(image)\n return image\n\n\nclass SmallVOCDataset(VOCDataset):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __len__(self):\n return 30\n",
"import logging\n\nimport numpy as np\nimport tqdm\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import BatchSampler, SequentialSampler\nfrom ptsemseg.models import get_model\nfrom ptsemseg.loss import get_loss_function\nfrom ptsemseg.optimizers import get_optimizer\nfrom ptsemseg.metrics import runningScore\n\nfrom .active_model import ActiveLearner\nfrom .model_zoo.ssd import *\nfrom ..helpers.time import timeit\nfrom ..helpers.samplers import IterationBasedBatchSampler\n\n\nclass SemanticLearner(ActiveLearner):\n\n def __init__(self, model, cfg, logger_name=None, device=0, config=None):\n super().__init__(device=device)\n self.cfg = cfg\n self.model = model\n if self.cuda_available:\n self.model.cuda()\n\n self.criterion = get_loss_function(config)\n optimizer_cls = get_optimizer(config)\n optimizer_params = {\n k: v for k, v in config[\"training\"][\"optimizer\"].items() if k != \"name\"}\n self.optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)\n\n self.logger = logging.getLogger(logger_name)\n self.inference_img_size = 16\n self.reducer = nn.AdaptiveAvgPool2d(self.inference_img_size)\n\n def get_predictions(self, dataset):\n self.model.eval()\n batch_sampler = BatchSampler(\n sampler=self.get_base_sampler(len(dataset), shuffle=False), batch_size=16, drop_last=False)\n loader = torch.utils.data.DataLoader(\n dataset, batch_sampler=batch_sampler)\n predictions = []\n with torch.no_grad():\n for (images, _) in tqdm.tqdm(loader, disable=self.logger.level > 15):\n if self.cuda_available:\n images = images.cuda()\n masks = self.model(images)\n if self.cuda_available:\n masks = masks.detach().cpu()\n reduced_masks = self.reducer(masks)\n predictions.append(reduced_masks.data)\n return torch.cat(predictions).numpy()\n\n def inference(self, dataset):\n self.model.eval()\n predictions = self.get_predictions(dataset)\n probabilities = nn.Softmax2d()(torch.from_numpy(predictions)).numpy()\n return {'predictions': predictions, 'class_probabilities': probabilities}\n\n @staticmethod\n def get_base_sampler(size, shuffle):\n if shuffle:\n order = np.random.permutation(np.arange(size))\n return SequentialSampler(order)\n else:\n return SequentialSampler(range(size))\n\n @timeit\n def fit(self, dataset, batch_size, learning_rate, momentum, weight_decay, iterations, shuffle=True, *args, **kwargs):\n self.model.train()\n batch_sampler = BatchSampler(\n sampler=self.get_base_sampler(len(dataset), shuffle), batch_size=batch_size, drop_last=False)\n batch_sampler = IterationBasedBatchSampler(\n batch_sampler, num_iterations=iterations, start_iter=0)\n loader = torch.utils.data.DataLoader(\n dataset, batch_sampler=batch_sampler, num_workers=self.cfg.DATA_LOADER.NUM_WORKERS)\n for step, (images, label_image) in tqdm.tqdm(\n enumerate(loader), disable=self.logger.level > 15, total=len(loader)):\n if self.cuda_available:\n images = images.cuda()\n label_image = label_image.cuda()\n self.model.zero_grad()\n mask_preds = self.model(images)\n self.optimizer.zero_grad()\n loss = self.criterion(input=mask_preds, target=label_image)\n loss.backward()\n self.optimizer.step()\n\n def score(self, dataset, batch_size, *args, **kwargs):\n self.model.eval()\n running_metrics_val = runningScore(\n dataset.dataset.n_classes)\n batch_sampler = BatchSampler(\n sampler=self.get_base_sampler(len(dataset), shuffle=False), batch_size=batch_size, drop_last=False)\n loader = torch.utils.data.DataLoader(\n dataset, batch_sampler=batch_sampler)\n with 
torch.no_grad():\n for (images, labels_val) in tqdm.tqdm(loader, disable=self.logger.level > 15):\n if self.cuda_available:\n images = images.cuda()\n labels_val = labels_val.cuda()\n outputs = self.model(images)\n pred = outputs.data.max(1)[1].cpu().numpy()\n gt = labels_val.data.cpu().numpy()\n running_metrics_val.update(gt, pred)\n score, class_iou = running_metrics_val.get_scores()\n return {**score, **class_iou}",
"\"\"\"nasnet in pytorch\n[1] Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. Le\n Learning Transferable Architectures for Scalable Image Recognition\n https://arxiv.org/abs/1707.07012\nCode from https://github.com/weiaicunzai/pytorch-cifar100\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\n\nclass SeperableConv2d(nn.Module):\n\n def __init__(self, input_channels, output_channels, kernel_size, **kwargs):\n\n super().__init__()\n self.depthwise = nn.Conv2d(\n input_channels,\n input_channels,\n kernel_size,\n groups=input_channels,\n **kwargs\n )\n\n self.pointwise = nn.Conv2d(\n input_channels,\n output_channels,\n 1\n )\n\n def forward(self, x):\n x = self.depthwise(x)\n x = self.pointwise(x)\n\n return x\n\n\nclass SeperableBranch(nn.Module):\n\n def __init__(self, input_channels, output_channels, kernel_size, **kwargs):\n \"\"\"Adds 2 blocks of [relu-separable conv-batchnorm].\"\"\"\n super().__init__()\n self.block1 = nn.Sequential(\n nn.ReLU(),\n SeperableConv2d(input_channels, output_channels,\n kernel_size, **kwargs),\n nn.BatchNorm2d(output_channels)\n )\n\n self.block2 = nn.Sequential(\n nn.ReLU(),\n SeperableConv2d(output_channels, output_channels,\n kernel_size, stride=1, padding=int(kernel_size / 2)),\n nn.BatchNorm2d(output_channels)\n )\n\n def forward(self, x):\n x = self.block1(x)\n x = self.block2(x)\n\n return x\n\n\nclass Fit(nn.Module):\n \"\"\"Make the cell outputs compatible\n Args:\n prev_filters: filter number of tensor prev, needs to be modified\n filters: filter number of normal cell branch output filters\n \"\"\"\n\n def __init__(self, prev_filters, filters):\n super().__init__()\n self.relu = nn.ReLU()\n\n self.p1 = nn.Sequential(\n nn.AvgPool2d(1, stride=2),\n nn.Conv2d(prev_filters, int(filters / 2), 1)\n )\n\n # make sure there is no information loss\n self.p2 = nn.Sequential(\n nn.ConstantPad2d((0, 1, 0, 1), 0),\n nn.ConstantPad2d((-1, 0, -1, 0), 0), # cropping\n nn.AvgPool2d(1, stride=2),\n nn.Conv2d(prev_filters, int(filters / 2), 1)\n )\n\n self.bn = nn.BatchNorm2d(filters)\n\n self.dim_reduce = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(prev_filters, filters, 1),\n nn.BatchNorm2d(filters)\n )\n\n self.filters = filters\n\n def forward(self, inputs):\n x, prev = inputs\n if prev is None:\n return x\n\n # image size does not match\n elif x.size(2) != prev.size(2):\n prev = self.relu(prev)\n p1 = self.p1(prev)\n p2 = self.p2(prev)\n prev = torch.cat([p1, p2], 1)\n prev = self.bn(prev)\n\n elif prev.size(1) != self.filters:\n prev = self.dim_reduce(prev)\n\n return prev\n\n\nclass NormalCell(nn.Module):\n\n def __init__(self, x_in, prev_in, output_channels):\n super().__init__()\n\n self.dem_reduce = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(x_in, output_channels, 1, bias=False),\n nn.BatchNorm2d(output_channels)\n )\n\n self.block1_left = SeperableBranch(\n output_channels,\n output_channels,\n kernel_size=3,\n padding=1,\n bias=False\n )\n self.block1_right = nn.Sequential()\n\n self.block2_left = SeperableBranch(\n output_channels,\n output_channels,\n kernel_size=3,\n padding=1,\n bias=False\n )\n self.block2_right = SeperableBranch(\n output_channels,\n output_channels,\n kernel_size=5,\n padding=2,\n bias=False\n )\n\n self.block3_left = nn.AvgPool2d(3, stride=1, padding=1)\n self.block3_right = nn.Sequential()\n\n self.block4_left = nn.AvgPool2d(3, stride=1, padding=1)\n self.block4_right = nn.AvgPool2d(3, stride=1, padding=1)\n\n self.block5_left = SeperableBranch(\n output_channels,\n output_channels,\n kernel_size=5,\n padding=2,\n bias=False\n 
)\n self.block5_right = SeperableBranch(\n output_channels,\n output_channels,\n kernel_size=3,\n padding=1,\n bias=False\n )\n\n self.fit = Fit(prev_in, output_channels)\n\n def forward(self, x):\n x, prev = x\n\n # return transformed x as new x, and original x as prev\n # only prev tensor needs to be modified\n prev = self.fit((x, prev))\n\n h = self.dem_reduce(x)\n\n x1 = self.block1_left(h) + self.block1_right(h)\n x2 = self.block2_left(prev) + self.block2_right(h)\n x3 = self.block3_left(h) + self.block3_right(h)\n x4 = self.block4_left(prev) + self.block4_right(prev)\n x5 = self.block5_left(prev) + self.block5_right(prev)\n\n return torch.cat([prev, x1, x2, x3, x4, x5], 1), x\n\n\nclass ReductionCell(nn.Module):\n\n def __init__(self, x_in, prev_in, output_channels):\n super().__init__()\n\n self.dim_reduce = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(x_in, output_channels, 1),\n nn.BatchNorm2d(output_channels)\n )\n\n # block1\n self.layer1block1_left = SeperableBranch(\n output_channels, output_channels, 7, stride=2, padding=3)\n self.layer1block1_right = SeperableBranch(\n output_channels, output_channels, 5, stride=2, padding=2)\n\n # block2\n self.layer1block2_left = nn.MaxPool2d(3, stride=2, padding=1)\n self.layer1block2_right = SeperableBranch(\n output_channels, output_channels, 7, stride=2, padding=3)\n\n # block3\n self.layer1block3_left = nn.AvgPool2d(3, 2, 1)\n self.layer1block3_right = SeperableBranch(\n output_channels, output_channels, 5, stride=2, padding=2)\n\n # block5\n self.layer2block1_left = nn.MaxPool2d(3, 2, 1)\n self.layer2block1_right = SeperableBranch(\n output_channels, output_channels, 3, stride=1, padding=1)\n\n # block4\n self.layer2block2_left = nn.AvgPool2d(3, 1, 1)\n self.layer2block2_right = nn.Sequential()\n\n self.fit = Fit(prev_in, output_channels)\n\n def forward(self, x):\n x, prev = x\n prev = self.fit((x, prev))\n\n h = self.dim_reduce(x)\n\n layer1block1 = self.layer1block1_left(\n prev) + self.layer1block1_right(h)\n layer1block2 = self.layer1block2_left(\n h) + self.layer1block2_right(prev)\n layer1block3 = self.layer1block3_left(\n h) + self.layer1block3_right(prev)\n layer2block1 = self.layer2block1_left(\n h) + self.layer2block1_right(layer1block1)\n layer2block2 = self.layer2block2_left(\n layer1block1) + self.layer2block2_right(layer1block2)\n\n return torch.cat([\n layer1block2, # https://github.com/keras-team/keras-applications/blob/master/keras_applications/nasnet.py line 739\n layer1block3,\n layer2block1,\n layer2block2\n ], 1), x\n\n\nclass NasNetA(nn.Module):\n\n def __init__(self, repeat_cell_num, reduction_num, filters, stemfilter, class_num=100):\n super().__init__()\n\n self.stem = nn.Sequential(\n nn.Conv2d(3, stemfilter, 3, padding=1, bias=False),\n nn.BatchNorm2d(stemfilter)\n )\n\n self.prev_filters = stemfilter\n self.x_filters = stemfilter\n self.filters = filters\n\n self.cell_layers = self._make_layers(repeat_cell_num, reduction_num)\n\n self.relu = nn.ReLU()\n self.avg = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(self.filters * 6, class_num)\n\n def _make_normal(self, block, repeat, output):\n \"\"\"make normal cell\n Args:\n block: cell type\n repeat: number of repeated normal cell\n output: output filters for each branch in normal cell\n Returns:\n stacked normal cells\n \"\"\"\n\n layers = []\n for r in range(repeat):\n layers.append(block(self.x_filters, self.prev_filters, output))\n self.prev_filters = self.x_filters\n self.x_filters = output * 6 # concatenate 6 branches\n\n return layers\n\n def 
_make_reduction(self, block, output):\n \"\"\"make normal cell\n Args:\n block: cell type\n output: output filters for each branch in reduction cell\n Returns:\n reduction cell\n \"\"\"\n\n reduction = block(self.x_filters, self.prev_filters, output)\n self.prev_filters = self.x_filters\n self.x_filters = output * 4 # stack for 4 branches\n\n return reduction\n\n def _make_layers(self, repeat_cell_num, reduction_num):\n\n layers = []\n for i in range(reduction_num):\n\n layers.extend(self._make_normal(\n NormalCell, repeat_cell_num, self.filters))\n self.filters *= 2\n layers.append(self._make_reduction(ReductionCell, self.filters))\n\n layers.extend(self._make_normal(\n NormalCell, repeat_cell_num, self.filters))\n\n return nn.Sequential(*layers)\n\n def forward(self, x, features=False):\n\n x = self.stem(x)\n prev = None\n x, prev = self.cell_layers((x, prev))\n x = self.relu(x)\n x = self.avg(x)\n feature = x.view(x.size(0), -1)\n x = self.fc(feature)\n if features:\n return x, feature\n return x\n\n\ndef nasnet(config):\n\n # stem filters must be 44, it's a pytorch workaround, cant change to other number\n return NasNetA(4, 2, 44, 44, class_num=config['experiment']['size'])\n"
] | [
[
"numpy.random.permutation",
"numpy.arange",
"numpy.array",
"numpy.random.rand"
],
[
"numpy.log",
"numpy.reshape",
"numpy.max",
"numpy.mean",
"numpy.argsort"
],
[
"numpy.array"
],
[
"torch.cat",
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.nn.Softmax2d",
"torch.no_grad",
"torch.nn.AdaptiveAvgPool2d",
"torch.utils.data.sampler.SequentialSampler"
],
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.ConstantPad2d",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yuancaimaiyi/gtsfm | [
"cc5781c35af23498d45cd96a1818e4786c5cca80",
"cc5781c35af23498d45cd96a1818e4786c5cca80"
] | [
"gtsfm/common/gtsfm_data.py",
"tests/utils/test_geometry_comparisons.py"
] | [
"\"\"\"Class to hold the tracks and cameras of a 3D scene.\nThis can be the output of either data association or of bundle adjustment.\n\nAuthors: Ayush Baid, John Lambert, Xiaolong Wu\n\"\"\"\nimport itertools\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport numpy as np\nfrom gtsam import PinholeCameraCal3Bundler, Pose3, SfmTrack\n\nimport gtsfm.utils.graph as graph_utils\nimport gtsfm.utils.logger as logger_utils\nimport gtsfm.utils.reprojection as reproj_utils\n\nlogger = logger_utils.get_logger()\n\nEQUALITY_TOLERANCE = 1e-5\nPRINT_NUM_SIG_FIGS = 2\n\n\nclass GtsfmData:\n \"\"\"Class containing cameras and tracks, essentially describing the complete 3D scene.\n\n This class is needed over GTSAM's SfmData type because GTSAM's type does not allow for non-contiguous cameras.\n The situation of non-contiguous cameras can exists because of failures in front-end.\n \"\"\"\n\n def __init__(self, number_images: int) -> None:\n \"\"\"Initializes the class.\n\n Args:\n number_images: number of images/cameras in the scene.\n \"\"\"\n self._cameras: Dict[int, PinholeCameraCal3Bundler] = {}\n self._tracks: List[SfmTrack] = []\n self._number_images = number_images\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Checks equality with the other object.\"\"\"\n\n if not isinstance(other, GtsfmData):\n return False\n\n if self._number_images != other.number_images():\n return False\n\n for i, cam in self._cameras.items():\n other_cam = other.get_camera(i)\n if not cam.equals(other_cam, EQUALITY_TOLERANCE):\n return False\n\n for j in range(self.number_tracks()):\n track = self.get_track(j)\n other_track = other.get_track(j)\n\n if track.number_measurements() != other_track.number_measurements():\n return False\n\n for k in range(track.number_measurements()):\n i, uv = track.measurement(k)\n other_i, other_uv = other_track.measurement(k)\n\n if i != other_i:\n return False\n if not np.allclose(uv, other_uv):\n return False\n\n return True\n\n def number_images(self) -> int:\n \"\"\"Getter for the number of images.\n\n Returns:\n Number of images.\n \"\"\"\n return self._number_images\n\n def number_tracks(self) -> int:\n \"\"\"Getter for the number of tracks.\n\n Returns:\n Number of tracks.\n \"\"\"\n return len(self._tracks)\n\n def get_valid_camera_indices(self) -> List[int]:\n \"\"\"Getter for image indices where there is a valid (not None) camera.\n\n Returns:\n List of indices with a valid camera.\n \"\"\"\n return list(self._cameras.keys())\n\n def get_camera(self, index: int) -> Optional[PinholeCameraCal3Bundler]:\n \"\"\"Getter for camera.\n\n Args:\n index: the image index to fetch the camera for.\n\n Returns:\n The camera if it is a valid one, None otherwise.\n \"\"\"\n return self._cameras.get(index)\n\n def get_camera_poses(self) -> List[Optional[Pose3]]:\n \"\"\"Getter for camera poses wTi.\n\n This function returns the pose for all cameras (equal to number_images in GtsfmData), even if they were not\n computed by the pipeline.\n\n Returns:\n camera poses as a list, each representing wTi\n \"\"\"\n cameras = [self.get_camera(i) for i in range(self.number_images())]\n poses = [camera.pose() if camera is not None else None for camera in cameras]\n\n return poses\n\n def get_track(self, index: int) -> SfmTrack:\n \"\"\"Getter for the track.\n\n Args:\n index: track index to fetch.\n\n Returns:\n Requested track.\n \"\"\"\n return self._tracks[index]\n\n def add_track(self, track: SfmTrack) -> bool:\n \"\"\"Add a track, after checking if all the cameras in the track are 
already added.\n\n Args:\n track: track to add.\n\n Returns:\n Flag indicating the success of adding operation.\n \"\"\"\n # check if all cameras are already added\n for j in range(track.number_measurements()):\n i, _ = track.measurement(j)\n\n if i not in self._cameras:\n return False\n\n self._tracks.append(track)\n return True\n\n def add_camera(self, index: int, camera: PinholeCameraCal3Bundler) -> None:\n \"\"\"Adds a camera.\n\n Args:\n index: the index associated with this camera.\n camera: camera object to it.\n\n Raises:\n ValueError: if the camera to be added is not a valid camera object.\n \"\"\"\n if camera is None:\n raise ValueError(\"Camera cannot be None, should be a valid camera\")\n self._cameras[index] = camera\n\n def get_track_length_statistics(self) -> Tuple[float, float]:\n \"\"\"Compute mean and median lengths of all the tracks.\n\n Returns:\n Mean track length.\n Median track length.\n \"\"\"\n if self.number_tracks() == 0:\n return 0, 0\n\n track_lengths = self.get_track_lengths()\n return np.mean(track_lengths), np.median(track_lengths)\n\n def get_track_lengths(self) -> np.ndarray:\n \"\"\"Get an array containing the lengths of all tracks.\n\n Returns:\n Array containing all track lengths.\n \"\"\"\n if self.number_tracks() == 0:\n return np.array([], dtype=np.uint32)\n\n track_lengths = [self.get_track(j).number_measurements() for j in range(self.number_tracks())]\n return np.array(track_lengths, dtype=np.uint32)\n\n def select_largest_connected_component(self) -> \"GtsfmData\":\n \"\"\"Selects the subset of data belonging to the largest connected component of the graph where the edges are\n between cameras which feature in the same track.\n\n Returns:\n New GtSfmData object with the subset of tracks and cameras.\n \"\"\"\n camera_edges = []\n for sfm_track in self._tracks:\n cameras_in_use = []\n for m_idx in range(sfm_track.number_measurements()):\n i, _ = sfm_track.measurement(m_idx)\n cameras_in_use.append(i)\n\n # Recreate track connectivity from track information\n # For example: a track has cameras [0, 2, 5]. 
In that case we will add pairs (0, 2), (0, 5), (2, 5)\n camera_edges += list(itertools.combinations(cameras_in_use, 2))\n\n if len(camera_edges) == 0:\n return GtsfmData(self._number_images)\n\n cameras_in_largest_cc = graph_utils.get_nodes_in_largest_connected_component(camera_edges)\n logger.info(\n \"Largest connected component contains {} of {} cameras returned by front-end (of {} total imgs)\".format(\n len(cameras_in_largest_cc), len(self.get_valid_camera_indices()), self._number_images\n )\n )\n return GtsfmData.from_selected_cameras(self, cameras_in_largest_cc)\n\n @classmethod\n def from_selected_cameras(cls, gtsfm_data: \"GtsfmData\", camera_indices: List[int]) -> \"GtsfmData\":\n \"\"\"Selects the cameras in the input list and the tracks associated with those cameras.\n\n Args:\n gtsfm_data: data to pick the cameras from.\n camera_indices: camera indices to select and keep in the new data.\n\n Returns:\n New object with the selected cameras and associated tracks.\n \"\"\"\n new_data = cls(gtsfm_data.number_images())\n\n for i in gtsfm_data.get_valid_camera_indices():\n if i in camera_indices:\n new_data.add_camera(i, gtsfm_data.get_camera(i))\n\n new_camera_indices = new_data.get_valid_camera_indices()\n\n # add tracks which have all the camera present in new data\n for j in range(gtsfm_data.number_tracks()):\n track = gtsfm_data.get_track(j)\n is_valid = True\n for k in range(track.number_measurements()):\n i, _ = track.measurement(k)\n if i not in new_camera_indices:\n is_valid = False\n break\n if is_valid:\n new_data.add_track(track)\n\n return new_data\n\n def get_scene_reprojection_errors(self) -> np.ndarray:\n \"\"\"Get the scene reprojection errors for all 3D points and all associated measurements.\n\n Returns:\n Reprojection errors as a 1D numpy array.\n \"\"\"\n scene_reproj_errors: List[float] = []\n for track in self._tracks:\n track_errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track)\n scene_reproj_errors.extend(track_errors)\n\n return np.array(scene_reproj_errors)\n\n\n def aggregate_metrics(self) -> Dict[str, Any]:\n \"\"\"Aggregate metrics about the reprojection errors and 3d track lengths (summary stats).\n\n Args:\n ba_data: bundle adjustment result\n\n Returns:\n dictionary containing metrics of bundle adjustment result\n \"\"\"\n track_lengths_3d = self.get_track_lengths()\n scene_reproj_errors = self.get_scene_reprojection_errors()\n\n convert_to_rounded_float = lambda x: float(np.round(x, 3))\n\n stats_dict = {}\n stats_dict[\"number_tracks\"] = self.number_tracks()\n stats_dict[\"3d_track_lengths\"] = {\n \"min\": convert_to_rounded_float(track_lengths_3d.min()),\n \"mean\": convert_to_rounded_float(np.mean(track_lengths_3d)),\n \"median\": convert_to_rounded_float(np.median(track_lengths_3d)),\n \"max\": convert_to_rounded_float(track_lengths_3d.max()),\n }\n stats_dict[\"reprojection_errors\"] = {\n \"min\": convert_to_rounded_float(np.min(scene_reproj_errors)),\n \"mean\": convert_to_rounded_float(np.mean(scene_reproj_errors)),\n \"median\": convert_to_rounded_float(np.median(scene_reproj_errors)),\n \"max\": convert_to_rounded_float(np.max(scene_reproj_errors)),\n }\n return stats_dict\n\n def get_avg_scene_reprojection_error(self) -> float:\n \"\"\"Get average reprojection error for all 3d points in the entire scene\n\n Returns:\n Average of reprojection errors for every 3d point to its 2d measurements\n \"\"\"\n scene_reproj_errors = self.get_scene_reprojection_errors()\n scene_avg_reproj_error = 
np.mean(scene_reproj_errors)\n return scene_avg_reproj_error\n\n def log_scene_reprojection_error_stats(self) -> None:\n \"\"\"Logs reprojection error stats for all 3d points in the entire scene.\"\"\"\n scene_reproj_errors = self.get_scene_reprojection_errors()\n logger.info(\"Min scene reproj error: %.3f\", np.min(scene_reproj_errors))\n logger.info(\"Avg scene reproj error: %.3f\", np.mean(scene_reproj_errors))\n logger.info(\"Median scene reproj error: %.3f\", np.median(scene_reproj_errors))\n logger.info(\"Max scene reproj error: %.3f\", np.max(scene_reproj_errors))\n\n def __validate_track(self, track: SfmTrack, reproj_err_thresh: float) -> bool:\n \"\"\"Validates a track based on reprojection errors and cheirality checks.\n\n Args:\n track: track with 3D landmark and measurements.\n reproj_err_thresh: reprojection err threshold for each measurement.\n\n Returns:\n validity of the track.\n \"\"\"\n errors, avg_reproj_error = reproj_utils.compute_track_reprojection_errors(self._cameras, track)\n # track is valid as all measurements have error below the threshold\n cheirality_success = np.all(~np.isnan(errors))\n return np.all(errors < reproj_err_thresh) and cheirality_success\n\n def filter_landmarks(self, reproj_err_thresh: float = 5) -> \"GtsfmData\":\n \"\"\"Filters out landmarks with high reprojection error\n\n Args:\n reproj_err_thresh: reprojection err threshold for each measurement.\n \"\"\"\n # TODO: move this function to utils or GTSAM\n filtered_data = GtsfmData(self.number_images())\n\n # add all the cameras\n for i in self.get_valid_camera_indices():\n filtered_data.add_camera(i, self.get_camera(i))\n\n for j in range(self.number_tracks()):\n track = self.get_track(j)\n\n if self.__validate_track(track, reproj_err_thresh):\n filtered_data.add_track(track)\n\n return filtered_data\n",
"\"\"\"Unit tests for comparison functions for geometry types.\n\nAuthors: Ayush Baid\n\"\"\"\nimport unittest\nfrom typing import List\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom gtsam import Cal3_S2, Point3, Pose3, Rot3, Similarity3, Unit3\nfrom gtsam.examples import SFMdata\n\nimport gtsfm.utils.geometry_comparisons as geometry_comparisons\nimport tests.data.sample_poses as sample_poses\n\nPOSE_LIST = SFMdata.createPoses(Cal3_S2())\n\nROT3_EULER_ANGLE_ERROR_THRESHOLD = 1e-2\nPOINT3_RELATIVE_ERROR_THRESH = 1e-1\nPOINT3_ABS_ERROR_THRESH = 1e-2\n\n\ndef rot3_compare(R: Rot3, R_: Rot3, msg=None) -> bool:\n return np.allclose(R.xyz(), R_.xyz(), atol=1e-2)\n\n\ndef point3_compare(t: Point3, t_: Point3, msg=None) -> bool:\n return np.allclose(t, t_, rtol=POINT3_RELATIVE_ERROR_THRESH, atol=POINT3_ABS_ERROR_THRESH)\n\n\nclass TestGeometryComparisons(unittest.TestCase):\n \"\"\"Unit tests for comparison functions for geometry types.\"\"\"\n\n def __assert_equality_on_rot3s(self, computed: List[Rot3], expected: List[Rot3]) -> None:\n\n self.assertEqual(len(computed), len(expected))\n\n for R, R_ in zip(computed, expected):\n self.assertEqual(R, R_)\n\n def __assert_equality_on_point3s(self, computed: List[Point3], expected: List[Point3]) -> None:\n\n self.assertEqual(len(computed), len(expected))\n\n for t, t_ in zip(computed, expected):\n np.testing.assert_allclose(t, t_, rtol=POINT3_RELATIVE_ERROR_THRESH, atol=POINT3_ABS_ERROR_THRESH)\n\n def __assert_equality_on_pose3s(self, computed: List[Pose3], expected: List[Pose3]) -> None:\n\n self.assertEqual(len(computed), len(expected))\n\n computed_rot3s = [x.rotation() for x in computed]\n computed_point3s = [x.translation() for x in computed]\n expected_rot3s = [x.rotation() for x in expected]\n expected_point3s = [x.translation() for x in expected]\n\n self.__assert_equality_on_rot3s(computed_rot3s, expected_rot3s)\n self.__assert_equality_on_point3s(computed_point3s, expected_point3s)\n\n def setUp(self):\n super().setUp()\n\n self.addTypeEqualityFunc(Rot3, rot3_compare)\n self.addTypeEqualityFunc(Point3, point3_compare)\n\n def test_align_rotations(self):\n \"\"\"Tests the alignment of rotations.\"\"\"\n\n # using rotation along just the Y-axis so that angles can be linearly added.\n input_list = [\n Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(-10), 0),\n Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(30), 0),\n ]\n ref_list = [\n Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(80), 0),\n Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(-40), 0),\n ]\n\n computed = geometry_comparisons.align_rotations(input_list, ref_list)\n expected = [\n Rot3.RzRyRx(0, np.deg2rad(80), 0),\n Rot3.RzRyRx(0, np.deg2rad(120), 0),\n ]\n\n self.__assert_equality_on_rot3s(computed, expected)\n\n def test_align_poses_after_sim3_transform(self):\n \"\"\"Test for alignment of poses after applying a SIM3 transformation.\"\"\"\n\n translation_shift = np.array([5, 10, -5])\n rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))\n scaling_factor = 0.7\n\n transform = Similarity3(rotation_shift, translation_shift, scaling_factor)\n ref_list = [transform.transformFrom(x) for x in sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES]\n\n computed_poses = geometry_comparisons.align_poses_sim3(sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES, ref_list)\n self.__assert_equality_on_pose3s(computed_poses, sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES)\n\n def test_align_poses_on_panorama_after_sim3_transform(self):\n \"\"\"Test for alignment of poses after applying a forward motion transformation.\"\"\"\n\n 
translation_shift = np.array([0, 5, 0])\n rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))\n scaling_factor = 1.0\n\n aTi_list = sample_poses.PANORAMA_GLOBAL_POSES\n bSa = Similarity3(rotation_shift, translation_shift, scaling_factor)\n bTi_list = [bSa.transformFrom(x) for x in aTi_list]\n\n aTi_list_ = geometry_comparisons.align_poses_sim3(aTi_list, bTi_list)\n self.__assert_equality_on_pose3s(aTi_list_, aTi_list)\n\n @patch(\n \"gtsfm.utils.geometry_comparisons.align_rotations\",\n return_value=[\n Rot3.RzRyRx(0, np.deg2rad(32), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-22)),\n Rot3.RzRyRx(0, 0, np.deg2rad(83)),\n ], # compared with aRi_list\n )\n def test_compare_rotations_with_all_valid_rot3s_success(self, align_rotations_mocked):\n \"\"\"Tests the comparison results on list of rotations.\"\"\"\n\n aRi_list = [\n Rot3.RzRyRx(0, np.deg2rad(25), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-20)),\n Rot3.RzRyRx(0, 0, np.deg2rad(80)),\n ]\n bRi_list = [\n Rot3.RzRyRx(0, np.deg2rad(31), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-22)),\n Rot3.RzRyRx(0, 0, np.deg2rad(77.5)),\n ] # meaningless as align function is mocked\n\n # test with threshold of 10 degrees, which satisfies all the rotations.\n self.assertTrue(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 10))\n align_rotations_mocked.assert_called_once()\n\n @patch(\n \"gtsfm.utils.geometry_comparisons.align_rotations\",\n return_value=[\n Rot3.RzRyRx(0, np.deg2rad(32), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-22)),\n Rot3.RzRyRx(0, 0, np.deg2rad(83)),\n ], # compared with aRi_list\n )\n def test_compare_rotations_with_all_valid_rot3s_failure(self, align_rotations_mocked):\n \"\"\"Tests the comparison results on list of rotations.\"\"\"\n\n aRi_list = [\n Rot3.RzRyRx(0, np.deg2rad(25), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-20)),\n Rot3.RzRyRx(0, 0, np.deg2rad(80)),\n ]\n bRi_list = [\n Rot3.RzRyRx(0, np.deg2rad(31), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-22)),\n Rot3.RzRyRx(0, 0, np.deg2rad(77.5)),\n ] # meaningless as align function is mocked\n\n # test with threshold of 5 degrees, which fails one rotation and hence the overall comparison\n self.assertFalse(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 5))\n align_rotations_mocked.assert_called_once()\n\n @patch(\n \"gtsfm.utils.geometry_comparisons.align_rotations\",\n return_value=[Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad(-20))], # compared with aRi_list\n )\n def test_compare_rotations_with_nones_at_same_indices(self, align_rotations_mocked):\n \"\"\"Tests the comparison results on list of rotations.\"\"\"\n\n list1 = [\n Rot3.RzRyRx(0, np.deg2rad(25), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-20)),\n None,\n ]\n list2 = [\n Rot3.RzRyRx(0, np.deg2rad(31), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-22)),\n None,\n ]\n threshold_degrees = 10\n\n # test with threshold of 10 degrees, which satisfies all the rotations.\n self.assertTrue(geometry_comparisons.compare_rotations(list1, list2, threshold_degrees))\n align_rotations_mocked.assert_called_once()\n\n @patch(\"gtsfm.utils.geometry_comparisons.align_rotations\", return_value=None)\n def test_compare_rotations_with_nones_at_different_indices(self, aligned_rotations_mocked):\n \"\"\"Tests the comparison results on list of rotations.\"\"\"\n\n list1 = [\n Rot3.RzRyRx(0, np.deg2rad(25), 0),\n Rot3.RzRyRx(0, 0, np.deg2rad(-20)),\n None,\n ]\n list2 = [\n Rot3.RzRyRx(0, np.deg2rad(31), 0),\n None,\n Rot3.RzRyRx(0, 0, np.deg2rad(-22)),\n ]\n\n # test with threshold of 10 degrees, which satisfies all the rotations.\n 
self.assertFalse(geometry_comparisons.compare_rotations(list1, list2, 10))\n aligned_rotations_mocked.assert_not_called()\n\n def test_compute_relative_rotation_angle(self):\n \"\"\"Tests the relative angle between two rotations.\"\"\"\n\n R_1 = Rot3.RzRyRx(0, np.deg2rad(45), np.deg2rad(22.5))\n R_2 = Rot3.RzRyRx(0, np.deg2rad(90), np.deg2rad(22.5))\n\n # returns angle in degrees\n computed_deg = geometry_comparisons.compute_relative_rotation_angle(R_1, R_2)\n expected_deg = 45\n\n np.testing.assert_allclose(computed_deg, expected_deg, rtol=1e-3, atol=1e-3)\n\n def test_compute_relative_unit_translation_angle(self):\n \"\"\"Tests the relative angle between two unit-translations.\"\"\"\n\n U_1 = Unit3(np.array([1, 0, 0]))\n U_2 = Unit3(np.array([0.5, 0.5, 0]))\n\n # returns angle in degrees\n computed_deg = geometry_comparisons.compute_relative_unit_translation_angle(U_1, U_2)\n expected_deg = 45\n\n self.assertAlmostEqual(computed_deg, expected_deg, places=3)\n\n def test_compute_translation_to_direction_angle_is_zero(self):\n i2Ui1_measured = Unit3(Point3(1, 0, 0))\n wTi2_estimated = Pose3(Rot3(), Point3(0, 0, 0))\n wTi1_estimated = Pose3(Rot3(), Point3(2, 0, 0))\n self.assertEqual(\n geometry_comparisons.compute_translation_to_direction_angle(i2Ui1_measured, wTi2_estimated, wTi1_estimated),\n 0.0,\n )\n\n def test_compute_translation_to_direction_angle_is_nonzero(self):\n rz = np.deg2rad(90)\n wRi2 = Rot3.RzRyRx(0, 0, rz) # x-axis of i2 points along y in world frame\n wTi2_estimated = Pose3(wRi2, Point3(0, 0, 0))\n wTi1_estimated = Pose3(Rot3(), Point3(-1, 0, 0)) # At (0, 1, 0) in i2 frame, rotation of i1 is irrelevant here.\n i2Ui1_measured = Unit3(Point3(1, 0, 0))\n # Estimated relative translation of i1 in i2 frame is (0, 1, 0), and the measurement in i2 frame is (1, 0, 0).\n # Expected angle between the two is 90 degrees.\n self.assertAlmostEqual(\n geometry_comparisons.compute_translation_to_direction_angle(i2Ui1_measured, wTi2_estimated, wTi1_estimated),\n 90.0,\n places=3,\n )\n\n def test_compute_points_distance_l2_is_zero(self):\n self.assertEqual(\n geometry_comparisons.compute_points_distance_l2(wti1=Point3(1, -2, 3), wti2=Point3(1, -2, 3)), 0.0\n )\n\n def test_compute_points_distance_l2_is_none(self):\n self.assertEqual(geometry_comparisons.compute_points_distance_l2(wti1=Point3(0, 0, 0), wti2=None), None)\n\n def test_compute_points_distance_l2_is_nonzero(self):\n wti1 = Point3(1, 1, 1)\n wti2 = Point3(1, 1, -1)\n self.assertEqual(geometry_comparisons.compute_points_distance_l2(wti1, wti2), 2)\n\n def test_align_poses_sim3_ignore_missing(self):\n \"\"\"Consider a simple case with 4 poses in a line. 
Suppose SfM only recovers 2 of the 4 poses.\"\"\"\n wT0 = Pose3(Rot3(np.eye(3)), np.zeros(3))\n wT1 = Pose3(Rot3(np.eye(3)), np.ones(3))\n wT2 = Pose3(Rot3(np.eye(3)), np.ones(3) * 2)\n wT3 = Pose3(Rot3(np.eye(3)), np.ones(3) * 3)\n\n # `a` frame is the target/reference frame\n aTi_list = [wT0, wT1, wT2, wT3]\n # `b` frame contains the estimates\n bTi_list = [None, wT1, None, wT3]\n aTi_list_ = geometry_comparisons.align_poses_sim3_ignore_missing(aTi_list, bTi_list)\n\n # indices 0 and 2 should still have no estimated pose, even after alignment\n assert aTi_list_[0] is None\n assert aTi_list_[2] is None\n\n # identity alignment should preserve poses, should still match GT/targets at indices 1 and 3\n self.__assert_equality_on_pose3s(computed=[aTi_list_[1], aTi_list_[3]], expected=[aTi_list[1], aTi_list[3]])\n\n\ndef test_get_points_within_radius_of_cameras():\n \"\"\"Verify that points that fall outside of 10 meter radius of two camera poses.\n\n Cameras are placed at (0,0,0) and (10,0,0).\n \"\"\"\n wTi0 = Pose3(Rot3(), np.zeros(3))\n wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))\n wTi_list = [wTi0, wTi1]\n points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0], [25, 0, 0]])\n radius = 10.0\n nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)\n\n expected_nearby_points_3d = np.array([[-5, 0, 0], [15, 0, 0]])\n np.testing.assert_allclose(nearby_points_3d, expected_nearby_points_3d)\n\n\ndef test_get_points_within_radius_of_cameras_negative_radius():\n \"\"\"Catch degenerate input.\"\"\"\n wTi0 = Pose3(Rot3(), np.zeros(3))\n wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))\n wTi_list = [wTi0, wTi1]\n points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0], [25, 0, 0]])\n radius = -5\n nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)\n assert nearby_points_3d is None, \"Non-positive radius is not allowed\"\n\n\ndef test_get_points_within_radius_of_cameras_no_points():\n \"\"\"Catch degenerate input.\"\"\"\n\n wTi0 = Pose3(Rot3(), np.zeros(3))\n wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))\n wTi_list = [wTi0, wTi1]\n points_3d = np.zeros((0, 3))\n radius = 10.0\n\n nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)\n assert nearby_points_3d is None, \"At least one 3d point must be provided\"\n\n\ndef test_get_points_within_radius_of_cameras_no_poses():\n \"\"\"Catch degenerate input.\"\"\"\n wTi_list = []\n points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0], [25, 0, 0]])\n radius = 10.0\n\n nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)\n assert nearby_points_3d is None, \"At least one camera pose must be provided\"\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.allclose",
"numpy.min",
"numpy.isnan",
"numpy.median",
"numpy.all",
"numpy.max",
"numpy.round",
"numpy.mean",
"numpy.array"
],
[
"numpy.allclose",
"numpy.eye",
"numpy.ones",
"numpy.deg2rad",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
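The relative-rotation checks in the gtsfm test file above reduce to the trace identity trace(R) = 1 + 2*cos(theta). A minimal numpy-only sketch of that quantity (an illustration of the math under test, not gtsfm's implementation; rot_y and relative_rotation_angle_deg are hypothetical helpers):

import numpy as np

def rot_y(deg):
    # 3x3 rotation about the Y axis
    c, s = np.cos(np.deg2rad(deg)), np.sin(np.deg2rad(deg))
    return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])

def relative_rotation_angle_deg(R1, R2):
    # angle of the relative rotation R1^T @ R2, clipped for numerical safety
    cos_theta = np.clip((np.trace(R1.T @ R2) - 1.0) / 2.0, -1.0, 1.0)
    return np.rad2deg(np.arccos(cos_theta))

# mirrors test_compute_relative_rotation_angle: rotations of 45 and 90
# degrees about Y differ by 45 degrees
assert np.isclose(relative_rotation_angle_deg(rot_y(45), rot_y(90)), 45.0)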
EmergentSystemLabStudent/Prosodic-DAA | [
"068af5db337ed977c059e788353414d3aa9a8ac8"
] | [
"prosodic_daa/sample/pyhlm_sample_murakami.py"
] | [
"import os\nimport numpy as np\nfrom pyhlm.model import WeakLimitHDPHLM, WeakLimitHDPHLMPython\nfrom pyhlm.internals.hlm_states import WeakLimitHDPHLMStates\nfrom pyhlm.word_model import LetterHSMM, LetterHSMMPython\nimport pyhsmm\nimport warnings\nfrom tqdm import trange\nwarnings.filterwarnings('ignore')\nimport time\n\n#%%\ndef load_datas(dataset_dir):\n data = []\n names = np.loadtxt(dataset_dir + \"files.txt\", dtype=str)\n files = names\n for name in names:\n mfcc = np.loadtxt(dataset_dir + \"DATA/\" + name + \".txt\")\n delta = np.loadtxt(dataset_dir + \"DATA/\" + name + \"_d.txt\")\n delta_delta = np.loadtxt(dataset_dir + \"DATA/\" + name + \"_dd.txt\")\n data.append(np.hstack((mfcc, np.hstack((delta,delta_delta)))))\n return data\n\ndef unpack_durations(dur):\n unpacked = np.zeros(dur.sum())\n d = np.cumsum(dur[:-1])\n unpacked[d-1] = 1.0\n return unpacked\n\ndef save_stateseq(model, dataset_dir):\n # Save sampled states sequences.\n names = np.loadtxt(dataset_dir + \"files.txt\", dtype=str)\n for i, s in enumerate(model.states_list):\n with open(\"results/\" + names[i] + \"_s.txt\", \"a\") as f:\n np.savetxt(f, s.stateseq)\n with open(\"results/\" + names[i] + \"_l.txt\", \"a\") as f:\n np.savetxt(f, s.letter_stateseq)\n with open(\"results/\" + names[i] + \"_d.txt\", \"a\") as f:\n np.savetxt(f, unpack_durations(s.durations_censored))\n\ndef save_params(itr_idx, model):\n with open(\"parameters/ITR_{0:04d}.txt\".format(itr_idx), \"w\") as f:\n f.write(str(model.params))\n\ndef save_loglikelihood(model):\n with open(\"summary_files/log_likelihood.txt\", \"a\") as f:\n f.write(str(model.log_likelihood()) + \"\\n\")\n\ndef save_resample_times(resample_time):\n with open(\"summary_files/resample_times.txt\", \"a\") as f:\n f.write(str(resample_time) + \"\\n\")\n\n\n#%%\nif not os.path.exists('results'):\n os.mkdir('results')\n\nif not os.path.exists('parameters'):\n os.mkdir('parameters')\n\nif not os.path.exists('summary_files'):\n os.mkdir('summary_files')\n\n#%%\ndataset_dir = \"murakami_dataset/\"\n\n#%%\nthread_num = 64\npre_train_iter = 1\ntrain_iter = 100\ntrunc = 120\nobs_dim = 9\nletter_upper = 50\nword_upper = 50\nmodel_hypparams = {'num_states': word_upper, 'alpha': 10, 'gamma': 10, 'init_state_concentration': 10}\nobs_hypparams = {\n 'mu_0':np.zeros(obs_dim),\n 'sigma_0':np.identity(obs_dim),\n 'kappa_0':0.01,\n 'nu_0':obs_dim+2\n}\ndur_hypparams = {\n 'alpha_0':200,\n 'beta_0':10\n}\n\n#%%\nletter_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(letter_upper)]\nletter_dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(letter_upper)]\ndur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for state in range(word_upper)]\nlength_distn = pyhsmm.distributions.PoissonDuration(alpha_0=30, beta_0=10, lmbda=3)\n\n#%%\nletter_hsmm = LetterHSMM(alpha=10, gamma=10, init_state_concentration=10, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)\nmodel = WeakLimitHDPHLM(model_hypparams, letter_hsmm, dur_distns, length_distn)\n\n#%%\nfiles = np.loadtxt(dataset_dir + \"files.txt\", dtype=str)\ndatas = load_datas(dataset_dir)\n\n#%% Pre training.\nfor d in datas:\n letter_hsmm.add_data(d, trunc=trunc)\nfor t in trange(pre_train_iter):\n letter_hsmm.resample_model(num_procs=1)\nletter_hsmm.states_list = []\n\n#%%\nprint(\"Add datas...\")\nfor d in datas:\n model.add_data(d, trunc=trunc, generate=False)\nmodel.resample_states(num_procs=thread_num)\n# # or\n# for d in datas:\n# model.add_data(d, trunc=trunc, 
initialize_from_prior=False)\nprint(\"Done!\")\n\n#%% Save init params and hyper params\nwith open(\"parameters/hypparams.txt\", \"w\") as f:\n f.write(str(model.hypparams))\nsave_params(0, model)\nsave_loglikelihood(model)\n\n#%%\nfor t in trange(train_iter):\n st = time.time()\n model.resample_model(num_procs=thread_num)\n resample_model_time = time.time() - st\n save_stateseq(model, dataset_dir)\n save_loglikelihood(model)\n save_params(t+1, model)\n save_resample_times(resample_model_time)\n print(model.word_list)\n print(model.word_counts())\n print(\"log_likelihood:{}\".format(model.log_likelihood()))\n print(\"resample_model:{}\".format(resample_model_time))\n"
] | [
[
"numpy.hstack",
"numpy.cumsum",
"numpy.identity",
"numpy.savetxt",
"numpy.zeros",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
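The unpack_durations helper in the sampling script above is easy to sanity-check in isolation; a short sketch with a toy duration vector, where the expected output follows directly from the cumulative-sum logic:

import numpy as np

def unpack_durations(dur):
    # mark the last frame of every segment except the final one with a 1
    unpacked = np.zeros(dur.sum())
    d = np.cumsum(dur[:-1])
    unpacked[d - 1] = 1.0
    return unpacked

print(unpack_durations(np.array([3, 2, 4])))
# [0. 0. 1. 0. 1. 0. 0. 0. 0.]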
trojanjay/sfa-numpy | [
"bff5737ef429f31228d20a9e1d0ce7d46d3080d3",
"bff5737ef429f31228d20a9e1d0ce7d46d3080d3"
] | [
"examples/modal_beamforming_open_circular_array.py",
"micarray/util.py"
] | [
"\"\"\"\n Compute the plane wave decomposition for an incident broadband plane wave\n on an open circular array using a modal beamformer of finite order.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport micarray\nfrom micarray.util import db\n\nNsf = 50 # order of the incident sound field\nN = 30 # order of modal beamformer/microphone array\npw_angle = 1.23 * np.pi # incidence angle of plane wave\npol_pwd = np.linspace(0, 2*np.pi, 180, endpoint=False) # angles for plane wave decomposition\nk = np.linspace(0, 20, 100) # wavenumber vector\nr = 1 # radius of array\n\n# get uniform grid (microphone positions) of order N\npol, weights = micarray.modal.angular.grid_equal_polar_angle(N)\n\n# pressure on the surface of an open cylinder for an incident plane wave\nBn = micarray.modal.radial.circular_pw(Nsf, k, r, setup='open')\nD = micarray.modal.radial.circ_diagonal_mode_mat(Bn)\nPsi_p = micarray.modal.angular.cht_matrix(Nsf, pol)\nPsi_pw = micarray.modal.angular.cht_matrix(Nsf, pw_angle)\np = np.matmul(np.matmul(Psi_p, D), np.conj(Psi_pw.T))\np = np.squeeze(p)\n\n# incident plane wave exhibiting infinite spatial bandwidth\n# p = np.exp(1j * k[:, np.newaxis]*r * np.cos(pol - pw_angle))\n\n# plane wave decomposition using modal beamforming\nBn = micarray.modal.radial.circular_pw(N, k, r, setup='open')\nDn, _ = micarray.modal.radial.regularize(1/Bn, 3000, 'softclip')\nD = micarray.modal.radial.circ_diagonal_mode_mat(Dn)\nPsi_p = micarray.modal.angular.cht_matrix(N, pol, weights)\nPsi_q = micarray.modal.angular.cht_matrix(N, pol_pwd)\nA_pwd = np.matmul(np.matmul(Psi_q, D), np.conj(Psi_p.T))\nq_pwd = np.squeeze(np.matmul(A_pwd, np.expand_dims(p, 2)))\nq_pwd_t = np.fft.fftshift(np.fft.irfft(q_pwd, axis=0), axes=0)\n\n# visualize plane wave decomposition (aka beampattern)\nplt.figure()\nplt.pcolormesh(k, pol_pwd/np.pi, db(q_pwd.T), vmin=-40)\nplt.colorbar()\nplt.xlabel(r'$kr$')\nplt.ylabel(r'$\\phi / \\pi$')\nplt.title('Plane wave docomposition by modal beamformer (frequency domain)')\nplt.savefig('modal_beamforming_open_circular_array_fd.png')\n\nplt.figure()\nplt.pcolormesh(range(2*len(k)-2), pol_pwd/np.pi, db(q_pwd_t.T), vmin=-40)\nplt.colorbar()\nplt.ylabel(r'$\\phi / \\pi$')\nplt.title('Plane wave docomposition by modal beamformer (time domain)')\nplt.savefig('modal_beamforming_open_circular_array_td.png')\n",
"import numpy as np\nfrom scipy import linalg\n\n\ndef norm_of_columns(A, p=2):\n \"\"\"Vector p-norm of each column of a matrix.\n\n Parameters\n ----------\n A : array_like\n Input matrix.\n p : int, optional\n p-th norm.\n\n Returns\n -------\n array_like\n p-norm of each column of A.\n \"\"\"\n _, N = A.shape\n return np.asarray([linalg.norm(A[:, j], ord=p) for j in range(N)])\n\n\ndef coherence_of_columns(A):\n \"\"\"Mutual coherence of columns of A.\n\n Parameters\n ----------\n A : array_like\n Input matrix.\n p : int, optional\n p-th norm.\n\n Returns\n -------\n array_like\n Mutual coherence of columns of A.\n \"\"\"\n A = np.asmatrix(A)\n _, N = A.shape\n A = A * np.asmatrix(np.diag(1/norm_of_columns(A)))\n Gram_A = A.H*A\n for j in range(N):\n Gram_A[j, j] = 0\n return np.max(np.abs(Gram_A))\n\n\ndef asarray_1d(a, **kwargs):\n \"\"\"Squeeze the input and check if the result is one-dimensional.\n\n Returns *a* converted to a `numpy.ndarray` and stripped of\n all singleton dimensions. Scalars are \"upgraded\" to 1D arrays.\n The result must have exactly one dimension.\n If not, an error is raised.\n\n \"\"\"\n result = np.squeeze(np.asarray(a, **kwargs))\n if result.ndim == 0:\n result = result.reshape((1,))\n elif result.ndim > 1:\n raise ValueError(\"array must be one-dimensional\")\n return result\n\n\ndef matdiagmul(A, b):\n \"\"\"Efficient multiplication of matrix and diagonal matrix .\n\n Returns the multiplication of a matrix *A* and a diagonal matrix. The\n diagonal matrix is given by the vector *b* containing its elements on\n the main diagonal. If *b* is a matrix, it is treated as a stack of vectors\n residing in the last index and broadcast accordingly.\n\n Parameters\n ----------\n A : array_like\n Input matrix.\n b : array_like\n Main diagonal elements or stack of main diagonal elements.\n\n Returns\n -------\n array_like\n Result of matrix multiplication.\n \"\"\"\n if len(b.shape) == 1:\n b = b[np.newaxis, :]\n K, N = b.shape\n M, N = A.shape\n\n C = np.zeros([K, M, N], dtype=A.dtype)\n for k in range(K):\n C[k, :, :] = A * b[k, :]\n return C\n\n\ndef db(x, power=False):\n \"\"\"Convert *x* to decibel.\n\n Parameters\n ----------\n x : array_like\n Input data. Values of 0 lead to negative infinity.\n power : bool, optional\n If ``power=False`` (the default), *x* is squared before\n conversion.\n\n \"\"\"\n with np.errstate(divide='ignore'):\n return 10 if power else 20 * np.log10(np.abs(x))\n"
] | [
[
"numpy.expand_dims",
"numpy.fft.irfft",
"numpy.conj",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.squeeze",
"numpy.matmul",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
],
[
"numpy.abs",
"numpy.asarray",
"numpy.asmatrix",
"scipy.linalg.norm",
"numpy.errstate",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
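A quick check of the db helper from micarray/util.py; the parentheses around the conditional matter, because without them power=True would return the bare constant 10 instead of a level in dB:

import numpy as np

def db(x, power=False):
    # amplitude quantities use 20*log10, power quantities use 10*log10
    with np.errstate(divide='ignore'):
        return (10 if power else 20) * np.log10(np.abs(x))

print(db(0.1))               # -20.0 for an amplitude ratio of 0.1
print(db(0.01, power=True))  # -20.0 for a power ratio of 0.01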
uclyyu/over9000 | [
"9e2e0aa4be9da941372a21ea627c38a3eb7be617"
] | [
"ralamb.py"
] | [
"import torch, math\nfrom torch.optim.optimizer import Optimizer\n\n# RAdam + LARS\nclass Ralamb(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.buffer = [[None, None, None] for ind in range(10)]\n super(Ralamb, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(Ralamb, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Ralamb does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n # Decay the first and second moment running average coefficient\n # m_t\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n # v_t\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n\n if state['step'] == buffered[0]:\n N_sma, radam_step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n radam_step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n radam_step_size = 1.0 / (1 - beta1 ** state['step'])\n buffered[2] = radam_step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n radam_step = p_data_fp32.clone()\n if N_sma >= 5:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n radam_step.addcdiv_(-radam_step_size * group['lr'], exp_avg, denom)\n else:\n radam_step.add_(-radam_step_size * group['lr'], exp_avg)\n\n radam_norm = radam_step.pow(2).sum().sqrt()\n weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)\n if weight_norm == 0 or radam_norm == 0:\n trust_ratio = 1\n else:\n trust_ratio = weight_norm / radam_norm\n\n state['weight_norm'] = weight_norm\n state['adam_norm'] = radam_norm\n state['trust_ratio'] = trust_ratio\n\n if N_sma >= 5:\n p_data_fp32.addcdiv_(-radam_step_size * group['lr'] * trust_ratio, exp_avg, denom)\n else:\n p_data_fp32.add_(-radam_step_size * group['lr'] * trust_ratio, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n"
] | [
[
"torch.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
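A hypothetical usage sketch for the Ralamb optimizer defined above; the module name, model, and data are placeholder assumptions, and the sketch targets a PyTorch version contemporary with the deprecated add_(scalar, tensor) call form the optimizer uses:

import torch
from ralamb import Ralamb  # assumes the file above is importable as ralamb.py

model = torch.nn.Linear(10, 1)
optimizer = Ralamb(model.parameters(), lr=1e-3, weight_decay=1e-4)

x, y = torch.randn(32, 10), torch.randn(32, 1)
for _ in range(5):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()  # RAdam step rescaled by the layer-wise trust ratio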
micbia/tools21cm | [
"72081e94e4d83511380baacce427d79d13da2fa5"
] | [
"t2c/segmentation.py"
] | [
"\"\"\"\nCreated by Michele Bianco, 9 July 2021\n\"\"\"\n\nimport numpy as np, pkg_resources\nfrom tqdm import tqdm\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import backend as K\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.framework import ops \nfrom tensorflow.python.ops import array_ops \nfrom tensorflow.python.ops import math_ops \n\ndef sigmoid_balanced_cross_entropy_with_logits(_sentinel=None, labels=None, logits=None, beta=None, name=None):\n nn_ops._ensure_xent_args(\"sigmoid_cross_entropy_with_logits\", _sentinel,labels, logits)\n with ops.name_scope(name, \"logistic_loss\", [logits, labels]) as name: \n logits = ops.convert_to_tensor(logits, name=\"logits\") \n labels = ops.convert_to_tensor(labels, name=\"labels\") \n try:\n labels.get_shape().merge_with(logits.get_shape())\n except ValueError:\n raise ValueError(\"logits and labels must have the same shape (%s vs %s)\" %(logits.get_shape(), labels.get_shape())) \n zeros = array_ops.zeros_like(logits, dtype=logits.dtype) \n cond = (logits >= zeros) \n relu_logits = array_ops.where(cond, logits, zeros) \n neg_abs_logits = array_ops.where(cond, -logits, logits) \n balanced_cross_entropy = relu_logits*(1.-beta)-logits*labels*(1.-beta)+math_ops.log1p(math_ops.exp(neg_abs_logits))*((1.-beta)*(1.-labels)+beta*labels)\n return tf.reduce_mean(balanced_cross_entropy)\n\ndef balanced_cross_entropy(y_true, y_pred):\n \"\"\"\n To decrease the number of false negatives, set beta>1. To decrease the number of false positives, set beta<1.\n \"\"\"\n beta = tf.maximum(tf.reduce_mean(1 - y_true), tf.keras.backend.epsilon())\n y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())\n y_pred = K.log(y_pred / (1 - y_pred))\n return sigmoid_balanced_cross_entropy_with_logits(logits=y_pred, labels=y_true, beta=beta)\n\n\ndef iou(y_true, y_pred):\n \"\"\"\n Return the Intersection over Union (IoU) for a given label.\n Args:\n y_true: the expected y values as a one-hot\n y_pred: the predicted y values as a one-hot or softmax output\n label: the label to return the IoU for\n Returns:\n the IoU for the given label\n \"\"\"\n\n intersection = K.sum(K.abs(y_true * y_pred))\n #intersection = K.sum(y_true * y_pred)\n union = K.sum(y_true) + K.sum(y_pred) - intersection\n # avoid divide by zero - if the union is zero, return 1, otherwise, return the intersection over union\n return K.switch(K.equal(union, 0), 1.0, intersection / union)\n\n\ndef dice_coef(y_true, y_pred, smooth=1):\n \"\"\"\n Dice = (2*|X & Y|)/ (|X|+ |Y|)\n = 2*sum(|A*B|)/(sum(A^2)+sum(B^2))\n ref: https://arxiv.org/pdf/1606.04797v1.pdf\n \"\"\"\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)\n\n\n\n################################################################\n\nclass segunet21cm:\n def __init__(self, tta=1, verbose=False):\n \"\"\" SegU-Net: segmentation of 21cm images with U-shape network (Bianco et al. 
2021, https://arxiv.org/abs/2102.06713)\n - tta (int): default 0 (super-fast, no pixel-error map) implement the error map\n with time-test aumentated techique in the prediction process\n - verbose (bool): default False, activate verbosity\n \n Description:\n tta = 0 : fast (~7 sec), it tends to be a few percent less accurate (<2%) then the other two cases, no pixel-error map (no TTA manipulation)\n tta = 1 : medium (~17 sec), accurate and preferable than tta=0, with pixel-error map (3 samples)\n tta = 2 : slow (~10 min), accurate, with pixel-error map (~100 samples)\n \n Returns:\n - X_seg (ndarray) : recovered binary field (1 = neutral and 0 = ionized regions)\n - X_err (ndarray) : pixel-error map of the recovered binary field\n \n Example:\n $ from tools21cm import segmentation\n $ seg = segmentation.segunet21cm(tta=1, verbose=True) # load model (need to be done once)\n $ Xseg, Xseg_err = seg.prediction(x=dT3)\n\n Print of the Network's Configuration file:\n [TRAINING]\n BATCH_SIZE = 64\n AUGMENT = NOISESMT\n IMG_SHAPE = 128, 128\n CHAN_SIZE = 256\n DROPOUT = 0.05\n KERNEL_SIZE = 3\n EPOCHS = 100\n LOSS = balanced_cross_entropy\n METRICS = iou, dice_coef, binary_accuracy, binary_crossentropy\n LR = 1e-3\n RECOMP = False\n GPUS = 2\n PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/inputs/data2D_128_030920/\n \n [RESUME]\n RESUME_PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/outputs/new/02-10T23-52-36_128slice/\n BEST_EPOCH = 56\n RESUME_EPOCH = 66\n\n \"\"\"\n self.TTA = tta\n self.VERBOSE = verbose\n\n if(self.TTA == 2):\n # slow\n self.MANIP = self.IndependentOperations(verbose=self.VERBOSE)\n elif(self.TTA == 1):\n # fast\n self.MANIP = {'opt0': [lambda a: a, 0, 0]}\n elif(self.TTA == 0):\n # super-fast\n self.MANIP = {'opt0': [lambda a: a, 0, 0]}\n \n self.NR_MANIP = len(self.MANIP)\n\n # load model\n MODEL_NAME = pkg_resources.resource_filename('t2c', 'input_data/segunet_02-10T23-52-36_128slice_ep56.h5')\n if (os.path.exists(MODEL_NAME)):\n pass\n else:\n if(self.VERBOSE): print(' Download network weights: %s' %MODEL_NAME)\n\n MODEL_EPOCH = 56\n METRICS = {'balanced_cross_entropy':balanced_cross_entropy, 'iou':iou, 'dice_coef':dice_coef} \n self.MODEL_LOADED = load_model(MODEL_NAME, custom_objects=METRICS)\n \n if(self.VERBOSE): print(' Loaded model: %s' %MODEL_NAME)\n\n def UniqueRows(self, arr):\n \"\"\" Remove duplicate row array in 2D data \n - arr (narray): array with duplicate row\n \n Example:\n >> d = np.array([[0,1,2],[0,1,2],[0,0,0],[0,0,2],[0,1,2]])\n >> UniqueRows(d) \n \n array([[0, 0, 0],\n [0, 0, 2],\n [0, 1, 2]])\n \"\"\"\n arr = np.array(arr)\n\n if(arr.ndim == 2):\n arr = np.ascontiguousarray(arr)\n unique_arr = np.unique(arr.view([('', arr.dtype)]*arr.shape[1]))\n new_arr = unique_arr.view(arr.dtype).reshape((unique_arr.shape[0], arr.shape[1]))\n elif(arr.ndim == 1):\n new_arr = np.array(list(dict.fromkeys(arr)))\n\n return new_arr\n\n\n def IndependentOperations(self, verbose=False):\n ''' How many unique manipulations (horzontal and vertical flip, rotation, etc...) \n can we operate on a cube? 
\n Each indipendent operation is considered as an additional rappresentation\n of the same coeval data, so that it can be considered for errorbar with SegU-Net '''\n\n data = np.array(range(3**3)).reshape((3,3,3)) \n\n func = [lambda a: a,\n np.fliplr, \n np.flipud, \n lambda a: np.flipud(np.fliplr(a)),\n lambda a: np.fliplr(np.flipud(a))]\n axis = [0,1,2] \n angl_rot = [0,1,2,3] \n\n\n tot_manipl_data_flat = np.zeros((len(func)*len(axis)*len(angl_rot), data.size)) \n tot_operations = {'opt%d' %k:[] for k in range(0,len(func)*len(axis)*len(angl_rot))} \n\n i = 0 \n for f in func: \n cube = f(data)\n for rotax in axis: \n ax_tup = [0,1,2] \n ax_tup.remove(rotax)\n for rot in angl_rot:\n tot_manipl_data_flat[i] = np.rot90(cube, k=rot, axes=ax_tup).flatten() \n # function, axis of rotation, angle of rotation, slice index\n tot_operations['opt%d' %i] = [f, rotax, rot] \n i += 1 \n\n uniq_manipl_data_flat = self.UniqueRows(tot_manipl_data_flat).astype(int)\n uniq_operations = {}\n\n for iumdf, uniq_mdf in enumerate(uniq_manipl_data_flat):\n for itmdf, tot_mdf in enumerate(tot_manipl_data_flat):\n if(all(uniq_mdf == tot_mdf)):\n uniq_operations['opt%d' %iumdf] = tot_operations['opt%d' %itmdf]\n break\n \n assert uniq_manipl_data_flat.shape[0] == len(uniq_operations)\n if(verbose): print('tot number of (unique) manipulation we can do on a cube: %d' %(len(uniq_operations)))\n\n return uniq_operations\n\n\n def prediction(self, x):\n img_shape = x.shape\n if(self.TTA == 2):\n X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))\n elif(self.TTA == 1):\n X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))\n elif(self.TTA == 0):\n X_tta = np.zeros((np.append(len(self.MANIP), img_shape)))\n \n if(self.VERBOSE):\n loop = tqdm(range(len(self.MANIP)))\n else:\n loop = range(len(self.MANIP))\n\n for iopt in loop:\n opt, rotax, rot = self.MANIP['opt%d' %iopt]\n ax_tup = [0,1,2] \n ax_tup.remove(rotax)\n\n cube = np.rot90(opt(x), k=rot, axes=ax_tup) \n X = cube[np.newaxis, ..., np.newaxis]\n\n for j in range(img_shape[0]):\n if(self.TTA == 0):\n X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()\n else:\n X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()\n X_tta[iopt+len(self.MANIP),:,j,:] = self.MODEL_LOADED.predict(X[:,:,j,:,:], verbose=0).squeeze()\n X_tta[iopt+len(self.MANIP)*2,:,:,j] = self.MODEL_LOADED.predict(X[:,:,:,j,:], verbose=0).squeeze()\n\n for itta in range(X_tta.shape[0]):\n opt, rotax, rot = self.MANIP['opt%d' %(itta%len(self.MANIP))]\n ax_tup = [0,1,2] \n ax_tup.remove(rotax)\n X_tta[itta] = opt(np.rot90(X_tta[itta], k=-rot, axes=ax_tup))\n\n X_seg = np.round(np.mean(X_tta, axis=0))\n X_err = np.std(X_tta, axis=0)\n\n return X_seg, X_err\n"
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.python.ops.nn_ops._ensure_xent_args",
"numpy.flipud",
"tensorflow.python.ops.math_ops.exp",
"numpy.mean",
"tensorflow.keras.backend.log",
"numpy.fliplr",
"tensorflow.python.ops.array_ops.where",
"numpy.std",
"tensorflow.keras.backend.square",
"numpy.rot90",
"numpy.ascontiguousarray",
"tensorflow.keras.backend.sum",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.keras.backend.abs",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.reduce_mean",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.keras.backend.equal",
"tensorflow.keras.backend.epsilon"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
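The prediction loop in the segmentation class above undoes each test-time augmentation with np.rot90(..., k=-rot, axes=ax_tup); a toy round-trip check that the inverse rotation restores the original cube:

import numpy as np

cube = np.arange(27).reshape(3, 3, 3)
for rotax in (0, 1, 2):
    ax_tup = [0, 1, 2]
    ax_tup.remove(rotax)          # rotate in the plane orthogonal to rotax
    for rot in (0, 1, 2, 3):
        manipulated = np.rot90(cube, k=rot, axes=ax_tup)
        restored = np.rot90(manipulated, k=-rot, axes=ax_tup)
        assert np.array_equal(restored, cube)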
jrobertojunior/face-parsing.PyTorch | [
"d34f39c9ae9726ac8eaf39ecff824a14ec4e15b9"
] | [
"preprocessing/main.py"
] | [
"import cv2 as cv\nimport numpy as np\nimport os\n\ndef preprocess(labels_path, sep_labels_path):\n # list all files on labels_path\n labels_filenames = os.listdir(labels_path)\n\n count = 0\n for label_filename in labels_filenames:\n label_path = os.path.join(labels_path, label_filename)\n\n print(f'segmenting {label_filename}')\n masks = segment_labels(label_path)\n\n for att in masks:\n mask = masks[att]\n path = f\"{sep_labels_path}/{label_filename[:-4]}_{att}.png\"\n print(f'{count} - writing {path}')\n cv.imwrite(path, mask)\n\n count += 1\n # cv.imwrite(f'{label_filename[:-4]}_{mask}', mask)\n\n\ndef segment_labels(label_path):\n atts = {\n \"background\": (0, 0, 0),\n \"mouth\": (255, 0, 0),\n \"eyes\": (0, 255, 0),\n \"nose\": (0, 0, 255),\n \"face\": (128, 128, 128),\n \"hair\": (255, 255, 0),\n \"eyebrows\": (255, 0, 255),\n \"ears\": (0, 255, 255),\n \"teeth\": (255, 255, 255),\n \"beard\": (255, 192, 192),\n \"sunglasses\": (0, 128, 128),\n }\n\n label = cv.imread(label_path)\n mask = np.zeros(label.shape, dtype=np.uint8)\n\n masks = {}\n\n for att in atts:\n color = atts[att]\n\n mask = cv.inRange(label, color, color)\n masks[att] = mask\n # cv.imshow(att, mask)\n # cv.waitKey(0)\n\n # cv.imwrite(f\"{sep_labels_path}/{label_path[:-4]}_{att}.png\", mask)\n\n return masks\n\n\n# separate_masks(\"./labels.png\")\npreprocess(\"./organized_dataset/labels\", \"./organized_dataset/segmented_labels\")\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
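For the exact-color masks built above, cv.inRange(label, color, color) can be reproduced with a plain numpy comparison; a small sketch (an illustrative equivalent, not the OpenCV implementation, which returns uint8 masks with 255 for hits):

import numpy as np

def color_mask(label, color):
    # boolean mask of pixels exactly equal to the given BGR triple
    return np.all(label == np.asarray(color, dtype=label.dtype), axis=-1)

label = np.zeros((2, 2, 3), dtype=np.uint8)
label[0, 0] = (255, 0, 0)
print(color_mask(label, (255, 0, 0)).astype(np.uint8) * 255)
# [[255   0]
#  [  0   0]]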
Principe92/contextualbandits | [
"43cf5be10b3d39d74f9da5c5fe1cfae5bc2dd6f5",
"43cf5be10b3d39d74f9da5c5fe1cfae5bc2dd6f5",
"43cf5be10b3d39d74f9da5c5fe1cfae5bc2dd6f5",
"43cf5be10b3d39d74f9da5c5fe1cfae5bc2dd6f5"
] | [
"example/loc3/rewards.py",
"example/loc9/loc9.py",
"example/loc18/rewards.py",
"example/loc19/rewards.py"
] | [
"import pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as st\nfrom pylab import rcParams\n\n\ndf = pandas.read_csv('rewards_loc3.csv')\n\nucb,ts,ovr,egr,egr2,agr,agr2,efr,ac,aac,sft = df['ucb'],df['ts'],df['ovr'],\\\ndf['egr'],df['egr2'],df['agr'],df['agr2'],df['efr'],df['ac'],df['aac'],df['sft']\n\n#y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = np.mean(ucb), np.mean(ts) \\\n#,np.mean(ovr), np.mean(egr), np.mean(egr2) \\\n#,np.mean(agr), np.mean(agr2), np.mean(efr) \\\n#,np.mean(ac), np.mean(aac), np.mean(sft)\n\ndef get_mean_reward(reward_lst):\n mean_rew=list()\n for r in range(len(reward_lst)):\n mean_rew.append(sum(reward_lst[:r+1]) / ((r+1)))\n return mean_rew\n\ny1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(ucb), get_mean_reward(ts) \\\n,get_mean_reward(ovr), get_mean_reward(egr), get_mean_reward(egr2) \\\n,get_mean_reward(agr), get_mean_reward(agr2), get_mean_reward(efr) \\\n,get_mean_reward(ac), get_mean_reward(aac), get_mean_reward(sft)\n\nx1, x2 = [index for index in range(len(ucb))], [index for index in range(len(ts))]\nx3, x4 = [index for index in range(len(df['ovr']))], [index for index in range(len(df['egr']))]\nx5, x6 = [index for index in range(len(df['egr2']))], [index for index in range(len(df['agr']))]\nx7, x8 = [index for index in range(len(df['agr2']))], [index for index in range(len(df['efr']))]\nx9, x10 = [index for index in range(len(df['ac']))], [index for index in range(len(df['aac']))]\nx11 = [index for index in range(len(df['sft']))]\n\n\ndef CI_model(y, confidence = 0.95):\n std_err_y = st.sem(y)\n n_y = len(y)\n h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)\n return h_y\n\nh_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(ucb), CI_model(ts), CI_model(ovr),\\\nCI_model(egr), CI_model(egr2), CI_model(agr), CI_model(agr2), CI_model(efr), CI_model(ac), CI_model(aac), CI_model(sft)\nplt.errorbar(x1, y1, yerr= h_y1, label='Bootstrapped Upper-Confidence Bound (C.I.=80%)')\nplt.errorbar(x2, y2, yerr= h_y2, label='Bootstrapped Thompson Sampling')\nplt.errorbar(x3, y3, yerr= h_y3, label='Separate Classifiers + Beta Prior')\nplt.errorbar(x4, y4, yerr= h_y4, label='Epsilon-Greedy (p0=20%, decay=0.9999')\nplt.errorbar(x5, y5, yerr= h_y5, label='Epsilon-Greedy (p0=20%, no decay')\nplt.errorbar(x6, y6, yerr= h_y6, label='Adaptive Greedy (decaying threshold)')\nplt.errorbar(x7, y7, yerr= h_y7, label='Adaptive Greedy (p0=30%, decaying percentile)')\nplt.errorbar(x8, y8, yerr= h_y8, label='Explore First (n=1,500)')\nplt.errorbar(x9, y9, yerr= h_y9, label='Active Explorer')\nplt.errorbar(x10, y10, yerr= h_y10, label='Adaptive Active Greedy')\nplt.errorbar(x11, y11, yerr= h_y11, label='Softmax Explorer')\n#plt.plot(np.repeat(y.mean(axis=0).max(),len(rewards_sft)),linewidth=4,ls='dashed', label='Overall Best Arm (no context)')\n\nax = plt.subplot(111)\n\n\nplt.xlabel('Rounds (models were updated every 50 rounds)', size=10)\nplt.ylabel('Cummulative Mean Reward', size=10)\nplt.title('Comparison of Online Contextual Bandit Policies in location 3')\n# Shrink current axis by 20%\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n# Put a legend to the right of the current axis\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\nplt.savefig(\"location_3.png\", bbox_inches='tight', dpi = 600)\n",
"import pandas as pd, numpy as np, re\nimport dill\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.linear_model import LogisticRegression\nfrom contextualbandits.online import BootstrappedUCB, BootstrappedTS, SeparateClassifiers,\\\n EpsilonGreedy, AdaptiveGreedy, ExploreFirst, ActiveExplorer, SoftmaxExplorer\nfrom copy import deepcopy\n\nfrom sklearn.linear_model import SGDClassifier\n\ndef parse_data(file_name):\n features = list()\n labels = list()\n with open(file_name, 'rt') as f:\n f.readline()\n for l in f:\n if bool(re.search(\"^[0-9]\", l)):\n g = re.search(\"^(([0-9]{1,2},?)+)\\s(.*)$\", l)\n labels.append([int(i) for i in g.group(1).split(\",\")])\n features.append(eval(\"{\" + re.sub(\"\\s\", \",\", g.group(3)) + \"}\"))\n else:\n l = l.strip()\n labels.append([])\n features.append(eval(\"{\" + re.sub(\"\\s\", \",\", l) + \"}\"))\n features = pd.DataFrame.from_dict(features).fillna(0).iloc[:,:].values\n mlb = MultiLabelBinarizer()\n y = mlb.fit_transform(labels)\n return features, y\n\nX, y = parse_data(\"loc_9.txt\")\n\n\nnchoices = y.shape[1]\nbase_algorithm = SGDClassifier(random_state=123, loss='log')\nbeta_prior = ((3, 7), 2) # until there are at least 2 observations of each class, will use prior Beta(3, 7)\n\n## The base algorithm is embedded in different metaheuristics\nbootstrapped_ucb = BootstrappedUCB(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior, batch_train=True)\nbootstrapped_ts = BootstrappedTS(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior, batch_train=True)\none_vs_rest = SeparateClassifiers(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior, batch_train=True)\nepsilon_greedy = EpsilonGreedy(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior, batch_train=True)\nepsilon_greedy_nodecay = EpsilonGreedy(deepcopy(base_algorithm), nchoices = nchoices,\n beta_prior=beta_prior, decay=None, batch_train=True)\nadaptive_greedy_thr = AdaptiveGreedy(deepcopy(base_algorithm), nchoices=nchoices,\n decay_type='threshold', batch_train=True)\nadaptive_greedy_perc = AdaptiveGreedy(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior,\n decay_type='percentile', decay=0.9997, batch_train=True)\nexplore_first = ExploreFirst(deepcopy(base_algorithm), nchoices = nchoices,\n beta_prior=None, explore_rounds=1500, batch_train=True)\nactive_explorer = ActiveExplorer(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior, batch_train=True)\nadaptive_active_greedy = AdaptiveGreedy(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior,\n active_choice='weighted', decay_type='percentile', decay=0.9997, batch_train=True)\nsoftmax_explorer = SoftmaxExplorer(deepcopy(base_algorithm), nchoices = nchoices, beta_prior=beta_prior, batch_train=True)\n\nmodels = [bootstrapped_ucb, bootstrapped_ts, one_vs_rest, epsilon_greedy, epsilon_greedy_nodecay,\n adaptive_greedy_thr, adaptive_greedy_perc, explore_first, active_explorer,\n adaptive_active_greedy, softmax_explorer]\n\n# These lists will keep track of the rewards obtained by each policy\nrewards_ucb, rewards_ts, rewards_ovr, rewards_egr, rewards_egr2, \\\nrewards_agr, rewards_agr2, rewards_efr, rewards_ac, \\\nrewards_aac, rewards_sft = [list() for i in range(len(models))]\n\nlst_rewards = [rewards_ucb, rewards_ts, rewards_ovr, rewards_egr, rewards_egr2,\n rewards_agr, rewards_agr2, rewards_efr, rewards_ac,\n rewards_aac, rewards_sft]\n\n# batch size - algorithms will be refit after N 
rounds\nbatch_size=50\n\n# initial seed - all policies start with the same small random selection of actions/rewards\nfirst_batch = X[:batch_size, :]\naction_chosen = np.random.randint(nchoices, size=batch_size)\nrewards_received = y[np.arange(batch_size), action_chosen]\n\n# fitting models for the first time\nfor model in models:\n np.random.seed(123)\n model.fit(X=first_batch, a=action_chosen, r=rewards_received)\n \n# these lists will keep track of which actions does each policy choose\nlst_a_ucb, lst_a_ts, lst_a_ovr, lst_a_egr, lst_a_egr2, lst_a_agr, \\\nlst_a_agr2, lst_a_efr, lst_a_ac, lst_a_aac, \\\nlst_a_sft = [action_chosen.copy() for i in range(len(models))]\n\nlst_actions = [lst_a_ucb, lst_a_ts, lst_a_ovr, lst_a_egr, lst_a_egr2, lst_a_agr,\n lst_a_agr2, lst_a_efr, lst_a_ac, lst_a_aac,lst_a_sft]\n\n# rounds are simulated from the full dataset\ndef simulate_rounds_stoch(model, rewards, actions_hist, X_batch, y_batch, rnd_seed):\n np.random.seed(rnd_seed)\n \n ## choosing actions for this batch\n actions_this_batch = model.predict(X_batch).astype('uint8')\n \n # keeping track of the sum of rewards received\n rewards.append(y_batch[np.arange(y_batch.shape[0]), actions_this_batch].sum())\n \n # adding this batch to the history of selected actions\n new_actions_hist = np.append(actions_hist, actions_this_batch)\n \n # rewards obtained now\n rewards_batch = y_batch[np.arange(y_batch.shape[0]), actions_this_batch]\n \n # now refitting the algorithms after observing these new rewards\n np.random.seed(rnd_seed)\n model.partial_fit(X_batch, actions_this_batch, rewards_batch)\n \n return new_actions_hist\n\n# now running all the simulation\nfor i in range(int(np.floor(X.shape[0] / batch_size))):\n batch_st = (i + 1) * batch_size\n batch_end = (i + 2) * batch_size\n batch_end = np.min([batch_end, X.shape[0]])\n \n X_batch = X[batch_st:batch_end, :]\n y_batch = y[batch_st:batch_end, :]\n \n for model in range(len(models)):\n lst_actions[model] = simulate_rounds_stoch(models[model],\n lst_rewards[model],\n lst_actions[model],\n X_batch, y_batch,\n rnd_seed = batch_st)\n\n for model in range(len(models)):\n \tdill.dump(models[model], open(\"model_%d_loc9.dill\" % (model), \"wb\"))\n\n \n#plotting\n\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\n\ndef get_mean_reward(reward_lst, batch_size=batch_size):\n mean_rew=list()\n for r in range(len(reward_lst)):\n mean_rew.append(sum(reward_lst[:r+1]) * 1.0 / ((r+1)*batch_size))\n return mean_rew\n\n\nimport scipy.stats as st\ny1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(rewards_ucb), get_mean_reward(rewards_ts) \\\n,get_mean_reward(rewards_ovr), get_mean_reward(rewards_egr), get_mean_reward(rewards_egr2) \\\n,get_mean_reward(rewards_agr), get_mean_reward(rewards_agr2), get_mean_reward(rewards_efr) \\\n,get_mean_reward(rewards_ac), get_mean_reward(rewards_aac), get_mean_reward(rewards_sft)\nx1, x2 = [index for index in range(len(rewards_ucb))], [index for index in range(len(rewards_ts))]\nx3, x4 = [index for index in range(len(rewards_ovr))], [index for index in range(len(rewards_egr))]\nx5, x6 = [index for index in range(len(rewards_egr2))], [index for index in range(len(rewards_agr))]\nx7, x8 = [index for index in range(len(rewards_agr2))], [index for index in range(len(rewards_efr))]\nx9, x10 = [index for index in range(len(rewards_ac))], [index for index in range(len(rewards_aac))]\nx11 = [index for index in range(len(rewards_sft))]\n\n\ndef CI_model(y, confidence = 0.95):\n std_err_y = st.sem(y)\n n_y = len(y)\n 
h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)\n return h_y\n\nh_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(y1), CI_model(y2), CI_model(y3),\\\nCI_model(y4), CI_model(y5), CI_model(y6), CI_model(y7), CI_model(y8), CI_model(y9), CI_model(y10), CI_model(y11)\nplt.errorbar(x1, y1, yerr= h_y1)\nplt.errorbar(x2, y2, yerr= h_y2)\nplt.errorbar(x3, y3, yerr= h_y3)\nplt.errorbar(x4, y4, yerr= h_y4)\nplt.errorbar(x5, y5, yerr= h_y5)\nplt.errorbar(x6, y6, yerr= h_y6)\nplt.errorbar(x7, y7, yerr= h_y7)\nplt.errorbar(x8, y8, yerr= h_y8)\nplt.errorbar(x9, y9, yerr= h_y9)\nplt.errorbar(x10, y10, yerr= h_y10)\nplt.errorbar(x11, y11, yerr= h_y11)\nplt.plot(np.repeat(y.mean(axis=0).max(),len(rewards_sft)),linewidth=4,ls='dashed')\n\n\nplt.xlabel('Rounds (models were updated every 50 rounds)', size=10)\nplt.ylabel('Cumulative Mean Reward', size=10)\n#plt.title('Comparison of Online Contextual Bandit Policies in location 7\\n(Base Algorithm is Logistic Regression with data fit in streams)\\n\\nDataset\\n(159 categories, 1836 attributes)',size=30)\nplt.savefig(\"loc_9.png\", bbox_inches='tight', dpi = 600)\n\n\nfrom numpy import array\nucb, ts, ovr, egr, egr2, agr, agr2, efr, ac, aac, sft = array(rewards_ucb), array(rewards_ts), array(rewards_ovr), \\\narray(rewards_egr), array(rewards_egr2), array(rewards_agr), array(rewards_agr2), array(rewards_efr), \\\narray(rewards_ac), array(rewards_aac), array(rewards_sft)\ndf = pd.DataFrame({\"ucb\" : ucb, \"ts\" : ts,\"ovr\":ovr, \"egr\":egr,\"egr2\":egr2,\"agr\":agr,\"agr2\":agr2,\"efr\":efr, \\\n \"ac\":ac, \"aac\":aac, \"sft\":sft})\ndf.to_csv(\"rewards_loc9.csv\", index=False)",
"import pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as st\nfrom pylab import rcParams\n\n\ndf = pandas.read_csv('rewards_loc18.csv')\n\nucb,ts,ovr,egr,egr2,agr,agr2,efr,ac,aac,sft = df['ucb'],df['ts'],df['ovr'],\\\ndf['egr'],df['egr2'],df['agr'],df['agr2'],df['efr'],df['ac'],df['aac'],df['sft']\n\n#y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = np.mean(ucb), np.mean(ts) \\\n#,np.mean(ovr), np.mean(egr), np.mean(egr2) \\\n#,np.mean(agr), np.mean(agr2), np.mean(efr) \\\n#,np.mean(ac), np.mean(aac), np.mean(sft)\n\ndef get_mean_reward(reward_lst):\n mean_rew=list()\n for r in range(len(reward_lst)):\n mean_rew.append(sum(reward_lst[:r+1]) / ((r+1)))\n return mean_rew\n\ny1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(ucb), get_mean_reward(ts) \\\n,get_mean_reward(ovr), get_mean_reward(egr), get_mean_reward(egr2) \\\n,get_mean_reward(agr), get_mean_reward(agr2), get_mean_reward(efr) \\\n,get_mean_reward(ac), get_mean_reward(aac), get_mean_reward(sft)\n\nx1, x2 = [index for index in range(len(ucb))], [index for index in range(len(ts))]\nx3, x4 = [index for index in range(len(df['ovr']))], [index for index in range(len(df['egr']))]\nx5, x6 = [index for index in range(len(df['egr2']))], [index for index in range(len(df['agr']))]\nx7, x8 = [index for index in range(len(df['agr2']))], [index for index in range(len(df['efr']))]\nx9, x10 = [index for index in range(len(df['ac']))], [index for index in range(len(df['aac']))]\nx11 = [index for index in range(len(df['sft']))]\n\n\ndef CI_model(y, confidence = 0.95):\n std_err_y = st.sem(y)\n n_y = len(y)\n h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)\n return h_y\n\nh_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(ucb), CI_model(ts), CI_model(ovr),\\\nCI_model(egr), CI_model(egr2), CI_model(agr), CI_model(agr2), CI_model(efr), CI_model(ac), CI_model(aac), CI_model(sft)\nplt.errorbar(x1, y1, yerr= h_y1, label='Bootstrapped Upper-Confidence Bound (C.I.=80%)')\nplt.errorbar(x2, y2, yerr= h_y2, label='Bootstrapped Thompson Sampling')\nplt.errorbar(x3, y3, yerr= h_y3, label='Separate Classifiers + Beta Prior')\nplt.errorbar(x4, y4, yerr= h_y4, label='Epsilon-Greedy (p0=20%, decay=0.9999')\nplt.errorbar(x5, y5, yerr= h_y5, label='Epsilon-Greedy (p0=20%, no decay')\nplt.errorbar(x6, y6, yerr= h_y6, label='Adaptive Greedy (decaying threshold)')\nplt.errorbar(x7, y7, yerr= h_y7, label='Adaptive Greedy (p0=30%, decaying percentile)')\nplt.errorbar(x8, y8, yerr= h_y8, label='Explore First (n=1,500)')\nplt.errorbar(x9, y9, yerr= h_y9, label='Active Explorer')\nplt.errorbar(x10, y10, yerr= h_y10, label='Adaptive Active Greedy')\nplt.errorbar(x11, y11, yerr= h_y11, label='Softmax Explorer')\n#plt.plot(np.repeat(y.mean(axis=0).max(),len(rewards_sft)),linewidth=4,ls='dashed', label='Overall Best Arm (no context)')\n\nax = plt.subplot(111)\n\n\nplt.xlabel('Rounds (models were updated every 50 rounds)', size=10)\nplt.ylabel('Cummulative Mean Reward', size=10)\nplt.title('Comparison of Online Contextual Bandit Policies in location 18')\n# Shrink current axis by 20%\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n# Put a legend to the right of the current axis\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\nplt.savefig(\"location_18.png\", bbox_inches='tight', dpi = 600)\n",
"import pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as st\nfrom pylab import rcParams\n\n\ndf = pandas.read_csv('rewards_loc19.csv')\n\nucb,ts,ovr,egr,egr2,agr,agr2,efr,ac,aac,sft = df['ucb'],df['ts'],df['ovr'],\\\ndf['egr'],df['egr2'],df['agr'],df['agr2'],df['efr'],df['ac'],df['aac'],df['sft']\n\n#y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = np.mean(ucb), np.mean(ts) \\\n#,np.mean(ovr), np.mean(egr), np.mean(egr2) \\\n#,np.mean(agr), np.mean(agr2), np.mean(efr) \\\n#,np.mean(ac), np.mean(aac), np.mean(sft)\n\ndef get_mean_reward(reward_lst):\n mean_rew=list()\n for r in range(len(reward_lst)):\n mean_rew.append(sum(reward_lst[:r+1]) / ((r+1)))\n return mean_rew\n\ny1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(ucb), get_mean_reward(ts) \\\n,get_mean_reward(ovr), get_mean_reward(egr), get_mean_reward(egr2) \\\n,get_mean_reward(agr), get_mean_reward(agr2), get_mean_reward(efr) \\\n,get_mean_reward(ac), get_mean_reward(aac), get_mean_reward(sft)\n\nx1, x2 = [index for index in range(len(ucb))], [index for index in range(len(ts))]\nx3, x4 = [index for index in range(len(df['ovr']))], [index for index in range(len(df['egr']))]\nx5, x6 = [index for index in range(len(df['egr2']))], [index for index in range(len(df['agr']))]\nx7, x8 = [index for index in range(len(df['agr2']))], [index for index in range(len(df['efr']))]\nx9, x10 = [index for index in range(len(df['ac']))], [index for index in range(len(df['aac']))]\nx11 = [index for index in range(len(df['sft']))]\n\n\ndef CI_model(y, confidence = 0.95):\n std_err_y = st.sem(y)\n n_y = len(y)\n h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)\n return h_y\n\nh_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(ucb), CI_model(ts), CI_model(ovr),\\\nCI_model(egr), CI_model(egr2), CI_model(agr), CI_model(agr2), CI_model(efr), CI_model(ac), CI_model(aac), CI_model(sft)\nplt.errorbar(x1, y1, yerr= h_y1, label='Bootstrapped Upper-Confidence Bound (C.I.=80%)')\nplt.errorbar(x2, y2, yerr= h_y2, label='Bootstrapped Thompson Sampling')\nplt.errorbar(x3, y3, yerr= h_y3, label='Separate Classifiers + Beta Prior')\nplt.errorbar(x4, y4, yerr= h_y4, label='Epsilon-Greedy (p0=20%, decay=0.9999')\nplt.errorbar(x5, y5, yerr= h_y5, label='Epsilon-Greedy (p0=20%, no decay')\nplt.errorbar(x6, y6, yerr= h_y6, label='Adaptive Greedy (decaying threshold)')\nplt.errorbar(x7, y7, yerr= h_y7, label='Adaptive Greedy (p0=30%, decaying percentile)')\nplt.errorbar(x8, y8, yerr= h_y8, label='Explore First (n=1,500)')\nplt.errorbar(x9, y9, yerr= h_y9, label='Active Explorer')\nplt.errorbar(x10, y10, yerr= h_y10, label='Adaptive Active Greedy')\nplt.errorbar(x11, y11, yerr= h_y11, label='Softmax Explorer')\n#plt.plot(np.repeat(y.mean(axis=0).max(),len(rewards_sft)),linewidth=4,ls='dashed', label='Overall Best Arm (no context)')\n\nax = plt.subplot(111)\n\n\nplt.xlabel('Rounds (models were updated every 50 rounds)', size=10)\nplt.ylabel('Cummulative Mean Reward', size=10)\nplt.title('Comparison of Online Contextual Bandit Policies in location 19')\n# Shrink current axis by 20%\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n# Put a legend to the right of the current axis\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\nplt.savefig(\"location_19.png\", bbox_inches='tight', dpi = 600)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"scipy.stats.t.ppf",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"scipy.stats.sem",
"matplotlib.pyplot.ylabel"
],
[
"numpy.random.seed",
"numpy.min",
"numpy.arange",
"sklearn.preprocessing.MultiLabelBinarizer",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"scipy.stats.t.ppf",
"numpy.append",
"numpy.random.randint",
"matplotlib.pyplot.errorbar",
"numpy.floor",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.xlabel",
"scipy.stats.sem",
"numpy.array",
"sklearn.linear_model.SGDClassifier",
"matplotlib.pyplot.ylabel"
],
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"scipy.stats.t.ppf",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"scipy.stats.sem",
"matplotlib.pyplot.ylabel"
],
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"scipy.stats.t.ppf",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"scipy.stats.sem",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
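The rewards scripts above all share the same two statistics: a running mean of per-round rewards and a Student-t confidence half-width. A compact sketch of both, with synthetic Bernoulli rewards standing in for the logged data:

import numpy as np
import scipy.stats as st

def cumulative_mean(rewards):
    # running mean of the reward sequence, as plotted against rounds
    rewards = np.asarray(rewards, dtype=float)
    return np.cumsum(rewards) / np.arange(1, len(rewards) + 1)

def t_half_width(y, confidence=0.95):
    # half-width of a t-based confidence interval computed from y itself
    return st.sem(y) * st.t.ppf((1 + confidence) / 2, len(y) - 1)

y = cumulative_mean(np.random.binomial(1, 0.3, size=200))
print(y[-1], t_half_width(y))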
Jesmine0902/TSP_CPLEX_2 | [
"8853d6837bd5408b8925eb5f45e21c79945a5904"
] | [
"Add/add_heuristic_engine.py"
] | [
"import pandas as pd\n\n__author__ = 'slei'\n\n\nclass AddHeuristicTSP:\n \"\"\" Finds the shortest path using a heuristic method \"\"\"\n\n def __init__(self, cities_df):\n self.df = cities_df\n self.edges = list((t.origin, t.destination) for t in df.itertuples())\n self.distance = dict([((t.origin, t.destination), t.distance) for t in df.itertuples()])\n self.cities = list(set(df['destination']))\n self.cities_lst = []\n self.tour_lst = []\n self.distance_lst = []\n self.tour_leg_distances_lst = []\n self._final_df = None\n self._shortest_distance = None\n self._shortest_tour = None\n\n def find_subtour(self, starting_city):\n \"\"\" Given a starting city, finds a tour by selecting next shortest distance from list of unvisited cities \"\"\"\n tour = []\n tour_distance_lst = [0]\n cities_unvisited = list(set(self.df['destination']))\n initial_city = starting_city\n current_city = initial_city\n tour.append(current_city)\n cities_unvisited.pop(0)\n total_distance = 0\n count = 0\n\n while len(cities_unvisited) > 0:\n # remove any city that has already been visited from consideration\n df_unvisited = self.df[self.df['destination'].isin(cities_unvisited)]\n\n # filter for rows based on first criterion\n is_current = df_unvisited['origin'] == current_city\n df2 = df_unvisited[is_current]\n\n # find the nearest city\n index_min = df2['distance'].idxmin()\n min_row = df2.loc[index_min]\n d = min_row.distance\n destination = min_row.destination\n\n # update next city and tour and total distance\n current_city = destination\n total_distance = total_distance + d\n tour_distance_lst.append(d)\n\n # update city tracker lists\n tour.append(current_city)\n index_i = cities_unvisited.index(current_city)\n cities_unvisited.pop(index_i)\n count = count + 1\n\n # check\n print(\"next destination: \", destination)\n print(\"distance: \", d)\n print(\"total_distance: \", total_distance)\n print(\"tour: \", tour)\n print(\"tour_distance_lst: \", tour_distance_lst)\n print(\"cities_unvisited: \", cities_unvisited)\n print()\n\n # adding the distance from last city back to initial city\n last_city = tour[-1]\n last_mile = (initial_city, last_city)\n last_mile_distance = self.distance[last_mile]\n tour.append(initial_city)\n total_distance = total_distance + last_mile_distance\n tour_distance_lst.append(last_mile_distance)\n\n # check\n print(\"last_mile: \", last_mile)\n print(\"last_mile_distance: \", last_mile_distance)\n print(\"tour: \", tour)\n print(\"total_distance: \", total_distance)\n print(\"tour_leg_distances_lst: \", tour_distance_lst)\n\n # update lists\n self.tour_lst.append(tour)\n self.distance_lst.append(total_distance)\n self.tour_leg_distances_lst.append(tour_distance_lst)\n\n @property\n def final_df(self):\n \"\"\" Add description here\"\"\"\n if self._final_df is None:\n self._final_df = self._generate_final_df()\n return self._final_df\n\n def _generate_final_df(self):\n for c in self.cities: # for every city in the dataset\n print(\"city: \", c) # generate a tour for each\n print(\"--------------------------------------------------------------------------------\")\n self.find_subtour(c)\n print('********************************************************************************')\n print()\n\n soln_dict = {'city': self.cities, 'tour': self.tour_lst, 'tour_leg_distances': self.tour_leg_distances_lst,\n 'distance': self.distance_lst}\n return pd.DataFrame(soln_dict)\n\n @property\n def shortest_distance(self):\n \"\"\" Add description here\"\"\"\n if self._shortest_distance is None:\n 
return self._calculate_shortest_distance()\n\n def _calculate_shortest_distance(self): # find the tour with the lowest distance\n index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value\n min_row_final = self.final_df.loc[index_min_final]\n return min_row_final.distance\n\n @property\n def shortest_tour(self):\n \"\"\" Add description here\"\"\"\n if self._shortest_tour is None:\n return self._generate_shortest_tour()\n\n def _generate_shortest_tour(self):\n index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value\n min_row_final = self.final_df.loc[index_min_final]\n return min_row_final.tour\n\n\n# ********************************************************************************\n# ********************************************************************************\n\nif __name__ == '__main__':\n df = pd.read_csv('city_data_add.csv')\n tsp = AddHeuristicTSP(df)\n\n tsp.final_df\n print(\"final_df\")\n print(tsp.final_df)\n print()\n\n print(\"shortest_distance_final\", tsp.shortest_distance)\n print(\"shortest_tour_final\", tsp.shortest_tour)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
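Note: a minimal usage sketch for the AddHeuristicTSP record above. The inline three-city table is hypothetical; it assumes the same `origin`, `destination`, `distance` columns (with every pair listed in both directions) that the script's `city_data_add.csv` is expected to contain.

    import pandas as pd

    # hypothetical symmetric three-city instance
    rows = [('A', 'B', 10), ('B', 'A', 10),
            ('A', 'C', 15), ('C', 'A', 15),
            ('B', 'C', 20), ('C', 'B', 20)]
    df = pd.DataFrame(rows, columns=['origin', 'destination', 'distance'])

    tsp = AddHeuristicTSP(df)
    print(tsp.final_df)           # one greedy tour per starting city
    print(tsp.shortest_distance)  # 45 for this toy instance
    print(tsp.shortest_tour)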
YellowOfTheEgg/ots-eval | [
"8ec08e60330d41f8f7ffd571dd6301cdedaefd99"
] | [
"ots_eval/stability_evaluation/close.py"
] | [
"import numpy as np\nfrom scipy.spatial.distance import euclidean\nfrom typing import Union\nimport pandas\n\n\nclass CLOSE(object):\n\n def __init__(self, data: pandas.DataFrame, measure: Union[str, callable] = 'mse', minPts: int = None, output: bool = False,\n jaccard: bool = False, weighting: bool = False, exploitation_term: bool = False):\n \"\"\"\n Params:\n data (pandas.DataFrame) - pandas dataframe with columns order 'object_id', 'time', 'cluster_id' containing cluster belongings,\n features ..\n Note: outliers should have negative labels/cluster_ids, these should be different for different times\n Optional:\n measure (str or callable) - for used quality measure, possible measures:\n 'sse', 'mse', 'mae', 'max', 'dbi', 'exploit'\n minPts (int) - used minPts for density-based quality measure\n output (boolean) - whether intermediate results should be printed\n jaccard (boolean) - whether the jaccard index should be used for proportion\n weighting (boolean) - whether the weighting function should be used for subsequence_score\n exploitation_term (boolean) - whether the exploitation term should be included in CLOSE calculation\n \"\"\"\n self._data = data\n self._column_names = data.columns.values\n self._object_column_name = self._column_names[0]\n self._time_column_name = self._column_names[1]\n self._cluster_column_name = self._column_names[2]\n\n self._jaccard = jaccard\n self._weighting = weighting\n self._exp_term = exploitation_term\n\n self._minPts = minPts\n self._output = output\n self.pos_measures = {### Measures for Clusters\n 'sse': self.calc_sse, # NOTE: sse is not between 0 and 1\n 'mse': self.calc_mse, # NOTE: mse is only between 0 and 1, if data is normalized\n 'mae': self.calc_mae, # NOTE: mae is only between 0 and 1, if data is normalized\n 'max': self.calc_max_dist,\n 'dbi': self.calc_min_pts,\n 'None': self.return_zero,\n ### Measures for Time Clusterings\n 'exploit': self.calc_exploit_at_t}\n\n if measure in self.pos_measures:\n self.measure = self.pos_measures[measure]\n elif callable(measure):\n self.measure = measure\n else:\n self.measure = self.pos_measures['mse']\n\n def rate_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:\n \"\"\"\n Optional:\n start_time (int) - time that should be considered as beginning\n end_time (int) - time which should be rated up to\n return_measures (boolean) - whether additional information such as average stability\n and quality should be returned\n Returns:\n CLOSE score (float): rating of clustering regarding all clusters\n (dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information\n if 'return_measures' is True\n \"\"\"\n cluster_ratings = self.rate_clusters(start_time, end_time)\n gr_clusters = self._data.groupby(self._cluster_column_name)\n\n score = 0\n avg_quality = 0\n avg_stab = 0\n\n for cluster in cluster_ratings:\n cluster_objects = gr_clusters.get_group(cluster)[self._object_column_name].unique()\n cluster_time = gr_clusters.get_group(cluster)[self._time_column_name].iloc[0]\n feature_list = self.get_feature_list(cluster_objects, cluster_time)\n\n measure = self.measure(feature_list)\n avg_quality += measure\n avg_stab += cluster_ratings[cluster]\n score += (cluster_ratings[cluster] * (1 - measure))\n\n num_clusters = len(cluster_ratings)\n num_timestamps = self.get_num_timestamps(start_time, end_time)\n\n if num_clusters <= 0:\n if self._output:\n print('Clustering has no Clusters!!')\n return 0\n\n 
avg_quality /= num_clusters\n if self._output:\n print('Average Quality: ', str(avg_quality))\n avg_stab /= num_clusters\n if self._output:\n print('Average Stability: ', str(avg_stab))\n\n if self._exp_term:\n exp_term = self.calc_exploit()\n factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term\n else:\n factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters)**2)\n\n if not return_measures:\n return score * factor\n\n else:\n return {'stability_evaluation': score * factor,\n 'stability': avg_stab,\n 'quality': avg_quality,\n 'pre-factor': (1 - (num_timestamps / num_clusters) ** 2)}\n\n def rate_time_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:\n \"\"\"\n Optional:\n start_time (optional) - int: time that should be considered as beginning\n end_time (optional) - int: time which should be rated up to\n return_measures (boolean) - whether additional information such as average stability and quality should be returned\n Returns:\n CLOSE score (float) - rating of clustering regarding all time clusterings\n (dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information\n if 'return_measures' is True\n \"\"\"\n cluster_ratings = self.rate_clusters(start_time, end_time)\n num_timestamps, timestamps = self.get_num_timestamps(start_time, end_time, return_timestamps=True)\n\n score = 0\n if return_measures:\n quality = 0\n stability = 0\n\n for time in timestamps:\n if not return_measures:\n score += self.calc_t_clustering_rating(cluster_ratings, time)\n else:\n cur_scores = self.calc_t_clustering_rating(cluster_ratings, time, return_measures=True)\n score += cur_scores['score']\n quality += cur_scores['quality']\n stability += cur_scores['stability']\n\n if return_measures:\n quality /= num_timestamps\n stability /= num_timestamps\n\n num_clusters = len(cluster_ratings)\n if num_clusters <= 0:\n if self._output:\n print('Over-Time Clustering has no Clusters!!')\n return 0\n\n if self._exp_term:\n exp_term = self.calc_exploit()\n factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term\n else:\n factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2)\n\n if not return_measures:\n return score * factor\n else:\n return {'stability_evaluation': score * factor,\n 'stability': stability,\n 'quality': quality,\n 'pre-factor': factor}\n\n def calc_t_clustering_rating(self, cluster_ratings: dict, time: int, return_measures: bool = False) -> Union[float, dict]:\n \"\"\"\n Params:\n cluster_ratings (dict) - {<object_id>: <rating>} with ratings of objects\n time (int) - time that should be considered\n Optional:\n return_measures (boolean) - whether additional information such as average stability and quality should be returned\n Output:\n CLOSE score (float) - rating of clustering at considered time\n (dict): with key 'score', 'stability', 'quality' with additional information if 'return_measures' is True\n \"\"\"\n avg_stab = 0\n\n clusters_at_time = self._data[self._data[self._time_column_name] == time][self._cluster_column_name].unique()\n clusters_at_time = np.delete(clusters_at_time, np.where(clusters_at_time < 0))\n \n for cluster in clusters_at_time:\n try:\n avg_stab += cluster_ratings[cluster]\n except:\n continue\n\n num_clusters = len(clusters_at_time)\n if num_clusters <= 0:\n if self._output:\n print('Time Clustering at Time ', str(time), ' has no Clusters!!')\n return 0\n\n avg_stab /= 
num_clusters\n if self._output:\n print('Average Stability at Time ', str(time), ' : ', str(avg_stab))\n\n quality = self.measure(time)\n if self._output:\n print('Quality of Clustering at Time ' , str(time), ' : ', str(quality))\n\n t_clustering_score = avg_stab * quality\n if not return_measures:\n return t_clustering_score\n else:\n return {\n 'score': t_clustering_score,\n 'stability': avg_stab,\n 'quality': quality\n }\n\n def rate_clusters(self, start_time: int = None, end_time: int = None, id: Union[int, str, list] = None) -> dict:\n \"\"\"\n Optional:\n start_time (int) - time that should be considered as beginning\n end_time (int) - time which should be rated up to\n id (int, str, list or None) - representing the cluster_ids that should be rated. If id is None,\n all objects are rated\n Returns:\n ratings (dict) - {<cluster_id>: <rating>} with ratings of clusters\n \"\"\"\n ids_to_rate = self.get_ids_to_rate(id, self._cluster_column_name, start_time, end_time)\n ids = ids_to_rate[:]\n\n # don't rate outliers\n for i in ids_to_rate:\n if int(i) < 0:\n ids.remove(i)\n\n ratings = self.calc_cluster_rating(ids, start_time)\n return ratings\n\n def calc_cluster_rating(self, ids_to_rate: Union[list, np.ndarray], start_time: int = None) -> dict:\n \"\"\"\n Params:\n ids_to_rate (array-like) - list of clusters that should be rated\n Optional:\n start_time (int) - time that should be considered as beginning\n Returns:\n ratings - dict {<cluster_id>: <rating>} with ratings of clusters\n \"\"\"\n if start_time is None:\n start_time = np.min(self._data[self._time_column_name].unique())\n\n ratings = {}\n cluster_compositions = self.obtain_cluster_compositions()\n gr_clusters = self._data.groupby(self._cluster_column_name)\n\n # iterate over all cluster ids\n for id in ids_to_rate:\n time = gr_clusters.get_group(id)[self._time_column_name].iloc[0]\n\n # rate the clusters of all timestamps except of the first one\n if time != start_time:\n num_merged_clusters = len(cluster_compositions[id])\n obj_list = gr_clusters.get_group(id)[self._object_column_name].unique().tolist()\n obj_ratings = self.calc_object_rating(cluster_compositions, obj_list, time)\n score = 0\n for obj in obj_ratings:\n score += obj_ratings[obj]\n try:\n score /= len(obj_ratings)\n except ZeroDivisionError:\n if self._output:\n print('Cluster ', str(id), ' has no non-outlier members.')\n else:\n continue\n\n clusters = list(cluster_compositions[id].keys())\n num_timestamps = len(self._data.loc[self._data[self._cluster_column_name].isin(clusters)]\n [self._time_column_name].unique())\n try:\n div = num_merged_clusters / num_timestamps\n score /= div\n except ZeroDivisionError:\n if self._output:\n print(\"<<ZeroDivisionError - Cluster Score>> Cluster ID: \", str(id), \" Merged Clusters: \", str(num_merged_clusters),\n \" Num Timestamps: \", str(num_timestamps))\n else:\n continue\n ratings[id] = score\n\n # clusters of the first timestamp have a stability of 1.0\n else:\n ratings[id] = 1.0\n return ratings\n\n def rate_object(self, id: Union[int, str, list] = None, start_time: int = None, end_time: int = None) -> dict:\n \"\"\"\n Optional:\n id (int, str, list or None) - representing the data points that should be rated. 
If id is None,\n all objects are rated\n start_time (int) - time that should be considered as beginning\n end_time (int) - representing the timestamp which should be rated up to\n Returns:\n ratings (dict) - {<object_id>: <rating>} with ratings of objects\n \"\"\"\n ids_to_rate = self.get_ids_to_rate(id, self._object_column_name)\n if end_time is None:\n end_time = np.max(self._data[self._time_column_name].unique())\n cluster_compositions = self.obtain_cluster_compositions()\n ratings = self.calc_object_rating(cluster_compositions, ids_to_rate, end_time, start_time)\n return ratings\n\n def calc_object_rating(self, cluster_composition: dict, ids_to_rate: Union[list, np.ndarray], end_time: int, start_time: int = None) -> dict:\n \"\"\"\n Params:\n cluster_composition (dict) - {<cluster_id>: {<contained_cluster_id>: <proportion>}} containing the proportions of\n clusters (contained_cluster_id) that belong to cluster (cluster_id)\n ids_to_rate (array-like) - list of data points that should be rated\n end_time (int) - representing the timestamp which should be rated up to\n Optional:\n start_time (int) - time that should be considered as beginning\n Returns:\n ratings - dict {<object_id>: <rating>} with ratings of objects\n \"\"\"\n ratings = {}\n gr_clusters = self._data.groupby(self._object_column_name)\n\n # iterate over object ids\n for id in ids_to_rate:\n cur_group = gr_clusters.get_group(id)\n cur_group = cur_group[cur_group[self._time_column_name] <= end_time]\n\n if start_time is not None:\n cur_group = cur_group[cur_group[self._time_column_name] >= start_time]\n\n try:\n # id of the cluster of the last considered timestamp\n last_cluster = cur_group[cur_group[self._time_column_name] == end_time][self._cluster_column_name].iloc[\n 0]\n except IndexError:\n print(\">>INDEXERROR - LAST CLUSTER<< ID: \", str(id), \", Start Time: \", str(start_time), \", End Time: \",\n str(end_time))\n continue\n\n # if object is an outlier for the considered timestamp, it is skipped\n if int(last_cluster) < 0:\n continue\n\n cluster_ids = cur_group[self._cluster_column_name].unique()\n\n object_ratings = []\n num_clusters = 0\n has_outlier = False\n for cluster in cluster_ids:\n if cluster == last_cluster:\n continue\n # Add the proportion of clusters before last timestamp, that merged in last cluster\n else:\n # outliers get worst rating of 0.0\n if int(cluster) < 0:\n object_ratings.append(0.0)\n has_outlier = True\n else:\n object_ratings.append(cluster_composition[last_cluster][cluster])\n num_clusters += 1\n if not has_outlier and len(object_ratings) == 0:\n # print(str(id) + \" has no data before t=\" + str(end_time))\n continue\n\n if self._weighting:\n try:\n weighting_denominator = 0\n for i in range(1, num_clusters + 1):\n weighting_denominator += i\n\n if num_clusters > 0:\n object_rating = 0\n for i in range(num_clusters):\n object_rating += object_ratings[i] * ((i + 1) / weighting_denominator)\n\n else:\n continue\n except (TypeError, ZeroDivisionError):\n # print(str(id) + \" is not assigned to any cluster before t=\" + str(end_time))\n continue\n else:\n try:\n object_rating = np.sum(object_ratings)\n object_rating /= num_clusters\n except (TypeError, ZeroDivisionError):\n # print(str(id) + \" is not assigned to any cluster before t=\" + str(end_time))\n continue\n\n ratings[id] = round(object_rating, 3)\n return ratings\n\n def calc_exploit(self) -> float:\n \"\"\"\n Returns:\n exploitation_term (float) - exploitation term for whole clustering\n \"\"\"\n num_objects = 
len(self._data[self._object_column_name].unique())\n num_no_outliers = len(self._data[self._data[self._cluster_column_name] >= 0][self._object_column_name].unique())\n return num_no_outliers / num_objects\n\n\n ######## HELPER FUNCTIONS ########\n\n def get_feature_list(self, objects: Union[list, np.ndarray], time: int) -> np.ndarray:\n \"\"\"\n Params:\n objects (array-like) - list of objects_ids that belong to considered cluster\n time (int) - time of cluster that is considered\n\n Output:\n feature_list (list) - list of lists containing the features of objects in the considered cluster\n \"\"\"\n feature_list = []\n for obj in objects:\n features = self._data[\n (self._data[self._object_column_name] == obj) & (self._data[self._time_column_name] == time)]\n try:\n features = \\\n features.drop([self._object_column_name, self._cluster_column_name, self._time_column_name],\n axis=1).iloc[0].tolist()\n except IndexError:\n print(\">>INDEXERROR - FEATURE LIST<< ID: \", str(obj), \", Time: \", str(time))\n continue\n\n if len(features) <= 0:\n print(\"No features found for object \", str(obj))\n continue\n feature_list.append(features)\n return np.array(feature_list)\n\n def get_num_timestamps(self, start_time: int, end_time: int, return_timestamps: bool = False) -> int:\n \"\"\"\n Params:\n start_time (int) - first timestamp to be considered\n end_time (int) - last timestamp to be considered\n Optional:\n return_timestamps (boolean) - list of all timestamps\n Returns:\n num_timestamps (int) - number of timestamps between start_time and end_time\n \"\"\"\n timestamp_list = self._data[self._time_column_name].unique()\n if start_time is not None:\n timestamp_list = [i for i in timestamp_list if i >= start_time]\n if end_time is not None:\n timestamp_list = [i for i in timestamp_list if i <= end_time]\n num_timestamps = len(timestamp_list)\n if not return_timestamps:\n return num_timestamps\n else:\n return num_timestamps, timestamp_list\n\n def get_ids_to_rate(self, id: Union[int, str, list], id_name: str, start_time: int = None, end_time: int = None) -> list:\n \"\"\"\n Params:\n id (int, str, list or None) - representing the data points that should be rated. 
If id is None, all objects are rated\n id_name (str) - either self._cluster_column_name or self._object_column_name, which ids to extract\n Optional:\n start_time (int) - first timestamp to be considered\n end_time (int) - last timestamp to be considered\n Returns:\n ids_to_rate (list) - list of ids that should be rated\n \"\"\"\n if id is None:\n data = self._data.copy()\n if start_time is not None:\n data = data[data[self._time_column_name] >= start_time]\n if end_time is not None:\n data = data[data[self._time_column_name] <= end_time]\n ids_to_rate = data[id_name].unique().tolist()\n elif isinstance(id, int) or isinstance(id, str):\n ids_to_rate = [id]\n elif isinstance(id, list):\n ids_to_rate = id[:]\n else:\n raise Exception('id has to be int, str, list or None')\n return ids_to_rate\n\n def obtain_cluster_compositions(self) -> dict:\n \"\"\"\n Returns:\n cluster_compositions (dict) - dict of dicts {<cluster_id>: {<cluster_id>: <proportion>}} with cluster compositions\n\n Example:\n {5: {1: 1.0, 2: 0.1, 4: 0.5}} describes that\n 100% of cluster 1, 10% of cluster 2 and 50% of cluster 4 belong to cluster 5\n \"\"\"\n cluster_compositions = {}\n g_clusters = self._data.groupby([self._time_column_name, self._cluster_column_name])\n\n if not self._jaccard:\n cluster_members = self._data.groupby(self._cluster_column_name).count()\n\n # iterate over all clusters - 'group' contains the time and cluster_id\n # and 'objects' is the corresponding dataframe\n for group, objects in g_clusters:\n # Ignore outliers\n if int(group[1]) < 0:\n continue\n\n objects = objects[self._object_column_name].values.tolist()\n\n # temporal intersection\n # select considered clusters with later timestamps than the current one to check which clusters the\n # current one merged into and count, how many objects of the current cluster are in the considered clusters\n # example of a series from the dataframe: [cluster_id, count] with [2, 10]\n # meaning: 10 objects of the current cluster merged into the cluster with the id 2\n temp_intersection = (self._data.loc[(self._data[self._object_column_name].isin(objects)) &\n (self._data[self._time_column_name] > group[0])]).groupby(self._cluster_column_name).count()\n\n # iterate over all clusters which the current cluster has merged into\n # 'cluster' contains the cluster_id\n # and 'con_objects' is the corresponding number of objects of the temporal intersection\n for cluster, num_objects in temp_intersection.iterrows():\n # Ignore outliers\n if int(cluster) < 0:\n continue\n\n # for all considered clusters save the proportion of the current cluster that merged into the considered\n # one\n # example: {3: {2: 0.3}, 4: {2: 0.1}}\n # meaning: 30% of (current) cluster 2 merged into (considered) cluster 3 and 10% into (considered) cluster 4\n if cluster not in cluster_compositions:\n cluster_compositions[cluster] = {}\n\n if self._jaccard:\n # cardinality of the union of both considered clusters\n card_union = len(self._data.loc[(self._data[self._cluster_column_name] == cluster) |\n (self._data[self._cluster_column_name] == group[1])]\n [self._object_column_name].unique())\n # jaccard distance\n cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /\n float(card_union), 3)\n else:\n cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /\n float(cluster_members.loc[group[1]].values[1]), 3)\n if group[1] not in cluster_compositions:\n cluster_compositions[group[1]] = {}\n return cluster_compositions\n\n\n ######## QUALITY 
MEASURES ########\n\n @staticmethod\n def calc_sse(feature_list: list) -> float:\n \"\"\"\n Params:\n feature_list (list) - list of lists containing the features of objects in the considered cluster\n Returns:\n sse (float) - sum of squared errors to centroid of cluster\n \"\"\"\n centroid = np.average(feature_list, axis=0)\n sse = np.sum(np.power(feature_list - centroid[None, :], 2))\n return sse\n\n def calc_mse(self, feature_list: list) -> float:\n \"\"\"\n Params:\n feature_list (list) - list of lists containing the features of objects in the considered cluster\n Returns:\n mse (float) - mean squared error of cluster\n \"\"\"\n sse = self.calc_sse(feature_list)\n return sse / len(feature_list)\n\n @staticmethod\n def calc_mae(feature_list: list) -> float:\n \"\"\"\n Params:\n feature_list (list) - list of lists containing the features of objects in the considered cluster\n Returns:\n mae (float) - mean average errors to centroid of cluster\n \"\"\"\n centroid = np.average(feature_list, axis=0)\n mae = np.average(np.abs(feature_list - centroid[None, :]))\n return mae\n\n @staticmethod\n def calc_max_dist(feature_list: list) -> float:\n \"\"\"\n Params:\n feature_list (list) - list of lists containing the features of objects in the considered cluster\n Returns:\n max_dist (float) - maximal distance of cluster member to centroid of cluster\n \"\"\"\n max_dist = 0\n for i in range(len(feature_list) - 1):\n for j in range(i + 1, len(feature_list)):\n cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))\n if cur_dist > max_dist:\n max_dist = cur_dist\n max_dist /= 2 ** (1 / 2)\n return max_dist\n\n def calc_min_pts(self, feature_list: list) -> float:\n \"\"\"\n Params:\n feature_list (list) - list of lists containing the features of objects in the considered cluster\n Returns:\n avg_dist (float) - average distance of cluster members to their minPts neighbor\n \"\"\"\n avg_dist = 0\n for i in range(len(feature_list)):\n dist_list = [10] * self._minPts\n for j in range(len(feature_list)):\n if i == j:\n continue\n cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))\n for k in range(len(dist_list)):\n if cur_dist < dist_list[k]:\n dist_list.insert(k, cur_dist)\n dist_list.pop(self._minPts)\n avg_dist += dist_list[self._minPts - 1]\n avg_dist /= len(feature_list)\n return avg_dist\n\n @staticmethod\n def return_zero():\n \"\"\"\n Function is used if no quality measure should be used in CLOSE\n This is the case when only the exploitation term is considered\n\n Returns:\n 0\n \"\"\"\n return 0\n\n def calc_exploit_at_t(self, time: int) -> float:\n \"\"\"\n Params:\n time (int) - time to be considered\n Returns:\n rating (float) - exploitation rating of time clustering\n \"\"\"\n num_objects_at_t = len(self._data[self._data[self._time_column_name] == time][self._object_column_name].unique())\n num_no_outliers = len(self._data[(self._data[self._time_column_name] == time) &\n (self._data[self._cluster_column_name] >= 0)][self._object_column_name].unique())\n return num_no_outliers / num_objects_at_t\n\n\n"
] | [
[
"numpy.abs",
"numpy.power",
"numpy.average",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
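Note: a small driver sketch for the CLOSE class above. The toy over-time clustering is made up; per the class docstring, columns must be ordered object_id, time, cluster_id followed by the features, and outliers would carry negative cluster ids.

    import pandas as pd

    # four objects: two clusters at t=0 that merge into one cluster at t=1
    toy = pd.DataFrame(
        [['o1', 0, 0, 0.10], ['o2', 0, 0, 0.12],
         ['o3', 0, 1, 0.80], ['o4', 0, 1, 0.82],
         ['o1', 1, 2, 0.11], ['o2', 1, 2, 0.13],
         ['o3', 1, 2, 0.79], ['o4', 1, 2, 0.81]],
        columns=['object_id', 'time', 'cluster_id', 'f1'])

    close = CLOSE(toy, measure='mse')
    print(close.rate_clusters())    # per-cluster stability; the merged cluster scores 0.5 here
    print(close.rate_clustering())  # overall CLOSE score (non-zero: more clusters than timestamps)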
dajes/labelfficient | [
"5dd0566224fb04285e690bf8576eacc04a7c87cd"
] | [
"commons/siam_mask/experiments/siammask_sharp/resnet.py"
] | [
"import torch.nn as nn\nimport torch\nfrom torch.autograd import Variable\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom commons.siam_mask.models.features import Features\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(Features):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n # padding = (2 - stride) + (dilation // 2 - 1)\n padding = 2 - stride\n assert stride==1 or dilation==1, \"stride and dilation must have one equals to zero at least\"\n if dilation > 1:\n padding = dilation\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=padding, bias=False, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n if out.size() != residual.size():\n print(out.size(), residual.size())\n out += residual\n\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, layer4=False, layer3=False):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2) # 31x31, 15x15\n\n self.feature_size = 128 * block.expansion\n\n if layer3:\n self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) # 15x15, 7x7\n self.feature_size = (256 + 128) * block.expansion\n else:\n self.layer3 = 
lambda x:x # identity\n\n if layer4:\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) # 7x7, 3x3\n self.feature_size = 512 * block.expansion\n else:\n self.layer4 = lambda x:x # identity\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1):\n downsample = None\n dd = dilation\n if stride != 1 or self.inplanes != planes * block.expansion:\n if stride == 1 and dilation == 1:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n else:\n if dilation > 1:\n dd = dilation // 2\n padding = dd\n else:\n dd = 1\n padding = 0\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=3, stride=stride, bias=False,\n padding=padding, dilation=dd),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n # layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))\n layers.append(block(self.inplanes, planes, stride, downsample, dilation=dd))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n p0 = self.relu(x)\n x = self.maxpool(p0)\n\n p1 = self.layer1(x)\n p2 = self.layer2(p1)\n p3 = self.layer3(p2)\n\n return p0, p1, p2, p3\n\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n\n\nif __name__ == '__main__':\n net = resnet50()\n print(net)\n net = net.cuda()\n\n var = torch.FloatTensor(1,3,127,127).cuda()\n var = Variable(var)\n\n net(var)\n print('*************')\n var = 
torch.FloatTensor(1,3,255,255).cuda()\n var = Variable(var)\n\n net(var)\n\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.FloatTensor",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
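Note: a CPU-only smoke test for the SiamMask ResNet record above (the file's own __main__ requires CUDA). Passing layer3=True enables the dilated stage, so the last two feature maps share the same spatial stride.

    import torch

    net = resnet50(layer3=True)
    net.eval()
    with torch.no_grad():
        p0, p1, p2, p3 = net(torch.randn(1, 3, 127, 127))
    for name, feat in zip(('p0', 'p1', 'p2', 'p3'), (p0, p1, p2, p3)):
        print(name, tuple(feat.shape))  # strides 2 / 4 / 8 / 8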
AlexBlack2202/EigenGAN-Tensorflow | [
"86b21a47a824a2bb04a088c3e78b03d03a53735c",
"86b21a47a824a2bb04a088c3e78b03d03a53735c"
] | [
"tflib/distribute/distribute.py",
"scripts/remove_black_edge.py"
] | [
"import tensorflow as tf\n\nfrom tensorflow.python.client import device_lib\n\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\ngpus = get_available_gpus\n\n\ndef split_nest(nest, num_or_size_splits, axis=0):\n \"\"\"Split nested structure.\n\n Examples\n --------\n >>> split_nest({'a': shape(10, 20), 'b': shape(4, 15)}, 2, axis=0)\n >>> [{'a': shape(5, 20), 'b': shape(2, 15)}, {'a': shape(5, 20), 'b': shape(2, 15)}]\n\n \"\"\"\n flatten = tf.nest.flatten(nest)\n split_flatten = [tf.split(x, num_or_size_splits, axis=axis) for x in flatten]\n return [tf.nest.pack_sequence_as(nest, x) for x in zip(*split_flatten)]\n\n\ndef parameter_server_strategy_run(devices, fn, split_args, split_kwargs=None):\n split_kwargs = [{}] * len(devices) if split_kwargs is None else split_kwargs\n\n assert len(devices) == len(split_args) == len(split_kwargs)\n\n split_returns = []\n for device, args, kwargs in zip(devices, split_args, split_kwargs):\n with tf.device(device):\n args = args if isinstance(args, (list, tuple)) else (args,)\n split_returns.append(fn(*args, **kwargs))\n\n return split_returns\n\nparellel_run = parameter_server_strategy_run\n\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Parameters\n ----------\n tower_grads:\n List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n\n Returns\n -------\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n",
"import imlib as im\nimport numpy as np\nimport pylib as py\n\n\n# ==============================================================================\n# = param =\n# ==============================================================================\n\nimg_dir = './data/anime/original_imgs'\nsave_dir = './data/anime/remove_black_edge_imgs'\nportion = 0.075\n\n\n# ==============================================================================\n# = run =\n# ==============================================================================\n\ndef count_edge(img, eps=0.4):\n up = 0\n for i in range(img.shape[0]):\n if np.mean(img[i, ...]) + 1 < eps:\n up += 1\n else:\n break\n\n down = 0\n for i in range(img.shape[0] - 1, -1, -1):\n if np.mean(img[i, ...]) + 1 < eps:\n down += 1\n else:\n break\n\n left = 0\n for i in range(img.shape[1]):\n if np.mean(img[:, i, ...]) + 1 < eps:\n left += 1\n else:\n break\n\n right = 0\n for i in range(img.shape[1] - 1, -1, -1):\n if np.mean(img[:, i, ...]) + 1 < eps:\n right += 1\n else:\n break\n\n return up, down, left, right\n\n\ndef work_fn(img_name):\n img = im.imread(img_name)\n u, d, l, r = count_edge(img)\n o = max(u, d, l, r)\n if o / img.shape[0] < portion:\n img = img[o:img.shape[0] - o, o:img.shape[1] - o, ...]\n im.imwrite(img, img_name.replace(img_dir, save_dir))\n\n\npy.mkdir(save_dir)\nimg_names = py.glob(img_dir, '*')\npy.run_parallels(work_fn, img_names)\n"
] | [
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.split",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.nest.flatten",
"tensorflow.nest.pack_sequence_as"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
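Note: a minimal eager-mode check of average_gradients from the distribute.py record above; the two towers and their gradient values are made up.

    import tensorflow as tf

    v1 = tf.Variable([0.0, 0.0])
    v2 = tf.Variable(1.0)
    tower_grads = [
        [(tf.constant([1.0, 3.0]), v1), (tf.constant(0.5), v2)],  # tower 0
        [(tf.constant([3.0, 1.0]), v1), (tf.constant(1.5), v2)],  # tower 1
    ]
    for grad, var in average_gradients(tower_grads):
        print(var.name, grad.numpy())  # -> [2. 2.] for v1, 1.0 for v2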
vdutor/VFF | [
"459be5b480bba49e8c15dc7daeca5fd1ddd762df"
] | [
"experiments/increasing_dim/Exp_1/kron.py"
] | [
"import numpy as np\nimport sys\nimport gpflow\nimport VFF\n\nfrom time import time\n\nfrom config import *\n\ndim = sys.argv[1]\nrep = sys.argv[2]\n\nprint('vff: dimension {}, replicate {}'.format(dim, r))\n\n# data\ndata = np.load('data/data_dim{}_rep{}.npz'.format(dim, 0))\n\n# full_gp\ndef prodkern(dim):\n return gpflow.kernels.Prod([gpflow.kernels.Matern32(1, active_dims=[i], lengthscales=lengthscale)\n for i in range(dim)])\nk = prodkern(dim)\nm = gpflow.gpr.GPR(data['Xtrain'], data['Ytrain'], kern=k)\nm.likelihood.variance = noise_var\ndata = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))\nmarg_lik = m.compute_log_likelihood().squeeze()\nmean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))\n\nfile = open(\"results/full.csv\",\"a\") \nfile.write(\"{}, {}, {}, {}\".format(dim, rep, marg_lik, mean_log_pred)) \nfile.close() \n\n\n##########################\n# kron\nresults = pd.DataFrame()\n\nfor dim in dimensions:\n a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)\n k = prodkern(dim)\n for r in range(repeats):\n print('kron replicate ',r,'/',repeats)\n data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))\n for M in num_freqs:\n if (2*M-1)**dim: \n a, b = -0.5 * np.ones(dim), 1.5 * np.ones(dim)\n m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,\n kerns=prodkern(dim).kern_list,\n likelihood=gpflow.likelihoods.Gaussian(),\n use_two_krons=True)\n m.likelihood.variance = noise_var\n\n # only optimize q(u)\n m.kerns.fixed = True\n m.likelihood.fixed = True\n\n start = time()\n m.optimize()\n marg_lik = m.compute_log_likelihood().squeeze()\n mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))\n t = time() - start\n\n results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,\n mean_log_pred=mean_log_pred, time=t,\n num_inducing=M),\n ignore_index=True)\n\n # do this inside the loop so we can get partial results if something crashes\n results.to_csv('results/kron.csv')\n\n##########################\n# kron_opt\nresults = pd.DataFrame()\n\nfor dim in dimensions:\n a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)\n k = prodkern(dim)\n for r in range(repeats):\n print('kron_opt replicate ',r,'/',repeats)\n data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))\n for M in num_freqs:\n if (2*M-1)**dim:\n m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,\n kerns=k.kern_list,\n likelihood=gpflow.likelihoods.Gaussian(),\n use_two_krons=True)\n m.likelihood.variance = noise_var\n # build kronecker GP model\n start = time()\n m.optimize()\n marg_lik = m.compute_log_likelihood().squeeze()\n mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))\n t = time() - start\n\n results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,\n mean_log_pred=mean_log_pred, time=t,\n num_inducing=M),\n ignore_index=True)\n\n results.to_csv('results/kron_opt.csv')\n\n\n\n##########################\n# Sparse\nresults = pd.DataFrame()\n\nfor dim in dimensions:\n for r in range(repeats):\n print('Sparse replicate ',r,'/',repeats)\n data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))\n num_inducing = (2*num_freqs-1)**dim\n for M in num_inducing:\n if M < 500: \n # build sparse GP model\n Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_\n m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))\n m.likelihood.variance = noise_var\n\n start = time()\n marg_lik = m.compute_log_likelihood().squeeze()\n mean_log_pred = np.mean(m.predict_density(data['Xtest'], 
data['Ytest']))\n t = time() - start\n\n results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,\n mean_log_pred=mean_log_pred, time=t,\n num_inducing=M),\n ignore_index=True)\n\n # do this inside the loop so we can get partial results if something crashes\n results.to_csv('results/sparse_kmeans.csv')\n\n\n\n##########################\n# Sparse GP opt \nresults = pd.DataFrame()\n\nfor dim in dimensions:\n for r in range(repeats):\n print('sparse opt replicate ',r,'/',repeats)\n data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))\n num_inducing = (2*num_freqs-1)**dim\n for M in num_inducing:\n if M < 500: \n # build sparse GP model\n Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_\n m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))\n m.likelihood.variance = noise_var\n\n # only optimize Z\n m.kern.fixed = True\n m.likelihood.fixed = True\n\n start = time()\n m.optimize()\n marg_lik = m.compute_log_likelihood().squeeze()\n mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))\n t = time() - start\n\n results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,\n mean_log_pred=mean_log_pred, time=t,\n num_inducing=M),\n ignore_index=True)\n\n # do this inside the loop so we can get partial results if something crashes\n results.to_csv('results/sparse_opt.csv')\n\n\n##########################\n# \n"
] | [
[
"numpy.arange",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
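Note: the kron script above accumulates results with DataFrame.append, which pandas removed in 2.0. A version-proof pattern (sketch; dummy values stand in for the real experiment loops) is to collect plain dicts and build the frame once:

    import pandas as pd

    records = []
    for dim in (1, 2):        # stand-ins for the real dimensions/repeats loops
        for r in range(2):
            records.append(dict(dim=dim, rep=r, marg_lik=0.0,
                                mean_log_pred=0.0, time=0.0, num_inducing=5))
    pd.DataFrame(records).to_csv('results_kron_demo.csv', index=False)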
Akshat-unt/jina | [
"b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717",
"b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717"
] | [
"tests/integration/issues/hanging_termination/test_hanging_termination.py",
"tests/unit/clients/python/test_request.py"
] | [
"import os\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom jina import Flow, Document\nfrom jina.clients import Client\nfrom jina.logging.profile import TimeContext\nfrom jina.parsers import set_client_cli_parser\nfrom typing import Dict\nfrom jina import DocumentArray, Executor, requests\n\n\nclass DumpExecutor(Executor):\n @requests\n def dump(self, docs: DocumentArray, parameters: Dict, **kwargs):\n shards = int(parameters['shards'])\n dump_path = parameters['dump_path']\n shard_size = len(docs) / shards\n os.makedirs(dump_path, exist_ok=True)\n for i in range(shards):\n dump_file = f'{dump_path}/{i}.ndjson'\n docs_to_be_dumped = docs[int(i * shard_size) : int((i + 1) * shard_size)]\n docs_to_be_dumped.save(dump_file)\n\n\nclass ErrorExecutor(Executor):\n @requests\n def dump(self, docs: DocumentArray, **kwargs):\n if len(docs) > 0:\n assert False\n\n\nclass ReloadExecutor(Executor):\n def __init__(self, dump_path=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # backwards compatibility\n assert 'dump_path' in kwargs['runtime_args'].keys()\n if dump_path is not None:\n shard_id = getattr(self.runtime_args, 'pea_id', None)\n shard_dump_path = os.path.join(dump_path, f'{shard_id}.ndjson')\n self._docs = DocumentArray.load(shard_dump_path)\n else:\n self._docs = DocumentArray()\n\n @requests\n def search(self, docs: DocumentArray, **kwargs):\n docs.clear()\n docs.extend(self._docs)\n\n\nclass MergeExecutor(Executor):\n @requests\n def merge(self, docs_matrix: DocumentArray, **kwargs):\n merged_docs = DocumentArray()\n for docs in docs_matrix:\n merged_docs.extend(docs)\n return merged_docs\n\n\ndef get_client(port):\n args = set_client_cli_parser().parse_args(\n ['--host', 'localhost', '--port', str(port)]\n )\n\n return Client(args)\n\n\ndef get_documents(count=10, emb_size=7):\n for i in range(count):\n yield Document(\n id=i,\n text=f'hello world {i}',\n embedding=np.random.random(emb_size),\n tags={'tag_field': f'tag data {i}'},\n )\n\n\ndef path_size(dump_path):\n return (\n sum(\n f.stat().st_size\n for f in Path(dump_path).glob('**/*')\n if f.is_file()\n )\n / 1e6\n )\n\n\[email protected](20)\[email protected]('shards', [5, 3, 1])\[email protected]('nr_docs', [7])\[email protected]('emb_size', [10])\ndef test_dump_reload(tmpdir, shards, nr_docs, emb_size, times_to_index=2):\n \"\"\"showcases using replicas + dump + rolling update with independent clients\"\"\"\n\n with Flow().add(uses=DumpExecutor, name='dump_exec').add(\n uses=ErrorExecutor, name='error_exec'\n ) as flow_dump:\n merge_executor = MergeExecutor if shards > 1 else None\n with Flow().add(\n uses=ReloadExecutor,\n name='reload_exec',\n replicas=2,\n shards=shards,\n uses_after=merge_executor,\n ) as flow_reload:\n for run_number in range(times_to_index):\n dump_path = os.path.join(tmpdir, f'dump-{run_number}')\n client_dbms = get_client(flow_dump.port_expose)\n client_query = get_client(flow_reload.port_expose)\n docs = list(\n get_documents(\n count=nr_docs * (run_number + 1),\n emb_size=emb_size,\n )\n )\n\n with TimeContext(f'### dumping {len(docs)} docs'):\n client_dbms.post(\n on='/dump',\n inputs=docs,\n target_peapod='dump_exec',\n parameters={'dump_path': dump_path, 'shards': shards},\n )\n\n print(f'### dump path size: {path_size(dump_path)} MBs')\n\n with TimeContext(f'### rolling update on {len(docs)}'):\n # flow object is used for ctrl requests\n flow_reload.rolling_update('reload_exec', dump_path)\n\n for _ in range(5):\n result = client_query.post(\n on='/search', 
inputs=[Document()], return_results=True\n )\n\n assert len(docs) == len(result[0].docs)\n",
"import os\nimport sys\n\nimport numpy as np\nimport pytest\nfrom google.protobuf.json_format import MessageToJson, MessageToDict\n\nfrom jina import Document, Flow\nfrom jina.clients.request import request_generator\nfrom jina.clients.request.helper import _new_doc_from_data\nfrom jina.enums import DataInputType\nfrom jina.excepts import BadDocType\nfrom jina.proto import jina_pb2\nfrom jina.proto.jina_pb2 import DocumentProto\nfrom jina.types.ndarray.generic import NdArray\n\n\[email protected](\n sys.version_info < (3, 8, 0),\n reason='somehow this does not work on Github workflow with Py3.7, '\n 'but Py 3.8 is fine, local Py3.7 is fine',\n)\ndef test_on_bad_iterator():\n # this should not stuck the server as request_generator's error is handled on the client side\n f = Flow().add()\n with f:\n f.index([1, 2, 3])\n\n\[email protected](\n 'builder',\n [\n lambda x: x.SerializeToString(),\n lambda x: MessageToJson(x),\n lambda x: MessageToDict(x),\n lambda x: Document(x),\n ],\n)\ndef test_data_type_builder_doc(builder):\n a = DocumentProto()\n a.id = 'a236cbb0eda62d58'\n d, t = _new_doc_from_data(builder(a), DataInputType.DOCUMENT)\n assert d.id == a.id\n assert t == DataInputType.DOCUMENT\n\n\ndef test_data_type_builder_doc_bad():\n a = DocumentProto()\n a.id = 'a236cbb0eda62d58'\n with pytest.raises(BadDocType):\n _new_doc_from_data(b'BREAKIT!' + a.SerializeToString(), DataInputType.DOCUMENT)\n\n with pytest.raises(BadDocType):\n _new_doc_from_data(MessageToJson(a) + '🍔', DataInputType.DOCUMENT)\n\n\[email protected]('input_type', [DataInputType.AUTO, DataInputType.CONTENT])\ndef test_data_type_builder_auto(input_type):\n if 'JINA_ARRAY_QUANT' in os.environ:\n print(f'quant is on: {os.environ[\"JINA_ARRAY_QUANT\"]}')\n del os.environ['JINA_ARRAY_QUANT']\n\n d, t = _new_doc_from_data('123', input_type)\n assert d.text == '123'\n assert t == DataInputType.CONTENT\n\n d, t = _new_doc_from_data(b'123', input_type)\n assert t == DataInputType.CONTENT\n assert d.buffer == b'123'\n\n c = np.random.random([10, 10])\n d, t = _new_doc_from_data(c, input_type)\n np.testing.assert_equal(d.blob, c)\n assert t == DataInputType.CONTENT\n\n\ndef test_request_generate_lines():\n def random_lines(num_lines):\n for j in range(1, num_lines + 1):\n yield f'i\\'m dummy doc {j}'\n\n req = request_generator('', data=random_lines(100), request_size=100)\n\n request = next(req)\n assert len(request.docs) == 100\n assert request.docs[0].mime_type == 'text/plain'\n assert request.docs[0].text == 'i\\'m dummy doc 1'\n\n\ndef test_request_generate_lines_from_list():\n def random_lines(num_lines):\n return [f'i\\'m dummy doc {j}' for j in range(1, num_lines + 1)]\n\n req = request_generator('', data=random_lines(100), request_size=100)\n\n request = next(req)\n assert len(request.docs) == 100\n for index, doc in enumerate(request.docs, 1):\n assert doc.mime_type == 'text/plain'\n assert doc.text == f'i\\'m dummy doc {index}'\n\n\ndef test_request_generate_bytes():\n def random_lines(num_lines):\n for j in range(1, num_lines + 1):\n yield f'i\\'m dummy doc {j}'\n\n req = request_generator('', data=random_lines(100), request_size=100)\n\n request = next(req)\n assert len(request.docs) == 100\n for index, doc in enumerate(request.docs, 1):\n assert doc.text == f'i\\'m dummy doc {index}'\n assert doc.mime_type == 'text/plain'\n\n\ndef test_request_generate_docs():\n def random_docs(num_docs):\n for j in range(1, num_docs + 1):\n doc = jina_pb2.DocumentProto()\n doc.text = f'i\\'m dummy doc {j}'\n doc.offset = 1000\n 
doc.tags['id'] = 1000 # this will be ignored\n doc.mime_type = 'mime_type'\n yield doc\n\n req = request_generator('', data=random_docs(100), request_size=100)\n\n request = next(req)\n assert len(request.docs) == 100\n for index, doc in enumerate(request.docs, 1):\n assert doc.mime_type == 'mime_type'\n assert doc.text == f'i\\'m dummy doc {index}'\n assert doc.offset == 1000\n\n\ndef test_request_generate_dict():\n def random_docs(num_docs):\n for j in range(1, num_docs + 1):\n doc = {\n 'text': f'i\\'m dummy doc {j}',\n 'offset': 1000,\n 'tags': {'id': 1000},\n 'chunks': [\n {'text': \"i'm chunk 1\", 'modality': 'text'},\n {'text': \"i'm chunk 2\", 'modality': 'image'},\n ],\n }\n\n yield doc\n\n req = request_generator('', data=random_docs(100), request_size=100)\n\n request = next(req)\n assert len(request.docs) == 100\n for index, doc in enumerate(request.docs, 1):\n assert doc.text == f'i\\'m dummy doc {index}'\n assert doc.offset == 1000\n assert doc.tags['id'] == 1000\n assert len(doc.chunks) == 2\n assert doc.chunks[0].modality == 'text'\n assert doc.chunks[0].text == f'i\\'m chunk 1'\n assert doc.chunks[1].modality == 'image'\n assert doc.chunks[1].text == f'i\\'m chunk 2'\n\n\ndef test_request_generate_dict_str():\n import json\n\n def random_docs(num_docs):\n for j in range(1, num_docs + 1):\n doc = {\n 'text': f'i\\'m dummy doc {j}',\n 'offset': 1000,\n 'tags': {'id': 1000},\n 'chunks': [\n {'text': \"i'm chunk 1\", 'modality': 'text'},\n {'text': \"i'm chunk 2\", 'modality': 'image'},\n ],\n }\n\n yield json.dumps(doc)\n\n req = request_generator('', data=random_docs(100), request_size=100)\n\n request = next(req)\n assert len(request.docs) == 100\n for index, doc in enumerate(request.docs, 1):\n assert doc.text == f'i\\'m dummy doc {index}'\n assert doc.offset == 1000\n assert doc.tags['id'] == 1000\n assert len(doc.chunks) == 2\n assert doc.chunks[0].modality == 'text'\n assert doc.chunks[0].text == f'i\\'m chunk 1'\n assert doc.chunks[1].modality == 'image'\n assert doc.chunks[1].text == f'i\\'m chunk 2'\n\n\ndef test_request_generate_numpy_arrays():\n input_array = np.random.random([10, 10])\n\n req = request_generator('', data=input_array, request_size=5)\n\n request = next(req)\n assert len(request.docs) == 5\n for index, doc in enumerate(request.docs, 1):\n assert NdArray(doc.blob).value.shape == (10,)\n\n request = next(req)\n assert len(request.docs) == 5\n for index, doc in enumerate(request.docs, 1):\n assert NdArray(doc.blob).value.shape == (10,)\n\n\ndef test_request_generate_numpy_arrays_iterator():\n input_array = np.random.random([10, 10])\n\n def generator():\n yield from input_array\n\n req = request_generator('', data=generator(), request_size=5)\n\n request = next(req)\n assert len(request.docs) == 5\n for index, doc in enumerate(request.docs, 1):\n assert NdArray(doc.blob).value.shape == (10,)\n\n request = next(req)\n assert len(request.docs) == 5\n for index, doc in enumerate(request.docs, 1):\n assert NdArray(doc.blob).value.shape == (10,)\n"
] | [
[
"numpy.random.random"
],
[
"numpy.testing.assert_equal",
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
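Note: the jina tests above depend on the client splitting an iterable into fixed-size requests (request_size). A generic, standalone sketch of that chunking behaviour follows — illustrative only, not jina's internal implementation:

    from itertools import islice

    def batch(iterable, size):
        """Yield lists of at most `size` items, mirroring request_size chunking."""
        it = iter(iterable)
        while True:
            chunk = list(islice(it, size))
            if not chunk:
                return
            yield chunk

    docs = (f"i'm dummy doc {j}" for j in range(1, 251))
    print([len(c) for c in batch(docs, 100)])  # [100, 100, 50]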
claireguichon/pynet | [
"92706375e61fb5cb523548303b7d04769c9de134",
"92706375e61fb5cb523548303b7d04769c9de134",
"92706375e61fb5cb523548303b7d04769c9de134"
] | [
"pynet/cam.py",
"examples/tuning/breast_cancer.py",
"pynet/models/braingengan.py"
] | [
"# -*- coding: utf-8 -*-\n##########################################################################\n# NSAp - Copyright (C) CEA, 2019\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n\n\"\"\"\nModule that provides tools to compute class activation map.\n\"\"\"\n\n\n# Imports\nimport logging\nimport skimage\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as func\n\n\n# Global parameters\nlogger = logging.getLogger(\"pynet\")\n\n\nclass FeatureExtractor(object):\n \"\"\" Class for extracting activations and registering gradients from\n targetted intermediate layers.\n \"\"\"\n def __init__(self, model, target_layers):\n self.model = model\n self.target_layers = target_layers\n self.gradients = []\n\n def save_gradient(self, grad):\n self.gradients.append(grad)\n\n def __call__(self, x):\n outputs = []\n self.gradients = []\n for name, module in self.model._modules.items():\n x = module(x)\n if name in self.target_layers:\n x.register_hook(self.save_gradient)\n outputs += [x]\n return outputs, x\n\n\nclass ModelOutputs(object):\n \"\"\" Class for making a forward pass, and getting:\n 1- the network output.\n 2- activations from intermeddiate targetted layers.\n 3- gradients from intermeddiate targetted layers.\n \"\"\"\n def __init__(self, model, target_layers):\n self.model = model\n self.feature_extractor = FeatureExtractor(\n self.model.features, target_layers)\n\n def get_activations_gradient(self):\n return self.feature_extractor.gradients\n\n def get_activations(self, x):\n return self.feature_extractor(x)\n\n def __call__(self, x):\n if hasattr(self.model, \"pre\"):\n x = self.model.pre(x)\n target_activations, output = self.feature_extractor(x)\n if hasattr(self.model, \"pool\"):\n output = self.model.pool(output)\n output = output.view(output.size(0), -1)\n output = self.model.classifier(output)\n return target_activations, output\n\n\nclass GradCam(object):\n \"\"\" Class for computing class activation map.\n \"\"\"\n def __init__(self, model, target_layers, labels, top=1):\n self.model = model\n self.labels = labels\n self.top = top\n self.model.eval()\n self.extractor = ModelOutputs(self.model, target_layers)\n\n def forward(self, input):\n return self.model(input)\n\n def __call__(self, input):\n features, output = self.extractor(input)\n pred_prob = func.softmax(output, dim=1).data.squeeze()\n probs, indices = pred_prob.sort(0, True)\n probs = probs.data.numpy()\n indices = indices.data.numpy()\n heatmaps = {}\n for cnt, (prob, index) in enumerate(zip(probs, indices)):\n if cnt == self.top:\n break\n label = self.labels[str(index)][1]\n line = \"{0:.3f} -> {1}\".format(prob, label)\n logger.info(line)\n one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)\n one_hot[0][index] = 1\n one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)\n one_hot = torch.sum(one_hot * output)\n self.model.features.zero_grad()\n self.model.classifier.zero_grad()\n one_hot.backward(retain_graph=True)\n gradients = self.extractor.get_activations_gradient()[-1]\n gradients = gradients.cpu().data.numpy()\n pooled_gradients = np.mean(gradients, axis=(0, 2, 3))\n activations = features[-1]\n activations = activations.cpu().data.numpy()\n for cnt, weight in enumerate(pooled_gradients):\n activations[:, 
cnt] *= weight\n heatmap = np.mean(activations, axis=1).squeeze()\n heatmap = np.maximum(heatmap, 0)\n heatmap -= np.min(heatmap)\n heatmap /= np.max(heatmap)\n heatmap_highres = skimage.transform.resize(\n heatmap, input.shape[2:])\n heatmaps[label] = (input, heatmap, heatmap_highres)\n return heatmaps\n",
"\"\"\"\npynet: hyper parameters tuning\n==============================\n\nCredit: A Grigis\nBased on:\n- https://github.com/autonomio/talos/blob/master/docs/Examples_PyTorch.md\n\nIn this tutorial, you will learn how to tune the hyperparameters using the\ntalos and the kerasplotlib modules.\n\"\"\"\n\nimport talos\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# from torch_optimizer import torch_optimizer\n\nfrom sklearn.metrics import f1_score\n\nfrom pynet.interfaces import DeepLearningInterface\nfrom pynet.datasets import DataManager\n\n#############################################################################\n# Data Preparation\n# ----------------\n#\n# For this experiment, we're going to use the breast cancer dataset.\n\nx, y = talos.templates.datasets.breast_cancer()\nx = talos.utils.rescale_meanzero(x)\nx_train, y_train, x_val, y_val = talos.utils.val_split(x, y, .2)\nprint(\"Train: \", x_train.shape, y_train.shape)\nprint(\"Validation: \", x_val.shape, y_val.shape)\n\n#############################################################################\n# Model Preparation\n# -----------------\n#\n# Talos works with any pynet model, without changing the structure of the\n# model in anyway, or without introducing any new syntax. The below example\n# shows clearly how this works.\n\n\nclass BreastCancerNet(nn.Module, talos.utils.TorchHistory):\n def __init__(self, n_feature, first_neuron, second_neuron, dropout):\n super(BreastCancerNet, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, first_neuron)\n torch.nn.init.normal_(self.hidden.weight)\n self.hidden1 = torch.nn.Linear(first_neuron, second_neuron)\n self.dropout = torch.nn.Dropout(dropout)\n self.out = torch.nn.Linear(second_neuron, 2)\n\n def forward(self, x):\n x = F.relu(self.hidden(x))\n x = self.dropout(x)\n x = torch.sigmoid(self.hidden1(x))\n x = self.out(x)\n return x\n\n\ndef update_talos_history(signal):\n \"\"\" Callback to update talos history.\n\n Parameters\n ----------\n signal: SignalObject\n an object with the trained model 'object', the emitted signal\n 'signal', the epoch number 'epoch' and the fold index 'fold'.\n \"\"\"\n net = signal.object.model\n emitted_signal = signal.signal\n epoch = signal.epoch\n fold = signal.fold\n for key in signal.keys:\n if key in (\"epoch\", \"fold\"):\n continue\n value = getattr(signal, key)\n if value is not None:\n net.append_history(value, key)\n\n\ndef breast_cancer(x_train, y_train, x_val, y_val, params):\n print(\"Iteration parameters: \", params)\n\n def weights_init_uniform_rule(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n n = m.in_features\n y = 1.0 / np.sqrt(n)\n m.weight.data.uniform_(-y, y)\n m.bias.data.fill_(0)\n manager = DataManager.from_numpy(\n train_inputs=x_train, train_labels=y_train,\n batch_size=params[\"batch_size\"], validation_inputs=x_val,\n validation_labels=y_val)\n net = BreastCancerNet(\n n_feature=x_train.shape[1], first_neuron=params[\"first_neuron\"],\n second_neuron=params[\"second_neuron\"], dropout=params[\"dropout\"])\n net.apply(weights_init_uniform_rule)\n net.init_history()\n model = DeepLearningInterface(\n model=net,\n optimizer_name=params[\"optimizer_name\"],\n learning_rate=params[\"learning_rate\"],\n loss_name=params[\"loss_name\"],\n metrics=[\"accuracy\"])\n model.add_observer(\"after_epoch\", update_talos_history)\n model.training(\n manager=manager,\n nb_epochs=params[\"epochs\"],\n checkpointdir=None,\n fold_index=0,\n 
with_validation=True)\n    return net, net.parameters()\n\n#############################################################################\n# Setting the Parameter Space Boundaries\n# --------------------------------------\n#\n# In the last and final step, we're going to create the dictionary, which will\n# then be passed on to Talos together with the model above. Here we have\n# three different ways to input values:\n# - as stepped ranges (min, max, steps)\n# - as multiple values [in a list]\n# - as a single value [in a list]\n# For values we don't want to use, it's ok to set it as None.\n\n\nparams = {\n    \"first_neuron\": [200, 100],\n    \"second_neuron\": [30, 50],\n    \"dropout\": [0.2, 0.3],\n    \"optimizer_name\": [\"SGD\", \"Adam\"],\n    \"loss_name\": [\"CrossEntropyLoss\"],\n    \"learning_rate\": [1e-3, 1e-4],\n    \"batch_size\": [20, 50, 5],\n    \"epochs\": [10, 20]\n}\n\n#############################################################################\n# Run the Hyperparameter scan\n# ---------------------------\n#\n# Now we are ready to run the model based on the parameters and the layer\n# configuration above. The exact same process would apply with any other\n# model, just make sure to pass the model function name in the Scan() command\n# as in the below example. To get started quickly, we're going to invoke only\n# 10 rounds.\n\nos.chdir(\"/tmp\")\nscan_object = talos.Scan(x=x_train,\n                         y=y_train,\n                         params=params,\n                         model=breast_cancer,\n                         experiment_name=\"breast_cancer\",\n                         round_limit=10)\n\n\n#############################################################################\n# Access the results through the Scan object\n# ------------------------------------------\n#\n\nprint(\"accessing the results data frame\")\nprint(scan_object.data.head())\n\nprint(\"accessing epoch entropy values for each round\")\nprint(scan_object.learning_entropy)\n\nprint(\"access the summary details\")\nprint(scan_object.details)\n\nprint(\"accessing the saved models\")\nprint(scan_object.saved_models)\n\nprint(\"accessing the saved weights for models\")\nprint(scan_object.saved_weights)\n\n#############################################################################\n# Analysing the Scan results with reporting\n# -----------------------------------------\n#\n\nprint(\"use Scan object as input\")\nanalyze_object = talos.Analyze(scan_object)\n\nprint(\"access the dataframe with the results\")\nprint(analyze_object.data)\n\nprint(\"get the number of rounds in the Scan\")\nprint(analyze_object.rounds())\n\nprint(\"get the highest result for any metric\")\nprint(analyze_object.high('val_accuracy'))\n\nprint(\"get the round with the best result\")\nprint(analyze_object.rounds2high('val_accuracy'))\n\nprint(\"get the best parameters\")\nprint(analyze_object.best_params(\n    'val_accuracy', ['accuracy', 'loss', 'val_loss']))\n\nprint(\"get correlation for hyperparameters against a metric\")\nprint(analyze_object.correlate('val_loss', ['accuracy', 'loss', 'val_loss']))\n\nprint(\"a regression plot for two dimensions\")\nanalyze_object.plot_regs('val_accuracy', 'val_loss')\n\nprint(\"line plot\")\nanalyze_object.plot_line('val_accuracy')\n\nprint(\"up to two dimensional kernel density estimator\")\nanalyze_object.plot_kde('val_accuracy')\n\nprint(\"a simple histogram\")\nanalyze_object.plot_hist('val_accuracy', bins=50)\n\nprint(\"heatmap correlation\")\nanalyze_object.plot_corr('val_loss', ['accuracy', 'loss', 'val_loss'])\n\nprint(\"a four dimensional bar grid\")\nanalyze_object.plot_bars(\n    'batch_size', 'val_accuracy', 
'first_neuron', 'learning_rate')\n\nif \"CI_MODE\" not in os.environ:\n import matplotlib.pyplot as plt\n plt.show()\n",
"# -*- coding: utf-8 -*-\n##########################################################################\n# NSAp - Copyright (C) CEA, 2020\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n\"\"\"\n3D MRI Brain Generation with Generative Adversarial Networks (BGGAN) with\nVariational Auto Encoder (VAE).\n\"\"\"\n\n# Imports\nimport logging\nimport collections\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as func\nfrom pynet.utils import Networks\n\n\n# Global parameters\nlogger = logging.getLogger(\"pynet\")\n\n\[email protected]\nclass BGDiscriminator(nn.Module):\n \"\"\" This is the discriminator part of the BGGAN.\n \"\"\"\n def __init__(self, in_shape, in_channels=1, out_channels=1,\n start_filts=64, with_logit=True):\n \"\"\" Init class.\n\n Parameters\n ----------\n in_shape: uplet\n the input tensor data shape (X, Y, Z).\n in_channels: int, default 1\n number of channels in the input tensor.\n out_channels: int, default 1\n number of channels in the output tensor.\n start_filts: int, default 64\n number of convolutional filters for the first conv.\n with_logit: bool, default True\n apply the logit function to the result.\n \"\"\"\n super(BGDiscriminator, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.start_filts = start_filts\n self.with_logit = with_logit\n self.in_shape = in_shape\n self.shapes = _downsample_shape(\n self.in_shape, nb_iterations=4, scale_factor=2)\n self.conv1 = nn.Conv3d(\n self.in_channels, self.start_filts, kernel_size=4, stride=2,\n padding=1)\n self.conv2 = nn.Conv3d(\n self.start_filts, self.start_filts * 2, kernel_size=4, stride=2,\n padding=1)\n self.bn2 = nn.BatchNorm3d(self.start_filts * 2)\n self.conv3 = nn.Conv3d(\n self.start_filts * 2, self.start_filts * 4, kernel_size=4,\n stride=2, padding=1)\n self.bn3 = nn.BatchNorm3d(self.start_filts * 4)\n self.conv4 = nn.Conv3d(\n self.start_filts * 4, self.start_filts * 8, kernel_size=4,\n stride=2, padding=1)\n self.bn4 = nn.BatchNorm3d(self.start_filts * 8)\n self.conv5 = nn.Conv3d(\n self.start_filts * 8, self.out_channels,\n kernel_size=self.shapes[-1], stride=1, padding=0)\n\n def forward(self, x):\n logger.debug(\"BGGAN Discriminator...\")\n self.debug(\"input\", x)\n h1 = func.leaky_relu(self.conv1(x), negative_slope=0.2)\n self.debug(\"conv1\", h1)\n h2 = func.leaky_relu(self.bn2(self.conv2(h1)), negative_slope=0.2)\n self.debug(\"conv2\", h2)\n h3 = func.leaky_relu(self.bn3(self.conv3(h2)), negative_slope=0.2)\n self.debug(\"conv3\", h3)\n h4 = func.leaky_relu(self.bn4(self.conv4(h3)), negative_slope=0.2)\n self.debug(\"conv4\", h4)\n h5 = self.conv5(h4)\n self.debug(\"conv5\", h5)\n if self.with_logit:\n output = torch.sigmoid(h5.view(h5.size(0), -1))\n self.debug(\"output\", output)\n else:\n output = h5\n logger.debug(\"Done.\")\n return output\n\n def debug(self, name, tensor):\n logger.debug(\" {3}: {0} - {1} - {2}\".format(\n tensor.shape, tensor.get_device(), tensor.dtype, name))\n\n\[email protected]\nclass BGEncoder(nn.Module):\n \"\"\" This is the encoder part of the BGGAN.\n \"\"\"\n def __init__(self, in_shape, in_channels=1, start_filts=64,\n latent_dim=1000):\n \"\"\" Init class.\n\n Parameters\n ----------\n in_shape: uplet\n the input 
tensor data shape (X, Y, Z).\n        in_channels: int, default 1\n            number of channels in the input tensor.\n        start_filts: int, default 64\n            number of convolutional filters for the first conv.\n        latent_dim: int, default 1000\n            the latent variable sizes.\n        \"\"\"\n        super(BGEncoder, self).__init__()\n        self.in_channels = in_channels\n        self.start_filts = start_filts\n        self.latent_dim = latent_dim\n        self.in_shape = in_shape\n        self.shapes = _downsample_shape(\n            self.in_shape, nb_iterations=4, scale_factor=2)\n        self.dense_features = np.prod(self.shapes[-1])\n        logger.debug(\"BGGAN Encoder shapes: {0}\".format(self.shapes))\n        self.conv1 = nn.Conv3d(\n            self.in_channels, self.start_filts, kernel_size=4, stride=2,\n            padding=1)\n        self.conv2 = nn.Conv3d(\n            self.start_filts, self.start_filts * 2, kernel_size=4, stride=2,\n            padding=1)\n        self.bn2 = nn.BatchNorm3d(self.start_filts * 2)\n        self.conv3 = nn.Conv3d(\n            self.start_filts * 2, self.start_filts * 4, kernel_size=4,\n            stride=2, padding=1)\n        self.bn3 = nn.BatchNorm3d(self.start_filts * 4)\n        self.conv4 = nn.Conv3d(\n            self.start_filts * 4, self.start_filts * 8, kernel_size=4,\n            stride=2, padding=1)\n        self.bn4 = nn.BatchNorm3d(self.start_filts * 8)\n        self.mean = nn.Sequential(\n            nn.Linear(self.start_filts * 8 * self.dense_features, 2048),\n            nn.BatchNorm1d(2048),\n            nn.ReLU(),\n            nn.Linear(2048, self.latent_dim))\n        self.logvar = nn.Sequential(\n            nn.Linear(self.start_filts * 8 * self.dense_features, 2048),\n            nn.BatchNorm1d(2048),\n            nn.ReLU(),\n            nn.Linear(2048, self.latent_dim))\n\n    def forward(self, x):\n        logger.debug(\"BGGAN Encoder...\")\n        batch_size = x.size(0)\n        logger.debug(\"  batch_size: {0}\".format(batch_size))\n        self.debug(\"input\", x)\n        h1 = func.leaky_relu(self.conv1(x), negative_slope=0.2)\n        self.debug(\"conv1\", h1)\n        h2 = func.leaky_relu(self.bn2(self.conv2(h1)), negative_slope=0.2)\n        self.debug(\"conv2\", h2)\n        h3 = func.leaky_relu(self.bn3(self.conv3(h2)), negative_slope=0.2)\n        self.debug(\"conv3\", h3)\n        h4 = func.leaky_relu(self.bn4(self.conv4(h3)), negative_slope=0.2)\n        self.debug(\"conv4\", h4)\n        mean = self.mean(h4.view(batch_size, -1))\n        self.debug(\"mean\", mean)\n        logvar = self.logvar(h4.view(batch_size, -1))\n        self.debug(\"logvar\", logvar)\n        std = logvar.mul(0.5).exp_()\n        reparametrized_noise = Variable(\n            torch.randn((batch_size, self.latent_dim))).to(x.device)\n        reparametrized_noise = mean + std * reparametrized_noise\n        self.debug(\"reparametrization\", reparametrized_noise)\n        logger.debug(\"Done.\")\n        return mean, logvar, reparametrized_noise\n\n    def debug(self, name, tensor):\n        logger.debug(\"  {3}: {0} - {1} - {2}\".format(\n            tensor.shape, tensor.get_device(), tensor.dtype, name))\n\n\[email protected]\nclass BGCodeDiscriminator(nn.Module):\n    \"\"\" This is the code discriminator part of the BGGAN.\n    \"\"\"\n    def __init__(self, out_channels=1, code_size=1000, n_units=4096):\n        \"\"\" Init class.\n\n        Parameters\n        ----------\n        out_channels: int, default 1\n            number of channels in the output tensor.\n        code_size: int, default 1000\n            the code size.\n        n_units: int, default 4096\n            the number of hidden units.\n        \"\"\"\n        super(BGCodeDiscriminator, self).__init__()\n        self.out_channels = out_channels\n        self.code_size = code_size\n        self.n_units = n_units\n        self.layer1 = nn.Sequential(\n            nn.Linear(self.code_size, self.n_units),\n            nn.BatchNorm1d(self.n_units),\n            nn.LeakyReLU(0.2, inplace=True))\n        self.layer2 = nn.Sequential(\n            nn.Linear(self.n_units, self.n_units),\n            nn.BatchNorm1d(self.n_units),\n            nn.LeakyReLU(0.2, inplace=True))\n        self.layer3 = 
nn.Linear(self.n_units, self.out_channels)\n\n def forward(self, x):\n logger.debug(\"BGGAN Code Discriminator...\")\n self.debug(\"input\", x)\n h1 = self.layer1(x)\n self.debug(\"layer1\", h1)\n h2 = self.layer2(h1)\n self.debug(\"layer2\", h2)\n output = self.layer3(h2)\n self.debug(\"layer3\", output)\n logger.debug(\"Done.\")\n return output\n\n def debug(self, name, tensor):\n logger.debug(\" {3}: {0} - {1} - {2}\".format(\n tensor.shape, tensor.get_device(), tensor.dtype, name))\n\n\[email protected]\nclass BGGenerator(nn.Module):\n \"\"\" This is the generator part of the BGGAN.\n \"\"\"\n def __init__(self, in_shape, out_channels=1, start_filts=64,\n latent_dim=1000, mode=\"trilinear\", with_code=False):\n \"\"\" Init class.\n\n Parameters\n ----------\n in_shape: uplet\n the input tensor data shape (X, Y, Z).\n out_channels: int, default 1\n number of channels in the output tensor.\n start_filts: int, default 64\n number of convolutional filters for the first conv.\n latent_dim: int, default 1000\n the latent variable sizes.\n mode: str, default 'trilinear'\n the interpolation mode.\n with_code: bool, default False\n change the architecture if code discriminator is used.\n \"\"\"\n super(BGGenerator, self).__init__()\n self.out_channels = out_channels\n self.start_filts = start_filts\n self.latent_dim = latent_dim\n self.in_shape = in_shape\n self.mode = mode\n self.with_code = with_code\n self.shapes = _downsample_shape(\n self.in_shape, nb_iterations=4, scale_factor=2)\n self.dense_features = np.prod(self.shapes[-1])\n logger.debug(\"BGGAN Generator shapes: {0}\".format(self.shapes))\n if self.with_code:\n self.tp_conv1 = nn.ConvTranspose3d(\n self.latent_dim, self.start_filts * 8, kernel_size=4,\n stride=1, padding=0, bias=False)\n else:\n self.fc = nn.Linear(\n self.latent_dim, self.start_filts * 8 * self.dense_features)\n self.bn1 = nn.BatchNorm3d(self.start_filts * 8)\n\n self.tp_conv2 = nn.Conv3d(\n self.start_filts * 8, self.start_filts * 4, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(self.start_filts * 4)\n\n self.tp_conv3 = nn.Conv3d(\n self.start_filts * 4, self.start_filts * 2, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn3 = nn.BatchNorm3d(self.start_filts * 2)\n\n self.tp_conv4 = nn.Conv3d(\n self.start_filts * 2, self.start_filts, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn4 = nn.BatchNorm3d(self.start_filts)\n\n self.tp_conv5 = nn.Conv3d(\n self.start_filts, self.out_channels, kernel_size=3, stride=1,\n padding=1, bias=False)\n\n def forward(self, noise):\n logger.debug(\"BGGAN Generator...\")\n self.debug(\"input\", noise)\n if self.with_code:\n noise = noise.view(-1, self.latent_dim, 1, 1, 1)\n self.debug(\"view\", noise)\n h = self.tp_conv1(noise)\n self.debug(\"tp_conv1\", h)\n else:\n noise = noise.view(-1, self.latent_dim)\n self.debug(\"view\", noise)\n h = self.fc(noise)\n self.debug(\"dense\", h)\n h = h.view(-1, self.start_filts * 8, *self.shapes[-1])\n self.debug(\"view\", h)\n h = func.relu(self.bn1(h))\n\n h = nn.functional.interpolate(\n h, size=self.shapes[-2], mode=self.mode, align_corners=False)\n h = self.tp_conv2(h)\n h = func.relu(self.bn2(h))\n self.debug(\"tp_conv2\", h)\n\n h = nn.functional.interpolate(\n h, size=self.shapes[-3], mode=self.mode, align_corners=False)\n h = self.tp_conv3(h)\n h = func.relu(self.bn3(h))\n self.debug(\"tp_conv3\", h)\n\n h = nn.functional.interpolate(\n h, size=self.shapes[-4], mode=self.mode, align_corners=False)\n h = self.tp_conv4(h)\n h = 
func.relu(self.bn4(h))\n self.debug(\"tp_conv4\", h)\n\n h = nn.functional.interpolate(\n h, size=self.shapes[-5], mode=self.mode, align_corners=False)\n h = self.tp_conv5(h)\n self.debug(\"tp_conv5\", h)\n\n h = torch.tanh(h)\n self.debug(\"output\", h)\n logger.debug(\"Done.\")\n return h\n\n def debug(self, name, tensor):\n logger.debug(\" {3}: {0} - {1} - {2}\".format(\n tensor.shape, tensor.get_device(), tensor.dtype, name))\n\n\ndef _downsample_shape(shape, nb_iterations=1, scale_factor=2):\n shape = np.asarray(shape)\n all_shapes = [shape.astype(int).tolist()]\n for idx in range(nb_iterations):\n shape = np.floor(shape / scale_factor)\n all_shapes.append(shape.astype(int).tolist())\n return all_shapes\n"
] | [
[
"torch.nn.functional.softmax",
"numpy.maximum",
"numpy.min",
"torch.sum",
"torch.from_numpy",
"numpy.max",
"numpy.mean"
],
[
"torch.nn.Dropout",
"numpy.sqrt",
"torch.nn.Linear",
"torch.nn.init.normal_",
"matplotlib.pyplot.show"
],
[
"torch.nn.BatchNorm1d",
"numpy.asarray",
"torch.nn.ConvTranspose3d",
"torch.randn",
"torch.tanh",
"torch.nn.Linear",
"torch.nn.Conv3d",
"torch.nn.LeakyReLU",
"numpy.prod",
"torch.nn.functional.interpolate",
"numpy.floor",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
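A quick, self-contained check of the _downsample_shape helper shared by the BGGAN classes in the row above; the function body is copied verbatim from the row, and only the example call and the printed output are new:

import numpy as np

def _downsample_shape(shape, nb_iterations=1, scale_factor=2):
    # Halve the spatial shape nb_iterations times, keeping every stage.
    shape = np.asarray(shape)
    all_shapes = [shape.astype(int).tolist()]
    for _ in range(nb_iterations):
        shape = np.floor(shape / scale_factor)
        all_shapes.append(shape.astype(int).tolist())
    return all_shapes

# Four halvings of a 64^3 volume, matching nb_iterations=4 in the models above.
print(_downsample_shape((64, 64, 64), nb_iterations=4))
# [[64, 64, 64], [32, 32, 32], [16, 16, 16], [8, 8, 8], [4, 4, 4]]

The last stage, shapes[-1], is what the discriminator uses as the kernel size of its final convolution and what the generator flattens into dense_features.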
GatherLab/OLED-evaluation | [
"419dfd5d2c3773f5f90d76aef634f8b1cc0b6378"
] | [
"src/UI_assign_group_window.py"
] | [
"# -*- coding: utf-8 -*-\nfrom PySide2 import QtCore, QtGui, QtWidgets\n\nimport json\nimport core_functions as cf\nimport numpy as np\n\nfrom UI_labeled_slider import LabeledSlider\n\n\nclass Ui_AssignGroup(object):\n def setupUi(self, AssignGroups):\n # Note: this is not how it should be done but currently I don't know\n # how to do it differently. This is only needed to be able to emit\n # signals to the main window\n\n AssignGroups.setObjectName(\"AssignGroups\")\n AssignGroups.setWindowTitle(\"Group Assignement Dialog\")\n AssignGroups.resize(509, 317)\n AssignGroups.setStyleSheet(\n \"QWidget {\\n\"\n \" background-color: rgb(44, 49, 60);\\n\"\n \" color: rgb(255, 255, 255);\\n\"\n ' font: 63 10pt \"Segoe UI\";\\n'\n \"}\\n\"\n \"QPushButton {\\n\"\n \" border: 2px solid rgb(52, 59, 72);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QPushButton:hover {\\n\"\n \" background-color: rgb(57, 65, 80);\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \"}\\n\"\n \"QPushButton:pressed {\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(43, 50, 61);\\n\"\n \"}\\n\"\n \"QPushButton:checked {\\n\"\n \" background-color: rgb(35, 40, 49);\\n\"\n \" border: 2px solid rgb(85, 170, 255);\\n\"\n \"}\"\n \"QLineEdit {\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QSpinBox {\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n \"QDoubleSpinBox {\\n\"\n \" border: 2px solid rgb(61, 70, 86);\\n\"\n \" border-radius: 5px;\\n\"\n \" background-color: rgb(52, 59, 72);\\n\"\n \"}\\n\"\n )\n self.verticalLayout = QtWidgets.QVBoxLayout(AssignGroups)\n self.verticalLayout.setContentsMargins(25, 10, 25, 10)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n\n # # Device settings\n # self.device_settings_header_label = QtWidgets.QLabel(AssignGroups)\n # self.device_settings_header_label.setMinimumSize(QtCore.QSize(0, 20))\n # self.device_settings_header_label.setStyleSheet(\n # 'font: 75 bold 10pt \"Segoe UI\";'\n # )\n # self.device_settings_header_label.setObjectName(\"device_settings_header_label\")\n # self.verticalLayout.addWidget(self.device_settings_header_label)\n\n # self.header_line_1 = QtWidgets.QFrame()\n # self.header_line_1.setFrameShape(QtWidgets.QFrame.HLine)\n # self.header_line_1.setFrameShadow(QtWidgets.QFrame.Sunken)\n # self.verticalLayout.addWidget(self.header_line_1)\n # self.header_line_1.setStyleSheet(\n # \"QFrame {\\n\" \" border: 2px solid rgb(52, 59, 72);\\n\" \"}\\n\"\n # )\n\n # self.manualRowCountGridLayout = 1\n\n # Define dialog in which parameters should be entered\n # dialog = QtWidgets.QDialog()\n # dialog.setWindowTitle(\"Group Assignement Dialog\")\n\n # Select the scan that shall be evaluated\n if not self.include_all_scans:\n self.select_scan_number_label = QtWidgets.QLabel()\n self.select_scan_number_label.setObjectName(\"select_scan_number_label\")\n self.verticalLayout.addWidget(self.select_scan_number_label)\n\n self.select_scan_number_ComboBox = QtWidgets.QComboBox()\n self.select_scan_number_ComboBox.setObjectName(\n \"select_scan_number_ComboBox\"\n )\n\n for i in range(self.parameters[\"no_of_scans\"]):\n self.select_scan_number_ComboBox.addItem(str(int(i + 1)))\n\n self.select_scan_number_ComboBox.setCurrentIndex(0)\n self.verticalLayout.addWidget(self.select_scan_number_ComboBox)\n\n # Select the number 
of groups to define\n self.no_groups_label = QtWidgets.QLabel()\n self.verticalLayout.addWidget(self.no_groups_label)\n\n self.no_groups_LabeledSlider = LabeledSlider(\n 1,\n int(np.size(np.unique(self.parameters[\"device_number\"]))),\n interval=1,\n orientation=QtCore.Qt.Horizontal,\n )\n\n self.verticalLayout.addWidget(self.no_groups_LabeledSlider)\n\n self.available_devices_label = QtWidgets.QLabel()\n self.verticalLayout.addWidget(self.available_devices_label)\n\n # if np.size(self.paths) == 1:\n # verticalLayout.addWidget(self.no_groups_LabeledSlider)\n\n # Define the group assignement fields\n self.group_definition_gridLayout = QtWidgets.QGridLayout()\n self.group_definition_gridLayout.setSpacing(10)\n\n # Group names and its container\n self.group_name_label = QtWidgets.QLabel()\n self.group_definition_gridLayout.addWidget(self.group_name_label, 1, 0, 1, 1)\n\n self.group_name_LineEdit_container = np.empty(0, dtype=\"object\")\n self.group_name_LineEdit_container = np.append(\n self.group_name_LineEdit_container, QtWidgets.QLineEdit()\n )\n self.group_definition_gridLayout.addWidget(\n self.group_name_LineEdit_container[0], 2, 0\n )\n\n # Enter device numbers and its container\n self.device_assignment_label = QtWidgets.QLabel()\n self.group_definition_gridLayout.addWidget(\n self.device_assignment_label, 1, 1, 1, 1\n )\n\n self.device_assignment_LineEdit_container = np.empty(0, dtype=\"object\")\n self.device_assignment_LineEdit_container = np.append(\n self.device_assignment_LineEdit_container, QtWidgets.QLineEdit()\n )\n self.group_definition_gridLayout.addWidget(\n self.device_assignment_LineEdit_container[0], 2, 1\n )\n\n # Assign a spectrum file to the group\n if not self.autodetect_spectrum:\n self.spectrum_file_label = QtWidgets.QLabel()\n self.group_definition_gridLayout.addWidget(\n self.spectrum_file_label, 1, 2, 1, 1\n )\n\n self.group_spectrum_PushButton_container = np.empty(0, dtype=\"object\")\n self.group_spectrum_PushButton_container = np.append(\n self.group_spectrum_PushButton_container, QtWidgets.QPushButton(\"\")\n )\n self.group_spectrum_PushButton_container[0].setStyleSheet(\n \"background-color: red\"\n )\n self.group_definition_gridLayout.addWidget(\n self.group_spectrum_PushButton_container[0], 2, 2\n )\n\n # Definition of a plotting color for the group\n self.group_color_label = QtWidgets.QLabel()\n self.group_definition_gridLayout.addWidget(self.group_color_label, 1, 3, 1, 1)\n self.group_colors_PushButton_container = np.empty(0, dtype=\"object\")\n self.group_colors_PushButton_container = np.append(\n self.group_colors_PushButton_container, QtWidgets.QPushButton(\"\")\n )\n self.group_colors_PushButton_container[0].setStyleSheet(\n \"background-color: \" + str(self.group_color[0])\n )\n self.group_definition_gridLayout.addWidget(\n self.group_colors_PushButton_container[0], 2, 3\n )\n\n # Define the bottom pushbuttons that allows to close and save the dialog\n self.leave_horizontalLayout = QtWidgets.QHBoxLayout()\n self.close_pushButton = QtWidgets.QPushButton(\"Close\")\n\n self.save_pushButton = QtWidgets.QPushButton(\"Save\")\n\n self.leave_horizontalLayout.addWidget(self.close_pushButton)\n self.leave_horizontalLayout.addWidget(self.save_pushButton)\n\n self.verticalLayout.addLayout(self.group_definition_gridLayout)\n self.verticalLayout.addLayout(self.leave_horizontalLayout)\n\n self.setLayout(self.verticalLayout)\n\n self.retranslateUi(AssignGroups)\n QtCore.QMetaObject.connectSlotsByName(AssignGroups)\n\n def retranslateUi(self, AssignGroups):\n 
_translate = QtCore.QCoreApplication.translate\n        AssignGroups.setWindowTitle(_translate(\"AssignGroups\", \"Assign Groups\"))\n\n        if not self.include_all_scans:\n            self.select_scan_number_label.setText(\n                _translate(\"AssignGroups\", \"Select Scan\")\n            )\n        self.no_groups_label.setText(\n            _translate(\"AssignGroups\", \"Select Number of Groups\")\n        )\n        self.available_devices_label.setText(\n            _translate(\n                \"AssignGroups\",\n                \"Available Devices for Assignment \"\n                + str(self.parameters[\"device_number\"]),\n            )\n        )\n        self.group_name_label.setText(_translate(\"AssignGroups\", \"Group Name\"))\n        self.device_assignment_label.setText(\n            _translate(\"AssignGroups\", \"Assign Devices (separated by ,)\")\n        )\n        self.group_color_label.setText(_translate(\"AssignGroups\", \"Color\"))\n        if not self.autodetect_spectrum:\n            self.spectrum_file_label.setText(_translate(\"AssignGroups\", \"Spectrum\"))\n"
] | [
[
"numpy.empty",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
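A minimal sketch of how the dialog above bounds its group-count slider: the upper limit is the number of distinct device numbers found in the scan. The device_number values here are invented for illustration:

import numpy as np

device_number = np.array([1, 1, 2, 3, 3, 3])  # hypothetical scan contents
max_groups = int(np.size(np.unique(device_number)))
print(max_groups)  # 3, so the dialog builds LabeledSlider(1, 3, interval=1)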
czw1296924847/ResGraphNet | [
"1638236e4138719c324afc3137f31cfec8a9de64",
"1638236e4138719c324afc3137f31cfec8a9de64"
] | [
"run/run_ResGraphNet.py",
"datasets/HadCRUT5/had_preprocess.py"
] | [
"\"\"\"\nTesting ResGraphNet\n\"\"\"\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport torch\nimport os\nimport os.path as osp\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append(\"..\")\nimport func.cal as cal\n\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n# device = \"cpu\"\nl_x = 60 # Data sequence length\nl_y = 1 # Label sequence length\nlr = 0.0001 # Learning rate\nweight_decay = 5e-4\nepochs = 4000\nhidden_dim = 64\ngnn_style = \"ResGraphNet\"\nsave_fig = True # Whether to save picture\nsave_txt = False # Whether to save txt\nsave_np = True # Whether to save np file\nsave_model = True # Whether to save network model\nratio_train = 0.5 # Proportion of training datasets\nfig_size = (16, 12)\nts_name_all = [\"cli_dash\", \"HadCRUT5\", \"temp_month\", \"temp_year\", \"elect\", \"traffic\", \"sales\"]\nts_name_folder = \"HadCRUT5\" # Name of the folder where the data resides\nts_name = \"HadCRUT5_global\" # Name of the selected time series\niv = 1 # sampling interval, used for plotting curves\nway = \"mean\" # The style of plot curves of real data and predict results\n\nx_address = osp.join(\"../datasets\", ts_name_folder, ts_name + \".npy\")\nx = np.load(x_address)\nnum = x.shape[0] # The length of time series\n\nresult_address = osp.join(\"../result\", ts_name, \"ResGraphNet\")\nif not(osp.exists(result_address)):\n os.makedirs(result_address)\n\nnum_train = int(ratio_train * num)\ndata_train, data_test = x[:num_train], x[num_train:num] # get training dataset and test dataset\n\nlen_interp = l_y + 6\ndata_test_ = np.array(data_test[:-l_y].tolist() + data_test[-len_interp-l_y:-l_y].tolist() + data_test[-l_y:].tolist())\n\n# Using Graph Neural network, prepare data information\nx_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style=\"arr\")\nx_test, y_test = cal.create_inout_sequences(data_test_, l_x, l_y, style=\"arr\")\n\nx_train = torch.from_numpy(x_train).float().to(device)\nx_test = torch.from_numpy(x_test).float().to(device)\ny_train = torch.from_numpy(y_train).float().to(device)\ny_test = torch.from_numpy(y_test).float().to(device)\nnum_nodes = x_train.shape[0] + x_test.shape[0]\nnum_train = x_train.shape[0]\n\nx = torch.cat((x_train, x_test), dim=0)\ny = torch.cat((y_train, y_test), dim=0)\n\nadm = cal.path_graph(num_nodes)\n# adm = cal.ts_un(num_nodes, 6)\nedge_index, edge_weight = cal.tran_adm_to_edge_index(adm)\n\ntrain_index = torch.arange(num_train, dtype=torch.long)\ntest_index = torch.arange(num_train, num_nodes, dtype=torch.long)\ntrain_mask = cal.index_to_mask(train_index, num_nodes).to(device)\ntest_mask = cal.index_to_mask(test_index, num_nodes).to(device)\n\n# Using ResGraphNet, predicting time series (The Proposed Network Model)\nmodel = cal.GNNTime(l_x, hidden_dim, l_y, edge_weight, gnn_style, num_nodes).to(device)\ncriterion = torch.nn.MSELoss().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\nedge_index = edge_index.to(device)\n\nstart_time = datetime.datetime.now()\nprint(\"Running, {}\".format(gnn_style))\nfor epoch in range(epochs):\n model.train()\n optimizer.zero_grad()\n output = model(x, edge_index)\n output_train, y_train = output[train_mask], y[train_mask]\n train_loss = criterion(output_train[:, -1], y_train[:, -1])\n train_loss.backward()\n optimizer.step()\n\n model.eval()\n y_test_1 = y[test_mask][:-len_interp-l_y, :]\n y_test_2 = y[test_mask][-l_y:, :]\n y_test = torch.cat((y_test_1, y_test_2), dim=0)\n output_test = 
output[test_mask][:-len_interp, :]\n test_loss = criterion(output_test[:, -1], y_test[:, -1])\n\n train_true = y_train.detach().cpu().numpy()[:, -1]\n train_predict = output_train.detach().cpu().numpy()[:, -1]\n test_true = y_test.detach().cpu().numpy()[:, -1]\n test_predict = output_test.detach().cpu().numpy()[:, -1]\n\n r2_train = cal.get_r2_score(train_predict, train_true, axis=1)\n r2_test = cal.get_r2_score(test_predict, test_true, axis=1)\n\n if (epoch + 1) % 100 == 0:\n print(\"Epoch: {:05d} Loss_Train: {:.5f} Loss_Test: {:.5f} R2_Train: {:.7f} R2_Test: {:.7f}\".\n format(epoch + 1, train_loss.item(), test_loss.item(), r2_train, r2_test))\n\n# predict and plot future time series\nplot_predict = test_predict[-l_y:]\nplot_true = test_true[-l_y:]\nmse_plot = np.mean(np.square(plot_predict - plot_true))\nprint(\"mse_plot: {}\".format(mse_plot))\ncal.plot_spiral(plot_predict) # predict results in the coming year\nif save_fig:\n plt.savefig(osp.join(result_address, \"future_predict.png\"))\ncal.plot_spiral(plot_true) # true data in the coming year\nif save_fig:\n plt.savefig(osp.join(result_address, \"future_true.png\"))\n\n# calculate running time\nend_time = datetime.datetime.now()\nrun_time = end_time - start_time # The running time of program\n\n# save model and numpy.file\nif save_model:\n torch.save(model, osp.join(result_address, \"{}.pkl\".format(gnn_style)))\nif save_np:\n np.save(osp.join(result_address, \"train_true.npy\"), train_true)\n np.save(osp.join(result_address, \"test_true.npy\"), test_true)\n np.save(osp.join(result_address, \"train_predict_{}.npy\".format(gnn_style)), train_predict)\n np.save(osp.join(result_address, \"test_predict_{}.npy\".format(gnn_style)), test_predict)\n\n# plot the error and results\ne_gnn = test_true - test_predict\ncal.plot_distribute(e_gnn, 40, 4, x_name=\"e\")\nif save_fig:\n plt.savefig(osp.join(result_address, ts_name + \"_\" + gnn_style + \"_error_distribution.png\"))\n\ncal.plot_result(train_true, test_true, train_predict, test_predict, iv, way, fig_size)\nif save_fig:\n plt.savefig(osp.join(result_address, ts_name + \"_\" + gnn_style + \".png\"))\n\n# print indicators\nrmse_train = cal.get_rmse(train_predict, train_true)\nrmse_test = cal.get_rmse(test_predict, test_true)\nr2_train = cal.get_r2_score(train_predict, train_true, axis=1)\nr2_test = cal.get_r2_score(test_predict, test_true, axis=1)\nprint(\"{}: RMSE_Train={:.5f} RMSE_Test={:.5f} R2_Train={:.7f} R2_Test={:.7f}\".\n format(gnn_style, rmse_train, rmse_test, r2_train, r2_test))\n\n# The output results of each model are appended to the file\nif save_txt:\n info_txt_address = osp.join(result_address, \"ResGraphNet_result.txt\") # txt file address for saving parameter information\n info_df_address = osp.join(result_address, \"ResGraphNet_result.csv\") # csv file address for saving parameter information\n f = open(info_txt_address, 'a')\n if osp.getsize(info_txt_address) == 0: # add the name of each feature in the first line of the text\n f.write(\"gnn_style r2_test r2_train run_time l_x l_y hidden_dim lr epochs\\n\")\n f.write(str(gnn_style) + \" \")\n f.write(str(r2_test) + \" \")\n f.write(str(r2_train) + \" \")\n f.write(str(run_time) + \" \")\n f.write(str(l_x) + \" \")\n f.write(str(l_y) + \" \")\n f.write(str(hidden_dim) + \" \")\n f.write(str(lr) + \" \")\n f.write(str(epochs) + \" \")\n\n f.write(\"\\n\") # Prepare for next running\n f.close() # close file\n\n info = np.loadtxt(info_txt_address, dtype=str)\n columns = info[0, :].tolist()\n values = info[1:, :]\n info_df = 
pd.DataFrame(values, columns=columns)\n info_df.to_csv(info_df_address)\n\n\nprint()\nplt.show()\nprint()\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\naddress = \"HadCRUT.5.0.1.0.analysis.summary_series.global.monthly.csv\"\ndf = pd.read_csv(address)\nvalue_full = df.loc[:, \"Anomaly (deg C)\"].values\nnp.save(\"HadCRUT5_global_full.npy\", value_full)\n\nlow_con = df.loc[:, \"Lower confidence limit (2.5%)\"].values\nup_con = df.loc[:, \"Upper confidence limit (97.5%)\"].values\n\nvalue = value_full[:-3]\nnp.save(\"HadCRUT5_global.npy\", value)\n\nplt.figure(figsize=(12, 12))\nplt.plot(value_full, c=\"red\", label=\"Observation\", alpha=0.5)\n# plt.plot(low_con, c=\"blue\", label=\"Lower confidence limit\", alpha=0.3)\nx_tick = [0, 360, 720, 1080, 1440, 1800]\nx_label = [\"1850\", \"1880\", \"1910\", \"1940\", \"1970\", \"2000\"]\nplt.xticks(x_tick, x_label, fontsize=20)\nplt.yticks(fontsize=20)\nplt.xlabel(\"Year\", fontsize=30)\nplt.ylabel(\"Anomaly ($^{\\circ}$C)\", fontsize=30)\n# plt.legend(fontsize=30)\nplt.savefig(\"HadCRUT5_global.png\")\n\n\naddress = \"HadCRUT.5.0.1.0.analysis.summary_series.northern_hemisphere.monthly.csv\"\ndf = pd.read_csv(address)\nvalue = df.loc[:, \"Anomaly (deg C)\"].values\nnp.save(\"HadCRUT5_northern.npy\", value)\n# plt.figure()\n# plt.plot(value)\n\n\naddress = \"HadCRUT.5.0.1.0.analysis.summary_series.southern_hemisphere.monthly.csv\"\ndf = pd.read_csv(address)\nvalue = df.loc[:, \"Anomaly (deg C)\"].values\nnp.save(\"HadCRUT5_southern.npy\", value)\n# plt.figure()\n# plt.plot(value)\n\n\nprint()\nplt.show()\nprint()\n"
] | [
[
"numpy.square",
"torch.cat",
"torch.from_numpy",
"pandas.DataFrame",
"torch.cuda.is_available",
"torch.arange",
"numpy.load",
"matplotlib.pyplot.show",
"torch.nn.MSELoss",
"numpy.loadtxt"
],
[
"matplotlib.pyplot.yticks",
"pandas.read_csv",
"numpy.save",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
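run_ResGraphNet.py builds node features by sliding a window of l_x past samples over the series and pairing it with the next l_y samples. The real implementation lives in func.cal.create_inout_sequences, which is not shown in the row; the sketch below is an assumed equivalent of its "arr" style, not the library's actual code:

import numpy as np

def create_inout_sequences(data, l_x, l_y):
    # Assumed behavior: each row of x holds l_x consecutive samples,
    # each row of y holds the l_y samples that immediately follow.
    x, y = [], []
    for i in range(len(data) - l_x - l_y + 1):
        x.append(data[i:i + l_x])
        y.append(data[i + l_x:i + l_x + l_y])
    return np.array(x), np.array(y)

x, y = create_inout_sequences(np.arange(10.0), l_x=4, l_y=1)
print(x.shape, y.shape)  # (6, 4) (6, 1)

Each window then becomes one node of the path graph built by cal.path_graph, so consecutive windows are linked by edges.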
JaworWr/Dynamic-inverse-kinematics | [
"b9da50b88152682060075a44da940e6f98690a9a"
] | [
"idea.py"
] | [
"import numpy as np\n\n\ndef FNS(scores):\n domination = np.all(scores[:, None, :] <= scores[None, :, :], axis=2) # domination[i, j] = \"i dominuje j\"\n domination &= np.any(scores[:, None, :] < scores[None, :, :], axis=2)\n Nx = domination.sum(0)\n\n Pf = []\n ranks = np.zeros(scores.shape[0])\n r = 0\n Q = np.nonzero(Nx == 0)[0]\n while Q.size > 0:\n Nx[Q] = -1\n Pf.append(Q)\n ranks[Q] = r\n r += 1\n for i in Q:\n Nx[domination[i, :]] -= 1\n Q = np.nonzero(Nx == 0)[0]\n\n return Pf, ranks\n\n\ndef crowding_distance(scores):\n indices = np.argsort(scores, 0)\n sorted_scores = np.take_along_axis(scores, indices, 0)\n cd = np.zeros(scores.shape[0])\n for k in range(scores.shape[1]):\n if sorted_scores[-1, k] != sorted_scores[0, k]:\n cd[indices[[0, -1], k]] = np.inf\n cd[indices[1:-1, k]] += (sorted_scores[2:, k] - sorted_scores[:-2, k]) / (\n sorted_scores[-1, k] - sorted_scores[0, k])\n return cd\n\n\ndef random_population(d, n, x_min, x_max):\n return np.hstack([np.random.uniform(x_min, x_max, (n, d))])\n\n\ndef tournament_selection(ranks, dists, n):\n candidates = np.random.choice(n, (n, 2), replace=True)\n mask = np.where(\n ranks[candidates[:, 0]] == ranks[candidates[:, 1]],\n dists[candidates[:, 0]] > dists[candidates[:, 1]],\n ranks[candidates[:, 0]] < ranks[candidates[:, 1]]\n )\n result = candidates[:, 1]\n result[mask] = candidates[mask, 0]\n return result\n\n\ndef crossover(x, p, eta): # simulated binary crossover\n n, d = x.shape\n l = n // 2\n mask = np.random.random((l, d)) <= p\n m = np.sum(mask)\n mi = np.random.random(m)\n beta = np.where(\n mi < 0.5,\n np.power(2 * mi, 1. / (eta + 1.)),\n np.power(1. / (2. * (1 - mi)), 1. / (eta + 1.))\n )\n c1 = x[:l, :].copy()\n c2 = x[l:, :].copy()\n c1[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]\n c2[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]\n return np.vstack([c1, c2])\n\n\ndef mutation(x, x_min, x_max, p, eta): # polynomial mutation\n n, d = x.shape\n mask = np.random.random((n, d)) <= p\n if isinstance(x_min, np.ndarray):\n x_min = np.repeat(x_min[None, :], n, axis=0)\n x_min = x_min[mask]\n if isinstance(x_max, np.ndarray):\n x_max = np.repeat(x_max[None, :], n, axis=0)\n x_max = x_max[mask]\n m = np.sum(mask)\n mi = np.random.random(m)\n beta = np.where(\n mi < 0.5,\n np.power(2 * mi, 1. / (eta + 1.)) - 1.,\n 1. - np.power(2. * (1 - mi), 1. 
/ (eta + 1.))\n )\n y = x.copy()\n y[mask] = np.where(\n mi < 0.5,\n x[mask] + beta * (x[mask] - x_min),\n x[mask] + beta * (x_max - x[mask])\n )\n return y\n\n\ndef elitist_selection(fronts, dists, to_take):\n taken = []\n for front in fronts:\n if len(front) <= to_take:\n taken += list(front)\n if len(front) == to_take:\n break\n to_take -= len(front)\n else:\n indices = np.argsort(-dists[front])[:to_take]\n taken += list(front[indices])\n break\n return taken\n\n\ndef constraint_violation(constraints):\n n, d = constraints.shape\n sort_indices = np.argsort(constraints, 0)\n violations = np.zeros(n)\n for i in range(d):\n values, counts = np.unique(constraints[:, i], return_counts=True) # unikalne wartości są zwracane posortowane\n counts = np.cumsum(counts)\n counts = list(counts)\n if values[0] != 0:\n counts = [0] + counts\n for rank, (j, k) in enumerate(zip([0] + counts, counts + [len(counts)])):\n violations[sort_indices[j:k, i]] += rank\n return violations\n\n\ndef evaluation(objective, n_constraints, population):\n obj_results = objective(population)\n constraint_values = obj_results[:, -n_constraints:]\n violation_measure = constraint_violation(constraint_values)\n scores = np.concatenate([obj_results[:, :-n_constraints], violation_measure[:, None]], 1)\n return scores\n\n\ndef split_and_select(population, scores, n_f, n_inf):\n dists = crowding_distance(scores)\n mask_f = scores[:, -1] == 0\n population_f = population[mask_f, :]\n scores_f = scores[mask_f, :]\n dists_f = dists[mask_f]\n population_inf = population[~mask_f, :]\n scores_inf = scores[~mask_f, :]\n dists_inf = dists[~mask_f]\n\n s_f = population_f.shape[0]\n s_inf = population_inf.shape[0]\n n = n_f + n_inf\n if s_f < n_f:\n to_take_f = s_f\n to_take_inf = n - s_f\n elif s_inf < n_inf:\n to_take_inf = s_inf\n to_take_f = n - s_inf\n else:\n to_take_f = n_f\n to_take_inf = n_inf\n\n fronts_f, ranks_f = FNS(scores_f)\n taken_f = elitist_selection(fronts_f, dists_f, to_take_f)\n\n fronts_inf, ranks_inf = FNS(scores_inf)\n taken_inf = elitist_selection(fronts_inf, dists_inf, to_take_inf)\n\n return population_f[taken_f, :], population_inf[taken_inf, :], scores_f[taken_f, :], scores_inf[taken_inf, :]\n\n\ndef IDEA(objective, n_constraints, x_min, x_max, d, n, *args, **kwargs):\n population = random_population(d, n, x_min, x_max)\n return sub_IDEA(population, objective, n_constraints, x_min, x_max, n, *args, **kwargs)\n\n\ndef dynamic_IDEA(objective, n_constraints, T, x_min, x_max, d, n, alpha_inf,\n *args, num_iterations_init, num_iterations, n_immigrants=0, **kwargs):\n population = random_population(d, n, x_min, x_max)\n\n print(\"=\" * 80)\n print(\"t=0\")\n print(\"=\" * 80)\n\n t = 0\n\n def round_objective(round_population):\n return objective(t, round_population)\n\n p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,\n num_iterations=num_iterations_init, **kwargs)\n population_history = [p]\n score_history = [s]\n\n n_to_keep = n - n_immigrants\n n_inf = int(n_to_keep * alpha_inf)\n n_f = n_to_keep - n_inf\n\n for t in range(1, T):\n print(\"=\" * 80)\n print(f\"t={t}\")\n print(\"=\" * 80)\n\n population = p[-1, :, :]\n scores = s[-1, :, :]\n if n_immigrants > 0:\n population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)\n\n immigrants = random_population(d, n_immigrants, x_min, x_max)\n population = np.vstack([population_f, population_inf, immigrants])\n assert population.shape[0] == n\n\n p, s = sub_IDEA(population, 
round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,\n num_iterations=num_iterations, **kwargs)\n population_history.append(p)\n score_history.append(s)\n\n return population_history, score_history\n\n\ndef sub_IDEA(population, objective, n_constraints, x_min, x_max, n, alpha_inf,\n eta_c, eta_m, p_c, p_m, num_iterations, log_interval=10):\n n_inf = int(n * alpha_inf)\n n_f = n - n_inf\n populations = []\n scores = evaluation(objective, n_constraints, population)\n scores_hist = []\n\n fronts, ranks = FNS(scores)\n dists = crowding_distance(scores)\n\n def log_message():\n count_f = population_f.shape[0]\n count_inf = population_inf.shape[0]\n print(\n f\"Iteration {iter_}, \" +\n\n f\"#feasible: {count_f}, best: {scores_f[:, :-1].min(0) if count_f > 0 else '-'}, \" +\n f\"#infeasible: {count_inf}, best: {scores_inf.min(0) if count_inf > 0 else '-'}\"\n )\n\n for iter_ in range(num_iterations):\n parent_indices = tournament_selection(ranks, dists, n)\n offspring = crossover(population[parent_indices, :], p_c, eta_c)\n offspring = np.clip(offspring, x_min, x_max)\n offspring = mutation(offspring, x_min, x_max, p_m, eta_m)\n offspring_scores = evaluation(objective, n_constraints, offspring)\n\n population = np.vstack([population, offspring])\n scores = np.vstack([scores, offspring_scores])\n\n population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)\n\n population = np.vstack([population_f, population_inf])\n scores = np.vstack([scores_f, scores_inf])\n fronts, ranks = FNS(scores)\n dists = crowding_distance(scores)\n\n populations.append(population.copy())\n scores_hist.append(scores.copy())\n\n if iter_ % log_interval == 0:\n log_message()\n log_message()\n return np.stack(populations, 0), np.stack(scores_hist, 0)\n"
] | [
[
"numpy.take_along_axis",
"numpy.cumsum",
"numpy.all",
"numpy.concatenate",
"numpy.any",
"numpy.where",
"numpy.unique",
"numpy.clip",
"numpy.stack",
"numpy.repeat",
"numpy.zeros",
"numpy.nonzero",
"numpy.random.choice",
"numpy.power",
"numpy.argsort",
"numpy.sum",
"numpy.random.random",
"numpy.random.uniform",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
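A tiny usage example for the non-dominated sorting utilities in idea.py above (minimization convention: a point dominates another when it is <= in every objective and strictly < in at least one). The expected outputs were worked out by hand from the code in the row:

import numpy as np
from idea import FNS, crowding_distance  # the module shown above

scores = np.array([[1.0, 2.0],
                   [2.0, 1.0],
                   [2.0, 2.0],
                   [3.0, 3.0]])
fronts, ranks = FNS(scores)
print([f.tolist() for f in fronts])  # [[0, 1], [2], [3]]
print(ranks)                         # [0. 0. 1. 2.]
print(crowding_distance(scores))     # [inf inf  1. inf]

Points 0 and 1 are mutually non-dominated and form the first front; boundary points of each front receive infinite crowding distance so tournament selection always prefers them over interior points of the same rank.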
Keck-FOBOS/producer | [
"6f2b0d3f29f62187bf593567081061e53ddb5a4e"
] | [
"producer/util.py"
] | [
"\"\"\"\nMiscellaneous package utilities.\n\n.. include:: ../include/links.rst\n\"\"\"\n\nfrom itertools import chain, combinations\n\nfrom IPython import embed \n\nimport numpy\n\n\ndef all_subclasses(cls):\n \"\"\"\n Collect all the subclasses of the provided class.\n\n The search follows the inheritance to the highest-level class. Intermediate\n base classes are included in the returned set, but not the base class itself.\n\n Thanks to:\n https://stackoverflow.com/questions/3862310/how-to-find-all-the-subclasses-of-a-class-given-its-name\n\n Args:\n cls (object):\n The base class\n\n Returns:\n :obj:`set`: The unique set of derived classes, including any\n intermediate base classes in the inheritance thread.\n \"\"\"\n return set(cls.__subclasses__()).union(\n [s for c in cls.__subclasses__() for s in all_subclasses(c)])\n\n\ndef string_table(tbl, delimeter='print', has_header=True):\n \"\"\"\n Provided the array of data, format it with equally spaced columns\n and add a header (first row) and contents delimeter.\n\n Args:\n tbl (`numpy.ndarray`_):\n Array of string representations of the data to print.\n delimeter (:obj:`str`, optional):\n If the first row in the table containts the column headers (see\n ``has_header``), this sets the delimeter between first table row and\n the column data. Use ``'print'`` for a simple line of hyphens,\n anything else results in an ``rst`` style table formatting.\n has_header (:obj:`bool`, optional):\n The first row in ``tbl`` contains the column headers.\n\n Returns:\n :obj:`str`: Single long string with the data table.\n \"\"\"\n nrows, ncols = tbl.shape\n col_width = [numpy.amax([len(dij) for dij in dj]) for dj in tbl.T]\n\n _nrows = nrows\n start = 1\n if delimeter != 'print':\n _nrows += 2\n start += 1\n if has_header:\n _nrows += 1\n start += 1\n\n row_string = ['']*_nrows\n\n for i in range(start,nrows+start-1):\n row_string[i] = ' '.join([tbl[1+i-start,j].ljust(col_width[j]) for j in range(ncols)])\n if delimeter == 'print':\n # Heading row\n row_string[0] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)])\n # Delimiter\n if has_header:\n row_string[1] = '-'*len(row_string[0])\n return '\\n'.join(row_string)+'\\n'\n\n # For an rst table\n row_string[0] = ' '.join([ '='*col_width[j] for j in range(ncols)])\n row_string[1] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)])\n if has_header:\n row_string[2] = row_string[0]\n row_string[-1] = row_string[0]\n return '\\n'.join(row_string)+'\\n'\n\n\ndef powerset(iterable, reverse=False):\n \"\"\"\"\n Construct an iterable that steps through all combinations of the\n provided iterable.\n\n This is pulled from the recipes provided by the itertools\n documentation.\n\n Examples:\n \n Get all unique combinations of the list [1,2,3]:\n >>> list(powerset([1,2,3]))\n [() (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)]\n\n Args:\n iterable (iterable):\n An iterable object\n reverse (:obj:`bool`, optional):\n Reverse the order (only roughly) of the iterable by placing\n the longer sequences first.\n \n Returns:\n `itertools.chain`: Iterable object that returns the sequence of\n combinations.\n \"\"\"\n rng = range(len(iterable)+1)[::-1] if reverse else range(len(iterable)+1)\n return chain.from_iterable(combinations(iterable, r) for r in rng)\n\n\ndef polygon_winding_number(polygon, point):\n \"\"\"\n Determine the winding number of a 2D polygon about a point.\n \n The code does **not** check if the polygon is simple (no interesecting line\n segments). 
Algorithm taken from Numerical Recipes Section 21.4.\n\n Args:\n polygon (`numpy.ndarray`_):\n An Nx2 array containing the x,y coordinates of a polygon.\n The points should be ordered either counter-clockwise or\n clockwise.\n point (`numpy.ndarray`_):\n One or more points for the winding number calculation.\n Must be either a 2-element array for a single (x,y) pair,\n or an Nx2 array with N (x,y) points.\n\n Returns:\n :obj:`int`, `numpy.ndarray`_: The winding number of each point with\n respect to the provided polygon. Points inside the polygon have winding\n numbers of 1 or -1; see :func:`point_inside_polygon`.\n\n Raises:\n ValueError:\n Raised if ``polygon`` is not 2D, if ``polygon`` does not have two\n columns, or if the last axis of ``point`` does not have 2 and only 2\n elements.\n \"\"\"\n # Check input shape is for 2D only\n if len(polygon.shape) != 2:\n raise ValueError('Polygon must be an Nx2 array.')\n if polygon.shape[1] != 2:\n raise ValueError('Polygon must be in two dimensions.')\n _point = numpy.atleast_2d(point)\n if _point.shape[1] != 2:\n raise ValueError('Point must contain two elements.')\n\n # Get the winding number\n nvert = polygon.shape[0]\n npnt = _point.shape[0]\n\n dl = numpy.roll(polygon, 1, axis=0)[None,:,:] - _point[:,None,:]\n dr = polygon[None,:,:] - point[:,None,:]\n dx = dl[...,0]*dr[...,1] - dl[...,1]*dr[...,0]\n\n indx_l = dl[...,1] > 0\n indx_r = dr[...,1] > 0\n\n wind = numpy.zeros((npnt, nvert), dtype=int)\n wind[indx_l & numpy.logical_not(indx_r) & (dx < 0)] = -1\n wind[numpy.logical_not(indx_l) & indx_r & (dx > 0)] = 1\n\n return numpy.sum(wind, axis=1)[0] if point.ndim == 1 else numpy.sum(wind, axis=1)\n\n\ndef point_inside_polygon(polygon, point):\n \"\"\"\n Determine if one or more points is inside the provided polygon.\n\n Primarily a wrapper for :func:`polygon_winding_number`, that\n returns True for each point that is inside the polygon.\n\n Args:\n polygon (`numpy.ndarray`_):\n An Nx2 array containing the x,y coordinates of a polygon.\n The points should be ordered either counter-clockwise or\n clockwise.\n point (`numpy.ndarray`_):\n One or more points for the winding number calculation.\n Must be either a 2-element array for a single (x,y) pair,\n or an Nx2 array with N (x,y) points.\n\n Returns:\n :obj:`bool`, `numpy.ndarray`: Boolean indicating whether or not each\n point is within the polygon.\n \"\"\"\n return numpy.absolute(polygon_winding_number(polygon, point)) == 1\n\n\n\n"
] | [
[
"numpy.logical_not",
"numpy.sum",
"numpy.atleast_2d",
"numpy.zeros",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
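A short usage sketch for the winding-number utilities in producer/util.py, assuming the package is importable as producer.util. A point strictly inside a simple polygon has winding number +1 or -1, so point_inside_polygon returns True for it:

import numpy as np
from producer.util import point_inside_polygon  # module shown above

# Unit square, vertices ordered counter-clockwise.
square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
points = np.array([[0.5, 0.5], [2.0, 0.5]])
print(point_inside_polygon(square, points))  # [ True False]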
chundiliu/slim_for_Cdiscount | [
"ea7f9d56072072c031094c12c803c63591066c6c"
] | [
"generate_cdiscount_predictions.py"
] | [
"import math\nimport tensorflow as tf\nimport os\nimport struct\nimport pdb\nimport numpy as np\nfrom datasets import dataset_factory\nfrom nets import nets_factory\nimport nets.resnet_v2 as resnet_v2\nfrom preprocessing import preprocessing_factory\nslim = tf.contrib.slim\n\ndef merge_predictions(predictions_fn):\n '''\n Merge predictions/logit scores for products that are the same.\n '''\n\n out_f = open(predictions_fn + '_merged', 'wb')\n f = open(predictions_fn, 'r')\n line = f.readline().strip().split()\n curr_id = line[0]\n curr_scores = np.power(np.array([float(x) for x in line[1:]]), 3)\n num_elems = 1\n line = f.readline().strip().split()\n\n while line != []:\n id = line[0]\n # raise elements to the third power, and then take the cubic root\n scores = np.power(np.array([float(x) for x in line[1:]]), 3)\n\n if id == curr_id:\n num_elems += 1\n curr_scores += scores\n else:\n curr_scores = np.cbrt(curr_scores / float(num_elems))\n for score in curr_scores:\n out_f.write(struct.pack('>f', score))\n\n curr_scores = scores\n num_elems = 1\n curr_id = id\n\n line = f.readline().strip().split()\n\n\n curr_scores = np.cbrt(curr_scores / float(num_elems))\n for score in curr_scores:\n out_f.write(struct.pack('>f', score))\n\n out_f.close()\n f.close()\n\n\nif __name__ == '__main__':\n\n checkpoint_dir = '/home/shunan/Code/Data/cdiscount/training'\n dataset_dir = '/home/shunan/Code/Data/cdiscount/tf_records'\n num_classes = 5270\n image_size = 180\n batch_size = 100\n set_name = 'validation'\n data_sizes = {'train': 12195682, 'validation': 175611, 'test': 3095080}\n out_fn = os.path.join(dataset_dir, '{}_predictions.txt'.format(set_name))\n\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)\n\n # loading the dataset\n dataset = dataset_factory.get_dataset('cdiscount', set_name, dataset_dir)\n\n # dataset provider to load data from the dataset.\n provider = slim.dataset_data_provider.DatasetDataProvider(dataset, shuffle=False, common_queue_capacity=2*batch_size,\n common_queue_min=batch_size)\n [image, label, product_id] = provider.get(['image', 'label', 'product_id'])\n\n # Pre-processing step.\n image_preprocessing_fn = preprocessing_factory.get_preprocessing('simple', is_training=False)\n image = image_preprocessing_fn(image, image_size, image_size)\n\n images, labels, product_ids = tf.train.batch([image, label, product_id], batch_size=batch_size, num_threads=1,\n capacity=5 * batch_size)\n\n # Get the model\n # network_fn = nets_factory.get_network_fn('resnet_v2_152', num_classes=num_classes, is_training=False)\n with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=0.)):\n logits, end_points = resnet_v2.resnet_v2_152(images, num_classes=num_classes, is_training=False)\n\n #Obtain the trainable variables and a saver\n variables_to_restore = slim.get_variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n output_f = open(out_fn, 'w')\n\n with tf.Session() as sess:\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, checkpoint_file)\n num_iters = int(math.ceil(data_sizes[set_name] / float(batch_size)))\n num_last_batch = batch_size - ((num_iters * batch_size) - data_sizes[set_name])\n\n for i in range(num_iters):\n output, ids = sess.run([logits, product_ids])\n\n if i == num_iters - 1:\n output = output[:num_last_batch, :]\n ids = ids[:num_last_batch]\n\n for j in range(output.shape[0]):\n vec_str = [str(x) for x in output[j, :]]\n 
output_f.write(str(ids[j]) + ' ' + ' '.join(vec_str) + '\\n')\n\n output_f.close()"
] | [
[
"tensorflow.train.latest_checkpoint",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.train.batch"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
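The merge step in generate_cdiscount_predictions.py combines the per-image scores of one product in "cubed space": each score vector is raised to the third power, the vectors are averaged, and the cube root is taken, which pulls the result toward the larger (more confident) scores. A hand-worked two-image example, with values invented for illustration:

import numpy as np

a = np.array([0.9, 0.1])  # image 1 scores for two classes
b = np.array([0.5, 0.5])  # image 2 scores
merged = np.cbrt((a**3 + b**3) / 2.0)
print(merged)         # ~[0.753 0.398], pulled toward the confident 0.9
print((a + b) / 2.0)  # plain mean for comparison: [0.7 0.3]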
jeantardelli/math-with-python | [
"119bbbc62329c0d834d965232239bd3b39116cc1",
"119bbbc62329c0d834d965232239bd3b39116cc1",
"119bbbc62329c0d834d965232239bd3b39116cc1"
] | [
"data-and-statistics/understanding-a-population-using-sampling.py",
"finding-optimal-solutions/analyzing-simple-two-player-games.py",
"trees-and-networks/generating-the-adjacency-matrix-of-a-network.py"
] | [
"\"\"\"\nOne of the central problems in statistics is to make estimations — and quantify\nhow good these estimations are — of the distribution of an entire population\ngiven only a small (random) sample. A classic example is to estimate the average\nheight of all the people in a country when measuring the height of a randomly\nselected sample of people. These kinds of problems are particularly interesting\nwhen the true population distribution, by which we usually mean the mean of the\nwhole population, cannot feasibly be measured. In this case, we must rely on our\nknowledge of statistics and a (usually much smaller) randomly selected sample to\nestimate the true population mean and standard deviation, and also quantify how\ngood our estimations are. It is the latter that is the source of confusion,\nmisunderstanding, and misrepresentation of statistics in the wider world.\n\nThis module illustrates how to estimate the population mean and give a\nconfidence interval fo these estimates.\n\"\"\"\nimport math\nimport pandas as pd\n\nfrom scipy import stats\n\nsample_data = pd.Series([\n 172.3, 171.3, 164.7, 162.9, 172.5, 176.3, 174.8, 171.9,\n 176.8, 167.8, 164.5, 179.7, 157.8, 170.6, 189.9, 185. ,\n 172.7, 165.5, 174.5, 171.5])\n\nsample_mean = sample_data.mean()\nsample_std = sample_data.std()\n\nprint(f\"Mean: {sample_mean}, st. dev: {sample_std}\")\n# Mean: 172.15, st. dev: 7.473778724383846\n\nN = sample_data.count()\nstd_err = sample_std/math.sqrt(N)\n\ncv_95, cv_99 = stats.t.ppf([0.975, 0.995], df=N-1)\n\npm_95 = cv_95 * std_err\npm_99 = cv_99 * std_err\nconf_interval_95 = [sample_mean - pm_95, sample_mean + pm_95] \nconf_interval_99 = [sample_mean - pm_99, sample_mean + pm_99]\n\nprint(f\"95% confidence: {conf_interval_95}\")\nprint(f\"99% confidence: {conf_interval_99}\")\n# 95% confidence: [168.65216388659374, 175.64783611340627]\n# 99% confidence: [167.36884119608774, 176.93115880391227]\n",
"\"\"\"\nGame theory is a branch of mathematics concerned with the analysis of\ndecision-making and strategy. It has applications in economics, biology, and\nbehavioral science. Many seemingly complex situations can be reduced to a\nrelatively simple mathematical game that can be analyzed in a systematic way\nto find \"optimal\" solutions.\n\nA classic problem in game theory is the prisoner's dilemma, which, in its\noriginal form, is as follows: two co-conspirators are caught and must decide\nwhether to remain quiet or to testify against the other. If both remain quiet,\nthey both serve a 1-year sentence; if one testifies but the other does not, the\ntestifier is released and the other serves a 3-year sentence; and if both\ntestify against one another, they both serve a 2-year sentence. What should each\nconspirator do? It turns out that the best choice each conspirator can make,\ngiven any reasonable distrust of the other, is to testify. Adopting this\nstrategy, they will either serve no sentence or a 2-year sentence maximum\n\n\nFor example purposes, let's consider the following table as the possible\npayoffs from a simple two-player game based on which programming language\nto use:\n\n Player1/Player2 C Python\n C 3 / 1 2 / 3\n Python 2 / 1 2 / 4\n\nIf the players agree on a language, then they write the code at the speed they\npredicted (P1: Python 4; C 1 - P2: Python 2; C: 3), but if they disagree,\nthen the productivity of the faster programmer is reduced by 1.\n\nThis module illustrates how to construct an object in Python to represent this\nsimple two-player game, and then perform some elementary analysis regarding the\noutcomes of the this game.\n\"\"\"\nimport numpy as np\nimport nashpy as nash\n\nplayer1 = np.array([[3, 2], [2, 2]])\nplayer2 = np.array([[1, 3], [1, 4]])\ndilemma = nash.Game(player1, player2)\n\nprint(dilemma[[1, 0], [1, 0]]) # [1 3]\nprint(dilemma[[1, 0], [0, 1]]) # [2 3]\nprint(dilemma[[0, 1], [1, 0]]) # [1 2]\nprint(dilemma[[0, 1], [0, 1]]) # [2 4]\n\nprint(dilemma[[0.1, 0.9], [0.5, 0.5]]) # [2.05, 2.45]\n",
"\"\"\"\nOne potent tool in the analysis of graphs is the adjacency matrix, which has\nentries a_ij = 1 if there is an edge from node i to node j, and 0 otherwise.\nFor most networks, the adjacency matrix will be sparse (most of the entries\nare 0). For networks that are not directed, the matrix will also be symmetric\n(a_ij = a_ji).\n\nThis module illustrates how to access such objects and structures.\n\"\"\"\nimport numpy as np\nimport networkx as nx\n\nG = nx.dense_gnm_random_graph(5, 5, seed=12345)\n\nmatrix = nx.adjacency_matrix(G).todense()\nprint(matrix)\n# [[0 0 1 0 0]\n# [0 0 1 1 0]\n# [1 1 0 0 1]\n# [0 1 0 0 1]\n# [0 0 1 1 0]]\n\npaths_len_4 = np.linalg.matrix_power(matrix, 4)\nprint(paths_len_4)\n# [[ 3 5 0 0 5]\n# [ 5 9 0 0 9]\n# [ 0 0 13 10 0]\n# [ 0 0 10 8 0]\n# [ 5 9 0 0 9]]\n"
] | [
[
"scipy.stats.t.ppf",
"pandas.Series"
],
[
"numpy.array"
],
[
"numpy.linalg.matrix_power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Bhavay192/keras | [
"f1e9c76675981ee6683f54a3ce569212d551d12d",
"f1e9c76675981ee6683f54a3ce569212d551d12d"
] | [
"keras/optimizer_v2/rmsprop_test.py",
"keras/layers/normalization/layer_normalization.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for rmsprop.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport copy\nimport itertools\nimport math\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tensorflow.python.framework import test_util\nfrom keras import combinations\nfrom keras import testing_utils\nfrom keras.optimizer_v2 import learning_rate_schedule\nfrom keras.optimizer_v2 import rmsprop\n\n_DATA_TYPES = [\n tf.half, tf.float32, tf.float64, tf.complex64,\n tf.complex128\n]\n\n_TEST_PARAM_VALUES = [\n # learning_rate, rho, momentum, epsilon, centered\n [0.05, 0.9, 0.0, 1e-3, True],\n [0.05, 0.9, 0.0, 1e-3, False],\n [0.1, 0.9, 0.0, 1e-3, True],\n [0.01, 0.9, 0.0, 1e-5, True],\n [0.01, 0.9, 0.9, 1e-5, True],\n]\n\n_TESTPARAMS = [\n [data_type] + values\n for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)\n]\n\n\nclass RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):\n\n def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,\n epsilon, centered):\n rms_t = rms * rho + (1 - rho) * g * g\n if centered:\n mg_t = mg * rho + (1 - rho) * g\n denom_t = rms_t - mg_t * mg_t\n else:\n mg_t = mg\n denom_t = rms_t\n if momentum > 0.:\n mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))\n var_t = var - mom_t\n else:\n mom_t = mom\n var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)\n return var_t, mg_t, rms_t, mom_t\n\n def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,\n lr, rho, momentum, epsilon, centered):\n mg_t = copy.deepcopy(mg)\n rms_t = copy.deepcopy(rms)\n mom_t = copy.deepcopy(mom)\n var_t = copy.deepcopy(var)\n for i in range(len(gindexs)):\n gindex = gindexs[i]\n gvalue = gvalues[i]\n rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue\n if centered:\n mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue\n denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]\n else:\n denom_t = rms_t[gindex]\n if momentum > 0.:\n mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t +\n epsilon)\n var_t[gindex] = var[gindex] - mom_t[gindex]\n else:\n mom_t[gindex] = mom[gindex]\n var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)\n return var_t, mg_t, rms_t, mom_t\n\n def testDense(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:\n with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():\n # Initialize variables for numpy implementation.\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)\n\n var0 = tf.Variable(var0_np, dtype=dtype)\n var1 = tf.Variable(var1_np, 
dtype=dtype)\n grads0 = tf.constant(grads0_np, dtype=dtype)\n grads1 = tf.constant(grads1_np, dtype=dtype)\n opt = rmsprop.RMSprop(\n learning_rate=learning_rate,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered)\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n if centered:\n mg0 = opt.get_slot(var0, \"mg\")\n mg1 = opt.get_slot(var1, \"mg\")\n else:\n mg0 = None\n mg1 = None\n\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n\n mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 3 steps of RMSprop\n for _ in range(1, 4):\n self.evaluate(update)\n\n var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(\n var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,\n momentum, epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(\n var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,\n momentum, epsilon, centered)\n\n # Validate updated params\n if centered:\n self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))\n self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testDenseWithLearningRateDecay(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n with tf.Graph().as_default():\n var0_np = np.array([1.0, 2.0])\n grads0_np = np.array([0.1, 0.2])\n var1_np = np.array([3.0, 4.0])\n grads1_np = np.array([0.01, 0.2])\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0 = tf.constant(grads0_np)\n grads1 = tf.constant(grads1_np)\n learning_rate = 0.01\n rho = 0.9\n momentum = 0.0\n epsilon = 1e-7\n centered = False\n decay = 0.5\n opt = rmsprop.RMSprop(\n learning_rate=learning_rate,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered,\n decay=decay)\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n mg0_np = np.array([0.0, 0.0])\n mg1_np = np.array([0.0, 0.0])\n rms0_np = np.array([0.0, 0.0])\n rms1_np = np.array([0.0, 0.0])\n mom0_np = np.array([0.0, 0.0])\n mom1_np = np.array([0.0, 0.0])\n\n # Fetch 
params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 4 steps of RMSprop\n for t in range(2):\n self.evaluate(update)\n\n lr = learning_rate / (1 + decay * t)\n var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(\n var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,\n epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(\n var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,\n epsilon, centered)\n\n # Validate updated params\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testDenseWithLearningRateInverseTimeDecay(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n with tf.Graph().as_default():\n var0_np = np.array([1.0, 2.0])\n grads0_np = np.array([0.1, 0.2])\n var1_np = np.array([3.0, 4.0])\n grads1_np = np.array([0.01, 0.2])\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0 = tf.constant(grads0_np)\n grads1 = tf.constant(grads1_np)\n learning_rate = 0.01\n rho = 0.9\n momentum = 0.0\n epsilon = 1e-7\n centered = False\n decay = 0.5\n lr_schedule = learning_rate_schedule.InverseTimeDecay(\n learning_rate, decay_steps=1.0, decay_rate=decay)\n opt = rmsprop.RMSprop(\n learning_rate=lr_schedule,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered)\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n mg0_np = np.array([0.0, 0.0])\n mg1_np = np.array([0.0, 0.0])\n rms0_np = np.array([0.0, 0.0])\n rms1_np = np.array([0.0, 0.0])\n mom0_np = np.array([0.0, 0.0])\n mom1_np = np.array([0.0, 0.0])\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 4 steps of RMSprop\n for t in range(2):\n self.evaluate(update)\n\n lr = learning_rate / (1 + decay * t)\n var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(\n var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,\n epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(\n var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,\n epsilon, centered)\n\n # Validate updated params\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testMinimizeSparseResourceVariable(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n with tf.Graph().as_default():\n for dtype in 
_DATA_TYPES:\n var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)\n x = tf.constant([[4.0], [5.0]], dtype=dtype)\n\n def loss():\n pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop\n return pred * pred\n\n sgd_op = rmsprop.RMSprop(\n learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0,\n centered=False).minimize(\n loss, var_list=[var0])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate updated params\n self.assertAllCloseAccordingToType([[0., 1.]],\n self.evaluate(var0),\n atol=0.01)\n\n def testMinimizeSparseResourceVariableCentered(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n with tf.Graph().as_default():\n for dtype in _DATA_TYPES:\n var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)\n x = tf.constant([[4.0], [5.0]], dtype=dtype)\n\n def loss():\n pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop\n return pred * pred\n\n # loss = lambda: pred * pred # pylint: disable=cell-var-from-loop\n sgd_op = rmsprop.RMSprop(\n learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0,\n centered=True).minimize(\n loss, var_list=[var0])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate updated params\n self.assertAllCloseAccordingToType([[-111, -138]],\n self.evaluate(var0),\n atol=0.01)\n\n def testSparse(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:\n with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():\n # Initialize variables for numpy implementation.\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0_np_indices = np.array([0], dtype=np.int32)\n grads0 = tf.IndexedSlices(\n tf.constant(grads0_np),\n tf.constant(grads0_np_indices), tf.constant([1]))\n grads1_np_indices = np.array([1], dtype=np.int32)\n grads1 = tf.IndexedSlices(\n tf.constant(grads1_np),\n tf.constant(grads1_np_indices), tf.constant([1]))\n opt = rmsprop.RMSprop(\n learning_rate=learning_rate,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n if centered:\n mg0 = opt.get_slot(var0, \"mg\")\n self.assertEqual(mg0 is not None, centered)\n mg1 = opt.get_slot(var1, \"mg\")\n self.assertEqual(mg1 is not None, centered)\n else:\n mg0 = None\n mg1 = None\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n rms1_np = np.array([0.0, 0.0], 
dtype=dtype.as_numpy_dtype)\n mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 3 steps of RMSprop\n for _ in range(1, 4):\n self.evaluate(update)\n\n var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(\n var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,\n learning_rate, rho, momentum, epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(\n var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,\n learning_rate, rho, momentum, epsilon, centered)\n\n # Validate updated params\n if centered:\n self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))\n self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n @combinations.generate(combinations.combine(mode=[\"eager\"]))\n def testCallableParams(self):\n for dtype in _DATA_TYPES:\n var0 = tf.Variable([1.0, 2.0], dtype=dtype)\n var1 = tf.Variable([3.0, 4.0], dtype=dtype)\n grads0 = tf.constant([0.1, 0.1], dtype=dtype)\n grads1 = tf.constant([0.01, 0.01], dtype=dtype)\n\n learning_rate = lambda: 2.0\n rho = lambda: 0.9\n momentum = lambda: 0.0\n epsilon = 1.0\n opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n # Step 1: the rms accumulators where 1. 
So we should see a normal\n # update: v -= grad * learning_rate\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n # Check the parameters.\n self.assertAllCloseAccordingToType(\n np.array([\n 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),\n 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))\n ]), self.evaluate(var0))\n self.assertAllCloseAccordingToType(\n np.array([\n 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),\n 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))\n ]), self.evaluate(var1))\n # Step 2: the root mean square accumulators contain the previous update.\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n # Check the parameters.\n self.assertAllCloseAccordingToType(\n np.array([\n 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -\n (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),\n 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -\n (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))\n ]), self.evaluate(var0))\n self.assertAllCloseAccordingToType(\n np.array([\n 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -\n (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),\n 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -\n (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))\n ]), self.evaluate(var1))\n\n def testConstructRMSpropWithLR(self):\n opt = rmsprop.RMSprop(lr=1.0)\n opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)\n opt_3 = rmsprop.RMSprop(learning_rate=0.1)\n self.assertIsInstance(opt.lr, tf.Variable)\n self.assertIsInstance(opt_2.lr, tf.Variable)\n self.assertIsInstance(opt_3.lr, tf.Variable)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.assertAllClose(self.evaluate(opt.lr), (1.0))\n self.assertAllClose(self.evaluate(opt_2.lr), (1.0))\n self.assertAllClose(self.evaluate(opt_3.lr), (0.1))\n\n @combinations.generate(combinations.combine(mode=[\"eager\"]))\n def testSlotsUniqueEager(self):\n v1 = tf.Variable(1.)\n v2 = tf.Variable(1.)\n\n opt = rmsprop.RMSprop(1., momentum=0., centered=False)\n opt.minimize(lambda: v1 + v2, var_list=[v1, v2])\n # There should be iteration, and one unique slot variable for v1 and v2.\n self.assertLen(set({id(v) for v in opt.variables()}), 3)\n self.assertEqual(\n self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))\n\n opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)\n opt.minimize(lambda: v1 + v2, var_list=[v1, v2])\n # There should be iteration, and two unique slot variables for v1 and v2.\n self.assertLen(set({id(v) for v in opt.variables()}), 5)\n self.assertEqual(\n self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))\n\n opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)\n opt.minimize(lambda: v1 + v2, var_list=[v1, v2])\n # There should be iteration, and three unique slot variables for v1 and v2\n self.assertLen(set({id(v) for v in opt.variables()}), 7)\n self.assertEqual(\n self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))\n\n @combinations.generate(combinations.combine(mode=[\"eager\"]))\n def testMomentumProperValue(self):\n with self.assertRaisesRegex(ValueError,\n r\"`momentum` must be between \\[0, 1\\]. 
\"\n r\"Received: momentum=2.5 \\(of type <class \"\n r\"\\'float\\'>\\).\"):\n rmsprop.RMSprop(1., momentum=2.5, centered=False)\n\n\[email protected](combinations.combine(mode=[\"graph\", \"eager\"]))\nclass SlotColocationTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters([True, False])\n @test_util.run_gpu_only\n def testRunMinimizeOnGPUForCPUVariables(self, use_resource):\n with tf.device(\"/device:CPU:0\"):\n if use_resource:\n var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)\n var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)\n else:\n var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)\n var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)\n\n def loss():\n return 5 * var0 + 3 * var1\n\n opt = rmsprop.RMSprop(\n learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0)\n\n # Fetch params to validate initial values\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 1 step through optimizer on GPU.\n # Slot variables are created the first time optimizer is used on some\n # variable. This tests that slot variables will be colocated with the base\n # variable.\n with tf.device(\"/device:GPU:0\"):\n # Note that for eager execution, minimize expects a function instead of a\n # Tensor.\n opt_op = opt.minimize(loss, [var0, var1])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(opt_op)\n\n # Validate updated params, All variables should have decreased.\n self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)),\n msg=\"updated variables: %s\" % self.evaluate(var0))\n self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)),\n msg=\"updated variables: %s\" % self.evaluate(var1))\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Layer Normalization layer.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n# pylint: disable=g-classes-have-attributes\n\nfrom keras import constraints\nfrom keras import initializers\nfrom keras import regularizers\nfrom keras.engine.base_layer import Layer\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.LayerNormalization')\nclass LayerNormalization(Layer):\n \"\"\"Layer normalization layer (Ba et al., 2016).\n\n Normalize the activations of the previous layer for each given example in a\n batch independently, rather than across a batch like Batch Normalization.\n i.e. applies a transformation that maintains the mean activation within each\n example close to 0 and the activation standard deviation close to 1.\n\n Given a tensor `inputs`, moments are calculated and normalization\n is performed across the axes specified in `axis`.\n\n Example:\n\n >>> data = tf.constant(np.arange(10).reshape(5, 2) * 10, dtype=tf.float32)\n >>> print(data)\n tf.Tensor(\n [[ 0. 10.]\n [20. 30.]\n [40. 50.]\n [60. 70.]\n [80. 90.]], shape=(5, 2), dtype=float32)\n\n >>> layer = tf.keras.layers.LayerNormalization(axis=1)\n >>> output = layer(data)\n >>> print(output)\n tf.Tensor(\n [[-1. 1.]\n [-1. 1.]\n [-1. 1.]\n [-1. 1.]\n [-1. 1.]], shape=(5, 2), dtype=float32)\n\n Notice that with Layer Normalization the normalization happens across the\n axes *within* each example, rather than across different examples in the\n batch.\n\n If `scale` or `center` are enabled, the layer will scale the normalized\n outputs by broadcasting them with a trainable variable `gamma`, and center\n the outputs by broadcasting with a trainable variable `beta`. 
`gamma` will\n default to a ones tensor and `beta` will default to a zeros tensor, so that\n centering and scaling are no-ops before training has begun.\n\n So, with scaling and centering enabled the normalization equations\n are as follows:\n\n Let the intermediate activations for a mini-batch to be the `inputs`.\n\n For each sample `x_i` in `inputs` with `k` features, we compute the mean and\n variance of the sample:\n\n ```python\n mean_i = sum(x_i[j] for j in range(k)) / k\n var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k\n ```\n\n and then compute a normalized `x_i_normalized`, including a small factor\n `epsilon` for numerical stability.\n\n ```python\n x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)\n ```\n\n And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,\n which are learned parameters:\n\n ```python\n output_i = x_i_normalized * gamma + beta\n ```\n\n `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and\n this part of the inputs' shape must be fully defined.\n\n For example:\n\n >>> layer = tf.keras.layers.LayerNormalization(axis=[1, 2, 3])\n >>> layer.build([5, 20, 30, 40])\n >>> print(layer.beta.shape)\n (20, 30, 40)\n >>> print(layer.gamma.shape)\n (20, 30, 40)\n\n Note that other implementations of layer normalization may choose to define\n `gamma` and `beta` over a separate set of axes from the axes being\n normalized across. For example, Group Normalization\n ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1\n corresponds to a Layer Normalization that normalizes across height, width,\n and channel and has `gamma` and `beta` span only the channel dimension.\n So, this Layer Normalization implementation will not match a Group\n Normalization layer with group size set to 1.\n\n Args:\n axis: Integer or List/Tuple. The axis or axes to normalize across. Typically\n this is the features axis/axes. The left-out axes are typically the batch\n axis/axes. This argument defaults to `-1`, the last dimension in the\n input.\n epsilon: Small float added to variance to avoid dividing by zero. Defaults\n to 1e-3\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored. Defaults to True.\n scale: If True, multiply by `gamma`. If False, `gamma` is not used. Defaults\n to True. When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling will be done by the next layer.\n beta_initializer: Initializer for the beta weight. Defaults to zeros.\n gamma_initializer: Initializer for the gamma weight. Defaults to ones.\n beta_regularizer: Optional regularizer for the beta weight. None by default.\n gamma_regularizer: Optional regularizer for the gamma weight. None by\n default.\n beta_constraint: Optional constraint for the beta weight. None by default.\n gamma_constraint: Optional constraint for the gamma weight. None by default.\n\n Input shape:\n Arbitrary. 
Use the keyword argument `input_shape` (tuple of\n integers, does not include the samples axis) when using this layer as the\n first layer in a model.\n\n Output shape:\n Same shape as input.\n\n Reference:\n - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).\n \"\"\"\n\n def __init__(self,\n axis=-1,\n epsilon=1e-3,\n center=True,\n scale=True,\n beta_initializer='zeros',\n gamma_initializer='ones',\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n **kwargs):\n super(LayerNormalization, self).__init__(**kwargs)\n if isinstance(axis, (list, tuple)):\n self.axis = axis[:]\n elif isinstance(axis, int):\n self.axis = axis\n else:\n raise TypeError('Expected an int or a list/tuple of ints for the '\n 'argument \\'axis\\', but received: %r' % axis)\n\n self.epsilon = epsilon\n self.center = center\n self.scale = scale\n self.beta_initializer = initializers.get(beta_initializer)\n self.gamma_initializer = initializers.get(gamma_initializer)\n self.beta_regularizer = regularizers.get(beta_regularizer)\n self.gamma_regularizer = regularizers.get(gamma_regularizer)\n self.beta_constraint = constraints.get(beta_constraint)\n self.gamma_constraint = constraints.get(gamma_constraint)\n\n self.supports_masking = True\n\n # Indicates whether a faster fused implementation can be used. This will be\n # set to True or False in build()\"\n self._fused = None\n\n def _fused_can_be_used(self, ndims):\n \"\"\"Returns false if fused implementation cannot be used.\n\n Check if the axis is contiguous and can be collapsed into the last axis.\n The self.axis is assumed to have no duplicates.\n \"\"\"\n axis = sorted(self.axis)\n can_use_fused = False\n\n if axis[-1] == ndims - 1 and axis[-1] - axis[0] == len(axis) - 1:\n can_use_fused = True\n\n # fused_batch_norm will silently raise epsilon to be at least 1.001e-5, so\n # we cannot used the fused version if epsilon is below that value. Also, the\n # variable dtype must be float32, as fused_batch_norm only supports float32\n # variables.\n if self.epsilon < 1.001e-5 or self.dtype != 'float32':\n can_use_fused = False\n\n return can_use_fused\n\n def build(self, input_shape):\n ndims = len(input_shape)\n if ndims is None:\n raise ValueError('Input shape %s has undefined rank.' % input_shape)\n\n # Convert axis to list and resolve negatives\n if isinstance(self.axis, int):\n self.axis = [self.axis]\n elif isinstance(self.axis, tuple):\n self.axis = list(self.axis)\n for idx, x in enumerate(self.axis):\n if x < 0:\n self.axis[idx] = ndims + x\n\n # Validate axes\n for x in self.axis:\n if x < 0 or x >= ndims:\n raise ValueError(\n f'Invalid axis. Expected 0 <= axis < inputs.rank (with '\n f'inputs.rank={ndims}). 
Received: layer.axis={self.axis}')\n if len(self.axis) != len(set(self.axis)):\n raise ValueError('Duplicate axis: {}'.format(tuple(self.axis)))\n\n param_shape = [input_shape[dim] for dim in self.axis]\n if self.scale:\n self.gamma = self.add_weight(\n name='gamma',\n shape=param_shape,\n initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint,\n trainable=True,\n experimental_autocast=False)\n else:\n self.gamma = None\n\n if self.center:\n self.beta = self.add_weight(\n name='beta',\n shape=param_shape,\n initializer=self.beta_initializer,\n regularizer=self.beta_regularizer,\n constraint=self.beta_constraint,\n trainable=True,\n experimental_autocast=False)\n else:\n self.beta = None\n\n self._fused = self._fused_can_be_used(ndims)\n\n self.built = True\n\n def call(self, inputs):\n # Compute the axes along which to reduce the mean / variance\n input_shape = inputs.shape\n ndims = len(input_shape)\n\n # Broadcasting only necessary for norm when the axis is not just\n # the last dimension\n broadcast_shape = [1] * ndims\n for dim in self.axis:\n broadcast_shape[dim] = input_shape.dims[dim].value\n\n def _broadcast(v):\n if (v is not None and len(v.shape) != ndims and self.axis != [ndims - 1]):\n return tf.reshape(v, broadcast_shape)\n return v\n\n if not self._fused:\n input_dtype = inputs.dtype\n if input_dtype in ('float16', 'bfloat16') and self.dtype == 'float32':\n # If mixed precision is used, cast inputs to float32 so that this is at\n # least as numerically stable as the fused version.\n inputs = tf.cast(inputs, 'float32')\n\n # Calculate the moments on the last axis (layer activations).\n mean, variance = tf.nn.moments(inputs, self.axis, keepdims=True)\n\n scale, offset = _broadcast(self.gamma), _broadcast(self.beta)\n\n # Compute layer normalization using the batch_normalization function.\n outputs = tf.nn.batch_normalization(\n inputs,\n mean,\n variance,\n offset=offset,\n scale=scale,\n variance_epsilon=self.epsilon)\n outputs = tf.cast(outputs, input_dtype)\n else:\n # Collapse dims before self.axis, and dims in self.axis\n pre_dim, in_dim = (1, 1)\n axis = sorted(self.axis)\n tensor_shape = tf.shape(inputs)\n for dim in range(0, ndims):\n dim_tensor = tensor_shape[dim]\n if dim < axis[0]:\n pre_dim = pre_dim * dim_tensor\n else:\n assert dim in axis\n in_dim = in_dim * dim_tensor\n\n squeezed_shape = [1, pre_dim, in_dim, 1]\n # This fused operation requires reshaped inputs to be NCHW.\n data_format = 'NCHW'\n\n inputs = tf.reshape(inputs, squeezed_shape)\n\n # self.gamma and self.beta have the wrong shape for fused_batch_norm, so\n # we cannot pass them as the scale and offset parameters. 
Therefore, we\n # create two constant tensors in correct shapes for fused_batch_norm and\n # later construct a separate calculation on the scale and offset.\n scale = tf.ones([pre_dim], dtype=self.dtype)\n offset = tf.zeros([pre_dim], dtype=self.dtype)\n\n # Compute layer normalization using the fused_batch_norm function.\n outputs, _, _ = tf.compat.v1.nn.fused_batch_norm(\n inputs,\n scale=scale,\n offset=offset,\n epsilon=self.epsilon,\n data_format=data_format)\n\n outputs = tf.reshape(outputs, tensor_shape)\n\n scale, offset = _broadcast(self.gamma), _broadcast(self.beta)\n\n if scale is not None:\n outputs = outputs * tf.cast(scale, outputs.dtype)\n if offset is not None:\n outputs = outputs + tf.cast(offset, outputs.dtype)\n\n # If some components of the shape got lost due to adjustments, fix that.\n outputs.set_shape(input_shape)\n\n return outputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\n 'axis': self.axis,\n 'epsilon': self.epsilon,\n 'center': self.center,\n 'scale': self.scale,\n 'beta_initializer': initializers.serialize(self.beta_initializer),\n 'gamma_initializer': initializers.serialize(self.gamma_initializer),\n 'beta_regularizer': regularizers.serialize(self.beta_regularizer),\n 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),\n 'beta_constraint': constraints.serialize(self.beta_constraint),\n 'gamma_constraint': constraints.serialize(self.gamma_constraint)\n }\n base_config = super(LayerNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] | [
[
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.constant",
"numpy.sqrt",
"tensorflow.compat.v2.device",
"tensorflow.compat.v2.compat.v1.nn.embedding_lookup",
"tensorflow.compat.v2.compat.v1.global_variables_initializer",
"tensorflow.compat.v2.Graph",
"tensorflow.compat.v2.compat.v1.get_default_graph",
"numpy.array"
],
[
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.nn.batch_normalization",
"tensorflow.compat.v2.compat.v1.nn.fused_batch_norm",
"tensorflow.compat.v2.nn.moments"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shivampotdar/Artificial-Intelligence-with-Python | [
"00221c3b1a6d8003765d1ca48b5c95f86da375d9",
"00221c3b1a6d8003765d1ca48b5c95f86da375d9"
] | [
"Chapter 10/code/category_predictor.py",
"Chapter 02/code/regressor_multivar.py"
] | [
"from sklearn.datasets import fetch_20newsgroups\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n# Define the category map\ncategory_map = {'talk.politics.misc': 'Politics', 'rec.autos': 'Autos', \n 'rec.sport.hockey': 'Hockey', 'sci.electronics': 'Electronics', \n 'sci.med': 'Medicine'}\n\n# Get the training dataset\ntraining_data = fetch_20newsgroups(subset='train', \n categories=category_map.keys(), shuffle=True, random_state=5)\n\n# Build a count vectorizer and extract term counts \ncount_vectorizer = CountVectorizer()\ntrain_tc = count_vectorizer.fit_transform(training_data.data)\nprint(\"\\nDimensions of training data:\", train_tc.shape)\n\n# Create the tf-idf transformer\ntfidf = TfidfTransformer()\ntrain_tfidf = tfidf.fit_transform(train_tc)\n\n# Define test data \ninput_data = [\n 'You need to be careful with cars when you are driving on slippery roads', \n 'A lot of devices can be operated wirelessly',\n 'Players need to be careful when they are close to goal posts',\n 'Political debates help us understand the perspectives of both sides'\n]\n\n# Train a Multinomial Naive Bayes classifier\nclassifier = MultinomialNB().fit(train_tfidf, training_data.target)\n\n# Transform input data using count vectorizer\ninput_tc = count_vectorizer.transform(input_data)\n\n# Transform vectorized data using tfidf transformer\ninput_tfidf = tfidf.transform(input_tc)\n\n# Predict the output categories\npredictions = classifier.predict(input_tfidf)\n\n# Print the outputs\nfor sent, category in zip(input_data, predictions):\n print('\\nInput:', sent, '\\nPredicted category:', \\\n category_map[training_data.target_names[category]])\n\n",
"import numpy as np\nfrom sklearn import linear_model\nimport sklearn.metrics as sm\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# Input file containing data\ninput_file = 'data_multivar_regr.txt'\n\n# Load the data from the input file\ndata = np.loadtxt(input_file, delimiter=',')\nX, y = data[:, :-1], data[:, -1]\n\n# Split data into training and testing \nnum_training = int(0.8 * len(X))\nnum_test = len(X) - num_training\n\n# Training data\nX_train, y_train = X[:num_training], y[:num_training]\n\n# Test data\nX_test, y_test = X[num_training:], y[num_training:]\n\n# Create the linear regressor model\nlinear_regressor = linear_model.LinearRegression()\n\n# Train the model using the training sets\nlinear_regressor.fit(X_train, y_train)\n\n# Predict the output\ny_test_pred = linear_regressor.predict(X_test)\n\n# Measure performance\nprint(\"Linear Regressor performance:\")\nprint(\"Mean absolute error =\", round(sm.mean_absolute_error(y_test, y_test_pred), 2))\nprint(\"Mean squared error =\", round(sm.mean_squared_error(y_test, y_test_pred), 2))\nprint(\"Median absolute error =\", round(sm.median_absolute_error(y_test, y_test_pred), 2))\nprint(\"Explained variance score =\", round(sm.explained_variance_score(y_test, y_test_pred), 2))\nprint(\"R2 score =\", round(sm.r2_score(y_test, y_test_pred), 2))\n\n# Polynomial regression\npolynomial = PolynomialFeatures(degree=10)\nX_train_transformed = polynomial.fit_transform(X_train)\ndatapoint = [[7.75, 6.35, 5.56]]\npoly_datapoint = polynomial.fit_transform(datapoint)\n\npoly_linear_model = linear_model.LinearRegression()\npoly_linear_model.fit(X_train_transformed, y_train)\nprint(\"\\nLinear regression:\\n\", linear_regressor.predict(datapoint))\nprint(\"\\nPolynomial regression:\\n\", poly_linear_model.predict(poly_datapoint))\n\n"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.feature_extraction.text.TfidfTransformer"
],
[
"sklearn.metrics.explained_variance_score",
"sklearn.metrics.r2_score",
"sklearn.metrics.median_absolute_error",
"sklearn.metrics.mean_absolute_error",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.LinearRegression",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andrzejmalota/StockPricePrediction | [
"a6d7da353b706fb2d970f2883841db14d896268f"
] | [
"src/trading_simulation/simulation.py"
] | [
"import sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\nclass Simulation:\n def __init__(self, init_investment, stock_returns, strategy, predicted_movements=None):\n self.init_investment = init_investment\n self.predicted_movements = predicted_movements\n self.stock_returns = stock_returns\n self.strategy = strategy\n self.action_history = []\n self.account_history = [init_investment]\n self.__actual_investment = 0\n self.step = 0\n self.return_on_investment = 0\n self.profit_on_investment = 0\n\n def start(self):\n for self.step in range(len(self.stock_returns)):\n if self.predicted_movements is not None:\n action = self.strategy.decide(self.predicted_movements[self.step])\n else:\n action = self.strategy.decide(self.step)\n self.__make_transaction(action)\n\n def __make_transaction(self, action):\n self.action_history.append(action)\n if action == 'buy':\n self.__buy()\n elif action == 'hold':\n self.__hold()\n elif action == 'sell':\n self.__sell()\n elif action == 'wait':\n self.__wait()\n else:\n sys.exit('Action not implemented, exiting program!')\n\n def get_investment_performance(self):\n self.return_on_investment = (self.account_history[-1] - self.init_investment) / self.init_investment\n self.profit_on_investment = self.account_history[-1] - self.init_investment\n return {'return': self.return_on_investment,\n 'profit': self.profit_on_investment}\n\n def plot_trading_history(self, stock_prices, date):\n date = date.iloc[-len(stock_prices-1):]\n stock_prices = np.insert(stock_prices, 0, stock_prices[0])\n fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(40, 20))\n ax1.plot(stock_prices, color='black', label='Cena zamknięcia akcji')\n actions = pd.DataFrame(self.action_history)\n buy_idx = actions[actions[0] == 'buy'].index.to_list()\n sell_idx = actions[actions[0] == 'sell'].index.to_list()\n stock_prices = np.array(stock_prices)\n ax1.scatter(buy_idx, stock_prices[buy_idx], color='green', s=40, label='Kupno')\n ax1.scatter(sell_idx, stock_prices[sell_idx], color='red', s=40, label='Sprzedaż')\n ax1.legend()\n ax2.plot(self.account_history[:-1], label='Kapitał')\n plt.xlabel('Krok czasowy')\n ax1.set_ylabel('Cena akcji')\n ax2.set_ylabel('Kapitał')\n ax2.legend()\n plt.show()\n\n def __calculate_daily_profit(self):\n self.__actual_investment += self.__actual_investment * self.stock_returns[self.step]\n\n def __buy(self):\n self.__actual_investment = self.account_history[self.step]\n self.__calculate_daily_profit()\n self.account_history.append(self.__actual_investment)\n\n def __hold(self):\n self.__calculate_daily_profit()\n self.account_history.append(self.__actual_investment)\n\n def __sell(self):\n self.account_history.append(self.__actual_investment)\n self.__actual_investment = 0\n\n def __wait(self):\n self.account_history.append(self.account_history[self.step-1])\n"
] | [
[
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.insert",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
pranavrajpal/scipy | [
"7dcdeffed53483a60b3e054618520e0f28adeba4",
"859c1061b3d5aa30c4466824049d69edde5499a2"
] | [
"scipy/optimize/tests/test_linprog.py",
"scipy/integrate/_ivp/rk.py"
] | [
"\"\"\"\nUnit test for Linear Programming\n\"\"\"\nimport sys\n\nimport numpy as np\nfrom numpy.testing import (assert_, assert_allclose, assert_equal,\n assert_array_less, assert_warns, suppress_warnings)\nfrom pytest import raises as assert_raises\nfrom scipy.optimize import linprog, OptimizeWarning\nfrom scipy.sparse.linalg import MatrixRankWarning\nfrom scipy.linalg import LinAlgWarning\nimport scipy.sparse\nimport pytest\n\nhas_umfpack = True\ntry:\n from scikits.umfpack import UmfpackWarning\nexcept ImportError:\n has_umfpack = False\n\nhas_cholmod = True\ntry:\n import sksparse\n from sksparse.cholmod import cholesky as cholmod\nexcept ImportError:\n has_cholmod = False\n\n\ndef _assert_iteration_limit_reached(res, maxiter):\n assert_(not res.success, \"Incorrectly reported success\")\n assert_(res.success < maxiter, \"Incorrectly reported number of iterations\")\n assert_equal(res.status, 1, \"Failed to report iteration limit reached\")\n\n\ndef _assert_infeasible(res):\n # res: linprog result object\n assert_(not res.success, \"incorrectly reported success\")\n assert_equal(res.status, 2, \"failed to report infeasible status\")\n\n\ndef _assert_unbounded(res):\n # res: linprog result object\n assert_(not res.success, \"incorrectly reported success\")\n assert_equal(res.status, 3, \"failed to report unbounded status\")\n\n\ndef _assert_unable_to_find_basic_feasible_sol(res):\n # res: linprog result object\n\n # The status may be either 2 or 4 depending on why the feasible solution\n # could not be found. If the undelying problem is expected to not have a\n # feasible solution, _assert_infeasible should be used.\n assert_(not res.success, \"incorrectly reported success\")\n assert_(res.status in (2, 4), \"failed to report optimization failure\")\n\n\ndef _assert_success(res, desired_fun=None, desired_x=None,\n rtol=1e-8, atol=1e-8):\n # res: linprog result object\n # desired_fun: desired objective function value or None\n # desired_x: desired solution or None\n if not res.success:\n msg = \"linprog status {0}, message: {1}\".format(res.status,\n res.message)\n raise AssertionError(msg)\n\n assert_equal(res.status, 0)\n if desired_fun is not None:\n assert_allclose(res.fun, desired_fun,\n err_msg=\"converged to an unexpected objective value\",\n rtol=rtol, atol=atol)\n if desired_x is not None:\n assert_allclose(res.x, desired_x,\n err_msg=\"converged to an unexpected solution\",\n rtol=rtol, atol=atol)\n\n\ndef magic_square(n):\n \"\"\"\n Generates a linear program for which integer solutions represent an\n n x n magic square; binary decision variables represent the presence\n (or absence) of an integer 1 to n^2 in each position of the square.\n \"\"\"\n\n np.random.seed(0)\n M = n * (n**2 + 1) / 2\n\n numbers = np.arange(n**4) // n**2 + 1\n\n numbers = numbers.reshape(n**2, n, n)\n\n zeros = np.zeros((n**2, n, n))\n\n A_list = []\n b_list = []\n\n # Rule 1: use every number exactly once\n for i in range(n**2):\n A_row = zeros.copy()\n A_row[i, :, :] = 1\n A_list.append(A_row.flatten())\n b_list.append(1)\n\n # Rule 2: Only one number per square\n for i in range(n):\n for j in range(n):\n A_row = zeros.copy()\n A_row[:, i, j] = 1\n A_list.append(A_row.flatten())\n b_list.append(1)\n\n # Rule 3: sum of rows is M\n for i in range(n):\n A_row = zeros.copy()\n A_row[:, i, :] = numbers[:, i, :]\n A_list.append(A_row.flatten())\n b_list.append(M)\n\n # Rule 4: sum of columns is M\n for i in range(n):\n A_row = zeros.copy()\n A_row[:, :, i] = numbers[:, :, i]\n 
A_list.append(A_row.flatten())\n b_list.append(M)\n\n # Rule 5: sum of diagonals is M\n A_row = zeros.copy()\n A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]\n A_list.append(A_row.flatten())\n b_list.append(M)\n A_row = zeros.copy()\n A_row[:, range(n), range(-1, -n - 1, -1)] = \\\n numbers[:, range(n), range(-1, -n - 1, -1)]\n A_list.append(A_row.flatten())\n b_list.append(M)\n\n A = np.array(np.vstack(A_list), dtype=float)\n b = np.array(b_list, dtype=float)\n c = np.random.rand(A.shape[1])\n\n return A, b, c, numbers\n\n\ndef lpgen_2d(m, n):\n \"\"\" -> A b c LP test: m*n vars, m+n constraints\n row sums == n/m, col sums == 1\n https://gist.github.com/denis-bz/8647461\n \"\"\"\n np.random.seed(0)\n c = - np.random.exponential(size=(m, n))\n Arow = np.zeros((m, m * n))\n brow = np.zeros(m)\n for j in range(m):\n j1 = j + 1\n Arow[j, j * n:j1 * n] = 1\n brow[j] = n / m\n\n Acol = np.zeros((n, m * n))\n bcol = np.zeros(n)\n for j in range(n):\n j1 = j + 1\n Acol[j, j::n] = 1\n bcol[j] = 1\n\n A = np.vstack((Arow, Acol))\n b = np.hstack((brow, bcol))\n\n return A, b, c.ravel()\n\n\ndef very_random_gen(seed=0):\n np.random.seed(seed)\n m_eq, m_ub, n = 10, 20, 50\n c = np.random.rand(n)-0.5\n A_ub = np.random.rand(m_ub, n)-0.5\n b_ub = np.random.rand(m_ub)-0.5\n A_eq = np.random.rand(m_eq, n)-0.5\n b_eq = np.random.rand(m_eq)-0.5\n lb = -np.random.rand(n)\n ub = np.random.rand(n)\n lb[lb < -np.random.rand()] = -np.inf\n ub[ub > np.random.rand()] = np.inf\n bounds = np.vstack((lb, ub)).T\n return c, A_ub, b_ub, A_eq, b_eq, bounds\n\n\ndef nontrivial_problem():\n c = [-1, 8, 4, -6]\n A_ub = [[-7, -7, 6, 9],\n [1, -1, -3, 0],\n [10, -10, -7, 7],\n [6, -1, 3, 4]]\n b_ub = [-3, 6, -6, 6]\n A_eq = [[-10, 1, 1, -8]]\n b_eq = [-4]\n x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391]\n f_star = 7083 / 1391\n return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star\n\n\ndef l1_regression_prob(seed=0, m=8, d=9, n=100):\n '''\n Training data is {(x0, y0), (x1, y2), ..., (xn-1, yn-1)}\n x in R^d\n y in R\n n: number of training samples\n d: dimension of x, i.e. 
x in R^d\n phi: feature map R^d -> R^m\n m: dimension of feature space\n '''\n np.random.seed(seed)\n phi = np.random.normal(0, 1, size=(m, d)) # random feature mapping\n w_true = np.random.randn(m)\n x = np.random.normal(0, 1, size=(d, n)) # features\n y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) # measurements\n\n # construct the problem\n c = np.ones(m+n)\n c[:m] = 0\n A_ub = scipy.sparse.lil_matrix((2*n, n+m))\n idx = 0\n for ii in range(n):\n A_ub[idx, :m] = phi @ x[:, ii]\n A_ub[idx, m+ii] = -1\n A_ub[idx+1, :m] = -1*phi @ x[:, ii]\n A_ub[idx+1, m+ii] = -1\n idx += 2\n A_ub = A_ub.tocsc()\n b_ub = np.zeros(2*n)\n b_ub[0::2] = y\n b_ub[1::2] = -y\n bnds = [(None, None)]*m + [(0, None)]*n\n return c, A_ub, b_ub, bnds\n\n\ndef generic_callback_test(self):\n # Check that callback is as advertised\n last_cb = {}\n\n def cb(res):\n message = res.pop('message')\n complete = res.pop('complete')\n\n assert_(res.pop('phase') in (1, 2))\n assert_(res.pop('status') in range(4))\n assert_(isinstance(res.pop('nit'), int))\n assert_(isinstance(complete, bool))\n assert_(isinstance(message, str))\n\n last_cb['x'] = res['x']\n last_cb['fun'] = res['fun']\n last_cb['slack'] = res['slack']\n last_cb['con'] = res['con']\n\n c = np.array([-3, -2])\n A_ub = [[2, 1], [1, 1], [1, 0]]\n b_ub = [10, 8, 4]\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method)\n\n _assert_success(res, desired_fun=-18.0, desired_x=[2, 6])\n assert_allclose(last_cb['fun'], res['fun'])\n assert_allclose(last_cb['x'], res['x'])\n assert_allclose(last_cb['con'], res['con'])\n assert_allclose(last_cb['slack'], res['slack'])\n\n\ndef test_unknown_solvers_and_options():\n c = np.array([-3, -2])\n A_ub = [[2, 1], [1, 1], [1, 0]]\n b_ub = [10, 8, 4]\n\n assert_raises(ValueError, linprog,\n c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')\n assert_raises(ValueError, linprog,\n c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki')\n assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub,\n options={\"rr_method\": 'ekki-ekki-ekki'})\n\n\ndef test_choose_solver():\n # 'highs' chooses 'dual'\n c = np.array([-3, -2])\n A_ub = [[2, 1], [1, 1], [1, 0]]\n b_ub = [10, 8, 4]\n\n res = linprog(c, A_ub, b_ub, method='highs')\n _assert_success(res, desired_fun=-18.0, desired_x=[2, 6])\n\n\nA_ub = None\nb_ub = None\nA_eq = None\nb_eq = None\nbounds = None\n\n################\n# Common Tests #\n################\n\n\nclass LinprogCommonTests:\n \"\"\"\n Base class for `linprog` tests. Generally, each test will be performed\n once for every derived class of LinprogCommonTests, each of which will\n typically change self.options and/or self.method. 
Effectively, these tests\n are run for many combination of method (simplex, revised simplex, and\n interior point) and options (such as pivoting rule or sparse treatment).\n \"\"\"\n\n ##################\n # Targeted Tests #\n ##################\n\n def test_callback(self):\n generic_callback_test(self)\n\n def test_disp(self):\n # test that display option does not break anything.\n A, b, c = lpgen_2d(20, 20)\n res = linprog(c, A_ub=A, b_ub=b, method=self.method,\n options={\"disp\": True})\n _assert_success(res, desired_fun=-64.049494229)\n\n def test_docstring_example(self):\n # Example from linprog docstring.\n c = [-1, 4]\n A = [[-3, 1], [1, 2]]\n b = [6, 4]\n x0_bounds = (None, None)\n x1_bounds = (-3, None)\n res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),\n options=self.options, method=self.method)\n _assert_success(res, desired_fun=-22)\n\n def test_type_error(self):\n # (presumably) checks that linprog recognizes type errors\n # This is tested more carefully in test__linprog_clean_inputs.py\n c = [1]\n A_eq = [[1]]\n b_eq = \"hello\"\n assert_raises(TypeError, linprog,\n c, A_eq=A_eq, b_eq=b_eq,\n method=self.method, options=self.options)\n\n def test_aliasing_b_ub(self):\n # (presumably) checks that linprog does not modify b_ub\n # This is tested more carefully in test__linprog_clean_inputs.py\n c = np.array([1.0])\n A_ub = np.array([[1.0]])\n b_ub_orig = np.array([3.0])\n b_ub = b_ub_orig.copy()\n bounds = (-4.0, np.inf)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-4, desired_x=[-4])\n assert_allclose(b_ub_orig, b_ub)\n\n def test_aliasing_b_eq(self):\n # (presumably) checks that linprog does not modify b_eq\n # This is tested more carefully in test__linprog_clean_inputs.py\n c = np.array([1.0])\n A_eq = np.array([[1.0]])\n b_eq_orig = np.array([3.0])\n b_eq = b_eq_orig.copy()\n bounds = (-4.0, np.inf)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=3, desired_x=[3])\n assert_allclose(b_eq_orig, b_eq)\n\n def test_non_ndarray_args(self):\n # (presumably) checks that linprog accepts list in place of arrays\n # This is tested more carefully in test__linprog_clean_inputs.py\n c = [1.0]\n A_ub = [[1.0]]\n b_ub = [3.0]\n A_eq = [[1.0]]\n b_eq = [2.0]\n bounds = (-1.0, 10.0)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=2, desired_x=[2])\n\n def test_unknown_options(self):\n c = np.array([-3, -2])\n A_ub = [[2, 1], [1, 1], [1, 0]]\n b_ub = [10, 8, 4]\n\n def f(c, A_ub=None, b_ub=None, A_eq=None,\n b_eq=None, bounds=None, options={}):\n linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=options)\n\n o = {key: self.options[key] for key in self.options}\n o['spam'] = 42\n\n assert_warns(OptimizeWarning, f,\n c, A_ub=A_ub, b_ub=b_ub, options=o)\n\n def test_invalid_inputs(self):\n\n def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):\n linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n\n # Test ill-formatted bounds\n assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)])\n assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)])\n assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)])\n\n # Test other invalid inputs\n assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])\n assert_raises(ValueError, f, 
[1, 2], A_ub=[[1]], b_ub=[1])\n assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])\n assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])\n assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)\n\n # this last check doesn't make sense for sparse presolve\n if (\"_sparse_presolve\" in self.options and\n self.options[\"_sparse_presolve\"]):\n return\n # there aren't 3-D sparse matrices\n\n assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)\n\n def test_sparse_constraints(self):\n # gh-13559: improve error message for sparse inputs when unsupported\n def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):\n linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n\n np.random.seed(0)\n m = 100\n n = 150\n A_eq = scipy.sparse.rand(m, n, 0.5)\n x_valid = np.random.randn((n))\n c = np.random.randn((n))\n ub = x_valid + np.random.rand((n))\n lb = x_valid - np.random.rand((n))\n bounds = np.column_stack((lb, ub))\n b_eq = A_eq * x_valid\n\n if self.method in {'simplex', 'revised simplex'}:\n # simplex and revised simplex should raise error\n with assert_raises(ValueError, match=f\"Method '{self.method}' \"\n \"does not support sparse constraint matrices.\"):\n linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,\n method=self.method, options=self.options)\n else:\n # other methods should succeed\n options = {**self.options}\n if self.method in {'interior-point'}:\n options['sparse'] = True\n\n res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,\n method=self.method, options=options)\n assert res.success\n\n def test_maxiter(self):\n # test iteration limit w/ Enzo example\n c = [4, 8, 3, 0, 0, 0]\n A = [\n [2, 5, 3, -1, 0, 0],\n [3, 2.5, 8, 0, -1, 0],\n [8, 10, 4, 0, 0, -1]]\n b = [185, 155, 600]\n np.random.seed(0)\n maxiter = 3\n res = linprog(c, A_eq=A, b_eq=b, method=self.method,\n options={\"maxiter\": maxiter})\n _assert_iteration_limit_reached(res, maxiter)\n assert_equal(res.nit, maxiter)\n\n def test_bounds_fixed(self):\n\n # Test fixed bounds (upper equal to lower)\n # If presolve option True, test if solution found in presolve (i.e.\n # number of iterations is 0).\n do_presolve = self.options.get('presolve', True)\n\n res = linprog([1], bounds=(1, 1),\n method=self.method, options=self.options)\n _assert_success(res, 1, 1)\n if do_presolve:\n assert_equal(res.nit, 0)\n\n res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)],\n method=self.method, options=self.options)\n _assert_success(res, 12, [5, -1, 3])\n if do_presolve:\n assert_equal(res.nit, 0)\n\n res = linprog([1, 1], bounds=[(1, 1), (1, 3)],\n method=self.method, options=self.options)\n _assert_success(res, 2, [1, 1])\n if do_presolve:\n assert_equal(res.nit, 0)\n\n res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7],\n bounds=[(-5, 5), (0, 10), (3.5, 3.5)],\n method=self.method, options=self.options)\n _assert_success(res, 15, [1, 7, 3.5])\n if do_presolve:\n assert_equal(res.nit, 0)\n\n def test_bounds_infeasible(self):\n\n # Test ill-valued bounds (upper less than lower)\n # If presolve option True, test if solution found in presolve (i.e.\n # number of iterations is 0).\n do_presolve = self.options.get('presolve', True)\n\n res = linprog([1], bounds=(1, -2), method=self.method, options=self.options)\n _assert_infeasible(res)\n if do_presolve:\n assert_equal(res.nit, 0)\n\n res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options)\n _assert_infeasible(res)\n if do_presolve:\n assert_equal(res.nit, 0)\n\n 
res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], method=self.method, options=self.options)\n _assert_infeasible(res)\n if do_presolve:\n assert_equal(res.nit, 0)\n\n def test_bounds_infeasible_2(self):\n\n # Test ill-valued bounds (lower inf, upper -inf)\n # If presolve option True, test if solution found in presolve (i.e.\n # number of iterations is 0).\n # For the simplex method, the cases do not result in an\n # infeasible status, but in a RuntimeWarning. This is a\n # consequence of having _presolve() take care of feasibility\n # checks. See issue gh-11618.\n do_presolve = self.options.get('presolve', True)\n simplex_without_presolve = not do_presolve and self.method == 'simplex'\n\n c = [1, 2, 3]\n bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)]\n bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)]\n\n if simplex_without_presolve:\n def g(c, bounds):\n res = linprog(c, bounds=bounds, method=self.method, options=self.options)\n return res\n\n with pytest.warns(RuntimeWarning):\n with pytest.raises(IndexError):\n g(c, bounds=bounds_1)\n\n with pytest.warns(RuntimeWarning):\n with pytest.raises(IndexError):\n g(c, bounds=bounds_2)\n else:\n res = linprog(c=c, bounds=bounds_1, method=self.method, options=self.options)\n _assert_infeasible(res)\n if do_presolve:\n assert_equal(res.nit, 0)\n res = linprog(c=c, bounds=bounds_2, method=self.method, options=self.options)\n _assert_infeasible(res)\n if do_presolve:\n assert_equal(res.nit, 0)\n\n def test_empty_constraint_1(self):\n c = [-1, -2]\n res = linprog(c, method=self.method, options=self.options)\n _assert_unbounded(res)\n\n def test_empty_constraint_2(self):\n c = [-1, 1, -1, 1]\n bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]\n res = linprog(c, bounds=bounds,\n method=self.method, options=self.options)\n _assert_unbounded(res)\n # Unboundedness detected in presolve requires no iterations\n if self.options.get('presolve', True):\n assert_equal(res.nit, 0)\n\n def test_empty_constraint_3(self):\n c = [1, -1, 1, -1]\n bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]\n res = linprog(c, bounds=bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2)\n\n def test_inequality_constraints(self):\n # Minimize linear function subject to linear inequality constraints.\n # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf\n c = np.array([3, 2]) * -1 # maximize\n A_ub = [[2, 1],\n [1, 1],\n [1, 0]]\n b_ub = [10, 8, 4]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-18, desired_x=[2, 6])\n\n def test_inequality_constraints2(self):\n # Minimize linear function subject to linear inequality constraints.\n # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf\n # (dead link)\n c = [6, 3]\n A_ub = [[0, 3],\n [-1, -1],\n [-2, 1]]\n b_ub = [2, -1, -1]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])\n\n def test_bounds_simple(self):\n c = [1, 2]\n bounds = (1, 2)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=[1, 1])\n\n bounds = [(1, 2), (1, 2)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=[1, 1])\n\n def test_bounded_below_only_1(self):\n c = np.array([1.0])\n A_eq = 
np.array([[1.0]])\n b_eq = np.array([3.0])\n bounds = (1.0, None)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=3, desired_x=[3])\n\n def test_bounded_below_only_2(self):\n c = np.ones(3)\n A_eq = np.eye(3)\n b_eq = np.array([1, 2, 3])\n bounds = (0.5, np.inf)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))\n\n def test_bounded_above_only_1(self):\n c = np.array([1.0])\n A_eq = np.array([[1.0]])\n b_eq = np.array([3.0])\n bounds = (None, 10.0)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=3, desired_x=[3])\n\n def test_bounded_above_only_2(self):\n c = np.ones(3)\n A_eq = np.eye(3)\n b_eq = np.array([1, 2, 3])\n bounds = (-np.inf, 4)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))\n\n def test_bounds_infinity(self):\n c = np.ones(3)\n A_eq = np.eye(3)\n b_eq = np.array([1, 2, 3])\n bounds = (-np.inf, np.inf)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))\n\n def test_bounds_mixed(self):\n # Problem has one unbounded variable and\n # another with a negative lower bound.\n c = np.array([-1, 4]) * -1 # maximize\n A_ub = np.array([[-3, 1],\n [1, 2]], dtype=np.float64)\n b_ub = [6, 4]\n x0_bounds = (-np.inf, np.inf)\n x1_bounds = (-3, np.inf)\n bounds = (x0_bounds, x1_bounds)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7])\n\n def test_bounds_equal_but_infeasible(self):\n c = [-4, 1]\n A_ub = [[7, -2], [0, 1], [2, -2]]\n b_ub = [14, 0, 3]\n bounds = [(2, 2), (0, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n def test_bounds_equal_but_infeasible2(self):\n c = [-4, 1]\n A_eq = [[7, -2], [0, 1], [2, -2]]\n b_eq = [14, 0, 3]\n bounds = [(2, 2), (0, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n def test_bounds_equal_no_presolve(self):\n # There was a bug when a lower and upper bound were equal but\n # presolve was not on to eliminate the variable. 
The bound\n # was being converted to an equality constraint, but the bound\n # was not eliminated, leading to issues in postprocessing.\n c = [1, 2]\n A_ub = [[1, 2], [1.1, 2.2]]\n b_ub = [4, 8]\n bounds = [(1, 2), (2, 2)]\n\n o = {key: self.options[key] for key in self.options}\n o[\"presolve\"] = False\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=o)\n _assert_infeasible(res)\n\n def test_zero_column_1(self):\n m, n = 3, 4\n np.random.seed(0)\n c = np.random.rand(n)\n c[1] = 1\n A_eq = np.random.rand(m, n)\n A_eq[:, 1] = 0\n b_eq = np.random.rand(m)\n A_ub = [[1, 0, 1, 1]]\n b_ub = 3\n bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-9.7087836730413404)\n\n def test_zero_column_2(self):\n np.random.seed(0)\n m, n = 2, 4\n c = np.random.rand(n)\n c[1] = -1\n A_eq = np.random.rand(m, n)\n A_eq[:, 1] = 0\n b_eq = np.random.rand(m)\n\n A_ub = np.random.rand(m, n)\n A_ub[:, 1] = 0\n b_ub = np.random.rand(m)\n bounds = (None, None)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_unbounded(res)\n # Unboundedness detected in presolve\n if self.options.get('presolve', True):\n assert_equal(res.nit, 0)\n\n def test_zero_row_1(self):\n c = [1, 2, 3]\n A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]\n b_eq = [0, 3, 0]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=3)\n\n def test_zero_row_2(self):\n A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]\n b_ub = [0, 3, 0]\n c = [1, 2, 3]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=0)\n\n def test_zero_row_3(self):\n m, n = 2, 4\n c = np.random.rand(n)\n A_eq = np.random.rand(m, n)\n A_eq[0, :] = 0\n b_eq = np.random.rand(m)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n # Infeasibility detected in presolve\n if self.options.get('presolve', True):\n assert_equal(res.nit, 0)\n\n def test_zero_row_4(self):\n m, n = 2, 4\n c = np.random.rand(n)\n A_ub = np.random.rand(m, n)\n A_ub[0, :] = 0\n b_ub = -np.random.rand(m)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n # Infeasibility detected in presolve\n if self.options.get('presolve', True):\n assert_equal(res.nit, 0)\n\n def test_singleton_row_eq_1(self):\n c = [1, 1, 1, 2]\n A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]\n b_eq = [1, 2, 2, 4]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n # Infeasibility detected in presolve\n if self.options.get('presolve', True):\n assert_equal(res.nit, 0)\n\n def test_singleton_row_eq_2(self):\n c = [1, 1, 1, 2]\n A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]\n b_eq = [1, 2, 1, 4]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=4)\n\n def test_singleton_row_ub_1(self):\n c = [1, 1, 1, 2]\n A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]\n b_ub = [1, 2, -2, 4]\n bounds = [(None, None), (0, None), (0, None), (0, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n 
_assert_infeasible(res)\n\n # Infeasibility detected in presolve\n if self.options.get('presolve', True):\n assert_equal(res.nit, 0)\n\n def test_singleton_row_ub_2(self):\n c = [1, 1, 1, 2]\n A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]\n b_ub = [1, 2, -0.5, 4]\n bounds = [(None, None), (0, None), (0, None), (0, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=0.5)\n\n def test_infeasible(self):\n # Test linprog response to an infeasible problem\n c = [-1, -1]\n A_ub = [[1, 0],\n [0, 1],\n [-1, -1]]\n b_ub = [2, 2, -5]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n def test_infeasible_inequality_bounds(self):\n c = [1]\n A_ub = [[2]]\n b_ub = 4\n bounds = (5, 6)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n # Infeasibility detected in presolve\n if self.options.get('presolve', True):\n assert_equal(res.nit, 0)\n\n def test_unbounded(self):\n # Test linprog response to an unbounded problem\n c = np.array([1, 1]) * -1 # maximize\n A_ub = [[-1, 1],\n [-1, -1]]\n b_ub = [-1, -2]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_unbounded(res)\n\n def test_unbounded_below_no_presolve_corrected(self):\n c = [1]\n bounds = [(None, 1)]\n\n o = {key: self.options[key] for key in self.options}\n o[\"presolve\"] = False\n\n res = linprog(c=c, bounds=bounds,\n method=self.method,\n options=o)\n if self.method == \"revised simplex\":\n # Revised simplex has a special pathway for no constraints.\n assert_equal(res.status, 5)\n else:\n _assert_unbounded(res)\n\n def test_unbounded_no_nontrivial_constraints_1(self):\n \"\"\"\n Test whether presolve pathway for detecting unboundedness after\n constraint elimination is working.\n \"\"\"\n c = np.array([0, 0, 0, 1, -1, -1])\n A_ub = np.array([[1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, -1]])\n b_ub = np.array([2, -2, 0])\n bounds = [(None, None), (None, None), (None, None),\n (-1, 1), (-1, 1), (0, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_unbounded(res)\n if not self.method.lower().startswith(\"highs\"):\n assert_equal(res.x[-1], np.inf)\n assert_equal(res.message[:36],\n \"The problem is (trivially) unbounded\")\n\n def test_unbounded_no_nontrivial_constraints_2(self):\n \"\"\"\n Test whether presolve pathway for detecting unboundedness after\n constraint elimination is working.\n \"\"\"\n c = np.array([0, 0, 0, 1, -1, 1])\n A_ub = np.array([[1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1]])\n b_ub = np.array([2, -2, 0])\n bounds = [(None, None), (None, None), (None, None),\n (-1, 1), (-1, 1), (None, 0)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_unbounded(res)\n if not self.method.lower().startswith(\"highs\"):\n assert_equal(res.x[-1], -np.inf)\n assert_equal(res.message[:36],\n \"The problem is (trivially) unbounded\")\n\n def test_cyclic_recovery(self):\n # Test linprogs recovery from cycling using the Klee-Minty problem\n # Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf\n c = np.array([100, 10, 1]) * -1 # maximize\n A_ub = [[1, 0, 0],\n [20, 1, 0],\n [200, 20, 1]]\n b_ub = [1, 100, 10000]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, 
options=self.options)\n _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)\n\n def test_cyclic_bland(self):\n # Test the effect of Bland's rule on a cycling problem\n c = np.array([-10, 57, 9, 24.])\n A_ub = np.array([[0.5, -5.5, -2.5, 9],\n [0.5, -1.5, -0.5, 1],\n [1, 0, 0, 0]])\n b_ub = [0, 0, 1]\n\n # copy the existing options dictionary but change maxiter\n maxiter = 100\n o = {key: val for key, val in self.options.items()}\n o['maxiter'] = maxiter\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=o)\n\n if self.method == 'simplex' and not self.options.get('bland'):\n # simplex cycles without Bland's rule\n _assert_iteration_limit_reached(res, o['maxiter'])\n else:\n # other methods, including simplex with Bland's rule, succeed\n _assert_success(res, desired_x=[1, 0, 1, 0])\n # note that revised simplex skips this test because it may or may not\n # cycle depending on the initial basis\n\n def test_remove_redundancy_infeasibility(self):\n # mostly a test of redundancy removal, which is carefully tested in\n # test__remove_redundancy.py\n m, n = 10, 10\n c = np.random.rand(n)\n A_eq = np.random.rand(m, n)\n b_eq = np.random.rand(m)\n A_eq[-1, :] = 2 * A_eq[-2, :]\n b_eq[-1] *= -1\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n #################\n # General Tests #\n #################\n\n def test_nontrivial_problem(self):\n # Problem involves all constraint types,\n # negative resource limits, and rounding issues.\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=f_star, desired_x=x_star)\n\n def test_lpgen_problem(self):\n # Test linprog with a rather large problem (400 variables,\n # 40 constraints) generated by https://gist.github.com/denis-bz/8647461\n A_ub, b_ub, c = lpgen_2d(20, 20)\n\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"Solving system with option 'sym_pos'\")\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-64.049494229)\n\n def test_network_flow(self):\n # A network flow problem with supply and demand at nodes\n # and with costs along directed edges.\n # https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf\n c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]\n n, p = -1, 1\n A_eq = [\n [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],\n [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],\n [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],\n [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]\n b_eq = [0, 19, -16, 33, 0, 0, -36]\n with suppress_warnings() as sup:\n sup.filter(LinAlgWarning)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7)\n\n def test_network_flow_limited_capacity(self):\n # A network flow problem with supply and demand at nodes\n # and with costs and capacities along directed edges.\n # http://blog.sommer-forst.de/2013/04/10/\n c = [2, 2, 1, 3, 1]\n bounds = [\n [0, 4],\n [0, 2],\n [0, 2],\n [0, 3],\n [0, 5]]\n n, p = -1, 1\n A_eq = [\n 
[n, n, 0, 0, 0],\n [p, 0, n, n, 0],\n [0, p, p, 0, n],\n [0, 0, 0, p, p]]\n b_eq = [-4, 0, 0, 4]\n\n with suppress_warnings() as sup:\n # this is an UmfpackWarning but I had trouble importing it\n if has_umfpack:\n sup.filter(UmfpackWarning)\n sup.filter(RuntimeWarning, \"scipy.linalg.solve\\nIll...\")\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n sup.filter(OptimizeWarning, \"Solving system with option...\")\n sup.filter(LinAlgWarning)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=14)\n\n def test_simplex_algorithm_wikipedia_example(self):\n # https://en.wikipedia.org/wiki/Simplex_algorithm#Example\n c = [-2, -3, -4]\n A_ub = [\n [3, 2, 1],\n [2, 5, 3]]\n b_ub = [10, 15]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-20)\n\n def test_enzo_example(self):\n # https://github.com/scipy/scipy/issues/1779 lp2.py\n #\n # Translated from Octave code at:\n # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm\n # and placed under MIT licence by Enzo Michelangeli\n # with permission explicitly granted by the original author,\n # Prof. Kazunobu Yoshida\n c = [4, 8, 3, 0, 0, 0]\n A_eq = [\n [2, 5, 3, -1, 0, 0],\n [3, 2.5, 8, 0, -1, 0],\n [8, 10, 4, 0, 0, -1]]\n b_eq = [185, 155, 600]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=317.5,\n desired_x=[66.25, 0, 17.5, 0, 183.75, 0],\n atol=6e-6, rtol=1e-7)\n\n def test_enzo_example_b(self):\n # rescued from https://github.com/scipy/scipy/pull/218\n c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]\n A_eq = [[-1, -1, -1, 0, 0, 0],\n [0, 0, 0, 1, 1, 1],\n [1, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 1]]\n b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]\n\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-1.77,\n desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])\n\n def test_enzo_example_c_with_degeneracy(self):\n # rescued from https://github.com/scipy/scipy/pull/218\n m = 20\n c = -np.ones(m)\n tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1)\n A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))\n b_eq = [0, 0]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=0, desired_x=np.zeros(m))\n\n def test_enzo_example_c_with_unboundedness(self):\n # rescued from https://github.com/scipy/scipy/pull/218\n m = 50\n c = -np.ones(m)\n tmp = 2 * np.pi * np.arange(m) / (m + 1)\n A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))\n b_eq = [0, 0]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_unbounded(res)\n\n def test_enzo_example_c_with_infeasibility(self):\n # rescued from https://github.com/scipy/scipy/pull/218\n m = 50\n c = -np.ones(m)\n tmp = 2 * np.pi * np.arange(m) / (m + 1)\n A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))\n b_eq = [1, 1]\n\n o = {key: self.options[key] for key in self.options}\n o[\"presolve\"] = False\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=o)\n _assert_infeasible(res)\n\n def test_basic_artificial_vars(self):\n # Problem is chosen to test two phase simplex methods when at the end\n # of phase 1 some artificial variables remain in the basis.\n # Also, 
for `method='simplex'`, the row in the tableau corresponding\n # with the artificial variables is not all zero.\n c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004])\n A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0],\n [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0],\n [1.0, 1.0, 0, 0, 0, 0]])\n b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0])\n A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]])\n b_eq = np.array([0, 0])\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c),\n atol=2e-6)\n\n def test_optimize_result(self):\n # check all fields in OptimizeResult\n c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0)\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,\n bounds=bounds, method=self.method, options=self.options)\n assert_(res.success)\n assert_(res.nit)\n assert_(not res.status)\n assert_(res.message == \"Optimization terminated successfully.\")\n assert_allclose(c @ res.x, res.fun)\n assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11)\n assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11)\n\n #################\n # Bug Fix Tests #\n #################\n\n def test_bug_5400(self):\n # https://github.com/scipy/scipy/issues/5400\n bounds = [\n (0, None),\n (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100),\n (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900),\n (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)]\n\n f = 1 / 9\n g = -1e4\n h = -3.1\n A_ub = np.array([\n [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0],\n [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1],\n [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0],\n [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0],\n [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0],\n [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0],\n [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]])\n\n b_ub = np.array([\n 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900,\n 900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 0, 0, 0, 0, 0, 0])\n with suppress_warnings() as sup:\n 
sup.filter(OptimizeWarning,\n \"Solving system with option 'sym_pos'\")\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=-106.63507541835018)\n\n def test_bug_6139(self):\n # linprog(method='simplex') fails to find a basic feasible solution\n # if phase 1 pseudo-objective function is outside the provided tol.\n # https://github.com/scipy/scipy/issues/6139\n\n # Note: This is not strictly a bug as the default tolerance determines\n # if a result is \"close enough\" to zero and should not be expected\n # to work for all cases.\n\n c = np.array([1, 1, 1])\n A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]])\n b_eq = np.array([5.00000000e+00, -1.00000000e+04])\n A_ub = -np.array([[0., 1000000., 1010000.]])\n b_ub = -np.array([10000000.])\n bounds = (None, None)\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n\n _assert_success(res, desired_fun=14.95,\n desired_x=np.array([5, 4.95, 5]))\n\n def test_bug_6690(self):\n # linprog simplex used to violate bound constraint despite reporting\n # success.\n # https://github.com/scipy/scipy/issues/6690\n\n A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]])\n b_eq = np.array([0.9626])\n A_ub = np.array([\n [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0],\n [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37],\n [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0]\n ])\n b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022])\n bounds = np.array([\n [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],\n [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]\n ]).T\n c = np.array([\n -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28\n ])\n\n with suppress_warnings() as sup:\n if has_umfpack:\n sup.filter(UmfpackWarning)\n sup.filter(OptimizeWarning,\n \"Solving system with option 'cholesky'\")\n sup.filter(OptimizeWarning, \"Solving system with option 'sym_pos'\")\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n\n desired_fun = -1.19099999999\n desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800,\n 0.5000, 0.4700, 0.0900, 0.3200, -0.7300])\n _assert_success(res, desired_fun=desired_fun, desired_x=desired_x)\n\n # Add small tol value to ensure arrays are less than or equal.\n atol = 1e-6\n assert_array_less(bounds[:, 0] - atol, res.x)\n assert_array_less(res.x, bounds[:, 1] + atol)\n\n def test_bug_7044(self):\n # linprog simplex failed to \"identify correct constraints\" (?)\n # leading to a non-optimal solution if A is rank-deficient.\n # https://github.com/scipy/scipy/issues/7044\n\n A_eq, b_eq, c, N = magic_square(3)\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n\n desired_fun = 1.730550597\n _assert_success(res, desired_fun=desired_fun)\n assert_allclose(A_eq.dot(res.x), b_eq)\n assert_array_less(np.zeros(res.x.size) - 1e-5, res.x)\n\n def test_bug_7237(self):\n # https://github.com/scipy/scipy/issues/7237\n # linprog simplex \"explodes\" when the pivot value is very\n # close to zero.\n\n c 
= np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0])\n A_ub = np.array([\n [1., -724., 911., -551., -555., -896., 478., -80., -293.],\n [1., 566., 42., 937., 233., 883., 392., -909., 57.],\n [1., -208., -894., 539., 321., 532., -924., 942., 55.],\n [1., 857., -859., 83., 462., -265., -971., 826., 482.],\n [1., 314., -424., 245., -424., 194., -443., -104., -429.],\n [1., 540., 679., 361., 149., -827., 876., 633., 302.],\n [0., -1., -0., -0., -0., -0., -0., -0., -0.],\n [0., -0., -1., -0., -0., -0., -0., -0., -0.],\n [0., -0., -0., -1., -0., -0., -0., -0., -0.],\n [0., -0., -0., -0., -1., -0., -0., -0., -0.],\n [0., -0., -0., -0., -0., -1., -0., -0., -0.],\n [0., -0., -0., -0., -0., -0., -1., -0., -0.],\n [0., -0., -0., -0., -0., -0., -0., -1., -0.],\n [0., -0., -0., -0., -0., -0., -0., -0., -1.],\n [0., 1., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 1., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 1., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 1.]\n ])\n b_ub = np.array([\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.])\n A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]])\n b_eq = np.array([[1.]])\n bounds = [(None, None)] * 9\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=108.568535, atol=1e-6)\n\n def test_bug_8174(self):\n # https://github.com/scipy/scipy/issues/8174\n # The simplex method sometimes \"explodes\" if the pivot value is very\n # close to zero.\n A_ub = np.array([\n [22714, 1008, 13380, -2713.5, -1116],\n [-4986, -1092, -31220, 17386.5, 684],\n [-4986, 0, 0, -2713.5, 0],\n [22714, 0, 0, 17386.5, 0]])\n b_ub = np.zeros(A_ub.shape[0])\n c = -np.ones(A_ub.shape[1])\n bounds = [(0, 1)] * A_ub.shape[1]\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n\n if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex':\n _assert_unable_to_find_basic_feasible_sol(res)\n else:\n _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6)\n\n def test_bug_8174_2(self):\n # Test supplementary example from issue 8174.\n # https://github.com/scipy/scipy/issues/8174\n # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution\n c = np.array([1, 0, 0, 0, 0, 0, 0])\n A_ub = -np.identity(7)\n b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]])\n A_eq = np.array([\n [1, 1, 1, 1, 1, 1, 0],\n [0.3, 1.3, 0.9, 0, 0, 0, -1],\n [0.3, 0, 0, 0, 0, 0, -2/3],\n [0, 0.65, 0, 0, 0, 0, -1/15],\n [0, 0, 0.3, 0, 0, 0, -1/15]\n ])\n b_eq = np.array([[100], [0], [0], [0], [0]])\n\n with suppress_warnings() as sup:\n if has_umfpack:\n sup.filter(UmfpackWarning)\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_fun=43.3333333331385)\n\n def test_bug_8561(self):\n # Test that pivot row is chosen correctly when using Bland's rule\n # This was originally written for the simplex method with\n # Bland's rule only, but it doesn't hurt to test all methods/options\n # https://github.com/scipy/scipy/issues/8561\n c = np.array([7, 0, -4, 1.5, 1.5])\n A_ub = np.array([\n [4, 5.5, 1.5, 1.0, 
-3.5],\n [1, -2.5, -2, 2.5, 0.5],\n [3, -0.5, 4, -12.5, -7],\n [-1, 4.5, 2, -3.5, -2],\n [5.5, 2, -4.5, -1, 9.5]])\n b_ub = np.array([0, 0, 0, 0, 1])\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options,\n method=self.method)\n _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])\n\n def test_bug_8662(self):\n # linprog simplex used to report incorrect optimal results\n # https://github.com/scipy/scipy/issues/8662\n c = [-10, 10, 6, 3]\n A_ub = [[8, -8, -4, 6],\n [-8, 8, 4, -6],\n [-4, 4, 8, -4],\n [3, -3, -3, -10]]\n b_ub = [9, -9, -9, -4]\n bounds = [(0, None), (0, None), (0, None), (0, None)]\n desired_fun = 36.0000000000\n\n with suppress_warnings() as sup:\n if has_umfpack:\n sup.filter(UmfpackWarning)\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n\n # Set boundary condition as a constraint\n A_ub.append([0, 0, -1, 0])\n b_ub.append(0)\n bounds[2] = (None, None)\n\n with suppress_warnings() as sup:\n if has_umfpack:\n sup.filter(UmfpackWarning)\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n rtol = 1e-5\n _assert_success(res1, desired_fun=desired_fun, rtol=rtol)\n _assert_success(res2, desired_fun=desired_fun, rtol=rtol)\n\n def test_bug_8663(self):\n # exposed a bug in presolve\n # https://github.com/scipy/scipy/issues/8663\n c = [1, 5]\n A_eq = [[0, -7]]\n b_eq = [-6]\n bounds = [(0, None), (None, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7)\n\n def test_bug_8664(self):\n # interior-point has trouble with this when presolve is off\n # tested for interior-point with presolve off in TestLinprogIPSpecific\n # https://github.com/scipy/scipy/issues/8664\n c = [4]\n A_ub = [[2], [5]]\n b_ub = [4, 4]\n A_eq = [[0], [-8], [9]]\n b_eq = [3, 2, 10]\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n sup.filter(OptimizeWarning, \"Solving system with option...\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_infeasible(res)\n\n def test_bug_8973(self):\n \"\"\"\n Test whether bug described at:\n https://github.com/scipy/scipy/issues/8973\n was fixed.\n \"\"\"\n c = np.array([0, 0, 0, 1, -1])\n A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])\n b_ub = np.array([2, -2])\n bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n # solution vector x is not unique\n _assert_success(res, desired_fun=-2)\n # HiGHS IPM had an issue where the following wasn't true!\n assert_equal(c @ res.x, res.fun)\n\n def test_bug_8973_2(self):\n \"\"\"\n Additional test for:\n https://github.com/scipy/scipy/issues/8973\n suggested in\n https://github.com/scipy/scipy/pull/8985\n review by @antonior92\n \"\"\"\n c = np.zeros(1)\n A_ub = np.array([[1]])\n b_ub = np.array([-2])\n bounds = (None, None)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=[-2], desired_fun=0)\n\n def test_bug_10124(self):\n \"\"\"\n Test for linprog docstring problem\n 'disp'=True caused revised simplex failure\n \"\"\"\n c = np.zeros(1)\n A_ub = np.array([[1]])\n b_ub = 
np.array([-2])\n bounds = (None, None)\n c = [-1, 4]\n A_ub = [[-3, 1], [1, 2]]\n b_ub = [6, 4]\n bounds = [(None, None), (-3, None)]\n o = {\"disp\": True}\n o.update(self.options)\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=o)\n _assert_success(res, desired_x=[10, -3], desired_fun=-22)\n\n def test_bug_10349(self):\n \"\"\"\n Test for redundancy removal tolerance issue\n https://github.com/scipy/scipy/issues/10349\n \"\"\"\n A_eq = np.array([[1, 1, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 1],\n [1, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0, 1]])\n b_eq = np.array([221, 210, 10, 141, 198, 102])\n c = np.concatenate((0, 1, np.zeros(4)), axis=None)\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options)\n _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92)\n\n def test_bug_10466(self):\n \"\"\"\n Test that autoscale fixes poorly-scaled problem\n \"\"\"\n c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.]\n A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],\n [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.],\n [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],\n [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],\n [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],\n [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],\n [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]]\n\n b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08,\n 1.00663296e+09, 1.07374182e+09, 1.07374182e+09,\n 1.07374182e+09, 1.07374182e+09, 1.07374182e+09,\n 1.07374182e+09]\n\n o = {}\n # HiGHS methods don't use autoscale option\n if not self.method.startswith(\"highs\"):\n o = {\"autoscale\": True}\n o.update(self.options)\n\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"Solving system with option...\")\n if has_umfpack:\n sup.filter(UmfpackWarning)\n sup.filter(RuntimeWarning, \"scipy.linalg.solve\\nIll...\")\n sup.filter(RuntimeWarning, \"divide by zero encountered...\")\n sup.filter(RuntimeWarning, \"overflow encountered...\")\n sup.filter(RuntimeWarning, \"invalid value encountered...\")\n sup.filter(LinAlgWarning, \"Ill-conditioned matrix...\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=o)\n assert_allclose(res.fun, -8589934560)\n\n#########################\n# Method-specific Tests #\n#########################\n\n\nclass LinprogSimplexTests(LinprogCommonTests):\n method = \"simplex\"\n\n\nclass LinprogIPTests(LinprogCommonTests):\n method = \"interior-point\"\n\n\nclass LinprogRSTests(LinprogCommonTests):\n method = \"revised simplex\"\n\n # Revised simplex does not reliably solve these problems.\n # Failure is intermittent due to the random choice of elements to complete\n # the basis after phase 1 terminates. In any case, linprog exists\n # gracefully, reporting numerical difficulties. 
I do not think this should\n # prevent revised simplex from being merged, as it solves the problems\n # most of the time and solves a broader range of problems than the existing\n # simplex implementation.\n # I believe that the root cause is the same for all three and that this\n # same issue prevents revised simplex from solving many other problems\n # reliably. Somehow the pivoting rule allows the algorithm to pivot into\n # a singular basis. I haven't been able to find a reference that\n # acknowledges this possibility, suggesting that there is a bug. On the\n # other hand, the pivoting rule is quite simple, and I can't find a\n # mistake, which suggests that this is a possibility with the pivoting\n # rule. Hopefully, a better pivoting rule will fix the issue.\n\n def test_bug_5400(self):\n pytest.skip(\"Intermittent failure acceptable.\")\n\n def test_bug_8662(self):\n pytest.skip(\"Intermittent failure acceptable.\")\n\n def test_network_flow(self):\n pytest.skip(\"Intermittent failure acceptable.\")\n\n\nclass LinprogHiGHSTests(LinprogCommonTests):\n def test_callback(self):\n # this is the problem from test_callback\n cb = lambda res: None\n c = np.array([-3, -2])\n A_ub = [[2, 1], [1, 1], [1, 0]]\n b_ub = [10, 8, 4]\n assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub,\n callback=cb, method=self.method)\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method)\n _assert_success(res, desired_fun=-18.0, desired_x=[2, 6])\n\n @pytest.mark.parametrize(\"options\",\n [{\"maxiter\": -1},\n {\"disp\": -1},\n {\"presolve\": -1},\n {\"time_limit\": -1},\n {\"dual_feasibility_tolerance\": -1},\n {\"primal_feasibility_tolerance\": -1},\n {\"ipm_optimality_tolerance\": -1},\n {\"simplex_dual_edge_weight_strategy\": \"ekki\"},\n ])\n def test_invalid_option_values(self, options):\n def f(options):\n linprog(1, method=self.method, options=options)\n options.update(self.options)\n assert_warns(OptimizeWarning, f, options=options)\n\n def test_crossover(self):\n c = np.array([1, 1]) * -1 # maximize\n A_ub = np.array([[1, 1]])\n b_ub = [1]\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,\n bounds=bounds, method=self.method, options=self.options)\n # there should be nonzero crossover iterations for IPM (only)\n assert_equal(res.crossover_nit == 0, self.method != \"highs-ipm\")\n\n\n################################\n# Simplex Option-Specific Tests#\n################################\n\n\nclass TestLinprogSimplexDefault(LinprogSimplexTests):\n\n def setup_method(self):\n self.options = {}\n\n def test_bug_5400(self):\n pytest.skip(\"Simplex fails on this problem.\")\n\n def test_bug_7237_low_tol(self):\n # Fails if the tolerance is too strict. Here, we test that\n # even if the solution is wrong, the appropriate error is raised.\n pytest.skip(\"Simplex fails on this problem.\")\n\n def test_bug_8174_low_tol(self):\n # Fails if the tolerance is too strict. Here, we test that\n # even if the solution is wrong, the appropriate warning is issued.\n self.options.update({'tol': 1e-12})\n with pytest.warns(OptimizeWarning):\n super(TestLinprogSimplexDefault, self).test_bug_8174()\n\n\nclass TestLinprogSimplexBland(LinprogSimplexTests):\n\n def setup_method(self):\n self.options = {'bland': True}\n\n def test_bug_5400(self):\n pytest.skip(\"Simplex fails on this problem.\")\n\n def test_bug_8174_low_tol(self):\n # Fails if the tolerance is too strict. 
Here, we test that\n # even if the solution is wrong, the appropriate error is raised.\n self.options.update({'tol': 1e-12})\n with pytest.raises(AssertionError):\n with pytest.warns(OptimizeWarning):\n super(TestLinprogSimplexBland, self).test_bug_8174()\n\n\nclass TestLinprogSimplexNoPresolve(LinprogSimplexTests):\n\n def setup_method(self):\n self.options = {'presolve': False}\n\n is_32_bit = np.intp(0).itemsize < 8\n is_linux = sys.platform.startswith('linux')\n\n @pytest.mark.xfail(\n condition=is_32_bit and is_linux,\n reason='Fails with warning on 32-bit linux')\n def test_bug_5400(self):\n super(TestLinprogSimplexNoPresolve, self).test_bug_5400()\n\n def test_bug_6139_low_tol(self):\n # Linprog(method='simplex') fails to find a basic feasible solution\n # if phase 1 pseudo-objective function is outside the provided tol.\n # https://github.com/scipy/scipy/issues/6139\n # Without ``presolve`` eliminating such rows the result is incorrect.\n self.options.update({'tol': 1e-12})\n with pytest.raises(AssertionError, match='linprog status 4'):\n return super(TestLinprogSimplexNoPresolve, self).test_bug_6139()\n\n def test_bug_7237_low_tol(self):\n pytest.skip(\"Simplex fails on this problem.\")\n\n def test_bug_8174_low_tol(self):\n # Fails if the tolerance is too strict. Here, we test that\n # even if the solution is wrong, the appropriate warning is issued.\n self.options.update({'tol': 1e-12})\n with pytest.warns(OptimizeWarning):\n super(TestLinprogSimplexNoPresolve, self).test_bug_8174()\n\n def test_unbounded_no_nontrivial_constraints_1(self):\n pytest.skip(\"Tests behavior specific to presolve\")\n\n def test_unbounded_no_nontrivial_constraints_2(self):\n pytest.skip(\"Tests behavior specific to presolve\")\n\n\n#######################################\n# Interior-Point Option-Specific Tests#\n#######################################\n\n\nclass TestLinprogIPDense(LinprogIPTests):\n options = {\"sparse\": False}\n\n\nif has_cholmod:\n class TestLinprogIPSparseCholmod(LinprogIPTests):\n options = {\"sparse\": True, \"cholesky\": True}\n\n\nif has_umfpack:\n class TestLinprogIPSparseUmfpack(LinprogIPTests):\n options = {\"sparse\": True, \"cholesky\": False}\n\n def test_bug_10466(self):\n pytest.skip(\"Autoscale doesn't fix everything, and that's OK.\")\n\n\nclass TestLinprogIPSparse(LinprogIPTests):\n options = {\"sparse\": True, \"cholesky\": False, \"sym_pos\": False}\n\n @pytest.mark.xfail_on_32bit(\"This test is sensitive to machine epsilon level \"\n \"perturbations in linear system solution in \"\n \"_linprog_ip._sym_solve.\")\n def test_bug_6139(self):\n super(TestLinprogIPSparse, self).test_bug_6139()\n\n @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')\n def test_bug_6690(self):\n # Test defined in base class, but can't mark as xfail there\n super(TestLinprogIPSparse, self).test_bug_6690()\n\n def test_magic_square_sparse_no_presolve(self):\n # test linprog with a problem with a rank-deficient A_eq matrix\n A_eq, b_eq, c, N = magic_square(3)\n bounds = (0, 1)\n\n with suppress_warnings() as sup:\n if has_umfpack:\n sup.filter(UmfpackWarning)\n sup.filter(MatrixRankWarning, \"Matrix is exactly singular\")\n sup.filter(OptimizeWarning, \"Solving system with option...\")\n\n o = {key: self.options[key] for key in self.options}\n o[\"presolve\"] = False\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=o)\n _assert_success(res, desired_fun=1.730550597)\n\n def test_sparse_solve_options(self):\n # checking that problem is solved with all 
column permutation options\n A_eq, b_eq, c, N = magic_square(3)\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n sup.filter(OptimizeWarning, \"Invalid permc_spec option\")\n o = {key: self.options[key] for key in self.options}\n permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',\n 'COLAMD', 'ekki-ekki-ekki')\n # 'ekki-ekki-ekki' raises warning about invalid permc_spec option\n # and uses default\n for permc_spec in permc_specs:\n o[\"permc_spec\"] = permc_spec\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=o)\n _assert_success(res, desired_fun=1.730550597)\n\n\nclass TestLinprogIPSparsePresolve(LinprogIPTests):\n options = {\"sparse\": True, \"_sparse_presolve\": True}\n\n @pytest.mark.xfail_on_32bit(\"This test is sensitive to machine epsilon level \"\n \"perturbations in linear system solution in \"\n \"_linprog_ip._sym_solve.\")\n def test_bug_6139(self):\n super(TestLinprogIPSparsePresolve, self).test_bug_6139()\n\n def test_enzo_example_c_with_infeasibility(self):\n pytest.skip('_sparse_presolve=True incompatible with presolve=False')\n\n @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')\n def test_bug_6690(self):\n # Test defined in base class, but can't mark as xfail there\n super(TestLinprogIPSparsePresolve, self).test_bug_6690()\n\n\nclass TestLinprogIPSpecific:\n method = \"interior-point\"\n # the following tests don't need to be performed separately for\n # sparse presolve, sparse after presolve, and dense\n\n def test_solver_select(self):\n # check that default solver is selected as expected\n if has_cholmod:\n options = {'sparse': True, 'cholesky': True}\n elif has_umfpack:\n options = {'sparse': True, 'cholesky': False}\n else:\n options = {'sparse': True, 'cholesky': False, 'sym_pos': False}\n A, b, c = lpgen_2d(20, 20)\n res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options)\n res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver\n assert_allclose(res1.fun, res2.fun,\n err_msg=\"linprog default solver unexpected result\",\n rtol=1e-15, atol=1e-15)\n\n def test_unbounded_below_no_presolve_original(self):\n # formerly caused segfault in TravisCI w/ \"cholesky\":True\n c = [-1]\n bounds = [(None, 1)]\n res = linprog(c=c, bounds=bounds,\n method=self.method,\n options={\"presolve\": False, \"cholesky\": True})\n _assert_success(res, desired_fun=-1)\n\n def test_cholesky(self):\n # use cholesky factorization and triangular solves\n A, b, c = lpgen_2d(20, 20)\n res = linprog(c, A_ub=A, b_ub=b, method=self.method,\n options={\"cholesky\": True}) # only for dense\n _assert_success(res, desired_fun=-64.049494229)\n\n def test_alternate_initial_point(self):\n # use \"improved\" initial point\n A, b, c = lpgen_2d(20, 20)\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"scipy.linalg.solve\\nIll...\")\n sup.filter(OptimizeWarning, \"Solving system with option...\")\n sup.filter(LinAlgWarning, \"Ill-conditioned matrix...\")\n res = linprog(c, A_ub=A, b_ub=b, method=self.method,\n options={\"ip\": True, \"disp\": True})\n # ip code is independent of sparse/dense\n _assert_success(res, desired_fun=-64.049494229)\n\n def test_bug_8664(self):\n # interior-point has trouble with this when presolve is off\n c = [4]\n A_ub = [[2], [5]]\n b_ub = [4, 4]\n A_eq = [[0], [-8], [9]]\n b_eq = [3, 2, 10]\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n sup.filter(OptimizeWarning, \"Solving system with option...\")\n res = linprog(c, A_ub, b_ub, 
A_eq, b_eq, bounds,\n method=self.method, options={\"presolve\": False})\n assert_(not res.success, \"Incorrectly reported success\")\n\n\n########################################\n# Revised Simplex Option-Specific Tests#\n########################################\n\n\nclass TestLinprogRSCommon(LinprogRSTests):\n options = {}\n\n def test_cyclic_bland(self):\n pytest.skip(\"Intermittent failure acceptable.\")\n\n def test_nontrivial_problem_with_guess(self):\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options, x0=x_star)\n _assert_success(res, desired_fun=f_star, desired_x=x_star)\n assert_equal(res.nit, 0)\n\n def test_nontrivial_problem_with_unbounded_variables(self):\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n bounds = [(None, None), (None, None), (0, None), (None, None)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options, x0=x_star)\n _assert_success(res, desired_fun=f_star, desired_x=x_star)\n assert_equal(res.nit, 0)\n\n def test_nontrivial_problem_with_bounded_variables(self):\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n bounds = [(None, 1), (1, None), (0, None), (.4, .6)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options, x0=x_star)\n _assert_success(res, desired_fun=f_star, desired_x=x_star)\n assert_equal(res.nit, 0)\n\n def test_nontrivial_problem_with_negative_unbounded_variable(self):\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n b_eq = [4]\n x_star = np.array([-219/385, 582/385, 0, 4/10])\n f_star = 3951/385\n bounds = [(None, None), (1, None), (0, None), (.4, .6)]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options, x0=x_star)\n _assert_success(res, desired_fun=f_star, desired_x=x_star)\n assert_equal(res.nit, 0)\n\n def test_nontrivial_problem_with_bad_guess(self):\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n bad_guess = [1, 2, 3, .5]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options, x0=bad_guess)\n assert_equal(res.status, 6)\n\n def test_redundant_constraints_with_guess(self):\n A, b, c, N = magic_square(3)\n p = np.random.rand(*c.shape)\n with suppress_warnings() as sup:\n sup.filter(OptimizeWarning, \"A_eq does not appear...\")\n sup.filter(RuntimeWarning, \"invalid value encountered\")\n sup.filter(LinAlgWarning)\n res = linprog(c, A_eq=A, b_eq=b, method=self.method)\n res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x)\n res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x)\n _assert_success(res2, desired_fun=1.730550597)\n assert_equal(res2.nit, 0)\n _assert_success(res3)\n assert_(res3.nit < res.nit) # hot start reduces iterations\n\n\nclass TestLinprogRSBland(LinprogRSTests):\n options = {\"pivot\": \"bland\"}\n\n\n############################################\n# HiGHS-Simplex-Dual Option-Specific Tests #\n############################################\n\n\nclass TestLinprogHiGHSSimplexDual(LinprogHiGHSTests):\n method = \"highs-ds\"\n options = {}\n\n def test_lad_regression(self):\n '''The scaled model should be optimal but unscaled model infeasible.'''\n c, A_ub, b_ub, bnds = l1_regression_prob()\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds,\n method=self.method, options=self.options)\n assert_equal(res.status, 4)\n assert_('An optimal solution to the scaled 
'\n 'model was found but' in res.message)\n assert_(res.x is not None)\n assert_(np.all(res.slack > -1e-6))\n assert_(np.all(res.x <= [np.inf if u is None else u for l, u in bnds]))\n assert_(np.all(res.x >= [-np.inf if l is None else l for l, u in bnds]))\n\n\n###################################\n# HiGHS-IPM Option-Specific Tests #\n###################################\n\n\nclass TestLinprogHiGHSIPM(LinprogHiGHSTests):\n method = \"highs-ipm\"\n options = {}\n\n\n###########################\n# Autoscale-Specific Tests#\n###########################\n\n\nclass AutoscaleTests:\n options = {\"autoscale\": True}\n\n test_bug_6139 = LinprogCommonTests.test_bug_6139\n test_bug_6690 = LinprogCommonTests.test_bug_6690\n test_bug_7237 = LinprogCommonTests.test_bug_7237\n\n\nclass TestAutoscaleIP(AutoscaleTests):\n method = \"interior-point\"\n\n def test_bug_6139(self):\n self.options['tol'] = 1e-10\n return AutoscaleTests.test_bug_6139(self)\n\n\nclass TestAutoscaleSimplex(AutoscaleTests):\n method = \"simplex\"\n\n\nclass TestAutoscaleRS(AutoscaleTests):\n method = \"revised simplex\"\n\n def test_nontrivial_problem_with_guess(self):\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options, x0=x_star)\n _assert_success(res, desired_fun=f_star, desired_x=x_star)\n assert_equal(res.nit, 0)\n\n def test_nontrivial_problem_with_bad_guess(self):\n c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()\n bad_guess = [1, 2, 3, .5]\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,\n method=self.method, options=self.options, x0=bad_guess)\n assert_equal(res.status, 6)\n\n\n###########################\n# Redundancy Removal Tests#\n###########################\n\n\nclass RRTests:\n method = \"interior-point\"\n LCT = LinprogCommonTests\n # these are a few of the existing tests that have redundancy\n test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility\n test_bug_10349 = LCT.test_bug_10349\n test_bug_7044 = LCT.test_bug_7044\n test_NFLC = LCT.test_network_flow_limited_capacity\n test_enzo_example_b = LCT.test_enzo_example_b\n\n\nclass TestRRSVD(RRTests):\n options = {\"rr_method\": \"SVD\"}\n\n\nclass TestRRPivot(RRTests):\n options = {\"rr_method\": \"pivot\"}\n\n\nclass TestRRID(RRTests):\n options = {\"rr_method\": \"ID\"}\n",
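A minimal, self-contained sketch (not part of the scipy test suite above; the chosen method string and the printed fields are illustrative assumptions) of the call pattern these tests exercise, using the problem data from test_simplex_algorithm_wikipedia_example:

from scipy.optimize import linprog

# Problem from test_simplex_algorithm_wikipedia_example: minimize c @ x
# subject to A_ub @ x <= b_ub, with linprog's default nonnegativity bounds.
c = [-2, -3, -4]
A_ub = [[3, 2, 1],
        [2, 5, 3]]
b_ub = [10, 15]

# Any of the method strings the test classes parametrize over would work here.
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='interior-point')

# These are the OptimizeResult fields the _assert_* helpers inspect.
print(res.status)   # 0 indicates success
print(res.fun)      # approximately -20, the desired_fun checked above
print(res.x)        # an optimal point, close to [0, 0, 5]
print(res.slack)    # b_ub - A_ub @ res.x, as test_optimize_result verifies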
"import numpy as np\nfrom .base import OdeSolver, DenseOutput\nfrom .common import (validate_max_step, validate_tol, select_initial_step,\n norm, warn_extraneous, validate_first_step)\nfrom . import dop853_coefficients\n\n# Multiply steps computed from asymptotic behaviour of errors by this.\nSAFETY = 0.9\n\nMIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.\nMAX_FACTOR = 10 # Maximum allowed increase in a step size.\n\n\ndef rk_step(fun, t, y, f, h, A, B, C, K):\n \"\"\"Perform a single Runge-Kutta step.\n\n This function computes a prediction of an explicit Runge-Kutta method and\n also estimates the error of a less accurate method.\n\n Notation for Butcher tableau is as in [1]_.\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system.\n t : float\n Current time.\n y : ndarray, shape (n,)\n Current state.\n f : ndarray, shape (n,)\n Current value of the derivative, i.e., ``fun(x, y)``.\n h : float\n Step to use.\n A : ndarray, shape (n_stages, n_stages)\n Coefficients for combining previous RK stages to compute the next\n stage. For explicit methods the coefficients at and above the main\n diagonal are zeros.\n B : ndarray, shape (n_stages,)\n Coefficients for combining RK stages for computing the final\n prediction.\n C : ndarray, shape (n_stages,)\n Coefficients for incrementing time for consecutive RK stages.\n The value for the first stage is always zero.\n K : ndarray, shape (n_stages + 1, n)\n Storage array for putting RK stages here. Stages are stored in rows.\n The last row is a linear combination of the previous rows with\n coefficients\n\n Returns\n -------\n y_new : ndarray, shape (n,)\n Solution at t + h computed with a higher accuracy.\n f_new : ndarray, shape (n,)\n Derivative ``fun(t + h, y_new)``.\n\n References\n ----------\n .. [1] E. Hairer, S. P. Norsett G. Wanner, \"Solving Ordinary Differential\n Equations I: Nonstiff Problems\", Sec. 
II.4.\n \"\"\"\n K[0] = f\n for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):\n dy = np.dot(K[:s].T, a[:s]) * h\n K[s] = fun(t + c * h, y + dy)\n\n y_new = y + h * np.dot(K[:-1].T, B)\n f_new = fun(t + h, y_new)\n\n K[-1] = f_new\n\n return y_new, f_new\n\n\nclass RungeKutta(OdeSolver):\n \"\"\"Base class for explicit Runge-Kutta methods.\"\"\"\n C: np.ndarray = NotImplemented\n A: np.ndarray = NotImplemented\n B: np.ndarray = NotImplemented\n E: np.ndarray = NotImplemented\n P: np.ndarray = NotImplemented\n order: int = NotImplemented\n error_estimator_order: int = NotImplemented\n n_stages: int = NotImplemented\n\n def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,\n rtol=1e-3, atol=1e-6, vectorized=False,\n first_step=None, **extraneous):\n warn_extraneous(extraneous)\n super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,\n support_complex=True)\n self.y_old = None\n self.max_step = validate_max_step(max_step)\n self.rtol, self.atol = validate_tol(rtol, atol, self.n)\n self.f = self.fun(self.t, self.y)\n if first_step is None:\n self.h_abs = select_initial_step(\n self.fun, self.t, self.y, self.f, self.direction,\n self.error_estimator_order, self.rtol, self.atol)\n else:\n self.h_abs = validate_first_step(first_step, t0, t_bound)\n self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)\n self.error_exponent = -1 / (self.error_estimator_order + 1)\n self.h_previous = None\n\n def _estimate_error(self, K, h):\n return np.dot(K.T, self.E) * h\n\n def _estimate_error_norm(self, K, h, scale):\n return norm(self._estimate_error(K, h) / scale)\n\n def _step_impl(self):\n t = self.t\n y = self.y\n\n max_step = self.max_step\n rtol = self.rtol\n atol = self.atol\n\n min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)\n\n if self.h_abs > max_step:\n h_abs = max_step\n elif self.h_abs < min_step:\n h_abs = min_step\n else:\n h_abs = self.h_abs\n\n step_accepted = False\n step_rejected = False\n\n while not step_accepted:\n if h_abs < min_step:\n return False, self.TOO_SMALL_STEP\n\n h = h_abs * self.direction\n t_new = t + h\n\n if self.direction * (t_new - self.t_bound) > 0:\n t_new = self.t_bound\n\n h = t_new - t\n h_abs = np.abs(h)\n\n y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,\n self.B, self.C, self.K)\n scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol\n error_norm = self._estimate_error_norm(self.K, h, scale)\n\n if error_norm < 1:\n if error_norm == 0:\n factor = MAX_FACTOR\n else:\n factor = min(MAX_FACTOR,\n SAFETY * error_norm ** self.error_exponent)\n\n if step_rejected:\n factor = min(1, factor)\n\n h_abs *= factor\n\n step_accepted = True\n else:\n h_abs *= max(MIN_FACTOR,\n SAFETY * error_norm ** self.error_exponent)\n step_rejected = True\n\n self.h_previous = h\n self.y_old = y\n\n self.t = t_new\n self.y = y_new\n\n self.h_abs = h_abs\n self.f = f_new\n\n return True, None\n\n def _dense_output_impl(self):\n Q = self.K.T.dot(self.P)\n return RkDenseOutput(self.t_old, self.t, self.y_old, Q)\n\n\nclass RK23(RungeKutta):\n \"\"\"Explicit Runge-Kutta method of order 3(2).\n\n This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled\n assuming accuracy of the second-order method, but steps are taken using the\n third-order accurate formula (local extrapolation is done). A cubic Hermite\n polynomial is used for the dense output.\n\n Can be applied in the complex domain.\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system. 
The calling signature is ``fun(t, y)``.\n Here ``t`` is a scalar and there are two options for ndarray ``y``.\n It can either have shape (n,), then ``fun`` must return array_like with\n shape (n,). Or alternatively it can have shape (n, k), then ``fun``\n must return array_like with shape (n, k), i.e. each column\n corresponds to a single column in ``y``. The choice between the two\n options is determined by `vectorized` argument (see below).\n t0 : float\n Initial time.\n y0 : array_like, shape (n,)\n Initial state.\n t_bound : float\n Boundary time - the integration won't continue beyond it. It also\n determines the direction of the integration.\n first_step : float or None, optional\n Initial step size. Default is ``None`` which means that the algorithm\n should choose.\n max_step : float, optional\n Maximum allowed step size. Default is np.inf, i.e., the step size is not\n bounded and determined solely by the solver.\n rtol, atol : float and array_like, optional\n Relative and absolute tolerances. The solver keeps the local error\n estimates less than ``atol + rtol * abs(y)``. Here, `rtol` controls a\n relative accuracy (number of correct digits). But if a component of `y`\n is approximately below `atol`, the error only needs to fall within\n the same `atol` threshold, and the number of correct digits is not\n guaranteed. If components of y have different scales, it might be\n beneficial to set different `atol` values for different components by\n passing array_like with shape (n,) for `atol`. Default values are\n 1e-3 for `rtol` and 1e-6 for `atol`.\n vectorized : bool, optional\n Whether `fun` is implemented in a vectorized fashion. Default is False.\n\n Attributes\n ----------\n n : int\n Number of equations.\n status : string\n Current status of the solver: 'running', 'finished' or 'failed'.\n t_bound : float\n Boundary time.\n direction : float\n Integration direction: +1 or -1.\n t : float\n Current time.\n y : ndarray\n Current state.\n t_old : float\n Previous time. None if no steps were made yet.\n step_size : float\n Size of the last successful step. None if no steps were made yet.\n nfev : int\n Number evaluations of the system's right-hand side.\n njev : int\n Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.\n nlu : int\n Number of LU decompositions. Is always 0 for this solver.\n\n References\n ----------\n .. [1] P. Bogacki, L.F. Shampine, \"A 3(2) Pair of Runge-Kutta Formulas\",\n Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.\n \"\"\"\n order = 3\n error_estimator_order = 2\n n_stages = 3\n C = np.array([0, 1/2, 3/4])\n A = np.array([\n [0, 0, 0],\n [1/2, 0, 0],\n [0, 3/4, 0]\n ])\n B = np.array([2/9, 1/3, 4/9])\n E = np.array([5/72, -1/12, -1/9, 1/8])\n P = np.array([[1, -4 / 3, 5 / 9],\n [0, 1, -2/3],\n [0, 4/3, -8/9],\n [0, -1, 1]])\n\n\nclass RK45(RungeKutta):\n \"\"\"Explicit Runge-Kutta method of order 5(4).\n\n This uses the Dormand-Prince pair of formulas [1]_. The error is controlled\n assuming accuracy of the fourth-order method accuracy, but steps are taken\n using the fifth-order accurate formula (local extrapolation is done).\n A quartic interpolation polynomial is used for the dense output [2]_.\n\n Can be applied in the complex domain.\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system. 
The calling signature is ``fun(t, y)``.\n Here ``t`` is a scalar, and there are two options for the ndarray ``y``:\n It can either have shape (n,); then ``fun`` must return array_like with\n shape (n,). Alternatively it can have shape (n, k); then ``fun``\n must return an array_like with shape (n, k), i.e., each column\n corresponds to a single column in ``y``. The choice between the two\n options is determined by `vectorized` argument (see below).\n t0 : float\n Initial time.\n y0 : array_like, shape (n,)\n Initial state.\n t_bound : float\n Boundary time - the integration won't continue beyond it. It also\n determines the direction of the integration.\n first_step : float or None, optional\n Initial step size. Default is ``None`` which means that the algorithm\n should choose.\n max_step : float, optional\n Maximum allowed step size. Default is np.inf, i.e., the step size is not\n bounded and determined solely by the solver.\n rtol, atol : float and array_like, optional\n Relative and absolute tolerances. The solver keeps the local error\n estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a\n relative accuracy (number of correct digits). But if a component of `y`\n is approximately below `atol`, the error only needs to fall within\n the same `atol` threshold, and the number of correct digits is not\n guaranteed. If components of y have different scales, it might be\n beneficial to set different `atol` values for different components by\n passing array_like with shape (n,) for `atol`. Default values are\n 1e-3 for `rtol` and 1e-6 for `atol`.\n vectorized : bool, optional\n Whether `fun` is implemented in a vectorized fashion. Default is False.\n\n Attributes\n ----------\n n : int\n Number of equations.\n status : string\n Current status of the solver: 'running', 'finished' or 'failed'.\n t_bound : float\n Boundary time.\n direction : float\n Integration direction: +1 or -1.\n t : float\n Current time.\n y : ndarray\n Current state.\n t_old : float\n Previous time. None if no steps were made yet.\n step_size : float\n Size of the last successful step. None if no steps were made yet.\n nfev : int\n Number evaluations of the system's right-hand side.\n njev : int\n Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.\n nlu : int\n Number of LU decompositions. Is always 0 for this solver.\n\n References\n ----------\n .. [1] J. R. Dormand, P. J. Prince, \"A family of embedded Runge-Kutta\n formulae\", Journal of Computational and Applied Mathematics, Vol. 6,\n No. 1, pp. 19-26, 1980.\n .. [2] L. W. Shampine, \"Some Practical Runge-Kutta Formulas\", Mathematics\n of Computation,, Vol. 46, No. 173, pp. 
135-150, 1986.\n \"\"\"\n order = 5\n error_estimator_order = 4\n n_stages = 6\n C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])\n A = np.array([\n [0, 0, 0, 0, 0],\n [1/5, 0, 0, 0, 0],\n [3/40, 9/40, 0, 0, 0],\n [44/45, -56/15, 32/9, 0, 0],\n [19372/6561, -25360/2187, 64448/6561, -212/729, 0],\n [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]\n ])\n B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])\n E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,\n 1/40])\n # Corresponds to the optimum value of c_6 from [2]_.\n P = np.array([\n [1, -8048581381/2820520608, 8663915743/2820520608,\n -12715105075/11282082432],\n [0, 0, 0, 0],\n [0, 131558114200/32700410799, -68118460800/10900136933,\n 87487479700/32700410799],\n [0, -1754552775/470086768, 14199869525/1410260304,\n -10690763975/1880347072],\n [0, 127303824393/49829197408, -318862633887/49829197408,\n 701980252875 / 199316789632],\n [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],\n [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])\n\n\nclass DOP853(RungeKutta):\n \"\"\"Explicit Runge-Kutta method of order 8.\n\n This is a Python implementation of \"DOP853\" algorithm originally written\n in Fortran [1]_, [2]_. Note that this is not a literate translation, but\n the algorithmic core and coefficients are the same.\n\n Can be applied in the complex domain.\n\n Parameters\n ----------\n fun : callable\n Right-hand side of the system. The calling signature is ``fun(t, y)``.\n Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:\n It can either have shape (n,); then ``fun`` must return array_like with\n shape (n,). Alternatively it can have shape (n, k); then ``fun``\n must return an array_like with shape (n, k), i.e. each column\n corresponds to a single column in ``y``. The choice between the two\n options is determined by `vectorized` argument (see below).\n t0 : float\n Initial time.\n y0 : array_like, shape (n,)\n Initial state.\n t_bound : float\n Boundary time - the integration won't continue beyond it. It also\n determines the direction of the integration.\n first_step : float or None, optional\n Initial step size. Default is ``None`` which means that the algorithm\n should choose.\n max_step : float, optional\n Maximum allowed step size. Default is np.inf, i.e. the step size is not\n bounded and determined solely by the solver.\n rtol, atol : float and array_like, optional\n Relative and absolute tolerances. The solver keeps the local error\n estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a\n relative accuracy (number of correct digits). But if a component of `y`\n is approximately below `atol`, the error only needs to fall within\n the same `atol` threshold, and the number of correct digits is not\n guaranteed. If components of y have different scales, it might be\n beneficial to set different `atol` values for different components by\n passing array_like with shape (n,) for `atol`. Default values are\n 1e-3 for `rtol` and 1e-6 for `atol`.\n vectorized : bool, optional\n Whether `fun` is implemented in a vectorized fashion. Default is False.\n\n Attributes\n ----------\n n : int\n Number of equations.\n status : string\n Current status of the solver: 'running', 'finished' or 'failed'.\n t_bound : float\n Boundary time.\n direction : float\n Integration direction: +1 or -1.\n t : float\n Current time.\n y : ndarray\n Current state.\n t_old : float\n Previous time. 
None if no steps were made yet.\n step_size : float\n Size of the last successful step. None if no steps were made yet.\n nfev : int\n Number evaluations of the system's right-hand side.\n njev : int\n Number of evaluations of the Jacobian. Is always 0 for this solver\n as it does not use the Jacobian.\n nlu : int\n Number of LU decompositions. Is always 0 for this solver.\n\n References\n ----------\n .. [1] E. Hairer, S. P. Norsett G. Wanner, \"Solving Ordinary Differential\n Equations I: Nonstiff Problems\", Sec. II.\n .. [2] `Page with original Fortran code of DOP853\n <http://www.unige.ch/~hairer/software.html>`_.\n \"\"\"\n n_stages = dop853_coefficients.N_STAGES\n order = 8\n error_estimator_order = 7\n A = dop853_coefficients.A[:n_stages, :n_stages]\n B = dop853_coefficients.B\n C = dop853_coefficients.C[:n_stages]\n E3 = dop853_coefficients.E3\n E5 = dop853_coefficients.E5\n D = dop853_coefficients.D\n\n A_EXTRA = dop853_coefficients.A[n_stages + 1:]\n C_EXTRA = dop853_coefficients.C[n_stages + 1:]\n\n def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,\n rtol=1e-3, atol=1e-6, vectorized=False,\n first_step=None, **extraneous):\n super(DOP853, self).__init__(fun, t0, y0, t_bound, max_step,\n rtol, atol, vectorized, first_step,\n **extraneous)\n self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,\n self.n), dtype=self.y.dtype)\n self.K = self.K_extended[:self.n_stages + 1]\n\n def _estimate_error(self, K, h): # Left for testing purposes.\n err5 = np.dot(K.T, self.E5)\n err3 = np.dot(K.T, self.E3)\n denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))\n correction_factor = np.ones_like(err5)\n mask = denom > 0\n correction_factor[mask] = np.abs(err5[mask]) / denom[mask]\n return h * err5 * correction_factor\n\n def _estimate_error_norm(self, K, h, scale):\n err5 = np.dot(K.T, self.E5) / scale\n err3 = np.dot(K.T, self.E3) / scale\n err5_norm_2 = np.linalg.norm(err5)**2\n err3_norm_2 = np.linalg.norm(err3)**2\n if err5_norm_2 == 0 and err3_norm_2 == 0:\n return 0.0\n denom = err5_norm_2 + 0.01 * err3_norm_2\n return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))\n\n def _dense_output_impl(self):\n K = self.K_extended\n h = self.h_previous\n for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),\n start=self.n_stages + 1):\n dy = np.dot(K[:s].T, a[:s]) * h\n K[s] = self.fun(self.t_old + c * h, self.y_old + dy)\n\n F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),\n dtype=self.y_old.dtype)\n\n f_old = K[0]\n delta_y = self.y - self.y_old\n\n F[0] = delta_y\n F[1] = h * f_old - delta_y\n F[2] = 2 * delta_y - h * (self.f + f_old)\n F[3:] = h * np.dot(self.D, K)\n\n return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)\n\n\nclass RkDenseOutput(DenseOutput):\n def __init__(self, t_old, t, y_old, Q):\n super(RkDenseOutput, self).__init__(t_old, t)\n self.h = t - t_old\n self.Q = Q\n self.order = Q.shape[1] - 1\n self.y_old = y_old\n\n def _call_impl(self, t):\n x = (t - self.t_old) / self.h\n if t.ndim == 0:\n p = np.tile(x, self.order + 1)\n p = np.cumprod(p)\n else:\n p = np.tile(x, (self.order + 1, 1))\n p = np.cumprod(p, axis=0)\n y = self.h * np.dot(self.Q, p)\n if y.ndim == 2:\n y += self.y_old[:, None]\n else:\n y += self.y_old\n\n return y\n\n\nclass Dop853DenseOutput(DenseOutput):\n def __init__(self, t_old, t, y_old, F):\n super(Dop853DenseOutput, self).__init__(t_old, t)\n self.h = t - t_old\n self.F = F\n self.y_old = y_old\n\n def _call_impl(self, t):\n x = (t - self.t_old) / self.h\n\n if t.ndim == 0:\n y = 
np.zeros_like(self.y_old)\n else:\n x = x[:, None]\n y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)\n\n for i, f in enumerate(reversed(self.F)):\n y += f\n if i % 2 == 0:\n y *= x\n else:\n y *= 1 - x\n y += self.y_old\n\n return y.T\n"
] | [
[
"scipy.optimize.linprog",
"numpy.all",
"numpy.random.randn",
"numpy.zeros_like",
"numpy.testing.assert_equal",
"numpy.hstack",
"numpy.testing.suppress_warnings",
"numpy.arange",
"numpy.eye",
"numpy.sin",
"numpy.column_stack",
"numpy.zeros",
"numpy.identity",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.testing.assert_warns",
"numpy.sum",
"numpy.random.seed",
"numpy.intp",
"numpy.random.exponential",
"numpy.cos",
"numpy.ones",
"numpy.testing.assert_array_less",
"numpy.random.normal",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.ones_like",
"numpy.abs",
"numpy.linalg.norm",
"numpy.tile",
"numpy.nextafter",
"numpy.cumprod",
"numpy.zeros_like",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
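The rk.py source in the row above pairs rk_step with a proportional step-size controller. Below is a minimal runnable sketch of one adaptive Bogacki-Shampine 3(2) step, using the RK23 tableau from the row and a condensed version of the accept/reject logic in _step_impl; it is not scipy's public API, just an illustration under those assumptions.

import numpy as np

SAFETY, MIN_FACTOR, MAX_FACTOR = 0.9, 0.2, 10.0
C = np.array([0, 1/2, 3/4])                           # RK23 tableau from the row
A = np.array([[0, 0, 0], [1/2, 0, 0], [0, 3/4, 0]])
B = np.array([2/9, 1/3, 4/9])
E = np.array([5/72, -1/12, -1/9, 1/8])                # embedded 2nd-order error weights

def rk23_step(fun, t, y, f, h):
    K = np.empty((4, y.size))                         # stage derivatives; last row is f at t + h
    K[0] = f
    for s in range(1, 3):
        K[s] = fun(t + C[s] * h, y + h * (A[s, :s] @ K[:s]))
    y_new = y + h * (B @ K[:3])
    K[3] = fun(t + h, y_new)
    return y_new, K[3], h * (E @ K)                   # solution, new derivative, error estimate

fun = lambda t, y: -y                                 # test problem y' = -y, y(0) = 1
t, y, h, rtol, atol = 0.0, np.array([1.0]), 0.5, 1e-3, 1e-6
y_new, f_new, err = rk23_step(fun, t, y, fun(t, y), h)
scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
err_norm = np.linalg.norm(err / scale) / np.sqrt(err.size)    # RMS norm, as in common.norm
# err_norm is nonzero here; the full solver special-cases err_norm == 0
factor = min(MAX_FACTOR, max(MIN_FACTOR, SAFETY * err_norm ** (-1 / 3)))
print(y_new, err_norm, h * factor)                    # accept the step iff err_norm < 1

The exponent -1/3 is -1/(error_estimator_order + 1) for the embedded second-order estimate, matching error_exponent in the RungeKutta class above.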
RodrigoATorres/hermione | [
"c51f5e54a41609099eef48990c7ad7018dcdf41a"
] | [
"hermione/module_templates/__IMPLEMENTED_BASE__/src/predict.py"
] | [
"import pandas as pd\nimport io\nfrom joblib import load\nimport logging\n\nlogging.getLogger().setLevel(logging.INFO)\n\ndef generate_data():\n new_data = pd.DataFrame({\n 'Pclass':[3,2,1],\n 'Sex': ['male', 'female', 'male'],\n 'Age':[4, 22, 28]\n })\n return new_data\n\n\ndef load_model():\n try:\n return load('../output/titanic_model_rf.pkl')\n except:\n try: \n return load('../../output/titanic_model_rf.pkl')\n except:\n logging.error('Model not loaded')\n\n\ndef predict_new(X, probs=True):\n model = load_model()\n p = model.get_preprocessing()\n \n X = p.clean_data(X)\n X = p.categ_encoding(X)\n \n columns = model.get_columns()\n for col in columns:\n if col not in X.columns:\n X[col] = 0\n if probs:\n return model.predict_proba(X)[:,1]\n else:\n return model.predict(X)\n\n\n\nif __name__ == \"__main__\":\n df = generate_data()\n preds = predict_new(df, probs=True)\n logging.info(\"Predictions:\")\n print(preds)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
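A small illustration of the column-alignment idiom in predict_new above: any dummy-encoded feature missing from a new batch is added as a zero column so the matrix matches the layout the model saw at fit time. The train_columns list here is a hypothetical stand-in for model.get_columns().

import pandas as pd

train_columns = ['Pclass', 'Age', 'Sex_female', 'Sex_male']   # assumed fit-time layout
X = pd.get_dummies(pd.DataFrame({'Pclass': [3], 'Sex': ['male'], 'Age': [4]}),
                   columns=['Sex'])
for col in train_columns:
    if col not in X.columns:
        X[col] = 0            # 'Sex_female' never appears in this all-male batch
X = X[train_columns]          # also fixes column order before predict_proba
print(X)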
bee-hive/nested-policy-rl | [
"56b0be37ed814265cb3ef26ea0a1a62b5cd7f05c"
] | [
"tests/test_networks.py"
] | [
"import torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n# import sys\n# sys.path.append(\"../simulated_fqi/\")\nfrom simulated_fqi import NFQNetwork, ContrastiveNFQNetwork\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef train(x, y, groups, network, optimizer):\n\n predicted_q_values = network(x, groups).squeeze()\n loss = F.mse_loss(predicted_q_values, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return loss.item()\n\n# def test_contrastive_network():\n\n# # Setup agent\n# network = ContrastiveNFQNetwork(state_dim=0, is_contrastive=True, nonlinearity=nn.Identity)\n# optimizer = optim.Rprop(network.parameters())\n\n# # Generate data\n# n, m = 100, 100\n# beta_shared = -1\n# beta_fg = 2.1\n# x_bg, x_fg = np.linspace(-3, 3, m), np.linspace(-3, 3, n)\n# x = np.concatenate([x_bg, x_fg])\n# groups = np.concatenate([np.zeros(m), np.ones(n)])\n# y = beta_shared * x + beta_fg * groups * x# + np.random.normal(scale=0.5, size=m+n)\n\n# x = torch.FloatTensor(x).unsqueeze(1)\n# y = torch.FloatTensor(y)\n# groups = torch.FloatTensor(groups).unsqueeze(1)\n \n# for epoch in range(200):\n\n# loss = train(x, y, groups, network, optimizer)\n \n# # if epoch % 10 == 0:\n# # print(\"Epoch: {:4d}, Loss: {:4f}\".format(epoch, loss))\n\n# network.eval()\n# with torch.no_grad():\n# preds = network(x, groups)\n\n# assert np.allclose(preds.squeeze().numpy(), y.squeeze().numpy(), atol=1e-4)\n # plt.scatter(x, preds, c=groups)\n # plt.show()\n # import ipdb; ipdb.set_trace()\n \nif __name__ == \"__main__\":\n test_contrastive_network()\n"
] | [
[
"torch.nn.functional.mse_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
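train() above is the standard PyTorch update (forward pass, F.mse_loss, zero_grad/backward/step). A self-contained sketch of that loop with a plain nn.Linear standing in for the repo's ContrastiveNFQNetwork, and the same Rprop optimizer as the commented-out test:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.linspace(-3, 3, 100).unsqueeze(1)
y = -1.0 * x.squeeze()                             # beta_shared * x, as in the test data
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Rprop(model.parameters())

for epoch in range(200):
    predicted = model(x).squeeze()                 # forward
    loss = F.mse_loss(predicted, y)
    optimizer.zero_grad()                          # clear old gradients
    loss.backward()                                # backprop
    optimizer.step()                               # parameter update

print(loss.item())                                 # near zero once the line is fit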
SixHeo/IVOS-ATNet | [
"1cf574953a96bd680c518c6362b510fd103ff271"
] | [
"libs/utils_torch.py"
] | [
"import torch\n\ndef combine_masks_with_batch(masks, n_obj, th=0.5, return_as_onehot = False):\n \"\"\" Combine mask for different objects.\n\n Different methods are the following:\n\n * `max_per_pixel`: Computes the final mask taking the pixel with the highest\n probability for every object.\n\n # Arguments\n masks: Tensor with shape[B, nobj, H, W]. H, W on batches must be same\n method: String. Method that specifies how the masks are fused.\n\n # Returns\n [B, 1, H, W]\n \"\"\"\n\n # masks : B, nobj, h, w\n # output : h,w\n marker = torch.argmax(masks, dim=1, keepdim=True) #\n if not return_as_onehot:\n out_mask = torch.unsqueeze(torch.zeros_like(masks)[:,0],1) #[B, 1, H, W]\n for obj_id in range(n_obj):\n try :tmp_mask = (marker == obj_id) * (masks[:,obj_id].unsqueeze(1) > th)\n except: raise NotImplementedError\n out_mask[tmp_mask] = obj_id + 1 # [B, 1, H, W]\n\n if return_as_onehot:\n out_mask = torch.zeros_like(masks) # [B, nobj, H, W]\n for obj_id in range(n_obj):\n try :tmp_mask = (marker == obj_id) * (masks[:,obj_id].unsqueeze(1) > th)\n except: raise NotImplementedError\n out_mask[:, obj_id] = tmp_mask[:,0].type(torch.cuda.FloatTensor)\n\n return out_mask\n"
] | [
[
"torch.zeros_like",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
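combine_masks_with_batch above fuses per-object probability maps with an argmax-plus-threshold rule: a pixel is assigned to the object that wins the argmax, but only if that object's probability also clears th; everything else stays background (label 0). A two-pixel sketch of the non-onehot branch:

import torch

masks = torch.tensor([[[[0.9, 0.2]], [[0.3, 0.6]]]])   # [B=1, nobj=2, H=1, W=2]
marker = torch.argmax(masks, dim=1, keepdim=True)      # winning object per pixel
out = torch.zeros_like(masks[:, :1])                   # [B, 1, H, W], starts as background
for obj_id in range(masks.shape[1]):
    winner = (marker == obj_id) & (masks[:, obj_id:obj_id + 1] > 0.5)
    out[winner] = obj_id + 1                           # labels are 1-based
print(out)                                             # tensor([[[[1., 2.]]]])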
muchemwal/models | [
"49fd0a8a61b0e5dab196014bf47de7f62d97c884"
] | [
"tensorflow/super_resolution/syndicai.py"
] | [
"import os\nimport io\nimport time\nimport base64\nimport functools\n\nfrom PIL import Image\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nfrom helpers import *\nos.environ[\"TFHUB_DOWNLOAD_PROGRESS\"] = \"True\"\n\n\nclass PythonPredictor:\n\n def __init__(self, config):\n # Import TF-Hub module\n self.hub_module = hub.load(\"https://tfhub.dev/captain-pool/esrgan-tf2/1\")\n\n def predict(self, payload):\n # Preprocess image\n hr_image = preprocess_image(payload[\"image_b64\"])\n\n # Run model\n fake_image = self.hub_module(hr_image)\n\n # convert to base64\n img = get_image(tf.squeeze(fake_image))\n im_file = io.BytesIO()\n img.save(im_file, format=\"PNG\")\n im_bytes = base64.b64encode(im_file.getvalue()).decode(\"utf-8\")\n\n return im_bytes\n"
] | [
[
"tensorflow.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
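preprocess_image and get_image in the row above come from an unseen helpers module, so this sketch covers only the base64 PNG round trip that predict() performs on the model output; the solid red image is a stand-in for the super-resolved result:

import io, base64
from PIL import Image

img = Image.new('RGB', (8, 8), color=(255, 0, 0))         # stand-in for get_image(...)
buf = io.BytesIO()
img.save(buf, format='PNG')
b64 = base64.b64encode(buf.getvalue()).decode('utf-8')    # what predict() returns
restored = Image.open(io.BytesIO(base64.b64decode(b64)))  # client-side decoding
print(len(b64), restored.size)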
Ricechrispi/sc2_academy | [
"9ffed467fe019262035ac61d10c5cc3ee64a7bb2"
] | [
"sc2_academy/ppo/my_epsilon_greedy_policy.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# ------------------------------------------------------------------------------------------\n# DISCLAIMER: This is just a slightly adjusted version of the EpsilonGreedyPolicy in TF-Agents.\n# Most of the code here is directly copied from there.\n# I changed it such that the policy in the epsilon case is not random, but sampled from\n# the original policy distribution.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"Policy implementation that generates epsilon-greedy actions from a policy.\n\nTODO(kbanoop): Make policy state optional in the action method.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Using Type Annotations.\nfrom __future__ import print_function\n\nfrom typing import Optional, Text\n\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nimport tensorflow_probability as tfp\n\nfrom tf_agents.bandits.policies import policy_utilities\nfrom tf_agents.policies import greedy_policy\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.typing import types\nfrom tf_agents.utils import nest_utils\n\ntfd = tfp.distributions\n\n\nclass EpsilonGreedyPolicy(tf_policy.TFPolicy):\n \"\"\"Returns epsilon-greedy samples of a given policy.\"\"\"\n\n def __init__(self,\n policy: tf_policy.TFPolicy,\n epsilon: types.FloatOrReturningFloat,\n name: Optional[Text] = None):\n \"\"\"Builds an epsilon-greedy MixturePolicy wrapping the given policy.\n\n Args:\n policy: A policy implementing the tf_policy.TFPolicy interface.\n epsilon: The probability of taking the random action represented as a\n float scalar, a scalar Tensor of shape=(), or a callable that returns a\n float scalar or Tensor.\n name: The name of this policy.\n\n Raises:\n ValueError: If epsilon is invalid.\n \"\"\"\n try:\n observation_and_action_constraint_splitter = (\n policy.observation_and_action_constraint_splitter)\n except AttributeError:\n observation_and_action_constraint_splitter = None\n try:\n accepts_per_arm_features = policy.accepts_per_arm_features\n except AttributeError:\n accepts_per_arm_features = False\n self._greedy_policy = greedy_policy.GreedyPolicy(policy)\n self._epsilon = epsilon\n self._epsilon_policy = self._greedy_policy.wrapped_policy # this is my main change from the original code\n super(EpsilonGreedyPolicy, self).__init__(\n policy.time_step_spec,\n policy.action_spec,\n policy.policy_state_spec,\n policy.info_spec,\n emit_log_probability=policy.emit_log_probability,\n observation_and_action_constraint_splitter=(\n observation_and_action_constraint_splitter),\n name=name)\n\n @property\n def wrapped_policy(self) -> tf_policy.TFPolicy:\n return self._greedy_policy.wrapped_policy\n\n def _variables(self):\n return self._greedy_policy.variables()\n\n def _get_epsilon(self):\n if callable(self._epsilon):\n return 
self._epsilon()\n else:\n return self._epsilon\n\n def _action(self, time_step, policy_state, seed):\n seed_stream = tfp.util.SeedStream(seed=seed, salt='epsilon_greedy')\n greedy_action = self._greedy_policy.action(time_step, policy_state)\n epsilon_action = self._epsilon_policy.action(time_step, (), seed_stream())\n\n outer_shape = nest_utils.get_outer_shape(time_step, self._time_step_spec)\n rng = tf.random.uniform(\n outer_shape, maxval=1.0, seed=seed_stream(), name='epsilon_rng')\n cond = tf.greater(rng, self._get_epsilon())\n\n # Selects the action/info from the random policy with probability epsilon.\n # TODO(b/133175894): tf.compat.v1.where only supports a condition which is\n # either a scalar or a vector. Use tf.compat.v2 so that it can support any\n # condition whose leading dimensions are the same as the other operands of\n # tf.where.\n outer_ndims = int(outer_shape.shape[0])\n if outer_ndims >= 2:\n raise ValueError(\n 'Only supports batched time steps with a single batch dimension')\n action = tf.nest.map_structure(lambda g, r: tf.compat.v1.where(cond, g, r),\n greedy_action.action, epsilon_action.action)\n\n if greedy_action.info:\n if not epsilon_action.info:\n raise ValueError('Incompatible info field')\n # Note that the objects in PolicyInfo may have different shapes, so we\n # need to call nest_utils.where() on each type of object.\n info = tf.nest.map_structure(lambda x, y: nest_utils.where(cond, x, y),\n greedy_action.info, epsilon_action.info)\n if self._emit_log_probability:\n # At this point, info.log_probability contains the log prob of the\n # action chosen, conditioned on the policy that was chosen. We want to\n # emit the full log probability of the action, so we'll add in the log\n # probability of choosing the policy.\n random_log_prob = tf.nest.map_structure(\n lambda t: tf.math.log(tf.zeros_like(t) + self._get_epsilon()),\n info.log_probability)\n greedy_log_prob = tf.nest.map_structure(\n lambda t: tf.math.log(tf.ones_like(t) - self._get_epsilon()),\n random_log_prob)\n log_prob_of_chosen_policy = nest_utils.where(cond, greedy_log_prob,\n random_log_prob)\n log_prob = tf.nest.map_structure(lambda a, b: a + b,\n log_prob_of_chosen_policy,\n info.log_probability)\n info = policy_step.set_log_probability(info, log_prob)\n # Overwrite bandit policy info type.\n if policy_utilities.has_bandit_policy_type(info, check_for_tensor=True):\n # Generate mask of the same shape as bandit_policy_type (batch_size, 1).\n # This is the opposite of `cond`, which is 1-D bool tensor (batch_size,)\n # that is true when greedy policy was used, otherwise `cond` is false.\n random_policy_mask = tf.reshape(tf.logical_not(cond),\n tf.shape(info.bandit_policy_type))\n bandit_policy_type = policy_utilities.bandit_policy_uniform_mask(\n info.bandit_policy_type, mask=random_policy_mask)\n info = policy_utilities.set_bandit_policy_type(\n info, bandit_policy_type)\n else:\n if epsilon_action.info:\n raise ValueError('Incompatible info field')\n info = ()\n\n # The state of the epsilon greedy policy is the state of the underlying\n # greedy policy (the random policy carries no state).\n # It is commonly assumed that the new policy state only depends only\n # on the previous state and \"time_step\", the action (be it the greedy one\n # or the random one) does not influence the new policy state.\n state = greedy_action.state\n\n return policy_step.PolicyStep(action, state, info)\n\n def _distribution(self, time_step, policy_state):\n raise NotImplementedError(\n 'EpsilonGreedyPolicy does 
not support distributions yet.')\n"
] | [
[
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"tensorflow.logical_not",
"tensorflow.nest.map_structure",
"tensorflow.compat.v1.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
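Stripped of the TF-Agents plumbing, the selection rule in _action above is a per-element mixture: keep the greedy action wherever a uniform draw exceeds epsilon, otherwise take the action sampled from the wrapped policy. A library-free numpy sketch with illustrative action values:

import numpy as np

rng = np.random.default_rng(0)
epsilon = 0.3
greedy_actions = np.array([2, 2, 2, 2, 2])     # argmax actions for a batch of 5
sampled_actions = rng.integers(0, 4, size=5)   # stand-in for policy-distribution samples
cond = rng.uniform(size=5) > epsilon           # True -> act greedily (cf. tf.compat.v1.where)
actions = np.where(cond, greedy_actions, sampled_actions)
print(actions)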
ronnith24/NeuralNetworksFromScratch | [
"5c831de8954a4b84fef7b70b16f9d9e6c1cb24b9"
] | [
"NeuralNetwork.py"
] | [
"import numpy as np\n\nclass NeuralNetwork(object):\n def __init__(self, topology, epsilon, numLabels):\n self.theta = []\n self.topology = topology\n self.numLabels = numLabels\n self.gradientChecking = False\n for layer in range(len(self.topology)):\n if layer == 0:\n continue\n self.theta.append(np.random.rand(self.topology[layer], self.topology[layer - 1] + 1) * 2 * epsilon - epsilon)\n \n \n def gradientDescent(self, iters, alpha, lamda, X, Y):\n self.X = X\n self.Y = Y\n for i in range(iters):\n (J, thetaGrad) = self.getCostAndGradient(lamda)\n # gradient checking\n if self.gradientChecking:\n thetaCopy = self.theta.copy()\n for i in range(len(self.topology) - 1):\n for j in range(self.topology[i + 1]):\n for k in range(self.topology[i]):\n EPS = 0.00001\n self.theta[i][j, k] += EPS\n J2 = self.getCostAndGradient(lamda)[0]\n self.theta[i][j, k] -= 2 * EPS\n J1 = self.getCostAndGradient(lamda)[0]\n print(str((J2 - J1) / (2 * EPS) - thetaGrad[i][j, k]))\n self.theta = thetaCopy\n # end\n for layer in range(len(self.topology) - 1):\n self.theta[layer] -= thetaGrad[layer] * alpha\n print(\"Iter \" + str(i) + \": \" + str(J))\n \n \n def predict(self, x):\n x = x.reshape((x.shape[0], 1))\n x = np.concatenate(([[1]], x))\n for layer in range(1, len(self.topology)):\n x = np.matmul(self.theta[layer - 1], x)\n for i in range(x.shape[0]):\n x[i, 0] = self.sigmoid(x[i, 0])\n if layer != len(self.topology) - 1:\n x = np.concatenate(([[1]], x))\n \n prediction = -1\n predictionSurety = -1\n for i in range(self.numLabels):\n if x[i, 0] > predictionSurety:\n prediction = i\n predictionSurety = x[i, 0]\n \n return prediction\n \n \n def getCostAndGradient(self, lamda):\n J = 0\n thetaGrad = []\n for layer in range(len(self.topology)):\n if layer == 0:\n continue\n thetaGrad.append(np.zeros((self.topology[layer], self.topology[layer - 1] + 1)))\n \n m = self.X.shape[0]\n for example in range(m):\n x = self.X[example].copy()\n x = x.reshape((x.shape[0], 1))\n y = np.zeros(self.numLabels)\n y[self.Y[example]] = 1\n y = y.reshape((y.shape[0], 1))\n a = []\n z = []\n delta = []\n \n for layer in range(len(self.topology)):\n if layer == 0:\n a.append(np.concatenate(([[1]], x)))\n z.append(np.concatenate(([[1]], x)))\n delta.append(0)\n continue\n z.append(np.matmul(self.theta[layer - 1], a[layer - 1]))\n a.append(z[layer].copy())\n for i in range(self.topology[layer]):\n a[layer][i, 0] = self.sigmoid(a[layer][i, 0])\n if layer != len(self.topology) - 1:\n a[layer] = np.concatenate(([[1]], a[layer]))\n z[layer] = np.concatenate(([[1]], z[layer]))\n delta.append(0)\n \n for layer in range(len(self.topology) - 1, 0, -1):\n if layer == len(self.topology) - 1:\n delta[layer] = a[layer] - y\n thetaGrad[layer - 1] += np.matmul(delta[layer], a[layer - 1].transpose())\n continue\n \n sigDerZ = z[layer].copy()\n for i in range(self.topology[layer] + 1):\n sigDerZ[i] = self.sigmoidDerivative(sigDerZ[i])\n \n if layer >= len(self.topology) - 2:\n delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1]) * sigDerZ\n else:\n delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1][1:, :]) * sigDerZ\n \n thetaGrad[layer - 1] += np.matmul(delta[layer][1:, :], a[layer - 1].transpose())\n \n J += np.sum(-(1 - y) * np.log(1 - a[len(self.topology) - 1])) - np.sum(y * np.log(a[len(self.topology) - 1]))\n \n J /= m\n \n for layer in range(len(self.topology) - 1):\n thetaGrad[layer] *= (1 / m)\n \n for i in range(len(self.topology) - 1):\n for j in range(self.topology[i + 1]):\n for k in 
range(1, self.topology[i]):\n J += (lamda / (2 * m)) * self.theta[i][j, k] ** 2\n thetaGrad[i][j, k] += (lamda / m) * self.theta[i][j, k]\n \n return (J, thetaGrad)\n \n \n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n \n \n def sigmoidDerivative(self, x):\n sig = self.sigmoid(x)\n return sig * (1 - sig)"
] | [
[
"numpy.matmul",
"numpy.concatenate",
"numpy.random.rand",
"numpy.exp",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
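The gradientChecking block in gradientDescent above validates backprop with central differences: nudge one parameter by +/-EPS, recompute the cost, and compare (J2 - J1) / (2 * EPS) with the analytic gradient. The same check on a toy cost whose gradient is known exactly:

import numpy as np

def J(theta):                         # toy cost standing in for the network cost
    return np.sum(theta ** 2)

theta = np.array([0.3, -1.2])
analytic = 2 * theta                  # exact gradient of the sum of squares
EPS = 1e-5
numeric = np.empty_like(theta)
for i in range(theta.size):
    theta[i] += EPS
    J2 = J(theta)
    theta[i] -= 2 * EPS
    J1 = J(theta)
    theta[i] += EPS                   # restore the parameter
    numeric[i] = (J2 - J1) / (2 * EPS)

print(np.max(np.abs(numeric - analytic)))   # floating-point noise only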
swcho84/image-segmentation | [
"ef9b9b3d832e9efe6f43522cc5ca0e17279d6608"
] | [
"image-segmentation/data_generators/kitti/kitti_dataset.py"
] | [
"from collections import namedtuple\n\nimport os\nimport json\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom data_generators.utils import load_image_rgb\n\n# Copied from: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py\n#\n# Cityscapes labels\n#\n#--------------------------------------------------------------------------------\n# Definitions\n#--------------------------------------------------------------------------------\n\n# a label and all meta information\nLabel = namedtuple( 'Label' , [\n\n 'name' , # The identifier of this label, e.g. 'car', 'person', ... .\n # We use them to uniquely name a class\n\n 'id' , # An integer ID that is associated with this label.\n # The IDs are used to represent the label in ground truth images\n # An ID of -1 means that this label does not have an ID and thus\n # is ignored when creating ground truth images (e.g. license plate).\n # Do not modify these IDs, since exactly these IDs are expected by the\n # evaluation server.\n\n 'trainId' , # Feel free to modify these IDs as suitable for your method. Then create\n # ground truth images with train IDs, using the tools provided in the\n # 'preparation' folder. However, make sure to validate or submit results\n # to our evaluation server using the regular IDs above!\n # For trainIds, multiple labels might have the same ID. Then, these labels\n # are mapped to the same class in the ground truth images. For the inverse\n # mapping, we use the label that is defined first in the list below.\n # For example, mapping all void-type classes to the same ID in training,\n # might make sense for some approaches.\n # Max value is 255!\n\n 'category' , # The name of the category that this label belongs to\n\n 'categoryId' , # The ID of this category. 
Used to create ground truth images\n # on category level.\n\n 'hasInstances', # Whether this label distinguishes between single instances or not\n\n 'ignoreInEval', # Whether pixels having this class as ground truth label are ignored\n # during evaluations or not\n\n 'color' , # The color of this label\n ] )\n\n\ndef label2dict(label):\n return {\n 'name': label.name, 'id': label.id, 'trainId': label.trainId,\n 'category': label.category, 'catId': label.categoryId, 'hasInstances': label.hasInstances,\n 'ignoreInEval': label.ignoreInEval, 'color': label.color\n }\n\n\ndef save_labels(labels, fpath):\n l = []\n for label in labels:\n l.append(label2dict(label))\n\n fp = open(fpath, 'w')\n json.dump(l, fp)\n fp.close()\n\n\ndef load_labels(fpath):\n fp = open(fpath, 'r')\n l = json.load(fp)\n fp.close()\n labels = []\n for item in l:\n labels.append(\n Label(\n item['name'], item['id'], item['trainId'],\n item['category'], item['catId'], item['hasInstances'],\n item['ignoreInEval'], tuple(item['color']))\n )\n return labels\n\n\nclass KittiDataset:\n def __init__(self):\n self.image_ids = []\n\n def load_kitti(self, dataset_dir, subset, tag='simple'):\n 'Initialization'\n assert subset in ['train', 'val'], 'subset must be either train or val but {} is given'.format(subset)\n\n self.labels = load_labels(os.path.join(dataset_dir, 'annotations', 'semantic_{}.json'.format(tag)))\n\n # trainId to colors\n self.trainId2colors = {label.trainId: [] for label in self.labels}\n for label in self.labels:\n self.trainId2colors[label.trainId].append(label.color)\n # trainId to name\n self.trainId2name = {label.trainId: label.name for label in self.labels}\n\n # number of valid trainIds + background class\n self.num_classes = max([label.trainId for label in self.labels if label.trainId >= 0 and label.trainId < 255]) + 2\n self.class_names = [self.trainId2name[i] for i in range(self.num_classes - 1)]\n\n self.image_dir = os.path.join(dataset_dir, subset, 'images')\n self.label_dir = os.path.join(dataset_dir, subset, 'semantic_rgb')\n\n assert os.path.exists(self.image_dir), 'No such directory: {}'.format(self.image_dir)\n assert os.path.exists(self.label_dir), 'No such directory: {}'.format(self.label_dir)\n\n self.image_files = sorted([x for x in os.listdir(self.image_dir) if x.lower().endswith('.png') or x.lower().endswith('.jpg')])\n self.label_files = sorted([x for x in os.listdir(self.label_dir) if x.lower().endswith('.png')])\n\n assert len(self.image_files) == len(self.label_files), \\\n 'image - label size mismatch! 
There are {} image files and {} label files'.format(len(self.image_files), len(self.label_files))\n\n self.num_images = len(self.image_files)\n self.image_ids = np.arange(self.num_images)\n\n def check_sanity(self):\n for i in tqdm(self.image_ids):\n assert self.image_files[i][:-4] == self.label_files[i][:-4],\\\n 'image - label filename mismatch: {} - {}'.format(self.image_files[i], self.label_files[i])\n img = load_image_rgb(os.path.join(self.image_dir, self.image_files[i]))\n msk = load_image_rgb(os.path.join(self.label_dir, self.label_files[i]))\n assert img.shape == msk.shape,\\\n 'img.shape: {}, msk.shape: {}'.format(img.shape, msk.shape)\n\n def load_image(self, image_id):\n return load_image_rgb(os.path.join(self.image_dir, self.image_files[image_id]))\n\n def load_mask(self, image_id):\n rgb_mask = load_image_rgb(os.path.join(self.label_dir, self.label_files[image_id]))\n mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1], self.num_classes - 1))\n for cls in range(self.num_classes - 1):\n colors = self.trainId2colors[cls]\n cls_mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1]))\n for color in colors:\n cls_mask = np.logical_or(cls_mask, (rgb_mask == color).all(axis=2))\n mask[:,:,cls] = cls_mask\n return mask\n"
] | [
[
"numpy.arange",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
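load_mask above turns an RGB semantic image into per-class boolean planes by matching every pixel against each class's color list. The same conversion on a two-pixel image, with an assumed two-color palette in place of the row's trainId2colors:

import numpy as np

trainId2colors = {0: [(128, 64, 128)], 1: [(220, 20, 60)]}   # hypothetical palette
rgb = np.array([[[128, 64, 128], [220, 20, 60]]])            # shape (H=1, W=2, 3)
mask = np.zeros(rgb.shape[:2] + (len(trainId2colors),), dtype=bool)
for cls, colors in trainId2colors.items():
    for color in colors:
        mask[:, :, cls] |= (rgb == color).all(axis=2)        # pixel matches this color
print(mask[0, 0], mask[0, 1])                                # [ True False] [False  True]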
pearcandy/pennylane | [
"dfa35989cd0798496e41999a197bcf0eb26185df",
"dfa35989cd0798496e41999a197bcf0eb26185df"
] | [
"tests/devices/test_default_qubit_jax.py",
"tests/qnodes/test_qnode_base.py"
] | [
"import pytest\r\n\r\njax = pytest.importorskip(\"jax\", minversion=\"0.2\")\r\njnp = jax.numpy\r\nimport numpy as np\r\nimport pennylane as qml\r\nfrom pennylane.devices.default_qubit_jax import DefaultQubitJax\r\n\r\npytestmark = pytest.mark.usefixtures(\"tape_mode\")\r\n\r\n\r\nclass TestQNodeIntegration:\r\n \"\"\"Integration tests for default.qubit.jax. This test ensures it integrates\r\n properly with the PennyLane UI, in particular the new QNode.\"\"\"\r\n\r\n def test_defines_correct_capabilities(self):\r\n \"\"\"Test that the device defines the right capabilities\"\"\"\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n cap = dev.capabilities()\r\n capabilities = {\r\n \"model\": \"qubit\",\r\n \"supports_finite_shots\": True,\r\n \"supports_tensor_observables\": True,\r\n \"returns_probs\": True,\r\n \"returns_state\": True,\r\n \"supports_reversible_diff\": False,\r\n \"supports_inverse_operations\": True,\r\n \"supports_analytic_computation\": True,\r\n \"passthru_interface\": \"jax\",\r\n }\r\n assert cap == capabilities\r\n\r\n def test_defines_correct_capabilities_directly_from_class(self):\r\n \"\"\"Test that the device defines the right capabilities\"\"\"\r\n\r\n dev = DefaultQubitJax(wires=1)\r\n cap = dev.capabilities()\r\n assert cap[\"supports_reversible_diff\"] == False\r\n assert cap[\"passthru_interface\"] == \"jax\"\r\n\r\n def test_load_device(self):\r\n \"\"\"Test that the plugin device loads correctly\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n assert dev.num_wires == 2\r\n assert dev.shots == 1000\r\n assert dev.analytic\r\n assert dev.short_name == \"default.qubit.jax\"\r\n assert dev.capabilities()[\"passthru_interface\"] == \"jax\"\r\n\r\n def test_qubit_circuit(self, tol):\r\n \"\"\"Test that the device provides the correct\r\n result for a simple circuit.\"\"\"\r\n p = jnp.array(0.543)\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n\r\n @qml.qnode(dev, interface=\"jax\")\r\n def circuit(x):\r\n qml.RX(x, wires=0)\r\n return qml.expval(qml.PauliY(0))\r\n\r\n expected = -jnp.sin(p)\r\n if not qml.tape_mode_active():\r\n assert isinstance(circuit, qml.qnodes.PassthruQNode)\r\n assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)\r\n\r\n def test_qubit_circuit_with_jit(self, tol):\r\n \"\"\"Test that the device provides the correct\r\n result for a simple circuit under a jax.jit.\"\"\"\r\n p = jnp.array(0.543)\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n\r\n @jax.jit\r\n @qml.qnode(dev, interface=\"jax\")\r\n def circuit(x):\r\n qml.RX(x, wires=0)\r\n return qml.expval(qml.PauliY(0))\r\n\r\n expected = -jnp.sin(p)\r\n # Do not test isinstance here since the @jax.jit changes the function\r\n # type.\r\n # Just test that it works and spits our the right value.\r\n assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)\r\n\r\n def test_correct_state(self, tol):\r\n \"\"\"Test that the device state is correct after applying a\r\n quantum function on the device\"\"\"\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n state = dev.state\r\n expected = jnp.array([1, 0, 0, 0])\r\n assert jnp.allclose(state, expected, atol=tol, rtol=0)\r\n\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def circuit():\r\n qml.Hadamard(wires=0)\r\n qml.RZ(jnp.pi / 4, wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n circuit()\r\n state = dev.state\r\n\r\n amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)\r\n\r\n expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])\r\n 
assert jnp.allclose(state, expected, atol=tol, rtol=0)\r\n\r\n def test_correct_state_returned(self, tol):\r\n \"\"\"Test that the device state is correct after applying a\r\n quantum function on the device\"\"\"\r\n if not qml.tape_mode_active():\r\n pytest.skip(\"Only supported in tape mode\")\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def circuit():\r\n qml.Hadamard(wires=0)\r\n qml.RZ(jnp.pi / 4, wires=0)\r\n return qml.state()\r\n\r\n state = circuit()\r\n\r\n amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)\r\n\r\n expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])\r\n assert jnp.allclose(state, expected, atol=tol, rtol=0)\r\n\r\n def test_sampling_with_jit(self):\r\n \"\"\"Test that sampling works with a jax.jit\"\"\"\r\n @jax.jit\r\n def circuit(key):\r\n dev = qml.device(\"default.qubit.jax\", wires=1, prng_key=key)\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def inner_circuit():\r\n qml.Hadamard(0)\r\n return qml.sample(qml.PauliZ(wires=0))\r\n return inner_circuit()\r\n\r\n a = circuit(jax.random.PRNGKey(0))\r\n b = circuit(jax.random.PRNGKey(0))\r\n c = circuit(jax.random.PRNGKey(1))\r\n np.testing.assert_array_equal(a, b)\r\n assert not np.all(a == c)\r\n\r\n def test_sampling_op_by_op(self):\r\n \"\"\"Test that op-by-op sampling works as a new user would expect\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def circuit():\r\n qml.Hadamard(0)\r\n return qml.sample(qml.PauliZ(wires=0))\r\n\r\n a = circuit()\r\n b = circuit()\r\n assert not np.all(a == b)\r\n\r\n def test_gates_dont_crash(self):\r\n \"\"\"Test for gates that weren't covered by other tests. 
\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def circuit():\r\n qml.CRZ(0.0, wires=[0, 1])\r\n qml.CRot(1.0, 0.0, 0.0, wires=[0, 1])\r\n qml.CRY(0.0, wires=[0, 1])\r\n return qml.sample(qml.PauliZ(wires=0))\r\n circuit() # Just don't crash.\r\n\r\n def test_diagonal_doesnt_crash(self):\r\n \"\"\"Test that diagonal gates can be used.\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def circuit():\r\n qml.DiagonalQubitUnitary(np.array([1.0, 1.0]), wires=0)\r\n return qml.sample(qml.PauliZ(wires=0))\r\n circuit() # Just don't crash.\r\n \r\n\r\nclass TestPassthruIntegration:\r\n \"\"\"Tests for integration with the PassthruQNode\"\"\"\r\n\r\n @pytest.mark.parametrize(\"jacobian_transform\", [jax.jacfwd, jax.jacrev])\r\n def test_jacobian_variable_multiply(self, tol, jacobian_transform):\r\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.jax device\r\n gives the correct result in the case of parameters multiplied by scalars\"\"\"\r\n x = 0.43316321\r\n y = 0.2162158\r\n z = 0.75110998\r\n weights = jnp.array([x, y, z])\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n\r\n @qml.qnode(dev, interface=\"jax\")\r\n def circuit(p):\r\n qml.RX(3 * p[0], wires=0)\r\n qml.RY(p[1], wires=0)\r\n qml.RX(p[2] / 2, wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n if not qml.tape_mode_active():\r\n assert isinstance(circuit, qml.qnodes.PassthruQNode)\r\n res = circuit(weights)\r\n\r\n expected = jnp.cos(3 * x) * jnp.cos(y) * jnp.cos(z / 2) - jnp.sin(3 * x) * jnp.sin(z / 2)\r\n assert jnp.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n grad_fn = jacobian_transform(circuit, 0)\r\n res = grad_fn(jnp.array(weights))\r\n\r\n expected = jnp.array(\r\n [\r\n -3\r\n * (jnp.sin(3 * x) * jnp.cos(y) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.sin(z / 2)),\r\n -jnp.cos(3 * x) * jnp.sin(y) * jnp.cos(z / 2),\r\n -0.5\r\n * (jnp.sin(3 * x) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.cos(y) * jnp.sin(z / 2)),\r\n ]\r\n )\r\n\r\n assert jnp.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"jacobian_transform\", [jax.jacfwd, jax.jacrev])\r\n def test_jacobian_repeated(self, tol, jacobian_transform):\r\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.jax device\r\n gives the correct result in the case of repeated parameters\"\"\"\r\n x = 0.43316321\r\n y = 0.2162158\r\n z = 0.75110998\r\n p = jnp.array([x, y, z])\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n\r\n @qml.qnode(dev, interface=\"jax\")\r\n def circuit(x):\r\n qml.RX(x[1], wires=0)\r\n qml.Rot(x[0], x[1], x[2], wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n res = circuit(p)\r\n\r\n expected = jnp.cos(y) ** 2 - jnp.sin(x) * jnp.sin(y) ** 2\r\n assert jnp.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n grad_fn = jacobian_transform(circuit, 0)\r\n res = grad_fn(p)\r\n\r\n expected = jnp.array(\r\n [-jnp.cos(x) * jnp.sin(y) ** 2, -2 * (jnp.sin(x) + 1) * jnp.sin(y) * jnp.cos(y), 0]\r\n )\r\n assert jnp.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_state_differentiability(self, tol):\r\n \"\"\"Test that the device state can be differentiated\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"jax\")\r\n def circuit(a):\r\n qml.RY(a, wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n a = jnp.array(0.54)\r\n\r\n def cost(a):\r\n \"\"\"A 
function of the device quantum state, as a function\r\n of ijnput QNode parameters.\"\"\"\r\n circuit(a)\r\n res = jnp.abs(dev.state) ** 2\r\n return res[1] - res[0]\r\n\r\n grad = jax.grad(cost)(a)\r\n expected = jnp.sin(a)\r\n assert jnp.allclose(grad, expected, atol=tol, rtol=0)\r\n\r\n def test_prob_differentiability(self, tol):\r\n \"\"\"Test that the device probability can be differentiated\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"jax\")\r\n def circuit(a, b):\r\n qml.RX(a, wires=0)\r\n qml.RY(b, wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.probs(wires=[1])\r\n\r\n a = jnp.array(0.54)\r\n b = jnp.array(0.12)\r\n\r\n def cost(a, b):\r\n prob_wire_1 = circuit(a, b).squeeze()\r\n return prob_wire_1[1] - prob_wire_1[0]\r\n\r\n res = cost(a, b)\r\n expected = -jnp.cos(a) * jnp.cos(b)\r\n assert jnp.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n grad = jax.jit(jax.grad(cost, argnums=(0, 1)))(a, b)\r\n expected = [jnp.sin(a) * jnp.cos(b), jnp.cos(a) * jnp.sin(b)]\r\n assert jnp.allclose(grad, expected, atol=tol, rtol=0)\r\n\r\n def test_backprop_gradient(self, tol):\r\n \"\"\"Tests that the gradient of the qnode is correct\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"jax\")\r\n def circuit(a, b):\r\n qml.RX(a, wires=0)\r\n qml.CRX(b, wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\r\n\r\n a = jnp.array(-0.234)\r\n b = jnp.array(0.654)\r\n\r\n res = circuit(a, b)\r\n expected_cost = 0.5 * (jnp.cos(a) * jnp.cos(b) + jnp.cos(a) - jnp.cos(b) + 1)\r\n assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)\r\n res = jax.grad(lambda x, y: circuit(x, y).reshape(()), argnums=(0, 1))(a, b)\r\n expected_grad = jnp.array(\r\n [-0.5 * jnp.sin(a) * (jnp.cos(b) + 1), 0.5 * jnp.sin(b) * (1 - jnp.cos(a))]\r\n )\r\n assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"operation\", [qml.U3, qml.U3.decomposition])\r\n @pytest.mark.parametrize(\"diff_method\", [\"backprop\"])\r\n def test_jax_interface_gradient(self, operation, diff_method, tol):\r\n \"\"\"Tests that the gradient of an arbitrary U3 gate is correct\r\n using the Jax interface, using a variety of differentiation methods.\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n\r\n @qml.qnode(dev, diff_method=diff_method, interface=\"jax\")\r\n def circuit(x, weights, w=None):\r\n \"\"\"In this example, a mixture of scalar\r\n arguments, array arguments, and keyword arguments are used.\"\"\"\r\n qml.QubitStateVector(1j * jnp.array([1, -1]) / jnp.sqrt(2), wires=w)\r\n operation(x, weights[0], weights[1], wires=w)\r\n return qml.expval(qml.PauliX(w))\r\n\r\n # Check that the correct QNode type is being used.\r\n if not qml.tape_mode_active():\r\n if diff_method == \"backprop\":\r\n assert isinstance(circuit, qml.qnodes.PassthruQNode)\r\n assert not hasattr(circuit, \"jacobian\")\r\n else:\r\n assert not isinstance(circuit, qml.qnodes.PassthruQNode)\r\n assert hasattr(circuit, \"jacobian\")\r\n\r\n def cost(params):\r\n \"\"\"Perform some classical processing\"\"\"\r\n return (circuit(params[0], params[1:], w=0) ** 2).reshape(())\r\n\r\n theta = 0.543\r\n phi = -0.234\r\n lam = 0.654\r\n\r\n params = jnp.array([theta, phi, lam])\r\n\r\n res = cost(params)\r\n expected_cost = (\r\n jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi)\r\n ) ** 2\r\n assert jnp.allclose(res, expected_cost, atol=tol, 
rtol=0)\r\n\r\n res = jax.grad(cost)(params)\r\n expected_grad = (\r\n jnp.array(\r\n [\r\n jnp.sin(theta) * jnp.cos(lam) * jnp.cos(phi),\r\n jnp.cos(theta) * jnp.cos(lam) * jnp.sin(phi) + jnp.sin(lam) * jnp.cos(phi),\r\n jnp.cos(theta) * jnp.sin(lam) * jnp.cos(phi) + jnp.cos(lam) * jnp.sin(phi),\r\n ]\r\n )\r\n * 2\r\n * (jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi))\r\n )\r\n assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"interface\", [\"autograd\", \"tf\", \"torch\"])\r\n def test_error_backprop_wrong_interface(self, interface, tol):\r\n \"\"\"Tests that an error is raised if diff_method='backprop' but not using\r\n the Jax interface\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=1)\r\n\r\n def circuit(x, w=None):\r\n qml.RZ(x, wires=w)\r\n return qml.expval(qml.PauliX(w))\r\n\r\n error_type = qml.QuantumFunctionError if qml.tape_mode_active() else ValueError\r\n with pytest.raises(\r\n error_type,\r\n match=\"default.qubit.jax only supports diff_method='backprop' when using the jax interface\",\r\n ):\r\n qml.qnode(dev, diff_method=\"backprop\", interface=interface)(circuit)\r\n\r\n\r\nclass TestHighLevelIntegration:\r\n \"\"\"Tests for integration with higher level components of PennyLane.\"\"\"\r\n\r\n def test_template_integration(self):\r\n \"\"\"Test that a PassthruQNode using default.qubit.jax works with templates.\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"jax\")\r\n def circuit(weights):\r\n qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))\r\n\r\n grad = jax.grad(lambda a: circuit(a).reshape(()))(weights)\r\n assert grad.shape == weights.shape\r\n\r\n def test_qnode_collection_integration(self):\r\n \"\"\"Test that a PassthruQNode using default.qubit.jax works with QNodeCollections.\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n def ansatz(weights, **kwargs):\r\n qml.RX(weights[0], wires=0)\r\n qml.RY(weights[1], wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n\r\n obs_list = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]\r\n qnodes = qml.map(ansatz, obs_list, dev, interface=\"jax\")\r\n if not qml.tape_mode_active():\r\n assert qnodes.interface == \"jax\"\r\n\r\n weights = jnp.array([0.1, 0.2])\r\n\r\n def cost(weights):\r\n return jnp.sum(jnp.array(qnodes(weights)))\r\n\r\n grad = jax.grad(cost)(weights)\r\n assert grad.shape == weights.shape\r\n\r\n def test_non_backprop_error(self):\r\n \"\"\"Test that an error is raised in tape mode if the diff method is not backprop\"\"\"\r\n if not qml.tape_mode_active():\r\n pytest.skip(\"Test only applies in tape mode\")\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n def circuit(weights):\r\n qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n qnode = qml.QNode(circuit, dev, interface=\"jax\", diff_method=\"parameter-shift\")\r\n weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))\r\n\r\n with pytest.raises(qml.QuantumFunctionError, match=\"The JAX interface can only be used with\"):\r\n qnode(weights)\r\n\r\n\r\nclass TestOps:\r\n \"\"\"Unit tests for operations supported by the default.qubit.jax device\"\"\"\r\n\r\n @pytest.mark.parametrize(\"jacobian_transform\", [jax.jacfwd, jax.jacrev])\r\n def 
test_multirz_jacobian(self, jacobian_transform):\r\n \"\"\"Test that the patched numpy functions are used for the MultiRZ\r\n operation and the jacobian can be computed.\"\"\"\r\n wires = 4\r\n dev = qml.device(\"default.qubit.jax\", wires=wires)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"jax\")\r\n def circuit(param):\r\n qml.MultiRZ(param, wires=[0, 1])\r\n return qml.probs(wires=list(range(wires)))\r\n\r\n param = 0.3\r\n res = jacobian_transform(circuit)(param)\r\n assert jnp.allclose(res, jnp.zeros(wires ** 2))\r\n\r\n def test_full_subsystem(self, mocker):\r\n \"\"\"Test applying a state vector to the full subsystem\"\"\"\r\n dev = DefaultQubitJax(wires=[\"a\", \"b\", \"c\"])\r\n state = jnp.array([1, 0, 0, 0, 1, 0, 1, 1]) / 2.0\r\n state_wires = qml.wires.Wires([\"a\", \"b\", \"c\"])\r\n\r\n spy = mocker.spy(dev, \"_scatter\")\r\n dev._apply_state_vector(state=state, device_wires=state_wires)\r\n\r\n assert jnp.all(dev._state.flatten() == state)\r\n spy.assert_not_called()\r\n\r\n def test_partial_subsystem(self, mocker):\r\n \"\"\"Test applying a state vector to a subset of wires of the full subsystem\"\"\"\r\n\r\n dev = DefaultQubitJax(wires=[\"a\", \"b\", \"c\"])\r\n state = jnp.array([1, 0, 1, 0]) / jnp.sqrt(2.0)\r\n state_wires = qml.wires.Wires([\"a\", \"c\"])\r\n\r\n spy = mocker.spy(dev, \"_scatter\")\r\n dev._apply_state_vector(state=state, device_wires=state_wires)\r\n res = jnp.sum(dev._state, axis=(1,)).flatten()\r\n\r\n assert jnp.all(res == state)\r\n spy.assert_called()\r\n",
"# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUnit tests for the :mod:`pennylane` :class:`QNode` class.\n\"\"\"\nimport contextlib\nimport io\nimport textwrap\n\nimport pytest\nimport numpy as np\n\nimport pennylane as qml\nfrom pennylane._device import Device\nfrom pennylane.qnodes.base import BaseQNode, QuantumFunctionError, decompose_queue\nfrom pennylane.variable import Variable\nfrom pennylane.wires import Wires, WireError\n\n\[email protected](scope=\"function\")\ndef mock_qnode(mock_device):\n \"\"\"Provides a circuit for the subsequent tests of the operation queue\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.RY(0.4, wires=[0])\n qml.RZ(-0.2, wires=[1])\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliZ(1))\n\n node = BaseQNode(circuit, mock_device)\n node._construct([1.0], {})\n return node\n\n\[email protected](scope=\"function\")\ndef operable_mock_device_2_wires(monkeypatch):\n \"\"\"A mock instance of the abstract Device class that can support Qubit qfuncs.\"\"\"\n\n dev = Device\n with monkeypatch.context() as m:\n m.setattr(dev, \"__abstractmethods__\", frozenset())\n m.setattr(dev, \"capabilities\", lambda cls: {\"model\": \"qubit\"})\n m.setattr(dev, \"operations\", [\"BasisState\", \"RX\", \"RY\", \"CNOT\", \"Rot\", \"PhaseShift\"])\n m.setattr(dev, \"observables\", [\"PauliX\", \"PauliY\", \"PauliZ\"])\n m.setattr(dev, \"reset\", lambda self: None)\n m.setattr(dev, \"apply\", lambda self, x, y, z: None)\n m.setattr(dev, \"expval\", lambda self, x, y, z: 1)\n yield Device(wires=2)\n\n\[email protected](scope=\"function\")\ndef operable_mock_device_2_wires_with_inverses(monkeypatch):\n \"\"\"A mock instance of the abstract Device class that can support Qubit qfuncs.\"\"\"\n\n dev = Device\n with monkeypatch.context() as m:\n m.setattr(dev, \"__abstractmethods__\", frozenset())\n m.setattr(dev, \"capabilities\", lambda cls: {\"model\": \"qubit\", \"supports_inverse_operations\": True})\n m.setattr(dev, \"operations\", [\"BasisState\", \"RX\", \"RY\", \"RZ\", \"CNOT\", \"PhaseShift\"])\n m.setattr(dev, \"observables\", [\"PauliX\", \"PauliY\", \"PauliZ\"])\n m.setattr(dev, \"reset\", lambda self: None)\n m.setattr(dev, \"apply\", lambda self, x, y, z: None)\n m.setattr(dev, \"expval\", lambda self, x, y, z: 1)\n yield Device(wires=2)\n\n\[email protected](scope=\"function\")\ndef operable_mock_CV_device_2_wires(monkeypatch):\n \"\"\"A mock instance of the abstract Device class that can support CV qfuncs.\"\"\"\n\n dev = Device\n with monkeypatch.context() as m:\n m.setattr(dev, \"__abstractmethods__\", frozenset())\n m.setattr(\n dev,\n \"operations\",\n [\"Displacement\", \"CubicPhase\", \"Squeezing\", \"Rotation\", \"Kerr\", \"Beamsplitter\"],\n )\n m.setattr(dev, \"observables\", [\"X\", \"NumberOperator\"])\n m.setattr(dev, \"reset\", lambda self: None)\n m.setattr(dev, \"apply\", lambda self, x, y, z: None)\n m.setattr(dev, \"expval\", lambda self, x, y, z: 1)\n 
yield Device(wires=2)\n\n\nclass TestQNodeOperationQueue:\n \"\"\"Tests that the QNode operation queue is properly filled and interacted with\"\"\"\n\n def test_operation_ordering(self, mock_qnode):\n \"\"\"Tests that the ordering of the operations is correct\"\"\"\n\n qnode = mock_qnode\n assert qnode.ops[0].name == \"RX\"\n assert qnode.ops[1].name == \"CNOT\"\n assert qnode.ops[2].name == \"RY\"\n assert qnode.ops[3].name == \"RZ\"\n assert qnode.ops[4].name == \"PauliX\"\n assert qnode.ops[5].name == \"PauliZ\"\n\n def test_op_descendants_operations_only(self, mock_qnode):\n \"\"\"Tests that _op_descendants properly extracts the successors that are operations\"\"\"\n\n qnode = mock_qnode\n operation_successors = qnode._op_descendants(qnode.ops[0], only=\"G\")\n assert qnode.ops[0] not in operation_successors\n assert qnode.ops[1] in operation_successors\n assert qnode.ops[4] not in operation_successors\n\n def test_op_descendants_observables_only(self, mock_qnode):\n \"\"\"Tests that _op_descendants properly extracts the successors that are observables\"\"\"\n\n qnode = mock_qnode\n observable_successors = qnode._op_descendants(qnode.ops[0], only=\"O\")\n assert qnode.ops[0] not in observable_successors\n assert qnode.ops[1] not in observable_successors\n assert qnode.ops[4] in observable_successors\n\n def test_op_descendants_both_operations_and_observables(self, mock_qnode):\n \"\"\"Tests that _op_descendants properly extracts all successors\"\"\"\n\n qnode = mock_qnode\n successors = qnode._op_descendants(qnode.ops[0], only=None)\n assert qnode.ops[0] not in successors\n assert qnode.ops[1] in successors\n assert qnode.ops[4] in successors\n\n def test_op_descendants_both_operations_and_observables_nodes(self, mock_qnode):\n \"\"\"Tests that _op_descendants properly extracts all successor nodes\"\"\"\n\n qnode = mock_qnode\n successors = qnode._op_descendants(qnode.ops[0], only=None)\n assert qnode.circuit.operations[0] not in successors\n assert qnode.circuit.operations[1] in successors\n assert qnode.circuit.operations[2] in successors\n assert qnode.circuit.operations[3] in successors\n assert qnode.circuit.observables[0] in successors\n\n def test_op_descendants_both_operations_and_observables_strict_ordering(self, mock_qnode):\n \"\"\"Tests that _op_descendants properly extracts all successors\"\"\"\n\n qnode = mock_qnode\n successors = qnode._op_descendants(qnode.ops[2], only=None)\n assert qnode.circuit.operations[0] not in successors\n assert qnode.circuit.operations[1] not in successors\n assert qnode.circuit.operations[2] not in successors\n assert qnode.circuit.operations[3] not in successors\n assert qnode.circuit.observables[0] in successors\n\n def test_op_descendants_extracts_all_successors(self, mock_qnode):\n \"\"\"Tests that _op_descendants properly extracts all successors\"\"\"\n\n qnode = mock_qnode\n successors = qnode._op_descendants(qnode.ops[2], only=None)\n assert qnode.ops[4] in successors\n assert qnode.ops[5] not in successors\n\n def test_print_applied(self, mock_device):\n \"\"\"Test that printing applied gates works correctly\"\"\"\n\n H = np.array([[0, 1], [1, 0]])\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.RY(0.4, wires=[0])\n qml.RZ(-0.2, wires=[1])\n return qml.expval(qml.PauliX(0)), qml.var(qml.Hermitian(H, wires=1))\n\n expected_qnode_print = textwrap.dedent(\n \"\"\"\\\n Operations\n ==========\n RX({x}, wires=[0])\n CNOT(wires=[0, 1])\n RY(0.4, wires=[0])\n RZ(-0.2, wires=[1])\n\n Observables\n ===========\n 
expval(PauliX(wires=[0]))\n var(Hermitian(array([[0, 1],\n [1, 0]]), wires=[1]))\"\"\"\n )\n\n node = BaseQNode(circuit, mock_device)\n\n # test before construction\n f = io.StringIO()\n\n with contextlib.redirect_stdout(f):\n node.print_applied()\n out = f.getvalue().strip()\n\n assert out == \"QNode has not yet been executed.\"\n\n # construct QNode\n f = io.StringIO()\n node._set_variables([0.1], {})\n node._construct([0.1], {})\n\n with contextlib.redirect_stdout(f):\n node.print_applied()\n out = f.getvalue().strip()\n\n assert out == expected_qnode_print.format(x=0.1)\n\n def test_print_applied_with_probs(self, mock_device):\n \"\"\"Test that printing applied gates works correctly when probs are returned\"\"\"\n\n H = np.array([[0, 1], [1, 0]])\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.SWAP(wires=[1, 0])\n qml.RZ(-0.2, wires=[1])\n return qml.probs(wires=[0]), qml.var(qml.Hermitian(H, wires=1))\n\n expected_qnode_print = textwrap.dedent(\n \"\"\"\\\n Operations\n ==========\n RX({x}, wires=[0])\n CNOT(wires=[0, 1])\n SWAP(wires=[1, 0])\n RZ(-0.2, wires=[1])\n\n Observables\n ===========\n probs(wires=[0])\n var(Hermitian(array([[0, 1],\n [1, 0]]), wires=[1]))\"\"\"\n )\n\n node = BaseQNode(circuit, mock_device)\n\n # test before construction\n f = io.StringIO()\n\n with contextlib.redirect_stdout(f):\n node.print_applied()\n out = f.getvalue().strip()\n\n assert out == \"QNode has not yet been executed.\"\n\n # construct QNode\n f = io.StringIO()\n node._set_variables([0.1], {})\n node._construct([0.1], {})\n\n with contextlib.redirect_stdout(f):\n node.print_applied()\n out = f.getvalue().strip()\n\n assert out == expected_qnode_print.format(x=0.1)\n\n def test_operation_appending(self, mock_device):\n \"\"\"Tests that operations are correctly appended.\"\"\"\n CNOT = qml.CNOT(wires=[0, 1])\n\n def circuit(x):\n qml.QueuingContext.append(CNOT)\n qml.RY(0.4, wires=[0])\n qml.RZ(-0.2, wires=[1])\n\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliZ(1))\n\n qnode = BaseQNode(circuit, mock_device)\n qnode._construct([1.0], {})\n\n assert qnode.ops[0].name == \"CNOT\"\n assert qnode.ops[1].name == \"RY\"\n assert qnode.ops[2].name == \"RZ\"\n assert qnode.ops[3].name == \"PauliX\"\n\n def test_operation_removal(self, mock_device):\n \"\"\"Tests that operations are correctly removed.\"\"\"\n\n def circuit(x):\n RX = qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.RY(0.4, wires=[0])\n qml.RZ(-0.2, wires=[1])\n\n qml.QueuingContext.remove(RX)\n\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliZ(1))\n\n qnode = BaseQNode(circuit, mock_device)\n qnode._construct([1.0], {})\n\n assert qnode.ops[0].name == \"CNOT\"\n assert qnode.ops[1].name == \"RY\"\n assert qnode.ops[2].name == \"RZ\"\n assert qnode.ops[3].name == \"PauliX\"\n\n def test_prune_tensors(self, mock_device):\n \"\"\"Test that the _prune_tensors auxiliary method prunes correct for\n a single Identity in the Tensor.\"\"\"\n px = qml.PauliX(1)\n obs = qml.Identity(0) @ px\n\n def circuit(x):\n return qml.expval(obs)\n\n qnode = BaseQNode(circuit, mock_device)\n\n assert qnode._prune_tensors(obs) == px\n\n def test_prune_tensors_no_pruning_took_place(self, mock_device):\n \"\"\"Test that the _prune_tensors auxiliary method returns\n the original tensor if no observables were pruned.\"\"\"\n px = qml.PauliX(1)\n obs = px\n\n def circuit(x):\n return qml.expval(obs)\n\n qnode = BaseQNode(circuit, mock_device)\n\n assert qnode._prune_tensors(obs) == px\n\n def 
test_prune_tensors_construct(self, mock_device):\n \"\"\"Test that the tensors are pruned in construct.\"\"\"\n\n def circuit(x):\n return qml.expval(qml.PauliX(0) @ qml.Identity(1))\n\n qnode = BaseQNode(circuit, mock_device)\n qnode._construct([1.0], {})\n\n assert qnode.ops[0].name == \"PauliX\"\n assert len(qnode.ops[0].wires) == 1\n assert qnode.ops[0].wires[0] == Wires(0)\n\n\nclass TestQNodeExceptions:\n \"\"\"Tests that QNode raises proper errors\"\"\"\n\n def test_operations_after_observables(self, operable_mock_device_2_wires):\n \"\"\"Error: qfunc contains operations after observables.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n qml.RY(0.5, wires=[0])\n return qml.expval(qml.PauliZ(wires=0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"gates must precede measured\"):\n node(0.5)\n\n def test_return_of_non_observable(self, operable_mock_device_2_wires):\n \"\"\"Error: qfunc returns something besides observables.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(wires=0)), 0.3\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"A quantum function must return either\"):\n node(0.5)\n\n def test_observable_with_no_measurement_type(self, operable_mock_device_2_wires):\n \"\"\"Error: observable lacks the measurement type.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(wires=0)), qml.PauliZ(wires=1)\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(\n QuantumFunctionError, match=\"does not have the measurement type specified\"\n ):\n node(0.5)\n\n def test_observable_not_returned(self, operable_mock_device_2_wires):\n \"\"\"Error: qfunc does not return all observables.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"All measured observables must be returned\"):\n node(0.5)\n\n def test_observable_order_violated(self, operable_mock_device_2_wires):\n \"\"\"Error: qfunc does not return all observables in the correct order.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n return qml.expval(qml.PauliZ(wires=0)), ex\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"All measured observables must be returned\"):\n node(0.5)\n\n def test_mixing_of_cv_and_qubit_operations(self, operable_mock_device_2_wires):\n \"\"\"Error: qubit and CV operations are mixed in the same qfunc.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.Displacement(0.5, 0, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(\n QuantumFunctionError, match=\"Continuous and discrete operations are not allowed\"\n ):\n node(0.5)\n\n def test_cv_operations_on_qubit_device(self, operable_mock_device_2_wires):\n \"\"\"Error: cannot use CV operations on a qubit device.\"\"\"\n\n def circuit(x):\n qml.Displacement(0.5, 0, wires=[0])\n return qml.expval(qml.X(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(\n QuantumFunctionError, match=\"a qubit device; CV operations are not allowed\"\n ):\n node(0.5)\n\n def test_qubit_operations_on_CV_device(self, 
operable_mock_device_2_wires, monkeypatch):\n \"\"\"Error: cannot use qubit operations on a CV device.\"\"\"\n monkeypatch.setattr(operable_mock_device_2_wires, \"capabilities\", lambda: {\"model\": \"cv\"})\n\n def circuit(x):\n qml.RX(0.5, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(\n QuantumFunctionError, match=\"a CV device; qubit operations are not allowed\"\n ):\n node(0.5)\n\n def test_multiple_measurements_on_same_wire(self, operable_mock_device_2_wires):\n \"\"\"Error: the same wire is measured multiple times.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"can only be measured once\"):\n node(0.5)\n\n def test_invisible_operations(self, operable_mock_device_2_wires):\n \"\"\"Error: an operation does not affect the measurements.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.RX(x, wires=[1]) # on its own component in the circuit graph\n return qml.expval(qml.PauliZ(0))\n\n kwargs = {\"vis_check\": True}\n node = BaseQNode(circuit, operable_mock_device_2_wires, **kwargs)\n with pytest.raises(QuantumFunctionError, match=\"cannot affect the circuit output\"):\n node(0.5)\n\n def test_operation_requiring_all_wires(self, operable_mock_device_2_wires):\n \"\"\"Error: an operation that must be applied to all wires is not\n applied to all wires.\"\"\"\n\n class DummyOp(qml.operation.Operation):\n \"\"\"Dummy operation\"\"\"\n\n num_wires = qml.operation.WiresEnum.AllWires\n num_params = 0\n par_domain = None\n\n def circuit():\n DummyOp(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"must act on all wires\"):\n node()\n\n def test_operation_on_nonexistant_wire(self, operable_mock_device_2_wires):\n \"\"\"Error: an operation is applied to a non-existant wire.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 2])\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)\n\n def test_observable_on_nonexistant_wire(self, operable_mock_device_2_wires):\n \"\"\"Error: an observable is measured on a non-existant wire.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(QuantumFunctionError, match=\"applied to invalid wire\"):\n node(0.5)\n\n def test_bad_wire_argument(self, operable_mock_device_2_wires):\n \"\"\"Error: wire arguments must be intergers.\"\"\"\n\n def circuit(x):\n qml.RX(x, wires=[qml.PauliX])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(WireError, match=\"Wires must be represented by\"):\n node(1)\n\n def test_arg_as_wire_argument(self, operable_mock_device_2_wires):\n \"\"\"Error: trying to use a differentiable parameter as a wire argument.\"\"\"\n\n def circuit(x):\n qml.RX(0.5, wires=[x])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(2))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n with pytest.raises(WireError, match=\"Wires must be represented 
by\"):\n node(1)\n\n def test_kwarg_as_wire_argument(self, operable_mock_device_2_wires):\n \"\"\"Error: trying to use a keyword-only parameter as a wire argument in an immutable circuit.\"\"\"\n\n def circuit(*, x=None):\n qml.RX(0.5, wires=[x])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires, mutable=False)\n with pytest.raises(WireError, match=\"Wires must be represented by\"):\n node(x=1)\n\n @pytest.mark.xfail(\n reason=\"Tests the auxiliary-equals-keyword-only syntax\", raises=TypeError, strict=True\n )\n def test_simple_valid_call(self, operable_mock_device_2_wires):\n \"\"\"BaseQNode gives an error here, \"got multiple values for argument 'x'\"\n \"\"\"\n\n def circuit(x=0):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n node(0.3)\n assert node.ops[0].parameters[0] == 0.3\n\n @pytest.mark.xfail(\n reason=\"Tests the auxiliary-equals-keyword-only syntax\", raises=AssertionError, strict=True\n )\n def test_calling_no_kwargs(self, operable_mock_device_2_wires):\n \"\"\"Various quantum func calling syntax errors.\"\"\"\n\n def circuit(x, y=0.2, *args, m=0.3, n):\n circuit.in_args = (x, y, m, n)\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires, mutable=True)\n\n with pytest.raises(QuantumFunctionError, match=\"parameter 'x' given twice\"):\n node(0.1, x=1.1)\n with pytest.raises(QuantumFunctionError, match=\"Unknown quantum function parameter 'foo'\"):\n node(foo=1)\n with pytest.raises(\n QuantumFunctionError, match=\"'args' cannot be given using the keyword syntax\"\n ):\n node(args=1)\n with pytest.raises(QuantumFunctionError, match=\"positional parameter 'x' missing\"):\n node(n=0.4)\n with pytest.raises(QuantumFunctionError, match=\"keyword-only parameter 'n' missing\"):\n node(0.1)\n\n # valid calls\n node(x=0.1, n=0.4)\n assert circuit.in_args[2:] == (0.3, 0.4) # first two are Variables\n node(0.1, n=0.4)\n assert circuit.in_args[2:] == (0.3, 0.4)\n\n def test_unused_positional_parameter(self, operable_mock_device_2_wires):\n \"\"\"Error: a positional parameter is not used in the circuit.\"\"\"\n\n def circuit(a, x):\n qml.RX(a, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n kwargs = {\"par_check\": True}\n node = BaseQNode(circuit, operable_mock_device_2_wires, **kwargs)\n with pytest.raises(QuantumFunctionError, match=\"The positional parameters\"):\n node(1.0, 2.0)\n\n @pytest.mark.xfail(\n reason=\"Tests the auxiliary-equals-keyword-only syntax\", raises=AssertionError, strict=True\n )\n def test_calling_with_kwargs(self, operable_mock_device_2_wires):\n \"\"\"Various quantum func calling syntax errors.\"\"\"\n\n def circuit(x, y=0.2, *, m=0.3, n, **kwargs):\n circuit.in_args = (x, y, m, n)\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires, mutable=True)\n\n with pytest.raises(QuantumFunctionError, match=\"parameter 'x' given twice\"):\n node(0.1, x=1.1)\n with pytest.raises(\n QuantumFunctionError, match=\"'kwargs' cannot be given using the keyword syntax\"\n ):\n node(kwargs=1)\n with pytest.raises(QuantumFunctionError, match=\"takes 2 positional parameters, 3 given\"):\n node(0.1, 0.2, 100, n=0.4)\n with pytest.raises(QuantumFunctionError, match=\"positional parameter 'x' missing\"):\n node(n=0.4)\n with pytest.raises(QuantumFunctionError, match=\"keyword-only parameter 'n' missing\"):\n node(0.1)\n\n # valid calls\n node(x=0.1, n=0.4)\n 
assert circuit.in_args[2:] == (0.3, 0.4) # first two are Variables\n node(0.1, n=0.4)\n assert circuit.in_args[2:] == (0.3, 0.4)\n\n def test_calling_bad_errors(self, operable_mock_device_2_wires):\n \"\"\"Confusing quantum func calling errors and bugs (auxiliary-equals-parameters-with-default syntax).\"\"\"\n\n def circuit(x=0.1):\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(TypeError, match=\"got multiple values for argument 'x'\"):\n node(0.3) # default arg given positionally, wrong error message\n\n def test_calling_errors(self, operable_mock_device_2_wires):\n \"\"\"Good quantum func calling syntax errors (auxiliary-equals-parameters-with-default syntax).\"\"\"\n\n def circuit(x, y=0.2, *args, z=0.3):\n circuit.in_args = (x, y, z)\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, operable_mock_device_2_wires, mutable=True)\n\n with pytest.raises(\n QuantumFunctionError, match=\"'x' cannot be given using the keyword syntax\"\n ):\n node(0.1, x=1.1)\n with pytest.raises(QuantumFunctionError, match=\"Unknown quantum function parameter 'foo'\"):\n node(foo=1)\n with pytest.raises(\n QuantumFunctionError, match=\"'args' cannot be given using the keyword syntax\"\n ):\n node(args=1)\n with pytest.raises(TypeError, match=\"missing 1 required positional argument: 'x'\"):\n node(z=0.4)\n\n # valid calls\n node(0.1)\n assert circuit.in_args[1:] == (0.2, 0.3) # first is a Variable\n node(0.1, y=1.2)\n assert circuit.in_args[1:] == (1.2, 0.3)\n node(0.1, z=1.3, y=1.2)\n assert circuit.in_args[1:] == (1.2, 1.3)\n\n\nclass TestQNodeArgs:\n \"\"\"Tests the handling of calling arguments in the QNode\"\"\"\n\n @pytest.mark.parametrize(\n \"x,y\",\n zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),\n )\n def test_fanout(self, qubit_device_1_wire, tol, x, y):\n \"\"\"Tests that qnodes can compute the correct function when the\n same parameter is used in multiple gates.\"\"\"\n\n def circuit(x, y):\n qml.RX(x, wires=[0])\n qml.RZ(y, wires=[0])\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n def analytic_expval(x, y):\n return np.cos(x) ** 2 - np.cos(y) * np.sin(x) ** 2\n\n node = BaseQNode(circuit, qubit_device_1_wire)\n res = node(x, y)\n assert res == pytest.approx(analytic_expval(x, y), abs=tol)\n\n def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol):\n \"\"\"Tests that qnodes return multiple expectation values.\"\"\"\n\n a, b, c = 0.5, 0.54, 0.3\n\n def circuit(x, y, z):\n qml.RX(x, wires=[0])\n qml.RZ(y, wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.RY(y, wires=[0])\n qml.RX(z, wires=[0])\n return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1))\n\n def analytic_expval(a, b, c):\n return [-1 * np.cos(a) * np.cos(b) * np.sin(c), np.cos(a)]\n\n node = BaseQNode(circuit, qubit_device_2_wires)\n res = node(a, b, c)\n assert res == pytest.approx(analytic_expval(a, b, c), abs=tol)\n\n def test_multiple_keywordargs_used(self, qubit_device_2_wires, tol):\n \"\"\"Tests that qnodes can use multiple keyword-only arguments.\"\"\"\n\n def circuit(w, *, x=None, y=None):\n qml.RX(x, wires=[0])\n qml.RX(y, wires=[1])\n qml.RZ(w, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n node = BaseQNode(circuit, qubit_device_2_wires)\n c = node(1.0, x=np.pi, y=np.pi / 2)\n assert c == pytest.approx([-1.0, 0.0], abs=tol)\n\n def test_arraylike_args_used(self, qubit_device_2_wires, tol):\n \"\"\"Tests that qnodes use array-like 
positional arguments.\"\"\"\n\n def circuit(x):\n qml.RX(x[0], wires=[0])\n qml.RX(x[1], wires=[1])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n node = BaseQNode(circuit, qubit_device_2_wires)\n c = node([np.pi, np.pi])\n assert c == pytest.approx([-1.0, -1.0], abs=tol)\n\n def test_arraylike_keywordargs_used(self, qubit_device_2_wires, tol):\n \"\"\"Tests that qnodes use array-like keyword-only arguments.\"\"\"\n\n def circuit(w, *, x=None):\n qml.RX(x[0], wires=[0])\n qml.RX(x[1], wires=[1])\n qml.RZ(w, wires=[0])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n node = BaseQNode(circuit, qubit_device_2_wires)\n c = node(1.0, x=[np.pi, np.pi / 2])\n assert c == pytest.approx([-1.0, 0.0], abs=tol)\n\n def test_keywordargs_for_wires(self, qubit_device_2_wires, tol):\n \"\"\"Tests that wires can be passed as keyword-only arguments in mutable circuits.\"\"\"\n\n default_q = 0\n\n def circuit(x, *, q=default_q):\n qml.RX(x, wires=[q])\n return qml.expval(qml.PauliZ(q))\n\n node = BaseQNode(circuit, qubit_device_2_wires)\n c = node(np.pi, q=1)\n assert node.ops[0].wires == Wires([1])\n assert c == pytest.approx(-1.0, abs=tol)\n\n c = node(np.pi)\n assert node.ops[0].wires == Wires([default_q])\n assert c == pytest.approx(-1.0, abs=tol)\n\n def test_keywordargs_used(self, qubit_device_1_wire, tol):\n \"\"\"Tests that qnodes use keyword arguments.\"\"\"\n\n def circuit(w, x=None):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, qubit_device_1_wire)\n c = node(1.0, x=np.pi)\n assert c == pytest.approx(-1.0, abs=tol)\n\n def test_keywordarg_updated_in_multiple_calls(self, qubit_device_2_wires, tol):\n \"\"\"Tests that qnodes update keyword arguments in consecutive calls.\"\"\"\n\n def circuit(w, x=None):\n qml.RX(w, wires=[0])\n qml.RX(x, wires=[1])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n node = BaseQNode(circuit, qubit_device_2_wires)\n c1 = node(0.1, x=0.0)\n c2 = node(0.1, x=np.pi)\n assert c1[1] != c2[1]\n\n def test_keywordarg_passes_through_classicalnode(self, qubit_device_2_wires, tol):\n \"\"\"Tests that qnodes' keyword arguments pass through classical nodes.\"\"\"\n\n def circuit(w, x=None):\n qml.RX(w, wires=[0])\n qml.RX(x, wires=[1])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n node = BaseQNode(circuit, qubit_device_2_wires)\n\n def classical_node(w, x=None):\n return node(w, x=x)\n\n c = classical_node(0.0, x=np.pi)\n assert c == pytest.approx([1.0, -1.0], abs=tol)\n\n def test_keywordargs_with_kwargs(self, qubit_device_1_wire, tol):\n \"\"\"Tests that nothing happens if unknown keyword arg passed with\n qnodes accepting **kwargs.\"\"\"\n\n def circuit(w, x=None, **kwargs):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(circuit, qubit_device_1_wire)\n c = node(1.0, x=np.pi, y=10)\n assert c == pytest.approx(-1.0, abs=tol)\n\n def test_complex_positional_argument_qubitunitary(self, tol):\n \"\"\"Tests that matrices containing complex positional arguments can be\n passed to the QubitUnitary operation.\"\"\"\n\n dev = qml.device('default.qubit', wires=1)\n\n @qml.qnode(dev)\n def circuit(phi, matrix):\n qml.RZ(phi, wires=0)\n qml.PauliY(0)\n qml.QubitUnitary(matrix, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n matrix = np.array([[1, 0], [0, 0.70710678 + 0.70710678*1.j]])\n arg = 0\n res = circuit(arg, matrix)\n assert np.isclose(res, -1, atol=tol)\n\n\nclass TestQNodeCaching:\n \"\"\"Tests for the QNode construction 
caching\"\"\"\n\n def test_no_caching(self):\n \"\"\"Test that mutable circuit structure changes on subsequent evalutions.\"\"\"\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n def mutable_circuit(x, *, c=None):\n qml.RX(x, wires=0)\n for i in range(c):\n qml.RX(x, wires=i)\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(mutable_circuit, dev, mutable=True)\n\n # first evaluation\n node(0, c=0)\n assert len(node.circuit.operations) == 1\n temp = node.ops[0]\n\n # second evaluation\n node(0, c=1)\n assert len(node.circuit.operations) == 2\n node.ops[0] is not temp # all Operations in the circuit are generated anew\n\n def test_caching(self):\n \"\"\"Test that non-mutable circuit structure does not change on subsequent evalutions.\"\"\"\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n def non_mutable_circuit(x, *, c=None):\n qml.RX(x, wires=0)\n qml.RX(c, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n node = BaseQNode(non_mutable_circuit, dev, mutable=False)\n\n # first evaluation\n node(0, c=0)\n assert len(node.circuit.operations) == 2\n temp = node.ops[0]\n\n # second evaluation\n node(0, c=1)\n assert len(node.circuit.operations) == 2\n node.ops[0] is temp # it's the same circuit with the same objects\n\n THETA = np.linspace(0.11, 1, 3)\n PHI = np.linspace(0.32, 1, 3)\n VARPHI = np.linspace(0.02, 1, 3)\n\n @pytest.mark.parametrize(\"theta,phi,varphi\", list(zip(THETA, PHI, VARPHI)))\n def test_mutable_qnode(self, theta, phi, varphi, tol):\n \"\"\"Test that a mutable QNode evaluated multiple times mutates well and produces\n the desired result.\n \"\"\"\n dev = qml.device(\"default.qubit\", wires=1)\n\n @qml.qnode(dev)\n def circuit(weights, n_layers=1):\n for idx in range(n_layers):\n qml.RX(weights[idx], wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n res = circuit([phi], n_layers=1)\n exp = np.cos(phi)\n assert np.allclose(res, exp, atol=tol, rtol=0)\n\n res = circuit([phi, theta], n_layers=2)\n exp = np.cos(phi + theta)\n assert np.allclose(res, exp, atol=tol, rtol=0)\n\n res = circuit([phi, theta, varphi], n_layers=3)\n exp = np.cos(phi + theta + varphi)\n assert np.allclose(res, exp, atol=tol, rtol=0)\n\n def test_mutable_qnode_for_loop_varying_executions(self, tol):\n \"\"\"Test that a mutable QNode containing a for loop correctly mutates\n when called with different auxiliary arguments and different shaped positional\n arguments.\n \"\"\"\n dev = qml.device(\"default.qubit\", wires=1)\n\n @qml.qnode(dev)\n def node(x, n=1):\n for k in range(2):\n for j in range(min(n, k + 1)):\n qml.RX(x[k][j], wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n res = node([[0.1], [0.2]], n=1)\n exp = np.cos(sum([0.1] + [0.2]))\n assert np.allclose(res, exp, atol=tol, rtol=0)\n\n res = node([[0.1], [0.2, 0.3]], n=2)\n exp = np.cos(sum([0.1] + [0.2, 0.3]))\n assert np.allclose(res, exp, atol=tol, rtol=0)\n\n\nclass TestQNodeEvaluate:\n \"\"\"Test for observable statistic evaluation\"\"\"\n\n @pytest.mark.parametrize(\n \"x,y\",\n zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),\n )\n def test_evaluate(self, x, y, tol):\n \"\"\"Tests correct evaluation\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n def circuit(x, y):\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n node = BaseQNode(circuit, dev)\n res = node.evaluate([x, y], {})\n expected = np.sin(y) * np.cos(x)\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\n 
\"x,y\",\n zip(np.linspace(-2 * np.pi, 2 * np.pi, 7), np.linspace(-2 * np.pi, 2 * np.pi, 7) ** 2 / 11),\n )\n def test_obs_evaluate(self, x, y, tol):\n \"\"\"Tests correct evaluation swapping out the observables\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n def circuit(x, y):\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n node = BaseQNode(circuit, dev)\n\n # test standard evaluation\n node = BaseQNode(circuit, dev)\n res = node.evaluate([x, y], {})\n expected = np.sin(y) * np.cos(x)\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n # hot-swap the observable\n res = node.evaluate_obs([qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))], [x, y], {})\n expected = np.cos(y)\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n def test_single_mode_sample(self):\n \"\"\"Test that there is only one array of values returned\n for single mode samples\"\"\"\n shots = 10\n dev = qml.device(\"default.qubit\", wires=2, shots=shots)\n\n def circuit(x, y):\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.sample(qml.PauliZ(0) @ qml.PauliX(1))\n\n node = BaseQNode(circuit, dev)\n res = node(0.432, 0.12)\n assert res.shape == (10,)\n\n\nclass TestDecomposition:\n \"\"\"Test for queue decomposition\"\"\"\n\n def test_no_decomposition(self, operable_mock_device_2_wires):\n \"\"\"Test that decompose queue makes no changes\n if there are no operations to be decomposed\"\"\"\n\n queue = [qml.Rot(0, 1, 2, wires=0), qml.CNOT(wires=[0, 1]), qml.RX(6, wires=0)]\n\n res = decompose_queue(queue, operable_mock_device_2_wires)\n assert res == queue\n\n def test_decompose_queue(self, operable_mock_device_2_wires):\n \"\"\"Test that decompose queue works correctly\n when an operation exists that can be decomposed\"\"\"\n\n queue = [qml.Rot(0, 1, 2, wires=0), qml.U3(3, 4, 5, wires=0), qml.RX(6, wires=0)]\n\n res = decompose_queue(queue, operable_mock_device_2_wires)\n\n assert len(res) == 5\n\n assert res[0].name == \"Rot\"\n assert res[0].parameters == [0, 1, 2]\n\n assert res[1].name == \"Rot\"\n assert res[1].parameters == [5, 3, -5]\n\n assert res[2].name == \"PhaseShift\"\n assert res[2].parameters == [5]\n\n assert res[3].name == \"PhaseShift\"\n assert res[3].parameters == [4]\n\n assert res[4].name == \"RX\"\n assert res[4].parameters == [6]\n\n def test_decompose_queue_recursive(self, operable_mock_device_2_wires_with_inverses):\n \"\"\"Test that decompose queue works correctly\n when an operation exists that can be decomposed\"\"\"\n\n queue = [qml.CRY(1, wires=[0, 1]), qml.U3(3, 4, 5, wires=0)]\n\n res = decompose_queue(queue, operable_mock_device_2_wires_with_inverses)\n\n assert len(res) == 9\n\n assert res[0].name == \"RY\"\n assert res[0].parameters == [0.5]\n\n assert res[1].name == \"CNOT\"\n\n assert res[2].name == \"RY\"\n assert res[2].parameters == [-0.5]\n\n assert res[3].name == \"CNOT\"\n\n assert res[4].name == \"RZ\"\n assert res[4].parameters == [5]\n\n assert res[5].name == \"RY\"\n assert res[5].parameters == [3]\n\n assert res[6].name == \"RZ\"\n assert res[6].parameters == [-5]\n\n assert res[7].name == \"PhaseShift\"\n assert res[7].parameters == [5]\n\n assert res[8].name == \"PhaseShift\"\n assert res[8].parameters == [4]\n\n def test_decompose_queue_inv(self, operable_mock_device_2_wires_with_inverses):\n \"\"\"Test that decompose queue works correctly\n when an operation exists that can be decomposed\"\"\"\n\n queue = [\n qml.Rot(0, 1, 2, wires=0).inv(),\n 
qml.U3(3, 4, 5, wires=0).inv(),\n qml.RX(6, wires=0).inv(),\n ]\n\n res = decompose_queue(queue, operable_mock_device_2_wires_with_inverses)\n\n\n assert len(res) == 9\n\n assert res[0].name == \"RZ.inv\"\n assert res[0].parameters == [2]\n\n assert res[1].name == \"RY.inv\"\n assert res[1].parameters == [1]\n\n assert res[2].name == \"RZ.inv\"\n assert res[2].parameters == [0]\n\n assert res[3].name == \"PhaseShift.inv\"\n assert res[3].parameters == [4]\n\n assert res[4].name == \"PhaseShift.inv\"\n assert res[4].parameters == [5]\n\n assert res[5].name == \"RZ.inv\"\n assert res[5].parameters == [-5]\n\n assert res[6].name == \"RY.inv\"\n assert res[6].parameters == [3]\n\n assert res[7].name == \"RZ.inv\"\n assert res[7].parameters == [5]\n\n assert res[8].name == \"RX.inv\"\n assert res[8].parameters == [6]\n\n def test_invalid_decompose(self, operable_mock_device_2_wires):\n \"\"\"Test that an error is raised if the device\n does not support an operation arising from a\n decomposition.\"\"\"\n\n class DummyOp(qml.operation.Operation):\n \"\"\"Dummy operation\"\"\"\n\n num_params = 0\n num_wires = 1\n par_domain = \"R\"\n grad_method = \"A\"\n\n @staticmethod\n def decomposition(wires=None):\n phi = 0.3\n ops = [qml.RZ(phi, wires=wires)]\n return ops\n\n queue = [qml.Rot(0, 1, 2, wires=0), DummyOp(wires=0), qml.RX(6, wires=0)]\n\n with pytest.raises(qml.DeviceError, match=\"DummyOp not supported on device\"):\n decompose_queue(queue, operable_mock_device_2_wires)\n\n\nclass TestQNodeVariableMap:\n \"\"\"Test the conversion of arguments to Variable instances.\"\"\"\n\n def test_regular_arguments(self, mock_device):\n \"\"\"Test that regular arguments are properly converted to Variable instances.\"\"\"\n\n def circuit(a, b, c, d):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n arg_vars, kwarg_vars = node._make_variables([1.0, 2.0, 3.0, 4.0], {})\n\n expected_arg_vars = [\n Variable(0, \"a\"),\n Variable(1, \"b\"),\n Variable(2, \"c\"),\n Variable(3, \"d\"),\n ]\n\n for var, expected in zip(qml.utils._flatten(arg_vars), expected_arg_vars):\n assert var == expected\n\n assert not kwarg_vars\n\n def test_array_arguments(self, mock_device):\n \"\"\"Test that array arguments are properly converted to Variable instances.\"\"\"\n\n def circuit(weights):\n qml.RX(weights[0, 0], wires=[0])\n qml.RY(weights[0, 1], wires=[0])\n qml.RZ(weights[1, 0], wires=[0])\n qml.RZ(weights[1, 1], wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n\n weights = np.array([[1, 2], [3, 4]])\n arg_vars, kwarg_vars = node._make_variables([weights], {})\n\n expected_arg_vars = [\n Variable(0, \"weights[0,0]\"),\n Variable(1, \"weights[0,1]\"),\n Variable(2, \"weights[1,0]\"),\n Variable(3, \"weights[1,1]\"),\n ]\n\n for var, expected in zip(qml.utils._flatten(arg_vars), expected_arg_vars):\n assert var == expected\n\n assert not kwarg_vars\n\n def test_regular_keyword_arguments(self, mock_device):\n \"\"\"Test that regular keyword arguments are properly converted to Variable instances.\"\"\"\n\n def circuit(*, a=1, b=2, c=3, d=4):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n arg_vars, kwarg_vars = node._make_variables([], {\"b\": 3})\n\n expected_kwarg_vars = {\n \"a\": [Variable(0, \"a\", is_kwarg=True)],\n \"b\": 
[Variable(0, \"b\", is_kwarg=True)],\n \"c\": [Variable(0, \"c\", is_kwarg=True)],\n \"d\": [Variable(0, \"d\", is_kwarg=True)],\n }\n\n assert not arg_vars\n\n for expected_key in expected_kwarg_vars:\n for var, expected in zip(\n qml.utils._flatten(kwarg_vars[expected_key]),\n qml.utils._flatten(expected_kwarg_vars[expected_key]),\n ):\n assert var == expected\n\n def test_array_keyword_arguments(self, mock_device):\n \"\"\"Test that array keyword arguments are properly converted to Variable instances.\"\"\"\n\n def circuit(*, a=np.array([[1, 0], [0, 1]]), b=np.array([1, 2, 3])):\n qml.RX(a[0, 0], wires=[0])\n qml.RX(a[0, 1], wires=[0])\n qml.RX(a[1, 0], wires=[0])\n qml.RX(a[1, 1], wires=[0])\n qml.RY(b[0], wires=[0])\n qml.RY(b[1], wires=[0])\n qml.RY(b[2], wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n arg_vars, kwarg_vars = node._make_variables([], {\"b\": np.array([6, 7, 8, 9])})\n\n expected_kwarg_vars = {\n \"a\": [\n Variable(0, \"a[0,0]\", is_kwarg=True),\n Variable(1, \"a[0,1]\", is_kwarg=True),\n Variable(2, \"a[1,0]\", is_kwarg=True),\n Variable(3, \"a[1,1]\", is_kwarg=True),\n ],\n \"b\": [\n Variable(0, \"b[0]\", is_kwarg=True),\n Variable(1, \"b[1]\", is_kwarg=True),\n Variable(2, \"b[2]\", is_kwarg=True),\n Variable(3, \"b[3]\", is_kwarg=True),\n ],\n }\n\n assert not arg_vars\n\n for expected_key in expected_kwarg_vars:\n for var, expected in zip(\n qml.utils._flatten(kwarg_vars[expected_key]),\n qml.utils._flatten(expected_kwarg_vars[expected_key]),\n ):\n assert var == expected\n\n def test_variadic_arguments(self, mock_device):\n \"\"\"Test that variadic arguments are properly converted to Variable instances.\"\"\"\n\n def circuit(a, *b):\n qml.RX(a, wires=[0])\n qml.RX(b[0], wires=[0])\n qml.RX(b[1][1], wires=[0])\n qml.RX(b[2], wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n arg_vars, kwarg_vars = node._make_variables([0.1, 0.2, np.array([0, 1, 2, 3]), 0.5], {})\n\n expected_arg_vars = [\n Variable(0, \"a\"),\n Variable(1, \"b[0]\"),\n Variable(2, \"b[1][0]\"),\n Variable(3, \"b[1][1]\"),\n Variable(4, \"b[1][2]\"),\n Variable(5, \"b[1][3]\"),\n Variable(6, \"b[2]\"),\n ]\n\n assert not kwarg_vars\n\n for var, expected in zip(qml.utils._flatten(arg_vars), expected_arg_vars):\n assert var == expected\n\n def test_non_trainable_args(self, mock_device):\n \"\"\"Test that non trainable args are not converted to Variables\"\"\"\n\n def circuit(a, b, c, d):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n node.set_trainable_args({0, 3})\n var_values = [1.0, 2.0, 3.0, 4.0]\n arg_vars, kwarg_vars = node._make_variables(var_values, {})\n\n expected_arg_vars = [\n Variable(0, \"a\"),\n var_values[1],\n var_values[2],\n Variable(3, \"d\"),\n ]\n\n for var, expected in zip(qml.utils._flatten(arg_vars), expected_arg_vars):\n assert var == expected\n\n assert not kwarg_vars\n\n def test_numpy_scalars(self, mock_device):\n \"\"\"Test that non-differentiable NumPy scalars are correctly cast to Python numeric literals\n during Variable creation.\"\"\"\n\n def circuit(a, b):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n node.set_trainable_args({0})\n var_values = [np.array(1.0), np.array(2.0)]\n arg_vars, kwarg_vars = node._make_variables(var_values, {})\n\n expected_arg_vars = [\n 
Variable(0, \"a[]\"),\n var_values[1].item(),\n ]\n\n for var, expected in zip(qml.utils._flatten(arg_vars), expected_arg_vars):\n assert var == expected\n\n assert not kwarg_vars\n\n\nclass TestQNodeDraw:\n \"\"\"Test functionality related to draw.\"\"\"\n\n def test_unknown_charset_error(self, mock_qnode):\n \"\"\"Test that an error is raised for an unsupported charset.\"\"\"\n with pytest.raises(ValueError, match=\"Charset does_not_exist is not supported\"):\n mock_qnode.draw(charset=\"does_not_exist\")\n\n def test_draw_before_construction_error(self):\n \"\"\"Test that an error is raised when drawing a QNode that is not yet constructed is attempted.\"\"\"\n dev = qml.device(\"default.qubit\", wires=1)\n\n @qml.qnode(dev)\n def circuit(a):\n qml.RX(a, wires=[0])\n\n return qml.expval(qml.PauliZ(0))\n\n with pytest.raises(\n RuntimeError,\n match=\"The QNode can only be drawn after its CircuitGraph has been constructed\",\n ):\n circuit.draw()\n\n\nclass TestTrainableArgs:\n \"\"\"Test functionality related to trainable argument setting and validation\"\"\"\n\n def test_all_trainable(self, mock_device):\n \"\"\"Test that setting trainable_args to None treats all\n arguments as differentiable\"\"\"\n\n def circuit(a, b, c, d):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n node.set_trainable_args(None)\n var_values = [1.0, 2.0, 3.0, 4.0]\n arg_vars, kwarg_vars = node._make_variables(var_values, {})\n\n expected_arg_vars = [\n Variable(0, \"a\"),\n Variable(1, \"b\"),\n Variable(2, \"c\"),\n Variable(3, \"d\"),\n ]\n\n for var, expected in zip(qml.utils._flatten(arg_vars), expected_arg_vars):\n assert var == expected\n\n def test_none_trainable(self, mock_device):\n \"\"\"Test that an empty set results in no trainable arguments\"\"\"\n\n def circuit(a, b, c, d):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n node.set_trainable_args(set())\n var_values = [1.0, 2.0, 3.0, 4.0]\n arg_vars, kwarg_vars = node._make_variables(var_values, {})\n\n expected_arg_vars = [1.0, 2.0, 3.0, 4.0]\n\n for var, expected in zip(qml.utils._flatten(arg_vars), expected_arg_vars):\n assert var == expected\n\n def test_invalid_index_type(self, mock_device):\n \"\"\"Test floats and/or negative integers passed raise an exception\"\"\"\n\n def circuit(a, b, c, d):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n\n with pytest.raises(ValueError, match=\"Argument indices must be positive integers\"):\n node.set_trainable_args({-1, 2})\n\n with pytest.raises(ValueError, match=\"Argument indices must be positive integers\"):\n node.set_trainable_args({0.5})\n\n def test_invalid_index_value(self, mock_device):\n \"\"\"Test that an exception is raised if a specified trainable argument doesn't exist\"\"\"\n\n def circuit(a, b, c, d):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n\n with pytest.raises(ValueError, match=r\"not available\\. 
QNode has at most 4 arguments\"):\n node.set_trainable_args({0, 1, 5})\n\n # QNodes with variable positional arguments turn this check off\n\n def circuit(a, b, c, d, *args):\n qml.RX(a, wires=[0])\n qml.RY(b, wires=[0])\n qml.RZ(c, wires=[0])\n qml.RZ(d, wires=[0])\n\n return qml.expval(qml.PauliX(0))\n\n node = BaseQNode(circuit, mock_device)\n\n assert node.func.var_pos\n assert node.func.n_pos == 4\n\n # The following will no longer raise an exception,\n # since we do not know in advance how many arguments\n # the user will evaluate the QNode with.\n node.set_trainable_args({0, 1, 6})\n assert node.get_trainable_args() == {0, 1, 6}\n\n\ndef test_old_qnode_in_tape_mode():\n \"\"\"Test that the old QNode can still be evaluated when running in tape mode\"\"\"\n\n # tape mode should not be active so that we can use the old QNode\n assert not qml.tape_mode_active()\n\n try:\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def f(x):\n qml.RX(x, wires=0)\n qml.RY(0.4, wires=1)\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0))\n\n qml.enable_tape()\n res = f(0.4)\n exp = 0.9210609940028851\n\n assert np.allclose(res, exp)\n\n # check that tape mode is turned on again after evaluating the old QNode\n assert qml.tape_mode_active()\n\n finally: # always make sure we turn off tape mode to prevent disrupting the other tests\n qml.disable_tape()\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.all",
"numpy.array"
],
[
"numpy.allclose",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.array",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
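The QNode tests above repeatedly rely on the analytic identity that an RX(x) rotation gives ⟨PauliZ⟩ = cos(x). A minimal, self-contained sketch of that check, assuming a current PennyLane install and using the public qml.qnode decorator rather than the internal BaseQNode class exercised in the tests:

    import numpy as np
    import pennylane as qml

    dev = qml.device("default.qubit", wires=1)

    @qml.qnode(dev)
    def circuit(x):
        qml.RX(x, wires=0)
        return qml.expval(qml.PauliZ(0))

    # RX(x)|0> has <Z> = cos(x); verify at a few sample points
    for x in np.linspace(-np.pi, np.pi, 5):
        assert np.isclose(circuit(x), np.cos(x))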
RotemBadash/IML.HUJI | [
"2b20d074c159123f61b321a7e84312ab82400949"
] | [
"IMLearn/learners/regressors/polynomial_fitting.py"
] | [
"from __future__ import annotations\nfrom typing import NoReturn\nfrom . import LinearRegression\nfrom ...base import BaseEstimator\nimport numpy as np\n\n\nclass PolynomialFitting(BaseEstimator):\n \"\"\"\n Polynomial Fitting using Least Squares estimation\n \"\"\"\n def __init__(self, k: int) -> PolynomialFitting:\n \"\"\"\n Instantiate a polynomial fitting estimator\n\n Parameters\n ----------\n k : int\n Degree of polynomial to fit\n \"\"\"\n super().__init__()\n self.degree = k\n self.linear_regression_model = LinearRegression(\n include_intercept=False)\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to polynomial transformed samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n x = self.__transform(X)\n self.linear_regression_model.fit(x, y)\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n x = self.__transform(X)\n return self.linear_regression_model.predict(x)\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n x = self.__transform(X)\n return self.linear_regression_model.loss(x, y)\n\n def __transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform given input according to the univariate polynomial\n transformation\n\n Parameters\n ----------\n X: ndarray of shape (n_samples,)\n\n Returns\n -------\n transformed: ndarray of shape (n_samples, k+1)\n Vandermonde matrix of given samples up to degree k\n \"\"\"\n return np.vander(X, N=self.degree+1, increasing=True)"
] | [
[
"numpy.vander"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
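The PolynomialFitting entry above reduces degree-k polynomial regression to a Vandermonde transform followed by ordinary least squares (the intercept is absorbed into the constant column of the Vandermonde matrix, hence include_intercept=False). A numpy-only sketch of the same idea; the toy cubic data and the degree are hypothetical, not from the source:

    import numpy as np

    rng = np.random.default_rng(0)
    x = np.linspace(-2, 2, 50)
    y = 1.5 * x**3 - x + rng.normal(scale=0.1, size=x.shape)  # noisy cubic

    k = 3
    V = np.vander(x, N=k + 1, increasing=True)     # same transform as __transform
    coefs, *_ = np.linalg.lstsq(V, y, rcond=None)  # least-squares fit
    mse = np.mean((y - V @ coefs) ** 2)            # MSE, as in _loss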
LvJC/cpp-libtorch | [
"4a56dda616bde50423591e7a4d4d7be6a978f6bf"
] | [
"MyModule.py"
] | [
"import torch\nimport torchvision\n\n# An instance of your model.\nmodel = torchvision.models.resnet18()\n\n# An example input you would normally provide to your model's forward() method.\nexample = torch.rand(1, 3, 224, 224)\n\n# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.\ntraced_script_module = torch.jit.trace(model, example)\n\n# save\ntraced_script_module.save(\"model.pt\")\n"
] | [
[
"torch.jit.trace",
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
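The MyModule.py entry above traces a ResNet-18 and saves it for consumption from C++. A natural sanity check before handing model.pt to libtorch is to load the serialized ScriptModule back in Python and compare outputs; a short sketch, assuming the same torch/torchvision install (model.eval() is added here so batch-norm statistics stay fixed during tracing):

    import torch
    import torchvision

    model = torchvision.models.resnet18()
    model.eval()  # freeze batch-norm/dropout before tracing
    example = torch.rand(1, 3, 224, 224)

    traced = torch.jit.trace(model, example)
    traced.save("model.pt")

    # Round trip: the reloaded ScriptModule should reproduce the traced output
    loaded = torch.jit.load("model.pt")
    with torch.no_grad():
        assert torch.allclose(traced(example), loaded(example))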
demarley/leopard | [
"52c5eb2dd732798972d429887c273f8449039c8f"
] | [
"python/deepLearningTorch.py"
] | [
"\"\"\"\nCreated: 16 August 2018\nLast Updated: 16 August 2018\n\nDan Marley\[email protected]\nTexas A&M University\n-----\n\nClass for performing deep learning in pytorch\n\nDesigned for running on desktop at TAMU\nwith specific set of software installed\n--> not guaranteed to work in CMSSW environment!\n\nDoes not use ROOT directly.\nInstead, this is setup to use flat ntuples\nthat are accessed via uproot.\n\n> UPROOT: https://github.com/scikit-hep/uproot\n> KERAS: https://keras.io/\n> TENSORFLOW: https://www.tensorflow.org/\n> PYTORCH: http://pytorch.org/\n> LWTNN: https://github.com/lwtnn/lwtnn\n\"\"\"\nimport json\nimport util\nimport datetime\nimport collections\n\nfrom deepLearning import DeepLearning\n\nimport uproot\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as tf\nfrom torch.autograd import Variable\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import roc_curve\n\n\nclass LeopardNet(nn.Module):\n \"\"\"Neural Network for Leopard in PyTorch\n Adapted from (16 August 2018)\n https://github.com/thongonary/surf18-tutorial/blob/master/tuto-8-torch.ipynb\n \"\"\"\n def __init__(self,layers):\n super(LeopardNet,self).__init__()\n self.dense = nn.ModuleList()\n for l,layer in enumerate(layers):\n self.dense.append( nn.Linear(layer['in'],layer['out']) )\n \n def forward(self, x): \n \"\"\"All the computation steps of the input are defined in this function\"\"\"\n nlayers = len(self.dense)\n for i,d in enumerate(self.dense):\n x = d(x)\n x = tf.relu(x) if i!=nlayers-1 else tf.sigmoid(x)\n return x\n\n\n\nclass DeepLearningTorch(DeepLearning):\n \"\"\"Deep Learning pytorch class\"\"\"\n def __init__(self):\n DeepLearning.__init__(self)\n\n ## PyTorch objects\n self.loss_fn = None # pytorch loss function\n self.torch_opt = None # pytorch optimizer\n\n def initialize(self): #,config):\n \"\"\"Initialize a few parameters after they've been set by user\"\"\"\n DeepLearning.initialize(self)\n return\n\n\n ## Specific functions to perform training/inference tasks\n def build_model(self):\n \"\"\"Construct the NN model -- only Keras support for now\"\"\"\n self.msg_svc.INFO(\"DLPYTORCH : Build the neural network model\")\n\n ## Declare the model\n layers = []\n layers.append( {'in':int(self.input_dim),'out':int(self.nNodes[0])} )\n for i,n in enumerate(self.nNodes):\n if i==len(self.nNodes)-1: continue\n layers.append( {'in':int(n),'out':int(self.nNodes[i+1])} )\n layers.append( {'in':int(self.nNodes[-1]),'out':self.output_dim} )\n\n self.model = LeopardNet(layers)\n self.model.cuda()\n\n self.loss_fn = torch.nn.BCELoss()\n self.torch_opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) #1e-4)\n\n return\n\n\n def train_epoch(self,X,Y):\n \"\"\"\"\"\"\n losses = []\n for beg_i in range(0, len(X), self.batch_size):\n x_batch = torch.from_numpy(X[beg_i:beg_i+self.batch_size,:])\n y_batch = torch.from_numpy(Y[beg_i:beg_i+self.batch_size])\n x_batch = Variable(x_batch).cuda()\n y_batch = Variable(y_batch).float().unsqueeze_(-1).cuda() # modify dimensions (X,) -> (X,1)\n\n self.torch_opt.zero_grad()\n\n y_hat = self.model(x_batch) # forward\n loss = self.loss_fn(y_hat, y_batch) # compute loss\n loss.backward() # compute gradients\n self.torch_opt.step() # update weights\n\n losses.append(loss.data.cpu().numpy())\n\n return losses\n\n\n\n def train_model(self):\n \"\"\"Setup for training the model using k-fold cross-validation\"\"\"\n X = self.df[self.features].values\n Y = 
self.df['target'].values\n\n kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True, random_state=seed)\n nsplits = kfold.get_n_splits(X,Y)\n cvpredictions = [] # compare outputs from each cross-validation\n\n self.msg_svc.INFO(\"DLPYTORCH : Fitting K-Fold cross validations\")\n for ind,(train,test) in enumerate(kfold.split(X,Y)):\n self.msg_svc.INFO(\"DLPYTORCH : - Fitting K-Fold {0}\".format(ind))\n\n Y_train = Y[train]\n Y_test = Y[test]\n\n # -- store test/train data from each k-fold as histograms (to compare later)\n h_tests = {}\n h_trains = {}\n for n,v in self.targets.iteritems():\n h_tests[n] = ROOT.TH1D(\"test_\"+n,\"test_\"+n,10,0,10)\n h_trains[n] = ROOT.TH1D(\"train_\"+n,\"train_\"+n,10,0,10)\n\n # fill histogram for each target\n for (n,v) in enumerate(self.targets.iteritems()):\n [h_tests[n].Fill(i) for i in X[test][np.where(Y_test==v)]]\n [h_trains[n].Fill(i) for i in X[train][np.where(Y_train==v)]]\n\n\n ## Fit the model to training data & save the history\n self.model.train()\n e_losses = []\n for t in range(self.epochs):\n e_losses += self.train_epoch(X[train],Y_train)\n self.msg_svc.INFO(\"DLPYTORCH : Epoch {0} -- Loss {1}\".format(t,e_losses[-1]))\n self.histories.append(e_losses)\n\n # evaluate the model\n self.msg_svc.DEBUG(\"DLPYTORCH : Evaluate the model: \")\n self.model.eval()\n\n # Evaluate training sample\n self.msg_svc.INFO(\"DLPYTORCH : Predictions from training sample\")\n train_predictions = self.predict(X[train])\n self.train_predictions.append(train_predictions)\n\n # Evaluate test sample\n self.msg_svc.INFO(\"DLPYTORCH : Predictions from testing sample\")\n test_predictions = self.predict(X[test])\n self.test_predictions.append(test_predictions)\n\n # Make ROC curve from test sample\n self.msg_svc.INFO(\"DLPYTORCH : Make ROC curves\")\n fpr,tpr,_ = roc_curve(Y[test], test_predictions)\n self.fpr.append(fpr)\n self.tpr.append(tpr)\n\n # Plot the predictions to compare test/train\n self.msg_svc.INFO(\"DLPYTORCH : Plot the train/test predictions\")\n self.plotter.prediction(h_trains,h_tests) # compare DNN prediction for different targets\n\n self.msg_svc.INFO(\"DLPYTORCH : Finished K-Fold cross-validation: \")\n self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)}\n self.msg_svc.INFO(\"DLPYTORCH : - Accuracy: {0:.2f}% (+/- {1:.2f}%)\".format(np.mean(cvpredictions), np.std(cvpredictions)))\n\n return\n\n\n def predict(self,data=None):\n \"\"\"Return the prediction from a test sample\"\"\"\n self.msg_svc.DEBUG(\"DLPYTORCH : Get the DNN prediction\")\n if data is None:\n self.msg_svc.ERROR(\"DLPYTORCH : predict() given NoneType data. Returning -999.\")\n return -999.\n data = torch.from_numpy(data)\n\n return self.model( Variable(data,volatile=True).cuda() )\n\n def load_model(self,from_lwtnn=False):\n \"\"\"Load existing model to make plots or predictions\"\"\"\n output = self.output_dir+'/'+self.model_name\n self.model.load_state_dict(torch.load(output))\n self.model.eval()\n return\n\n def save_model(self,to_lwtnn=False):\n \"\"\"Save the model for use later\"\"\"\n output = self.output_dir+'/'+self.model_name\n torch.save(self.model.state_dict(),output)\n return\n\n\n## THE END ##\n"
] | [
[
"torch.load",
"torch.nn.ModuleList",
"torch.from_numpy",
"sklearn.metrics.roc_curve",
"sklearn.model_selection.StratifiedKFold",
"torch.nn.BCELoss",
"torch.nn.Linear",
"numpy.std",
"torch.nn.functional.relu",
"numpy.mean",
"torch.nn.functional.sigmoid",
"numpy.where",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
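train_epoch in the leopard entry above is a standard mini-batch BCE loop: slice the arrays, forward pass, compute the loss, backpropagate, step the optimizer. A device-agnostic sketch of the same pattern on hypothetical toy data, dropping the deprecated Variable wrapper and the hard-coded .cuda() calls:

    import numpy as np
    import torch
    import torch.nn as nn

    rng = np.random.default_rng(0)
    X = rng.normal(size=(256, 4)).astype(np.float32)   # stands in for self.df[features]
    Y = (X[:, 0] + X[:, 1] > 0).astype(np.float32)     # binary targets

    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1), nn.Sigmoid())
    loss_fn = nn.BCELoss()
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)

    batch_size = 32
    for epoch in range(5):
        for beg in range(0, len(X), batch_size):
            xb = torch.from_numpy(X[beg:beg + batch_size])
            yb = torch.from_numpy(Y[beg:beg + batch_size]).unsqueeze(-1)  # (B,) -> (B, 1)
            opt.zero_grad()
            loss = loss_fn(model(xb), yb)   # forward + BCE loss
            loss.backward()                 # compute gradients
            opt.step()                      # update weights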
THU-DA-6D-Pose-Group/self6dpp | [
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16",
"c267cfa55e440e212136a5e9940598720fa21d16"
] | [
"core/csrc/torch_nndistance/test.py",
"core/self6dpp/datasets/ycbv_d2.py",
"core/self6dpp/tools/lm/lm_pbr_so_mlBCE_1_init_poses_error_distribution.py",
"core/self6dpp/datasets/ycbv_bop_test.py",
"core/utils/pose_utils.py",
"lib/torch_utils/color/hsv.py",
"lib/pysixd/view_sampler.py",
"lib/torch_utils/solver/lr_scheduler.py",
"det/yolov4/datasets/data_loader.py",
"core/deepim/tools/lmo/lmo_1_prepare_posecnn_init_pose.py",
"core/deepim/tools/lm/lm_1_prepare_posecnn_init_pose.py"
] | [
"import torch\nimport os.path as osp\nimport sys\nfrom torch.autograd import Variable\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nsys.path.insert(0, cur_dir)\nimport torch_nndistance as NND\n\n\np1 = torch.rand(10, 1000, 3)\np2 = torch.rand(10, 1500, 3)\npoints1 = Variable(p1, requires_grad=True)\npoints2 = p2\npoints1 = points1.cuda()\nprint(points1.requires_grad)\npoints2 = points2.cuda()\ndist1, dist2 = NND.nnd(points1, points2)\nprint(dist1, dist2)\nloss = torch.sum(dist1)\nprint(\"loss\", loss)\nloss.backward()\nprint(points1.grad, points2.grad)\n\nprint(\"====================\")\npoints1 = Variable(p1.cuda(), requires_grad=True)\npoints2 = p2.cuda()\ndist1, dist2 = NND.nnd(points1, points2)\nprint(dist1, dist2)\nloss = torch.sum(dist1)\nprint(\"loss\", loss)\nloss.backward()\nprint(points1.grad, points2.grad)\n",
"import hashlib\nimport copy\nimport logging\nimport os\nimport os.path as osp\nimport sys\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nPROJ_ROOT = osp.normpath(osp.join(cur_dir, \"../../..\"))\nsys.path.insert(0, PROJ_ROOT)\nimport time\nfrom collections import OrderedDict\nimport mmcv\nimport numpy as np\nfrom tqdm import tqdm\nfrom transforms3d.quaternions import mat2quat, quat2mat\nimport ref\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import BoxMode\nfrom lib.pysixd import inout, misc\nfrom lib.utils.mask_utils import binary_mask_to_rle, cocosegm2mask\nfrom lib.utils.utils import dprint, iprint, lazy_property\n\n\nlogger = logging.getLogger(__name__)\nDATASETS_ROOT = osp.normpath(osp.join(PROJ_ROOT, \"datasets\"))\n\n\nclass YCBV_Dataset:\n \"\"\"use image_sets(scene/image_id) and image root to get data; Here we use\n bop models, which are center aligned and have some offsets compared to\n original models.\"\"\"\n\n def __init__(self, data_cfg):\n \"\"\"\n Set with_depth and with_masks default to True,\n and decide whether to load them into dataloader/network later\n with_masks:\n \"\"\"\n self.name = data_cfg[\"name\"]\n self.data_cfg = data_cfg\n\n self.objs = data_cfg[\"objs\"] # selected objects\n\n self.ann_files = data_cfg[\"ann_files\"] # provide scene/im_id list\n self.image_prefixes = data_cfg[\"image_prefixes\"] # image root\n\n self.dataset_root = data_cfg[\"dataset_root\"] # BOP_DATASETS/ycbv/\n assert osp.exists(self.dataset_root), self.dataset_root\n self.models_root = data_cfg[\"models_root\"] # BOP_DATASETS/ycbv/models\n self.scale_to_meter = data_cfg[\"scale_to_meter\"] # 0.001\n\n self.with_masks = data_cfg[\"with_masks\"] # True (load masks but may not use it)\n self.with_depth = data_cfg[\"with_depth\"] # True (load depth path here, but may not use it)\n self.with_xyz = data_cfg[\"with_xyz\"]\n\n self.height = data_cfg[\"height\"] # 480\n self.width = data_cfg[\"width\"] # 640\n\n self.cache_dir = data_cfg.get(\"cache_dir\", osp.join(PROJ_ROOT, \".cache\")) # .cache\n self.use_cache = data_cfg.get(\"use_cache\", True)\n self.num_to_load = data_cfg[\"num_to_load\"] # -1\n self.filter_invalid = data_cfg[\"filter_invalid\"]\n\n self.align_K_by_change_pose = data_cfg.get(\"align_K_by_change_pose\", False)\n # default: 0000~0059 and synt\n self.cam = np.array(\n [\n [1066.778, 0.0, 312.9869],\n [0.0, 1067.487, 241.3109],\n [0.0, 0.0, 1.0],\n ],\n dtype=\"float32\",\n )\n # 0060~0091\n # cmu_cam = np.array([[1077.836, 0.0, 323.7872], [0.0, 1078.189, 279.6921], [0.0, 0.0, 1.0]], dtype='float32')\n ##################################################\n\n # NOTE: careful! 
Only the selected objects\n self.cat_ids = [cat_id for cat_id, obj_name in ref.ycbv.id2obj.items() if obj_name in self.objs]\n # map selected objs to [0, num_objs-1]\n self.cat2label = {v: i for i, v in enumerate(self.cat_ids)} # id_map\n self.label2cat = {label: cat for cat, label in self.cat2label.items()}\n self.obj2label = OrderedDict((obj, obj_id) for obj_id, obj in enumerate(self.objs))\n ##########################################################\n\n def _load_from_idx_file(self, idx_file, image_root):\n \"\"\"\n idx_file: the scene/image ids\n image_root/scene contains:\n scene_gt.json\n scene_gt_info.json\n scene_camera.json\n \"\"\"\n xyz_root = osp.join(image_root, \"xyz_crop\")\n scene_gt_dicts = {}\n scene_gt_info_dicts = {}\n scene_cam_dicts = {}\n scene_im_ids = [] # store tuples of (scene_id, im_id)\n with open(idx_file, \"r\") as f:\n for line in f:\n line_split = line.strip(\"\\r\\n\").split(\"/\")\n scene_id = int(line_split[0])\n im_id = int(line_split[1])\n scene_im_ids.append((scene_id, im_id))\n if scene_id not in scene_gt_dicts:\n scene_gt_file = osp.join(image_root, f\"{scene_id:06d}/scene_gt.json\")\n assert osp.exists(scene_gt_file), scene_gt_file\n scene_gt_dicts[scene_id] = mmcv.load(scene_gt_file)\n\n if scene_id not in scene_gt_info_dicts:\n scene_gt_info_file = osp.join(image_root, f\"{scene_id:06d}/scene_gt_info.json\")\n assert osp.exists(scene_gt_info_file), scene_gt_info_file\n scene_gt_info_dicts[scene_id] = mmcv.load(scene_gt_info_file)\n\n if scene_id not in scene_cam_dicts:\n scene_cam_file = osp.join(image_root, f\"{scene_id:06d}/scene_camera.json\")\n assert osp.exists(scene_cam_file), scene_cam_file\n scene_cam_dicts[scene_id] = mmcv.load(scene_cam_file)\n ######################################################\n scene_im_ids = sorted(scene_im_ids) # sort to make it reproducible\n dataset_dicts = []\n\n num_instances_without_valid_segmentation = 0\n num_instances_without_valid_box = 0\n\n for (scene_id, im_id) in tqdm(scene_im_ids):\n rgb_path = osp.join(image_root, f\"{scene_id:06d}/rgb/{im_id:06d}.png\")\n assert osp.exists(rgb_path), rgb_path\n str_im_id = str(im_id)\n\n scene_im_id = f\"{scene_id}/{im_id}\"\n\n # for ycbv/tless, load cam K from image infos\n cam_anno = np.array(scene_cam_dicts[scene_id][str_im_id][\"cam_K\"], dtype=np.float32).reshape(3, 3)\n adapth_this_K = False\n if self.align_K_by_change_pose:\n if (cam_anno != self.cam).any():\n adapth_this_K = True\n cam_anno_ori = cam_anno.copy()\n cam_anno = self.cam\n\n depth_factor = 1000.0 / scene_cam_dicts[scene_id][str_im_id][\"depth_scale\"]\n # dprint(record['cam'])\n if \"/train_synt/\" in rgb_path:\n img_type = \"syn\"\n else:\n img_type = \"real\"\n record = {\n \"dataset_name\": self.name,\n \"file_name\": osp.relpath(rgb_path, PROJ_ROOT),\n \"height\": self.height,\n \"width\": self.width,\n \"image_id\": self._unique_im_id,\n \"scene_im_id\": scene_im_id, # for evaluation\n \"cam\": cam_anno, # self.cam,\n \"depth_factor\": depth_factor,\n \"img_type\": img_type,\n }\n\n if self.with_depth:\n depth_file = osp.join(image_root, f\"{scene_id:06d}/depth/{im_id:06d}.png\")\n assert osp.exists(depth_file), depth_file\n record[\"depth_file\"] = osp.relpath(depth_file, PROJ_ROOT)\n\n insts = []\n anno_dict_list = scene_gt_dicts[scene_id][str(im_id)]\n info_dict_list = scene_gt_info_dicts[scene_id][str(im_id)]\n for anno_i, anno in enumerate(anno_dict_list):\n info = info_dict_list[anno_i]\n obj_id = anno[\"obj_id\"]\n if obj_id not in self.cat_ids:\n continue\n # 0-based label now\n 
cur_label = self.cat2label[obj_id]\n ################ pose ###########################\n R = np.array(anno[\"cam_R_m2c\"], dtype=\"float32\").reshape(3, 3)\n trans = np.array(anno[\"cam_t_m2c\"], dtype=\"float32\") / 1000.0 # mm->m\n pose = np.hstack([R, trans.reshape(3, 1)])\n if adapth_this_K:\n # pose_uw = inv(K_uw) @ K_cmu @ pose_cmu\n pose = np.linalg.inv(cam_anno) @ cam_anno_ori @ pose\n # R = pose[:3, :3]\n trans = pose[:3, 3]\n\n quat = mat2quat(pose[:3, :3])\n\n ############# bbox ############################\n bbox = info[\"bbox_obj\"]\n x1, y1, w, h = bbox\n x2 = x1 + w\n y2 = y1 + h\n x1 = max(min(x1, self.width), 0)\n y1 = max(min(y1, self.height), 0)\n x2 = max(min(x2, self.width), 0)\n y2 = max(min(y2, self.height), 0)\n bbox = [x1, y1, x2, y2]\n if self.filter_invalid:\n bw = bbox[2] - bbox[0]\n bh = bbox[3] - bbox[1]\n if bh <= 1 or bw <= 1:\n num_instances_without_valid_box += 1\n continue\n\n ############## mask #######################\n if self.with_masks: # either list[list[float]] or dict(RLE)\n mask_visib_file = osp.join(\n image_root,\n f\"{scene_id:06d}/mask_visib/{im_id:06d}_{anno_i:06d}.png\",\n )\n assert osp.exists(mask_visib_file), mask_visib_file\n mask = mmcv.imread(mask_visib_file, \"unchanged\")\n area = mask.sum()\n if area < 30 and self.filter_invalid:\n num_instances_without_valid_segmentation += 1\n continue\n mask_rle = binary_mask_to_rle(mask)\n\n mask_full_file = osp.join(\n image_root,\n f\"{scene_id:06d}/mask/{im_id:06d}_{anno_i:06d}.png\",\n )\n assert osp.exists(mask_full_file), mask_full_file\n\n # load mask full\n mask_full = mmcv.imread(mask_full_file, \"unchanged\")\n mask_full = mask_full.astype(\"bool\")\n mask_full_rle = binary_mask_to_rle(mask_full, compressed=True)\n\n proj = (self.cam @ trans.T).T # NOTE: use self.cam here\n proj = proj[:2] / proj[2]\n\n inst = {\n \"category_id\": cur_label, # 0-based label\n \"bbox\": bbox, # TODO: load both bbox_obj and bbox_visib\n \"bbox_mode\": BoxMode.XYXY_ABS,\n \"pose\": pose,\n \"quat\": quat,\n \"trans\": trans,\n \"centroid_2d\": proj, # absolute (cx, cy)\n \"segmentation\": mask_rle,\n \"mask_full\": mask_full_rle,\n }\n\n if self.with_xyz:\n xyz_path = osp.join(\n xyz_root,\n f\"{scene_id:06d}/{im_id:06d}_{anno_i:06d}-xyz.pkl\",\n )\n assert osp.exists(xyz_path), xyz_path\n inst[\"xyz_path\"] = xyz_path\n\n model_info = self.models_info[str(obj_id)]\n inst[\"model_info\"] = model_info\n # TODO: using full mask and full xyz\n for key in [\"bbox3d_and_center\"]:\n inst[key] = self.models[cur_label][key]\n insts.append(inst)\n if len(insts) == 0: # and self.filter_invalid:\n continue\n record[\"annotations\"] = insts\n dataset_dicts.append(record)\n self._unique_im_id += 1\n\n if num_instances_without_valid_segmentation > 0:\n logger.warning(\n \"Filtered out {} instances without valid segmentation. \"\n \"There might be issues in your dataset generation process.\".format(\n num_instances_without_valid_segmentation\n )\n )\n if num_instances_without_valid_box > 0:\n logger.warning(\n \"Filtered out {} instances without valid box. 
\"\n \"There might be issues in your dataset generation process.\".format(num_instances_without_valid_box)\n )\n return dataset_dicts\n\n def __call__(self): # YCBV_Dataset\n \"\"\"Load light-weight instance annotations of all images into a list of\n dicts in Detectron2 format.\n\n Do not load heavy data into memory in this file, since we will\n load the annotations of all images into memory.\n \"\"\"\n # cache the dataset_dicts to avoid loading masks from files\n hashed_file_name = hashlib.md5(\n (\n \"\".join([str(fn) for fn in self.objs])\n + \"dataset_dicts_{}_{}_{}_{}_{}_{}\".format(\n self.name,\n self.dataset_root,\n self.with_masks,\n self.with_depth,\n self.with_xyz,\n __name__,\n )\n ).encode(\"utf-8\")\n ).hexdigest()\n cache_path = osp.join(\n self.cache_dir,\n \"dataset_dicts_{}_{}.pkl\".format(self.name, hashed_file_name),\n )\n\n if osp.exists(cache_path) and self.use_cache:\n logger.info(\"load cached dataset dicts from {}\".format(cache_path))\n return mmcv.load(cache_path)\n\n logger.info(\"loading dataset dicts: {}\".format(self.name))\n t_start = time.perf_counter()\n dataset_dicts = []\n self._unique_im_id = 0\n for ann_file, image_root in zip(self.ann_files, self.image_prefixes):\n # logger.info(\"loading coco json: {}\".format(ann_file))\n dataset_dicts.extend(self._load_from_idx_file(ann_file, image_root))\n\n ##########################################################################\n if self.num_to_load > 0:\n self.num_to_load = min(int(self.num_to_load), len(dataset_dicts))\n dataset_dicts = dataset_dicts[: self.num_to_load]\n logger.info(\"loaded {} dataset dicts, using {}s\".format(len(dataset_dicts), time.perf_counter() - t_start))\n\n mmcv.mkdir_or_exist(osp.dirname(cache_path))\n mmcv.dump(dataset_dicts, cache_path, protocol=4)\n logger.info(\"Dumped dataset_dicts to {}\".format(cache_path))\n return dataset_dicts\n\n @lazy_property\n def models_info(self):\n models_info_path = osp.join(self.models_root, \"models_info.json\")\n assert osp.exists(models_info_path), models_info_path\n models_info = mmcv.load(models_info_path) # key is str(obj_id)\n return models_info\n\n @lazy_property\n def models(self):\n \"\"\"Load models into a list.\"\"\"\n cache_path = osp.join(self.models_root, \"models_{}.pkl\".format(self.name))\n if osp.exists(cache_path) and self.use_cache:\n # dprint(\"{}: load cached object models from {}\".format(self.name, cache_path))\n return mmcv.load(cache_path)\n\n models = []\n for obj_name in self.objs:\n model = inout.load_ply(\n osp.join(\n self.models_root,\n f\"obj_{ref.ycbv.obj2id[obj_name]:06d}.ply\",\n ),\n vertex_scale=self.scale_to_meter,\n )\n # NOTE: the bbox3d_and_center is not obtained from centered vertices\n # for BOP models, not a big problem since they had been centered\n model[\"bbox3d_and_center\"] = misc.get_bbox3d_and_center(model[\"pts\"])\n\n models.append(model)\n logger.info(\"cache models to {}\".format(cache_path))\n mmcv.dump(models, cache_path, protocol=4)\n return models\n\n def image_aspect_ratio(self):\n return self.width / self.height # 4/3\n\n\n########### register datasets ############################################################\n\n\ndef get_ycbv_metadata(obj_names, ref_key):\n \"\"\"task specific metadata.\"\"\"\n data_ref = ref.__dict__[ref_key]\n\n cur_sym_infos = {} # label based key\n loaded_models_info = data_ref.get_models_info()\n\n for i, obj_name in enumerate(obj_names):\n obj_id = data_ref.obj2id[obj_name]\n model_info = loaded_models_info[str(obj_id)]\n if \"symmetries_discrete\" in 
model_info or \"symmetries_continuous\" in model_info:\n sym_transforms = misc.get_symmetry_transformations(model_info, max_sym_disc_step=0.01)\n sym_info = np.array([sym[\"R\"] for sym in sym_transforms], dtype=np.float32)\n else:\n sym_info = None\n cur_sym_infos[i] = sym_info\n\n meta = {\"thing_classes\": obj_names, \"sym_infos\": cur_sym_infos}\n return meta\n\n\nycbv_model_root = \"BOP_DATASETS/ycbv/models/\"\n################################################################################\ndefault_cfg = dict(\n # name=\"ycbv_train_real\",\n dataset_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/\"),\n models_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/models\"), # models_simple\n objs=ref.ycbv.objects, # all objects\n # NOTE: this contains all classes\n # ann_files=[osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/image_sets/train.txt\")],\n # image_prefixes=[osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_real\")],\n scale_to_meter=0.001,\n with_masks=True, # (load masks but may not use it)\n with_depth=True, # (load depth path here, but may not use it)\n with_xyz=True,\n height=480,\n width=640,\n align_K_by_change_pose=False,\n cache_dir=osp.join(PROJ_ROOT, \".cache\"),\n use_cache=True,\n num_to_load=-1,\n filter_invalid=True,\n ref_key=\"ycbv\",\n)\nSPLITS_YCBV = {}\nupdate_cfgs = {\n \"ycbv_train_real\": {\n \"ann_files\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/image_sets/train.txt\")],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_real\")],\n },\n \"ycbv_train_real_aligned_Kuw\": {\n \"ann_files\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/image_sets/train.txt\")],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_real\")],\n \"align_K_by_change_pose\": True,\n },\n \"ycbv_train_real_uw\": {\n \"ann_files\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/image_sets/train_real_uw.txt\")],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_real\")],\n },\n \"ycbv_train_real_uw_every10\": {\n \"ann_files\": [\n osp.join(\n DATASETS_ROOT,\n \"BOP_DATASETS/ycbv/image_sets/train_real_uw_every10.txt\",\n )\n ],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_real\")],\n },\n \"ycbv_train_real_cmu\": {\n \"ann_files\": [\n osp.join(\n DATASETS_ROOT,\n \"BOP_DATASETS/ycbv/image_sets/train_real_cmu.txt\",\n )\n ],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_real\")],\n },\n \"ycbv_train_real_cmu_aligned_Kuw\": {\n \"ann_files\": [\n osp.join(\n DATASETS_ROOT,\n \"BOP_DATASETS/ycbv/image_sets/train_real_cmu.txt\",\n )\n ],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_real\")],\n \"align_K_by_change_pose\": True,\n },\n \"ycbv_train_synt\": {\n \"ann_files\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/image_sets/train_synt.txt\")],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_synt\")],\n },\n \"ycbv_train_synt_50k\": {\n \"ann_files\": [\n osp.join(\n DATASETS_ROOT,\n \"BOP_DATASETS/ycbv/image_sets/train_synt_50k.txt\",\n )\n ],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_synt\")],\n },\n \"ycbv_train_synt_30k\": {\n \"ann_files\": [\n osp.join(\n DATASETS_ROOT,\n \"BOP_DATASETS/ycbv/image_sets/train_synt_30k.txt\",\n )\n ],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_synt\")],\n },\n \"ycbv_train_synt_100\": {\n \"ann_files\": [\n osp.join(\n DATASETS_ROOT,\n \"BOP_DATASETS/ycbv/image_sets/train_synt_100.txt\",\n )\n ],\n 
\"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_synt\")],\n },\n \"ycbv_test\": {\n \"ann_files\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/image_sets/keyframe.txt\")],\n \"image_prefixes\": [osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/test\")],\n \"with_xyz\": False,\n \"filter_invalid\": False,\n },\n}\nfor name, update_cfg in update_cfgs.items():\n used_cfg = copy.deepcopy(default_cfg)\n used_cfg[\"name\"] = name\n used_cfg.update(update_cfg)\n num_to_load = -1\n if \"_100\" in name:\n num_to_load = 100\n used_cfg[\"num_to_load\"] = num_to_load\n SPLITS_YCBV[name] = used_cfg\n\n# single object splits ######################################################\nfor obj in ref.ycbv.objects:\n for split in [\n \"train_real\",\n \"train_real_aligned_Kuw\",\n \"train_real_uw\",\n \"train_real_uw_every10\",\n \"train_real_cmu\",\n \"train_real_cmu_aligned_Kuw\",\n \"train_synt\",\n \"train_synt_30k\",\n \"test\",\n ]:\n name = \"ycbv_{}_{}\".format(obj, split)\n if split in [\n \"train_real\",\n \"train_real_aligned_Kuw\",\n \"train_real_uw\",\n \"train_real_uw_every10\",\n \"train_real_cmu\",\n \"train_real_cmu_aligned_Kuw\",\n \"train_synt\",\n \"train_synt_30k\",\n ]:\n filter_invalid = True\n with_xyz = True\n elif split in [\"test\"]:\n filter_invalid = False\n with_xyz = False\n else:\n raise ValueError(\"{}\".format(split))\n\n if split in [\"train_real_aligned_Kuw\", \"train_real_cmu_aligned_Kuw\"]:\n align_K_by_change_pose = True\n else:\n align_K_by_change_pose = False\n\n split_idx_file_dict = {\n \"train_real\": (\"train_real\", \"train.txt\"),\n \"train_real_aligned_Kuw\": (\"train_real\", \"train.txt\"),\n \"train_real_uw\": (\"train_real\", \"train_real_uw.txt\"),\n \"train_real_uw_every10\": (\n \"train_real\",\n \"train_real_uw_every10.txt\",\n ),\n \"train_real_cmu\": (\"train_real\", \"train_real_cmu.txt\"),\n \"train_real_cmu_aligned_Kuw\": (\"train_real\", \"train_real_cmu.txt\"),\n \"train_synt\": (\"train_synt\", \"train_synt.txt\"),\n \"train_synt_30k\": (\"train_synt\", \"train_synt_30k.txt\"),\n \"test\": (\"test\", \"keyframe.txt\"),\n }\n root_name, idx_file = split_idx_file_dict[split]\n\n if name not in SPLITS_YCBV:\n SPLITS_YCBV[name] = dict(\n name=name,\n dataset_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/\"),\n models_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/models\"),\n objs=[obj],\n ann_files=[\n osp.join(\n DATASETS_ROOT,\n \"BOP_DATASETS/ycbv/image_sets/{}\".format(idx_file),\n )\n ],\n image_prefixes=[osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/{}\".format(root_name))],\n scale_to_meter=0.001,\n with_masks=True, # (load masks but may not use it)\n with_depth=True, # (load depth path here, but may not use it)\n with_xyz=with_xyz,\n height=480,\n width=640,\n align_K_by_change_pose=align_K_by_change_pose,\n cache_dir=osp.join(PROJ_ROOT, \".cache\"),\n use_cache=True,\n num_to_load=-1,\n filter_invalid=filter_invalid,\n ref_key=\"ycbv\",\n )\n\n\ndef register_with_name_cfg(name, data_cfg=None):\n \"\"\"Assume pre-defined datasets live in `./datasets`.\n\n Args:\n name: datasnet_name,\n data_cfg: if name is in existing SPLITS, use pre-defined data_cfg\n otherwise requires data_cfg\n data_cfg can be set in cfg.DATA_CFG.name\n \"\"\"\n dprint(\"register dataset: {}\".format(name))\n if name in SPLITS_YCBV:\n used_cfg = SPLITS_YCBV[name]\n else:\n assert (\n data_cfg is not None\n ), f\"dataset name {name} is not registered. 
available datasets: {list(SPLITS_YCBV.keys())}\"\n used_cfg = data_cfg\n DatasetCatalog.register(name, YCBV_Dataset(used_cfg))\n # something like eval_types\n MetadataCatalog.get(name).set(\n id=\"ycbv\", # NOTE: for pvnet to determine module\n ref_key=used_cfg[\"ref_key\"],\n objs=used_cfg[\"objs\"],\n eval_error_types=[\"ad\", \"rete\", \"proj\"],\n evaluator_type=\"bop\",\n **get_ycbv_metadata(obj_names=used_cfg[\"objs\"], ref_key=used_cfg[\"ref_key\"]),\n )\n\n\ndef get_available_datasets():\n return list(SPLITS_YCBV.keys())\n\n\n#### tests ###############################################\ndef test_vis():\n # python -m core.datasets.ycbv_d2 ycbv_test\n dataset_name = sys.argv[1]\n meta = MetadataCatalog.get(dataset_name)\n t_start = time.perf_counter()\n dicts = DatasetCatalog.get(dataset_name)\n with_xyz = False if \"test\" in dataset_name else True\n logger.info(\"Done loading {} samples with {:.3f}s.\".format(len(dicts), time.perf_counter() - t_start))\n\n dirname = \"output/ycbv_test-data-vis\"\n os.makedirs(dirname, exist_ok=True)\n objs = meta.objs\n for d in dicts:\n img = read_image_mmcv(d[\"file_name\"], format=\"BGR\")\n depth = mmcv.imread(d[\"depth_file\"], \"unchanged\") / 1000.0\n\n imH, imW = img.shape[:2]\n annos = d[\"annotations\"]\n masks = [cocosegm2mask(anno[\"segmentation\"], imH, imW) for anno in annos]\n bboxes = [anno[\"bbox\"] for anno in annos]\n bbox_modes = [anno[\"bbox_mode\"] for anno in annos]\n bboxes_xyxy = np.array(\n [BoxMode.convert(box, box_mode, BoxMode.XYXY_ABS) for box, box_mode in zip(bboxes, bbox_modes)]\n )\n kpts_3d_list = [anno[\"bbox3d_and_center\"] for anno in annos]\n quats = [anno[\"quat\"] for anno in annos]\n transes = [anno[\"trans\"] for anno in annos]\n Rs = [quat2mat(quat) for quat in quats]\n # 0-based label\n cat_ids = [anno[\"category_id\"] for anno in annos]\n K = d[\"cam\"]\n kpts_2d = [misc.project_pts(kpt3d, K, R, t) for kpt3d, R, t in zip(kpts_3d_list, Rs, transes)]\n # # TODO: visualize pose and keypoints\n labels = [objs[cat_id] for cat_id in cat_ids]\n for _i in range(len(annos)):\n img_vis = vis_image_mask_bbox_cv2(\n img,\n masks[_i : _i + 1],\n bboxes=bboxes_xyxy[_i : _i + 1],\n labels=labels[_i : _i + 1],\n )\n img_vis_kpts2d = misc.draw_projected_box3d(img_vis.copy(), kpts_2d[_i])\n if with_xyz:\n xyz_path = annos[_i][\"xyz_path\"]\n xyz_info = mmcv.load(xyz_path)\n x1, y1, x2, y2 = xyz_info[\"xyxy\"]\n xyz_crop = xyz_info[\"xyz_crop\"].astype(np.float32)\n xyz = np.zeros((imH, imW, 3), dtype=np.float32)\n xyz[y1 : y2 + 1, x1 : x2 + 1, :] = xyz_crop\n xyz_show = get_emb_show(xyz)\n xyz_crop_show = get_emb_show(xyz_crop)\n img_xyz = img.copy() / 255.0\n mask_xyz = ((xyz[:, :, 0] != 0) | (xyz[:, :, 1] != 0) | (xyz[:, :, 2] != 0)).astype(\"uint8\")\n fg_idx = np.where(mask_xyz != 0)\n img_xyz[fg_idx[0], fg_idx[1], :] = (\n 0.5 * xyz_show[fg_idx[0], fg_idx[1], :3] + 0.5 * img_xyz[fg_idx[0], fg_idx[1], :]\n )\n img_xyz_crop = img_xyz[y1 : y2 + 1, x1 : x2 + 1, :]\n img_vis_crop = img_vis[y1 : y2 + 1, x1 : x2 + 1, :]\n # diff mask\n diff_mask_xyz = np.abs(masks[_i] - mask_xyz)[y1 : y2 + 1, x1 : x2 + 1]\n\n grid_show(\n [\n img[:, :, [2, 1, 0]],\n img_vis[:, :, [2, 1, 0]],\n img_vis_kpts2d[:, :, [2, 1, 0]],\n depth,\n # xyz_show,\n diff_mask_xyz,\n xyz_crop_show,\n img_xyz[:, :, [2, 1, 0]],\n img_xyz_crop[:, :, [2, 1, 0]],\n img_vis_crop[:, :, ::-1],\n ],\n [\n \"img\",\n \"vis_img\",\n \"img_vis_kpts2d\",\n \"depth\",\n \"diff_mask_xyz\",\n \"xyz_crop_show\",\n \"img_xyz\",\n \"img_xyz_crop\",\n \"img_vis_crop\",\n ],\n 
row=3,\n col=3,\n )\n else:\n grid_show(\n [\n img[:, :, [2, 1, 0]],\n img_vis[:, :, [2, 1, 0]],\n img_vis_kpts2d[:, :, [2, 1, 0]],\n depth,\n ],\n [\"img\", \"vis_img\", \"img_vis_kpts2d\", \"depth\"],\n row=2,\n col=2,\n )\n\n\nif __name__ == \"__main__\":\n \"\"\"Test the dataset loader.\n\n Usage:\n python -m core.datasets.ycbv_d2 dataset_name\n \"dataset_name\" can be any pre-registered ones\n \"\"\"\n from lib.vis_utils.image import grid_show\n from lib.utils.setup_logger import setup_my_logger\n\n import detectron2.data.datasets # noqa # add pre-defined metadata\n from lib.vis_utils.image import vis_image_mask_bbox_cv2\n from core.utils.utils import get_emb_show\n from core.utils.data_utils import read_image_mmcv\n\n print(\"sys.argv:\", sys.argv)\n logger = setup_my_logger(name=\"core\")\n register_with_name_cfg(sys.argv[1])\n print(\"dataset catalog: \", DatasetCatalog.list())\n\n test_vis()\n",
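A minimal usage sketch of the registration flow defined above (assuming the module is importable as core.datasets.ycbv_d2, as in its __main__ docstring, and that the BOP_DATASETS files exist on disk):

from detectron2.data import DatasetCatalog, MetadataCatalog
from core.datasets import ycbv_d2  # the module above

ycbv_d2.register_with_name_cfg("ycbv_train_real_uw")  # one of the pre-defined SPLITS_YCBV keys
dicts = DatasetCatalog.get("ycbv_train_real_uw")      # runs YCBV_Dataset.__call__ (cached after the first run)
meta = MetadataCatalog.get("ycbv_train_real_uw")
print(len(dicts), meta.objs)
print(dicts[0]["scene_im_id"], len(dicts[0]["annotations"]))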
"import mmcv\nimport numpy as np\nimport os.path as osp\nimport sys\nimport math\nfrom tqdm import tqdm\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom transforms3d.euler import mat2euler\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nPROJ_ROOT = osp.normpath(osp.join(cur_dir, \"../../../../\"))\nsys.path.insert(0, PROJ_ROOT)\n\nfrom core.deepim.datasets.dataset_factory import register_datasets\nfrom lib.utils.utils import dprint\nfrom lib.pysixd.pose_error import re, te\nfrom lib.pysixd.RT_transform import calc_RT_delta, se3_mul\n\n\nid2obj = {\n 1: \"ape\",\n 2: \"benchvise\",\n # 3: \"bowl\",\n 4: \"camera\",\n 5: \"can\",\n 6: \"cat\",\n # 7: \"cup\",\n 8: \"driller\",\n 9: \"duck\",\n 10: \"eggbox\",\n 11: \"glue\",\n 12: \"holepuncher\",\n 13: \"iron\",\n 14: \"lamp\",\n 15: \"phone\",\n}\nobj_num = len(id2obj)\nobj2id = {_name: _id for _id, _name in id2obj.items()}\n\n\ndef main():\n init_pose_path = osp.join(\n PROJ_ROOT,\n \"datasets/BOP_DATASETS/lm/test/init_poses/resnest50d_a6_AugCosyAAEGray_BG05_mlBCE_lm_pbr_100e_so.json\",\n )\n dset_name = \"lm_13_test\"\n print(dset_name)\n register_datasets([dset_name])\n\n meta = MetadataCatalog.get(dset_name)\n print(\"MetadataCatalog: \", meta)\n objs = meta.objs\n\n dset_dicts = DatasetCatalog.get(dset_name)\n scene_im_id_to_gt_index = {d[\"scene_im_id\"]: i for i, d in enumerate(dset_dicts)}\n\n init_results = mmcv.load(init_pose_path)\n r_errors = {obj_name: [] for obj_name in obj2id}\n euler_x_errors = {obj_name: [] for obj_name in obj2id}\n euler_y_errors = {obj_name: [] for obj_name in obj2id}\n euler_z_errors = {obj_name: [] for obj_name in obj2id}\n t_errors = {obj_name: [] for obj_name in obj2id}\n tx_errors = {obj_name: [] for obj_name in obj2id}\n ty_errors = {obj_name: [] for obj_name in obj2id}\n tz_errors = {obj_name: [] for obj_name in obj2id}\n\n for scene_im_id, init_res in tqdm(init_results.items()):\n if scene_im_id not in scene_im_id_to_gt_index:\n dprint(\"{} not in gt dicts\".format(scene_im_id))\n gt_idx = scene_im_id_to_gt_index[scene_im_id]\n gt_dict = dset_dicts[gt_idx]\n gt_annos = gt_dict[\"annotations\"]\n for pred in init_res:\n pred_obj_id = pred[\"obj_id\"]\n pred_obj_name = id2obj[pred_obj_id]\n for gt_anno in gt_annos:\n gt_label = gt_anno[\"category_id\"]\n gt_obj = objs[gt_label]\n gt_obj_id = obj2id[gt_obj]\n if pred_obj_id == gt_obj_id:\n gt_pose = gt_anno[\"pose\"]\n break\n pred_pose = np.array(pred[\"pose_est\"])\n if pred_obj_name in [\"eggbox\", \"glue\"]:\n r_error = re(pred_pose[:3, :3], gt_pose[:3, :3])\n if r_error > 90:\n RT_z = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0]])\n pred_pose = se3_mul(pred_pose, RT_z)\n\n R_delta = np.dot(pred_pose[:3, :3].transpose(), gt_pose[:3, :3])\n euler_delta = mat2euler(R_delta)\n\n # compute errors\n r_error = re(pred_pose[:3, :3], gt_pose[:3, :3])\n euler_x_error = np.abs(euler_delta[0] * 180 / math.pi)\n euler_y_error = np.abs(euler_delta[1] * 180 / math.pi)\n euler_z_error = np.abs(euler_delta[2] * 180 / math.pi)\n\n t_error = te(pred_pose[:3, 3], gt_pose[:3, 3])\n tx_error = pred_pose[:3, 0] - gt_pose[:3, 0]\n ty_error = pred_pose[:3, 1] - gt_pose[:3, 1]\n tz_error = pred_pose[:3, 2] - gt_pose[:3, 2]\n\n # record errors\n r_errors[pred_obj_name].append(r_error)\n euler_x_errors[pred_obj_name].append(euler_x_error)\n euler_y_errors[pred_obj_name].append(euler_y_error)\n euler_z_errors[pred_obj_name].append(euler_z_error)\n t_errors[pred_obj_name].append(t_error)\n 
tx_errors[pred_obj_name].append(tx_error)\n ty_errors[pred_obj_name].append(ty_error)\n tz_errors[pred_obj_name].append(tz_error)\n\n # summarize\n for obj_name in obj2id:\n print(obj_name)\n cur_r_errors = np.array(r_errors[obj_name])\n cur_euler_x_errors = np.array(euler_x_errors[obj_name])\n cur_euler_y_errors = np.array(euler_y_errors[obj_name])\n cur_euler_z_errors = np.array(euler_z_errors[obj_name])\n cur_t_errors = np.array(t_errors[obj_name])\n cur_tx_errors = np.array(tx_errors[obj_name])\n cur_ty_errors = np.array(ty_errors[obj_name])\n cur_tz_errors = np.array(tz_errors[obj_name])\n print(\n \"r error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_r_errors.mean(),\n cur_r_errors.std(),\n cur_r_errors.min(),\n cur_r_errors.max(),\n np.median(cur_r_errors),\n )\n )\n print(\n \"euler_x error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_euler_x_errors.mean(),\n cur_euler_x_errors.std(),\n cur_euler_x_errors.min(),\n cur_euler_x_errors.max(),\n np.median(cur_euler_x_errors),\n )\n )\n print(\n \"euler_y error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_euler_y_errors.mean(),\n cur_euler_y_errors.std(),\n cur_euler_y_errors.min(),\n cur_euler_y_errors.max(),\n np.median(cur_euler_y_errors),\n )\n )\n print(\n \"euler_z error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_euler_z_errors.mean(),\n cur_euler_z_errors.std(),\n cur_euler_z_errors.min(),\n cur_euler_z_errors.max(),\n np.median(cur_euler_z_errors),\n )\n )\n print(\n \"t error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_t_errors.mean(),\n cur_t_errors.std(),\n cur_t_errors.min(),\n cur_t_errors.max(),\n np.median(cur_t_errors),\n )\n )\n print(\n \"tx error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_tx_errors.mean(),\n cur_tx_errors.std(),\n cur_tx_errors.min(),\n cur_tx_errors.max(),\n np.median(cur_tx_errors),\n )\n )\n print(\n \"ty error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_ty_errors.mean(),\n cur_ty_errors.std(),\n cur_ty_errors.min(),\n cur_ty_errors.max(),\n np.median(cur_ty_errors),\n )\n )\n print(\n \"tz error, mean: {} std: {} min: {} max: {} median: {}\".format(\n cur_tz_errors.mean(),\n cur_tz_errors.std(),\n cur_tz_errors.min(),\n cur_tz_errors.max(),\n np.median(cur_tz_errors),\n )\n )\n # Visualize distributions.\n plt.figure(dpi=200)\n row = 2\n col = 4\n bins = 100\n font_size = 8\n matplotlib.rcParams[\"xtick.labelsize\"] = 5\n matplotlib.rcParams[\"ytick.labelsize\"] = 5\n\n plt.subplot(row, col, 1)\n plt.hist(cur_r_errors, bins=bins)\n plt.title(\"r error\", fontsize=font_size)\n\n plt.subplot(row, col, 2)\n plt.hist(cur_euler_x_errors, bins=bins)\n plt.title(\"euler x error\", fontsize=font_size)\n\n plt.subplot(row, col, 3)\n plt.hist(cur_euler_y_errors, bins=bins)\n plt.title(\"euler y error\", fontsize=font_size)\n\n plt.subplot(row, col, 4)\n plt.hist(cur_euler_z_errors, bins=bins)\n plt.title(\"euler z error\", fontsize=font_size)\n\n plt.subplot(row, col, 5)\n plt.hist(cur_t_errors, bins=bins)\n plt.title(\"t error\", fontsize=font_size)\n\n plt.subplot(row, col, 6)\n plt.hist(cur_tx_errors, bins=bins)\n plt.title(\"tx error\", fontsize=font_size)\n\n plt.subplot(row, col, 7)\n plt.hist(cur_ty_errors, bins=bins)\n plt.title(\"ty error\", fontsize=font_size)\n\n plt.subplot(row, col, 8)\n plt.hist(cur_tz_errors, bins=bins)\n plt.title(\"tz error\", fontsize=font_size)\n\n plt.suptitle(\"{}\".format(obj_name), fontsize=font_size)\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
main()\n",
"import hashlib\nimport logging\nimport os\nimport os.path as osp\nimport sys\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nPROJ_ROOT = osp.normpath(osp.join(cur_dir, \"../../..\"))\nsys.path.insert(0, PROJ_ROOT)\nimport time\nfrom collections import OrderedDict\nimport mmcv\nimport numpy as np\nfrom tqdm import tqdm\nfrom transforms3d.quaternions import mat2quat, quat2mat\nimport ref\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import BoxMode\nfrom lib.pysixd import inout, misc\nfrom lib.utils.mask_utils import binary_mask_to_rle, cocosegm2mask\nfrom lib.utils.utils import dprint, iprint, lazy_property\n\n\nlogger = logging.getLogger(__name__)\nDATASETS_ROOT = osp.normpath(osp.join(PROJ_ROOT, \"datasets\"))\n\n\nclass YCBV_BOP_TEST_Dataset:\n \"\"\"ycbv bop test.\"\"\"\n\n def __init__(self, data_cfg):\n \"\"\"\n Set with_depth and with_masks default to True,\n and decide whether to load them into dataloader/network later\n with_masks:\n \"\"\"\n self.name = data_cfg[\"name\"]\n self.data_cfg = data_cfg\n\n self.objs = data_cfg[\"objs\"] # selected objects\n # all classes are self.objs, but this enables us to evaluate on selected objs\n self.select_objs = data_cfg.get(\"select_objs\", self.objs)\n\n self.ann_file = data_cfg[\"ann_file\"] # json file with scene_id and im_id items\n\n self.dataset_root = data_cfg[\"dataset_root\"] # BOP_DATASETS/ycbv/test\n self.models_root = data_cfg[\"models_root\"] # BOP_DATASETS/ycbv/models\n self.scale_to_meter = data_cfg[\"scale_to_meter\"] # 0.001\n\n self.with_masks = data_cfg[\"with_masks\"] # True (load masks but may not use it)\n self.with_depth = data_cfg[\"with_depth\"] # True (load depth path here, but may not use it)\n\n self.height = data_cfg[\"height\"] # 480\n self.width = data_cfg[\"width\"] # 640\n\n self.cache_dir = data_cfg.get(\"cache_dir\", osp.join(PROJ_ROOT, \".cache\")) # .cache\n self.use_cache = data_cfg.get(\"use_cache\", True)\n self.num_to_load = data_cfg[\"num_to_load\"] # -1\n self.filter_invalid = data_cfg[\"filter_invalid\"]\n ##################################################\n\n # NOTE: careful! 
Only the selected objects\n self.cat_ids = [cat_id for cat_id, obj_name in ref.ycbv.id2obj.items() if obj_name in self.objs]\n # map selected objs to [0, num_objs-1]\n self.cat2label = {v: i for i, v in enumerate(self.cat_ids)} # id_map\n self.label2cat = {label: cat for cat, label in self.cat2label.items()}\n self.obj2label = OrderedDict((obj, obj_id) for obj_id, obj in enumerate(self.objs))\n ##########################################################\n\n def __call__(self):\n \"\"\"Load light-weight instance annotations of all images into a list of\n dicts in Detectron2 format.\n\n Do not load heavy data into memory in this file, since we will\n load the annotations of all images into memory.\n \"\"\"\n # cache the dataset_dicts to avoid loading masks from files\n hashed_file_name = hashlib.md5(\n (\n \"\".join([str(fn) for fn in self.objs])\n + \"dataset_dicts_{}_{}_{}_{}_{}\".format(\n self.name,\n self.dataset_root,\n self.with_masks,\n self.with_depth,\n __name__,\n )\n ).encode(\"utf-8\")\n ).hexdigest()\n cache_path = osp.join(\n self.cache_dir,\n \"dataset_dicts_{}_{}.pkl\".format(self.name, hashed_file_name),\n )\n\n if osp.exists(cache_path) and self.use_cache:\n logger.info(\"load cached dataset dicts from {}\".format(cache_path))\n return mmcv.load(cache_path)\n\n t_start = time.perf_counter()\n\n logger.info(\"loading dataset dicts: {}\".format(self.name))\n self.num_instances_without_valid_segmentation = 0\n self.num_instances_without_valid_box = 0\n dataset_dicts = [] # ######################################################\n im_id_global = 0\n\n if True:\n targets = mmcv.load(self.ann_file)\n scene_im_ids = [(item[\"scene_id\"], item[\"im_id\"]) for item in targets]\n scene_im_ids = sorted(list(set(scene_im_ids)))\n\n # load infos for each scene\n gt_dicts = {}\n gt_info_dicts = {}\n cam_dicts = {}\n for scene_id, im_id in scene_im_ids:\n scene_root = osp.join(self.dataset_root, f\"{scene_id:06d}\")\n if scene_id not in gt_dicts:\n gt_dicts[scene_id] = mmcv.load(osp.join(scene_root, \"scene_gt.json\"))\n if scene_id not in gt_info_dicts:\n gt_info_dicts[scene_id] = mmcv.load(\n osp.join(scene_root, \"scene_gt_info.json\")\n ) # bbox_obj, bbox_visib\n if scene_id not in cam_dicts:\n cam_dicts[scene_id] = mmcv.load(osp.join(scene_root, \"scene_camera.json\"))\n\n for scene_id, im_id in tqdm(scene_im_ids):\n str_im_id = str(im_id)\n scene_root = osp.join(self.dataset_root, f\"{scene_id:06d}\")\n rgb_path = osp.join(scene_root, \"rgb/{:06d}.png\").format(im_id)\n assert osp.exists(rgb_path), rgb_path\n\n depth_path = osp.join(scene_root, \"depth/{:06d}.png\".format(im_id))\n\n scene_id = int(rgb_path.split(\"/\")[-3])\n\n cam = np.array(cam_dicts[scene_id][str_im_id][\"cam_K\"], dtype=np.float32).reshape(3, 3)\n depth_factor = 1000.0 / cam_dicts[scene_id][str_im_id][\"depth_scale\"]\n record = {\n \"dataset_name\": self.name,\n \"file_name\": osp.relpath(rgb_path, PROJ_ROOT),\n \"depth_file\": osp.relpath(depth_path, PROJ_ROOT),\n \"depth_factor\": depth_factor,\n \"height\": self.height,\n \"width\": self.width,\n \"image_id\": im_id_global, # unique image_id in the dataset, for coco evaluation\n \"scene_im_id\": \"{}/{}\".format(scene_id, im_id), # for evaluation\n \"cam\": cam,\n \"img_type\": \"real\",\n }\n im_id_global += 1\n insts = []\n for anno_i, anno in enumerate(gt_dicts[scene_id][str_im_id]):\n obj_id = anno[\"obj_id\"]\n if ref.ycbv.id2obj[obj_id] not in self.select_objs:\n continue\n cur_label = self.cat2label[obj_id] # 0-based label\n R = 
np.array(anno[\"cam_R_m2c\"], dtype=\"float32\").reshape(3, 3)\n t = np.array(anno[\"cam_t_m2c\"], dtype=\"float32\") / 1000.0\n pose = np.hstack([R, t.reshape(3, 1)])\n quat = mat2quat(R).astype(\"float32\")\n\n proj = (record[\"cam\"] @ t.T).T\n proj = proj[:2] / proj[2]\n\n bbox_visib = gt_info_dicts[scene_id][str_im_id][anno_i][\"bbox_visib\"]\n bbox_obj = gt_info_dicts[scene_id][str_im_id][anno_i][\"bbox_obj\"]\n x1, y1, w, h = bbox_visib\n if self.filter_invalid:\n if h <= 1 or w <= 1:\n self.num_instances_without_valid_box += 1\n continue\n\n mask_file = osp.join(\n scene_root,\n \"mask/{:06d}_{:06d}.png\".format(im_id, anno_i),\n )\n mask_visib_file = osp.join(\n scene_root,\n \"mask_visib/{:06d}_{:06d}.png\".format(im_id, anno_i),\n )\n assert osp.exists(mask_file), mask_file\n assert osp.exists(mask_visib_file), mask_visib_file\n # load mask visib\n mask_single = mmcv.imread(mask_visib_file, \"unchanged\")\n area = mask_single.sum()\n if area < 3: # filter out too small or nearly invisible instances\n self.num_instances_without_valid_segmentation += 1\n continue\n mask_rle = binary_mask_to_rle(mask_single, compressed=True)\n\n # load mask full\n mask_full = mmcv.imread(mask_file, \"unchanged\")\n mask_full = mask_full.astype(\"bool\")\n mask_full_rle = binary_mask_to_rle(mask_full, compressed=True)\n\n inst = {\n \"category_id\": cur_label, # 0-based label\n \"bbox\": bbox_visib, # TODO: load both bbox_obj and bbox_visib\n \"bbox_mode\": BoxMode.XYWH_ABS,\n \"pose\": pose,\n \"quat\": quat,\n \"trans\": t,\n \"centroid_2d\": proj, # absolute (cx, cy)\n \"segmentation\": mask_rle,\n \"mask_full\": mask_full_rle, # TODO: load as mask_full, rle\n }\n\n model_info = self.models_info[str(obj_id)]\n inst[\"model_info\"] = model_info\n # TODO: using full mask and full xyz\n for key in [\"bbox3d_and_center\"]:\n inst[key] = self.models[cur_label][key]\n insts.append(inst)\n if len(insts) == 0: # filter im without anno\n continue\n record[\"annotations\"] = insts\n dataset_dicts.append(record)\n\n if self.num_instances_without_valid_segmentation > 0:\n logger.warning(\n \"Filtered out {} instances without valid segmentation. \"\n \"There might be issues in your dataset generation process.\".format(\n self.num_instances_without_valid_segmentation\n )\n )\n if self.num_instances_without_valid_box > 0:\n logger.warning(\n \"Filtered out {} instances without valid box. 
\"\n \"There might be issues in your dataset generation process.\".format(self.num_instances_without_valid_box)\n )\n ##########################################################################\n if self.num_to_load > 0:\n self.num_to_load = min(int(self.num_to_load), len(dataset_dicts))\n dataset_dicts = dataset_dicts[: self.num_to_load]\n logger.info(\"loaded {} dataset dicts, using {}s\".format(len(dataset_dicts), time.perf_counter() - t_start))\n\n mmcv.mkdir_or_exist(osp.dirname(cache_path))\n mmcv.dump(dataset_dicts, cache_path, protocol=4)\n logger.info(\"Dumped dataset_dicts to {}\".format(cache_path))\n return dataset_dicts\n\n @lazy_property\n def models_info(self):\n models_info_path = osp.join(self.models_root, \"models_info.json\")\n assert osp.exists(models_info_path), models_info_path\n models_info = mmcv.load(models_info_path) # key is str(obj_id)\n return models_info\n\n @lazy_property\n def models(self):\n \"\"\"Load models into a list.\"\"\"\n cache_path = osp.join(self.models_root, f\"models_{self.name}.pkl\")\n if osp.exists(cache_path) and self.use_cache:\n # dprint(\"{}: load cached object models from {}\".format(self.name, cache_path))\n return mmcv.load(cache_path)\n\n models = []\n for obj_name in self.objs:\n model = inout.load_ply(\n osp.join(\n self.models_root,\n f\"obj_{ref.ycbv.obj2id[obj_name]:06d}.ply\",\n ),\n vertex_scale=self.scale_to_meter,\n )\n # NOTE: the bbox3d_and_center is not obtained from centered vertices\n # for BOP models, not a big problem since they had been centered\n model[\"bbox3d_and_center\"] = misc.get_bbox3d_and_center(model[\"pts\"])\n\n models.append(model)\n logger.info(\"cache models to {}\".format(cache_path))\n mmcv.dump(models, cache_path, protocol=4)\n return models\n\n def image_aspect_ratio(self):\n return self.width / self.height # 4/3\n\n\n########### register datasets ############################################################\n\n\ndef get_ycbv_metadata(obj_names, ref_key):\n \"\"\"task specific metadata.\"\"\"\n data_ref = ref.__dict__[ref_key]\n\n cur_sym_infos = {} # label based key\n loaded_models_info = data_ref.get_models_info()\n\n for i, obj_name in enumerate(obj_names):\n obj_id = data_ref.obj2id[obj_name]\n model_info = loaded_models_info[str(obj_id)]\n if \"symmetries_discrete\" in model_info or \"symmetries_continuous\" in model_info:\n sym_transforms = misc.get_symmetry_transformations(model_info, max_sym_disc_step=0.01)\n sym_info = np.array([sym[\"R\"] for sym in sym_transforms], dtype=np.float32)\n else:\n sym_info = None\n cur_sym_infos[i] = sym_info\n\n meta = {\"thing_classes\": obj_names, \"sym_infos\": cur_sym_infos}\n return meta\n\n\n################################################################################\n\nSPLITS_YCBV = dict(\n ycbv_bop_test=dict(\n name=\"ycbv_bop_test\",\n dataset_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/test\"),\n models_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/models\"),\n objs=ref.ycbv.objects, # selected objects\n ann_file=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/test_targets_bop19.json\"),\n scale_to_meter=0.001,\n with_masks=True, # (load masks but may not use it)\n with_depth=True, # (load depth path here, but may not use it)\n height=480,\n width=640,\n cache_dir=osp.join(PROJ_ROOT, \".cache\"),\n use_cache=True,\n num_to_load=-1,\n filter_invalid=False,\n ref_key=\"ycbv\",\n )\n)\n\n\n# single objs (num_class is from all objs)\nfor obj in ref.ycbv.objects:\n name = \"ycbv_bop_{}_test\".format(obj)\n select_objs = [obj]\n if name not in 
SPLITS_YCBV:\n SPLITS_YCBV[name] = dict(\n name=name,\n dataset_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/test\"),\n models_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/models\"),\n objs=ref.ycbv.objects,\n select_objs=select_objs, # selected objects\n ann_file=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/test_targets_bop19.json\"),\n scale_to_meter=0.001,\n with_masks=True, # (load masks but may not use it)\n with_depth=True, # (load depth path here, but may not use it)\n height=480,\n width=640,\n cache_dir=osp.join(PROJ_ROOT, \".cache\"),\n use_cache=True,\n num_to_load=-1,\n filter_invalid=False,\n ref_key=\"ycbv\",\n )\n\n\ndef register_with_name_cfg(name, data_cfg=None):\n \"\"\"Assume pre-defined datasets live in `./datasets`.\n\n Args:\n name: dataset_name,\n data_cfg: if name is in existing SPLITS, use pre-defined data_cfg\n otherwise requires data_cfg\n data_cfg can be set in cfg.DATA_CFG.name\n \"\"\"\n dprint(\"register dataset: {}\".format(name))\n if name in SPLITS_YCBV:\n used_cfg = SPLITS_YCBV[name]\n else:\n assert data_cfg is not None, f\"dataset name {name} is not registered\"\n used_cfg = data_cfg\n DatasetCatalog.register(name, YCBV_BOP_TEST_Dataset(used_cfg))\n # something like eval_types\n MetadataCatalog.get(name).set(\n id=\"ycbv\", # NOTE: for pvnet to determine module\n ref_key=used_cfg[\"ref_key\"],\n objs=used_cfg[\"objs\"],\n eval_error_types=[\"ad\", \"rete\", \"proj\"],\n evaluator_type=\"bop\",\n **get_ycbv_metadata(obj_names=used_cfg[\"objs\"], ref_key=used_cfg[\"ref_key\"]),\n )\n\n\ndef get_available_datasets():\n return list(SPLITS_YCBV.keys())\n\n\n#### tests ###############################################\ndef test_vis():\n dset_name = sys.argv[1]\n assert dset_name in DatasetCatalog.list()\n\n meta = MetadataCatalog.get(dset_name)\n dprint(\"MetadataCatalog: \", meta)\n objs = meta.objs\n\n t_start = time.perf_counter()\n dicts = DatasetCatalog.get(dset_name)\n logger.info(\"Done loading {} samples with {:.3f}s.\".format(len(dicts), time.perf_counter() - t_start))\n\n dirname = \"output/{}-data-vis\".format(dset_name)\n os.makedirs(dirname, exist_ok=True)\n for d in dicts:\n img = read_image_mmcv(d[\"file_name\"], format=\"BGR\")\n depth = mmcv.imread(d[\"depth_file\"], \"unchanged\") / 1000.0\n\n imH, imW = img.shape[:2]\n annos = d[\"annotations\"]\n masks = [cocosegm2mask(anno[\"segmentation\"], imH, imW) for anno in annos]\n bboxes = [anno[\"bbox\"] for anno in annos]\n bbox_modes = [anno[\"bbox_mode\"] for anno in annos]\n bboxes_xyxy = np.array(\n [BoxMode.convert(box, box_mode, BoxMode.XYXY_ABS) for box, box_mode in zip(bboxes, bbox_modes)]\n )\n kpts_3d_list = [anno[\"bbox3d_and_center\"] for anno in annos]\n quats = [anno[\"quat\"] for anno in annos]\n transes = [anno[\"trans\"] for anno in annos]\n Rs = [quat2mat(quat) for quat in quats]\n # 0-based label\n cat_ids = [anno[\"category_id\"] for anno in annos]\n K = d[\"cam\"]\n kpts_2d = [misc.project_pts(kpt3d, K, R, t) for kpt3d, R, t in zip(kpts_3d_list, Rs, transes)]\n # # TODO: visualize pose and keypoints\n labels = [objs[cat_id] for cat_id in cat_ids]\n # img_vis = vis_image_bboxes_cv2(img, bboxes=bboxes_xyxy, labels=labels)\n img_vis = vis_image_mask_bbox_cv2(img, masks, bboxes=bboxes_xyxy, labels=labels)\n img_vis_kpts2d = img.copy()\n for anno_i in range(len(annos)):\n img_vis_kpts2d = misc.draw_projected_box3d(img_vis_kpts2d, kpts_2d[anno_i])\n grid_show(\n [\n img[:, :, [2, 1, 0]],\n img_vis[:, :, [2, 1, 0]],\n img_vis_kpts2d[:, :, [2, 1, 0]],\n depth,\n ],\n 
[f\"img:{d['file_name']}\", \"vis_img\", \"img_vis_kpts2d\", \"depth\"],\n row=2,\n col=2,\n )\n\n\nif __name__ == \"__main__\":\n \"\"\"Test the dataset loader.\n\n Usage:\n python -m core.datasets.ycbv_bop_test dataset_name\n \"\"\"\n from lib.vis_utils.image import grid_show\n from lib.utils.setup_logger import setup_my_logger\n\n import detectron2.data.datasets # noqa # add pre-defined metadata\n from core.utils.data_utils import read_image_mmcv\n from lib.vis_utils.image import vis_image_mask_bbox_cv2\n\n print(\"sys.argv:\", sys.argv)\n logger = setup_my_logger(name=\"core\")\n register_with_name_cfg(sys.argv[1])\n print(\"dataset catalog: \", DatasetCatalog.list())\n\n test_vis()\n",
"\"\"\"\nref:\nhttps://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py\nhttps://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py\nhttps://github.com/arraiyopensource/kornia/blob/master/kornia/geometry/conversions.py\n\"\"\"\nfrom math import acos, cos, pi, sin\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom numba import jit, njit\nfrom numpy import linalg as LA\nfrom transforms3d.axangles import axangle2mat, mat2axangle\nfrom transforms3d.euler import (\n _AXES2TUPLE,\n _NEXT_AXIS,\n _TUPLE2AXES,\n euler2mat,\n euler2quat,\n mat2euler,\n quat2euler,\n)\nfrom transforms3d.quaternions import mat2quat, quat2mat\n\nfrom lib.pysixd.pose_error import re\n\npixel_coords = None\n\n\ndef rot_from_axangle_chain(ax_angles):\n \"\"\"\n ax_angles: eg. [(1, 0, 0, 0.5), (0, 0, 1, -0.7)], the last value will be multiplied by pi\n \"\"\"\n return np.linalg.multi_dot([axangle2mat(_axa[:3], np.pi * _axa[3]) for _axa in ax_angles])\n\n\ndef qmul_torch(q, r):\n \"\"\"Multiply quaternion(s) q with quaternion(s) r.\n\n Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.\n Returns q*r as a tensor of shape (*, 4).\n \"\"\"\n assert q.shape[-1] == 4\n assert r.shape[-1] == 4\n\n original_shape = q.shape\n\n # Compute outer product\n terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))\n\n w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]\n x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]\n y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]\n z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]\n return torch.stack((w, x, y, z), dim=1).view(original_shape)\n\n\ndef qrot_torch(q, v):\n \"\"\"Rotate vector(s) v about the rotation described by quaternion(s) q.\n Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,\n\n where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n # qmult(q, qmult(varr, qconjugate(q)))[1:]\n \"\"\"\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n original_shape = list(v.shape)\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, v, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)\n\n\ndef qrot_points_th(q, points):\n \"\"\"\n q: (4,)\n points: (N, 3)\n \"\"\"\n assert q.numel() == 4, q.numel()\n assert points.shape[1] == 3, points.shape[1]\n N = points.shape[0]\n points_q = qrot_torch(q.expand(N, 4), points)\n return points_q\n\n\ndef euler2quat_torch(ai, aj, ak, axes=\"sxyz\"):\n \"\"\"slower than numpy version batch.\"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis + 1\n j = _NEXT_AXIS[i + parity - 1] + 1\n k = _NEXT_AXIS[i - parity] + 1\n # print(i, j, k)\n\n ai, aj, ak = ai.clone(), aj.clone(), ak.clone()\n if frame:\n ai, ak = ak, ai\n if parity:\n aj = -aj\n\n ai /= 2.0\n aj /= 2.0\n ak /= 2.0\n ci = torch.cos(ai)\n si = torch.sin(ai)\n cj = torch.cos(aj)\n sj = torch.sin(aj)\n ck = torch.cos(ak)\n sk = torch.sin(ak)\n cc = ci * ck\n cs = ci * sk\n sc = si * ck\n ss = si * sk\n\n B = len(ai)\n if repetition:\n qw = cj * (cc - ss)\n qi = cj * (cs + sc)\n qj = sj * (cc + ss)\n qk = sj * (cs - sc)\n else:\n qw = cj * cc + sj * ss\n qi = cj * sc - sj * cs\n qj = cj 
* ss + sj * cc\n qk = cj * cs - sj * sc\n if parity:\n qj *= -1.0\n order = {i: 1, j: 2, k: 3}\n q = torch.stack((qw, qi, qj, qk), dim=1)[:, [0, order[1], order[2], order[3]]]\n if B == 1:\n q = q.view(4)\n return q\n\n\ndef quat2euler_torch(q, order=\"zyx\", epsilon=0):\n \"\"\"NOTE: zyx is the same as sxyz in transforms3d\n https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py\n # i,j,k ==> zyx\n Convert quaternion(s) q to Euler angles.\n Expects a tensor of shape (*, 4), where * denotes any number of dimensions.\n Returns a tensor of shape (*, 3).\n \"\"\"\n assert q.shape[-1] == 4\n norm_quat = q.norm(p=2, dim=-1, keepdim=True)\n # print('norm_quat: ', norm_quat) # Bx1\n q = q / norm_quat\n # print(q)\n\n original_shape = list(q.shape)\n original_shape[-1] = 3\n q = q.view(-1, 4)\n\n q0 = q[:, 0]\n q1 = q[:, 1]\n q2 = q[:, 2]\n q3 = q[:, 3]\n\n if order == \"xyz\":\n x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))\n y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon))\n z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))\n elif order == \"yzx\":\n x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))\n y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))\n z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon))\n elif order == \"zxy\":\n x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon))\n y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2))\n z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3))\n elif order == \"xzy\":\n x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))\n y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))\n z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon))\n elif order == \"yxz\":\n x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon))\n y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2))\n z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3))\n elif order == \"zyx\":\n x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))\n y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon))\n z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))\n else:\n raise ValueError(\"unsupported euler angle order: {}\".format(order))\n\n return torch.stack((x, y, z), dim=1).view(original_shape)\n\n\ndef set_id_grid(depth):\n global pixel_coords\n b, h, w = depth.size()\n i_range = torch.arange(0, h).view(1, h, 1).expand(1, h, w).type_as(depth) # [1, H, W]\n j_range = torch.arange(0, w).view(1, 1, w).expand(1, h, w).type_as(depth) # [1, H, W]\n ones = torch.ones(1, h, w).type_as(depth)\n\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n\n\ndef check_sizes(input, input_name, expected):\n condition = [input.ndimension() == len(expected)]\n for i, size in enumerate(expected):\n if size.isdigit():\n condition.append(input.size(i) == int(size))\n assert all(condition), \"wrong size for {}, expected {}, got {}\".format(\n input_name, \"x\".join(expected), list(input.size())\n )\n\n\ndef pixel2cam(depth, intrinsics_inv):\n \"\"\"Transform coordinates in the pixel frame to the camera frame.\n Args:\n depth: depth maps -- [B, H, W]\n intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]\n Returns:\n array of (u,v,1) cam coordinates -- [B, 3, H, W]\n \"\"\"\n global pixel_coords\n b, h, w = depth.size()\n if (pixel_coords 
is None) or pixel_coords.size(2) < h:\n set_id_grid(depth)\n current_pixel_coords = pixel_coords[:, :, :h, :w].expand(b, 3, h, w).reshape(b, 3, -1) # [B, 3, H*W]\n cam_coords = (intrinsics_inv @ current_pixel_coords).reshape(b, 3, h, w)\n return cam_coords * depth.unsqueeze(1)\n\n\ndef cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):\n \"\"\"Transform coordinates in the camera frame to the pixel frame.\n\n Args:\n cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]\n proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]\n proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]\n Returns:\n array of [-1,1] coordinates -- [B, 2, H, W]\n \"\"\"\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.reshape(b, 3, -1) # [B, 3, H*W]\n if proj_c2p_rot is not None:\n pcoords = proj_c2p_rot @ cam_coords_flat\n else:\n pcoords = cam_coords_flat\n\n if proj_c2p_tr is not None:\n pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]\n X = pcoords[:, 0]\n Y = pcoords[:, 1]\n Z = pcoords[:, 2].clamp(min=1e-3)\n\n X_norm = 2 * (X / Z) / (w - 1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2 * (Y / Z) / (h - 1) - 1 # Idem [B, H*W]\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.reshape(b, h, w, 2)\n\n\ndef euler2mat_torch(angle):\n \"\"\"Convert euler angles to rotation matrix.\n\n Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174\n Args:\n angle: rotation angle along 3 axis (in radians) -- size = [B, 3]\n Returns:\n Rotation matrix corresponding to the euler angles -- size = [B, 3, 3]\n \"\"\"\n B = angle.size(0)\n x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach() * 0\n ones = zeros.detach() + 1\n zmat = torch.stack([cosz, -sinz, zeros, sinz, cosz, zeros, zeros, zeros, ones], dim=1).reshape(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny, zeros, ones, zeros, -siny, zeros, cosy], dim=1).reshape(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros, zeros, cosx, -sinx, zeros, sinx, cosx], dim=1).reshape(B, 3, 3)\n\n rotMat = xmat @ ymat @ zmat\n return rotMat\n\n\ndef axangle2mat_torch(axis, angle, is_normalized=False):\n \"\"\"Rotation matrix for rotation angle `angle` around `axis`\n Parameters\n ----------\n axis : [B, 3] element sequence\n vector specifying axis for rotation.\n angle :[B, ] scalar\n angle of rotation in radians.\n is_normalized : bool, optional\n True if `axis` is already normalized (has norm of 1). 
Default False.\n Returns\n -------\n mat : array shape (B, 3,3)\n rotation matrix for specified rotation\n Notes\n -----\n From: http://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle\n \"\"\"\n B = axis.shape[0]\n\n if not is_normalized:\n norm_axis = axis.norm(p=2, dim=1, keepdim=True)\n normed_axis = axis / norm_axis\n else:\n normed_axis = axis\n x, y, z = normed_axis[:, 0], normed_axis[:, 1], normed_axis[:, 2]\n c = torch.cos(angle)\n s = torch.sin(angle)\n C = 1 - c\n # yapf: disable\n xs = x * s; ys = y * s; zs = z * s # noqa\n xC = x * C; yC = y * C; zC = z * C # noqa\n xyC = x * yC; yzC = y * zC; zxC = z * xC # noqa\n # yapf: enable\n return torch.stack(\n [\n x * xC + c,\n xyC - zs,\n zxC + ys,\n xyC + zs,\n y * yC + c,\n yzC - xs,\n zxC - ys,\n yzC + xs,\n z * zC + c,\n ],\n dim=1,\n ).reshape(B, 3, 3)\n\n\ndef quat2mat_torch(quat, eps=0.0):\n \"\"\"Convert quaternion coefficients to rotation matrix.\n\n Args:\n quat: [B, 4]\n Returns:\n Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]\n \"\"\"\n assert quat.ndim == 2 and quat.shape[1] == 4, quat.shape\n norm_quat = quat.norm(p=2, dim=1, keepdim=True)\n # print('quat', quat) # Bx4\n # print('norm_quat: ', norm_quat) # Bx1\n norm_quat = quat / (norm_quat + eps)\n # print('normed quat: ', norm_quat)\n qw, qx, qy, qz = (\n norm_quat[:, 0],\n norm_quat[:, 1],\n norm_quat[:, 2],\n norm_quat[:, 3],\n )\n B = quat.size(0)\n\n s = 2.0 # * Nq = qw*qw + qx*qx + qy*qy + qz*qz\n X = qx * s\n Y = qy * s\n Z = qz * s\n wX = qw * X\n wY = qw * Y\n wZ = qw * Z\n xX = qx * X\n xY = qx * Y\n xZ = qx * Z\n yY = qy * Y\n yZ = qy * Z\n zZ = qz * Z\n rotMat = torch.stack(\n [\n 1.0 - (yY + zZ),\n xY - wZ,\n xZ + wY,\n xY + wZ,\n 1.0 - (xX + zZ),\n yZ - wX,\n xZ - wY,\n yZ + wX,\n 1.0 - (xX + yY),\n ],\n dim=1,\n ).reshape(B, 3, 3)\n\n # rotMat = torch.stack([\n # qw * qw + qx * qx - qy * qy - qz * qz, 2 * (qx * qy - qw * qz), 2 * (qx * qz + qw * qy),\n # 2 * (qx * qy + qw * qz), qw * qw - qx * qx + qy * qy - qz * qz, 2 * (qy * qz - qw * qx),\n # 2 * (qx * qz - qw * qy), 2 * (qy * qz + qw * qx), qw * qw - qx * qx - qy * qy + qz * qz],\n # dim=1).reshape(B, 3, 3)\n\n # w2, x2, y2, z2 = qw*qw, qx*qx, qy*qy, qz*qz\n # wx, wy, wz = qw*qx, qw*qy, qw*qz\n # xy, xz, yz = qx*qy, qx*qz, qy*qz\n\n # rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n # 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n # 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\n return rotMat\n\n\ndef pose_vec2mat(vec, rotation_mode=\"quat\"):\n \"\"\"Convert 6DoF parameters to transformation matrix.\n\n Args:s\n vec: 6DoF parameters in the order of\n \"euler\": rx, ry, rz, tx, ty, tz -- [B, 6]\n \"quat\": qw, qx, qy, qz, tx, ty, tz -- [B, 7]\n Returns:\n A transformation matrix -- [B, 3, 4]\n \"\"\"\n if rotation_mode == \"euler\":\n rot = vec[:, :3]\n translation = vec[:, 3:6].unsqueeze(-1) # [B, 3, 1]\n rot_mat = euler2mat_torch(rot) # [B, 3, 3]\n elif rotation_mode == \"quat\":\n rot = vec[:, :4]\n translation = vec[:, 4:7].unsqueeze(-1) # [B, 3, 1]\n rot_mat = quat2mat_torch(rot) # [B, 3, 3]\n transform_mat = torch.cat([rot_mat, translation], dim=2) # [B, 3, 4]\n return transform_mat\n\n\ndef inverse_warp(img, depth, pose, intrinsics, rotation_mode=\"euler\", padding_mode=\"zeros\"):\n \"\"\"Inverse warp a source image to the target image plane.\n\n Args:\n img: the source image (where to sample pixels) -- [B, 3, H, W]\n depth: depth map of the target image -- [B, H, W]\n pose: 6DoF pose parameters from target to source -- 
[B, 6] / [B, 7]\n intrinsics: camera intrinsic matrix -- [B, 3, 3]\n Returns:\n projected_img: Source image warped to the target image plane\n valid_points: Boolean array indicating point validity\n \"\"\"\n check_sizes(img, \"img\", \"B3HW\")\n check_sizes(depth, \"depth\", \"BHW\")\n check_sizes(pose, \"pose\", \"B6\")\n check_sizes(intrinsics, \"intrinsics\", \"B33\")\n\n batch_size, _, img_height, img_width = img.size()\n\n cam_coords = pixel2cam(depth, intrinsics.inverse()) # [B,3,H,W]\n\n pose_mat = pose_vec2mat(pose, rotation_mode) # [B,3,4]\n\n # Get projection matrix for tgt camera frame to source pixel frame\n proj_cam_to_src_pixel = intrinsics @ pose_mat # [B, 3, 4]\n\n rot, tr = proj_cam_to_src_pixel[:, :, :3], proj_cam_to_src_pixel[:, :, -1:]\n src_pixel_coords = cam2pixel(cam_coords, rot, tr, padding_mode) # [B,H,W,2]\n projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)\n\n valid_points = src_pixel_coords.abs().max(dim=-1)[0] <= 1\n\n return projected_img, valid_points\n\n\ndef get_closest_rot(rot_est, rot_gt, sym_info):\n \"\"\"get the closest rot_gt given rot_est and sym_info.\n\n rot_est: ndarray\n rot_gt: ndarray\n sym_info: None or Kx3x3 ndarray, m2m\n \"\"\"\n if sym_info is None:\n return rot_gt\n if isinstance(sym_info, torch.Tensor):\n sym_info = sym_info.cpu().numpy()\n if len(sym_info.shape) == 2:\n sym_info = sym_info.reshape((1, 3, 3))\n # find the closest rot_gt with smallest re\n r_err = re(rot_est, rot_gt)\n closest_rot_gt = rot_gt\n for i in range(sym_info.shape[0]):\n # R_gt_m2c x R_sym_m2m ==> R_gt_sym_m2c\n rot_gt_sym = rot_gt.dot(sym_info[i])\n cur_re = re(rot_est, rot_gt_sym)\n if cur_re < r_err:\n r_err = cur_re\n closest_rot_gt = rot_gt_sym\n\n return closest_rot_gt\n\n\ndef get_closest_rot_batch(pred_rots, gt_rots, sym_infos):\n \"\"\"\n get closest gt_rots according to current predicted poses_est and sym_infos\n --------------------\n pred_rots: [B, 4] or [B, 3, 3]\n gt_rots: [B, 4] or [B, 3, 3]\n sym_infos: list [Kx3x3 or None],\n stores K rotations regarding symmetries, if not symmetric, None\n -----\n closest_gt_rots: [B, 3, 3]\n \"\"\"\n batch_size = pred_rots.shape[0]\n device = pred_rots.device\n if pred_rots.shape[-1] == 4:\n pred_rots = quat2mat_torch(pred_rots[:, :4])\n if gt_rots.shape[-1] == 4:\n gt_rots = quat2mat_torch(gt_rots[:, :4])\n\n closest_gt_rots = gt_rots.clone().cpu().numpy() # B,3,3\n\n for i in range(batch_size):\n closest_rot = get_closest_rot(\n pred_rots[i].detach().cpu().numpy(),\n gt_rots[i].cpu().numpy(),\n sym_infos[i],\n )\n # TODO: automatically detect rot_gt's format in PM_Loss to avoid converting multiple times\n closest_gt_rots[i] = closest_rot\n closest_gt_rots = torch.tensor(closest_gt_rots, device=device, dtype=gt_rots.dtype)\n return closest_gt_rots\n\n\ndef get_closest_pose_batch(poses_est, poses_gt, sym_infos):\n \"\"\"\n get closest poses_gt according to current predicted poses_est and sym_infos\n --------------------\n poses_est: [B, 8]\n poses_gt: [B, 8]\n sym_infos: dict {label_idx: Kx3x3 or None}, stores K rotations regarding symmetries, if not symmetric, None\n -----\n closest_poses_gt: [B, 8]\n \"\"\"\n batch_size = poses_est.shape[0]\n device = poses_est.device\n\n rots_est = quat2mat_torch(poses_est[:, :4])\n rots_gt = quat2mat_torch(poses_gt[:, :4])\n labels = poses_est[:, 7].long()\n\n closest_poses_gt = poses_gt.clone().cpu().numpy()\n\n for i in range(batch_size):\n closest_rot = get_closest_rot(\n rots_est[i].detach().cpu().numpy(),\n rots_gt[i].cpu().numpy(),\n 
sym_infos[int(labels[i])],\n        )\n        # TODO: automatically detect rot_gt's format in PM_Loss to avoid converting multiple times\n        closest_poses_gt[i][:4] = mat2quat(closest_rot)\n    closest_poses_gt = torch.tensor(closest_poses_gt, device=device, dtype=poses_gt.dtype)\n    return closest_poses_gt\n\n\ndef get_closest_pose_batch_cpu(poses_est, poses_gt, sym_infos):\n    \"\"\"\n    get closest poses_gt according to current predicted poses_est and sym_infos\n    --------------------\n    poses_est: [B, 8] ndarray\n    poses_gt: [B, 8] ndarray\n    sym_infos: dict {label_idx: Kx3x3 or None}, stores K rotations regarding symmetries, if not symmetric, None\n    -----\n    closest_poses_gt: [B, 8]\n    \"\"\"\n    batch_size = poses_est.shape[0]\n\n    rots_est = [quat2mat(poses_est[i, :4]) for i in range(batch_size)]\n    rots_gt = [quat2mat(poses_gt[i, :4]) for i in range(batch_size)]\n    labels = poses_est[:, 7].astype(int)\n\n    closest_poses_gt = poses_gt.copy()\n\n    for i in range(batch_size):\n        closest_rot = get_closest_rot(rots_est[i], rots_gt[i], sym_infos[int(labels[i])])\n        closest_poses_gt[i][:4] = mat2quat(closest_rot)\n    return closest_poses_gt\n\n\ndef R_transform_th(R_src, R_delta, rot_coord=\"CAMERA\"):\n    \"\"\"transform R_src using R_delta.\n\n    :param R_src: matrix\n    :param R_delta:\n    :param rot_coord:\n    :return:\n    \"\"\"\n    if rot_coord.lower() == \"model\":\n        R_output = torch.matmul(R_src, R_delta)\n    elif rot_coord.lower() == \"camera\" or rot_coord.lower() == \"naive\" or rot_coord.lower() == \"camera_new\":\n        # dR_m2c x R_src_m2c\n        R_output = torch.matmul(R_delta, R_src)\n    else:\n        raise Exception(\"Unknown rot_coord in R_transform: {}\".format(rot_coord))\n    return R_output\n\n\ndef T_transform_batch(T_src, T_delta, zoom_factor, labels_pred=None):\n    \"\"\"inv_zoom T_delta; T_delta + T_src --> T_tgt.\n    T_src: [B, 3] (x1, y1, z1)\n    T_delta: [B, 3xnum_classes] (dx, dy, dz)\n    zoom_factor: [B, 4]\n        wx = crop_width / width\n        wy = crop_height / height\n        tx = zoom_c_x / width * 2 - 1\n        ty = zoom_c_y / height * 2 - 1\n        affine_matrix = [[wx, 0, tx], [0, wy, ty]]\n    ---------\n    T_tgt: [B, 3] (x2, y2, z2)\n    \"\"\"\n    batch_size = T_delta.shape[0]\n    if T_delta.shape[1] > 3: # class aware\n        assert labels_pred is not None, \"labels_pred should not be None when class aware\"\n        inds = torch.arange(0, batch_size, dtype=torch.long, device=T_delta.device)\n        T_delta_selected = T_delta.view(batch_size, -1, 3)[inds, labels_pred] # [B, 3]\n    else:\n        T_delta_selected = T_delta\n    factor_x = zoom_factor[:, 0] # [B,]\n    factor_y = zoom_factor[:, 1] # [B,]\n\n    vx_0 = T_delta_selected[:, 0] * factor_x\n    vy_0 = T_delta_selected[:, 1] * factor_y\n\n    vz = torch.div(T_src[:, 2], torch.exp(T_delta_selected[:, 2]))\n    # vx_0 + T_src[:, 0] / T_src[:, 2]; the positional 'value' argument of addcdiv is deprecated\n    vx = vz * torch.addcdiv(vx_0, T_src[:, 0], T_src[:, 2])\n    vy = vz * torch.addcdiv(vy_0, T_src[:, 1], T_src[:, 2])\n    # import pdb; pdb.set_trace()\n\n    T_tgt = torch.stack([vx, vy, vz], 1)\n    return T_tgt\n\n\ndef R_transform_batch(quats_delta, poses_src):\n    \"\"\"\n    # R_tgt_m2c = dR_c2c x R_src_m2c\n    quats_delta: [B, 4] or [B, 4*num_classes]\n    poses_src: [B, 8]\n    --------------\n    rots_tgt: [B, 3, 3]\n    \"\"\"\n    batch_size = quats_delta.shape[0]\n    rots_src = quat2mat_torch(poses_src[:, :4]) # [B, 3, 3] m2c\n    if quats_delta.shape[1] > 4: # class aware\n        labels = poses_src[:, 7].long() # [B,]\n        quats_delta = quats_delta.view(batch_size, -1, 4) # [B, num_classes, 4]\n        inds = torch.arange(0, batch_size, dtype=torch.long, device=quats_delta.device)\n        quats_delta_selected = quats_delta[inds, labels] # [B, 4]\n        rots_delta = 
quat2mat_torch(quats_delta_selected)\n    else:\n        rots_delta = quat2mat_torch(quats_delta) # [B,3,3] c2c\n    rots_tgt = torch.matmul(rots_delta, rots_src) # [B,3,3] # m2c\n    return rots_tgt\n\n\ndef RT_transform_batch_gpu(quaternion_delta, translation, poses_src_batch):\n    quaternion_delta = quaternion_delta.detach().cpu().numpy()\n    translation = translation.detach().cpu().numpy()\n    poses_src = poses_src_batch.cpu().numpy()\n    poses_tgt = RT_transform_batch_cpu(quaternion_delta, translation, poses_src)\n    poses_tgt = torch.cuda.FloatTensor(poses_tgt, device=poses_src_batch.device)\n    return poses_tgt\n\n\ndef RT_transform_batch_cpu(quaternion_delta, translation, poses_src):\n    poses_tgt = poses_src.copy()\n    for i in range(poses_src.shape[0]):\n        cls = int(poses_src[i, 1]) if quaternion_delta.shape[1] > 4 else 0\n        if all(poses_src[i, 2:] == 0):\n            poses_tgt[i, 2:] = 0\n        else:\n            poses_tgt[i, 2:6] = mat2quat(\n                np.dot(\n                    quat2mat(quaternion_delta[i, 4 * cls : 4 * cls + 4]),\n                    quat2mat(poses_src[i, 2:6]),\n                )\n            )\n            poses_tgt[i, 6:] = translation[i, 3 * cls : 3 * cls + 3]\n    return poses_tgt\n\n\ndef get_closest_pose(est_rot, gt_rot, sym_info):\n    # sym_info: (sym_axis, sym_angle) [0:3, 3]\n    def gen_mat(axis, degree):\n        axis = axis / LA.norm(axis)\n        return quat2mat(\n            [\n                cos(degree / 360.0 * pi),\n                axis[0] * sin(degree / 360.0 * pi),\n                axis[1] * sin(degree / 360.0 * pi),\n                axis[2] * sin(degree / 360.0 * pi),\n            ]\n        )\n\n    sym_angle = int(sym_info[3])\n    sym_axis = np.copy(sym_info[:3])\n    if sym_angle == -1:\n        closest_rot = gt_rot\n    elif sym_angle == 0:\n        angle = 180.0\n        gt_rot_1 = np.copy(gt_rot)\n        rd_1 = re(gt_rot_1, est_rot)\n        gt_rot_2 = np.matmul(gt_rot, gen_mat(sym_axis, angle))\n        rd_2 = re(gt_rot_2, est_rot)\n        if rd_1 < rd_2:\n            gt_rot_1 = np.matmul(gt_rot, gen_mat(sym_axis, -90))\n            gt_rot_2 = np.matmul(gt_rot, gen_mat(sym_axis, 90))\n        else:\n            gt_rot_1 = np.matmul(gt_rot, gen_mat(sym_axis, 90))\n            gt_rot_2 = np.matmul(gt_rot, gen_mat(sym_axis, 270))\n        rd_1 = re(gt_rot_1, est_rot)\n        rd_2 = re(gt_rot_2, est_rot)\n        count = 1\n        thresh = 0.1\n        while angle > thresh:\n            angle /= 2\n            count += 1\n            if rd_1 < rd_2:\n                gt_rot_2 = np.matmul(gt_rot_2, gen_mat(sym_axis, -angle))\n                rd_2 = re(gt_rot_2, est_rot)\n            else:\n                gt_rot_1 = np.matmul(gt_rot_1, gen_mat(sym_axis, angle))\n                rd_1 = re(gt_rot_1, est_rot)\n\n            # print(\"rd_1: {}, rd_2: {}, angle: {}, count: {}\".format(rd_1, rd_2, angle, count))\n        closest_rot = gt_rot_1 if rd_1 < rd_2 else gt_rot_2\n    else:\n        assert 180 % sym_angle == 0\n        rot_delta = gen_mat(sym_axis, sym_angle)\n        cur_rot = np.copy(gt_rot)\n        closest_rot = np.copy(cur_rot)\n        closest_angle = re(cur_rot, est_rot)\n        for i in range(180 // sym_angle): # integer division: range() needs an int in Python 3\n            cur_rot = np.matmul(cur_rot, rot_delta)\n            rd = re(cur_rot, est_rot)\n            if rd < closest_angle:\n                closest_rot = np.copy(cur_rot)\n                closest_angle = rd\n\n    return closest_rot\n\n\ndef se3_inverse_torch(RT):\n    # RT is a 3x4 matrix\n    R = RT[0:3, 0:3]\n    T = RT[0:3, 3]\n    R_inv = R.t()\n    T_inv = -1 * torch.matmul(R.t(), T)\n    RT_inv = torch.cat([R_inv, T_inv.view(3, 1)], dim=1)\n    return RT_inv\n\n\ndef se3_mul_torch(RT1, RT2):\n    R1 = RT1[0:3, 0:3]\n    T1 = RT1[0:3, 3].reshape((3, 1))\n\n    R2 = RT2[0:3, 0:3]\n    T2 = RT2[0:3, 3].reshape((3, 1))\n\n    R_new = torch.matmul(R1, R2)\n    T_new = torch.matmul(R1, T2) + T1\n    RT_new = torch.cat([R_new, T_new.view(3, 1)], dim=1)\n    return RT_new\n\n\ndef se3_inverse_torch_batch(RT):\n    # RT is a Bx3x4 matrix\n    B = RT.shape[0]\n    R_inv = RT[:, :3, :3].permute(0, 2, 1)\n    T_inv = -1 * torch.matmul(R_inv, RT[:, :3, 3].view(B, 3, 1))\n    RT_inv = 
torch.cat([R_inv, T_inv], dim=-1)\n return RT_inv\n\n\ndef se3_mul_torch_batch(RT1, RT2):\n assert RT1.shape[0] == RT2.shape[0]\n B = RT1.shape[0]\n\n R_new = torch.matmul(RT1[:, :3, :3], RT2[:, :3, :3])\n T_new = torch.matmul(RT1[:, :3, :3], RT2[:, :3, 3].view(B, 3, 1)) + RT1[:, :3, 3].view(B, 3, 1)\n RT_new = torch.cat([R_new, T_new], dim=-1)\n return RT_new\n\n\ndef calc_se3_torch(pose_src, pose_tgt):\n \"\"\"\n :param pose_src: pose matrix of source, [R|T], 3x4\n :param pose_tgt: pose matrix of target, [R|T], 3x4\n \"\"\"\n se3_src2tgt = se3_mul_torch(pose_tgt, se3_inverse_torch(pose_src))\n return se3_src2tgt\n\n\ndef calc_se3_torch_batch(poses_src, poses_tgt):\n \"\"\"Bx3x4.\"\"\"\n # B = poses_src.shape[0]\n se3_src2tgt_batch = se3_mul_torch_batch(poses_tgt, se3_inverse_torch_batch(poses_src))\n return se3_src2tgt_batch\n\n\ndef blender_euler_to_blender_pose(euler):\n euler_0 = (-euler[0] + 90) % 360\n euler_1 = euler[1] + 90\n return euler2mat(\n euler_0 * np.pi / 180,\n euler_1 * np.pi / 180,\n euler[2] * np.pi / 180,\n axes=\"szxz\",\n )\n\n\ndef blender_pose_to_blender_euler(pose):\n euler = [r / np.pi * 180 for r in mat2euler(pose, axes=\"szxz\")]\n euler[0] = -(euler[0] + 90) % 360\n euler[1] = euler[1] - 90\n return np.array(euler)\n\n\n###########################################################################\n# NOTE: tests\ndef test_calc_se3_torch():\n from lib.pysixd.RT_transform import calc_se3\n\n B = 8\n device = \"cuda\"\n\n def to_tensor(a):\n return torch.tensor(a, dtype=torch.float32, device=device)\n\n np.random.seed(1)\n axis = np.random.rand(B, 3)\n angle = np.random.rand(B)\n axis_tensor = to_tensor(axis)\n angle_tensor = to_tensor(angle)\n mat_torch = axangle2mat_torch(axis_tensor, angle_tensor, is_normalized=False)\n RT1 = torch.rand(B, 3, 4)\n RT1[:, :3, :3] = mat_torch\n\n axis = np.random.rand(B, 3)\n angle = np.random.rand(B)\n axis_tensor = to_tensor(axis)\n angle_tensor = to_tensor(angle)\n mat_torch = axangle2mat_torch(axis_tensor, angle_tensor, is_normalized=False)\n RT2 = torch.rand(B, 3, 4)\n RT2[:, :3, :3] = mat_torch\n runs = 10000\n import time\n\n t1 = time.perf_counter()\n for _ in range(runs):\n se3_numpy = []\n for i in range(B):\n se3_r, se3_t = calc_se3(RT1[i].cpu().numpy(), RT2[i].cpu().numpy())\n se3_numpy.append(np.hstack([se3_r, se3_t.reshape((3, 1))]))\n se3_numpy = np.array(se3_numpy)\n print(\"numpy: {}s\".format((time.perf_counter() - t1) / runs))\n\n t2 = time.perf_counter()\n for _ in range(runs):\n se3_torch_single = torch.empty_like(RT1)\n for i in range(B):\n se3_torch_single[i] = calc_se3_torch(RT1[i], RT2[i])\n print(\"torch_single: {}s\".format((time.perf_counter() - t2) / runs))\n\n t3 = time.perf_counter()\n for _ in range(runs):\n se3_torch = calc_se3_torch_batch(RT1, RT2)\n print(\"torch: {}s\".format((time.perf_counter() - t3) / runs))\n\n print(np.allclose(se3_numpy, se3_torch.cpu().numpy()))\n print(torch.allclose(se3_torch_single, se3_torch))\n \"\"\"\n numpy: 0.000260600209236145s\n torch_single: 0.0005826136112213135s\n torch: 0.00012000765800476074s\n True\n True\n \"\"\"\n\n\ndef test_pose_vec2mat():\n B = 8\n qt1 = torch.rand(B, 7).to(\"cuda\", torch.float32)\n RT1 = pose_vec2mat(qt1)\n\n RT_np = []\n for i in range(B):\n r = quat2mat(qt1[i, :4].cpu().numpy())\n t = qt1[i, 4:7].cpu().numpy().reshape((3, 1))\n RT_np.append(np.hstack([r, t]))\n RT_np = np.array(RT_np)\n print(RT_np.dtype)\n print(RT1.dtype)\n print(np.allclose(RT_np, RT1.cpu().numpy()))\n if np.allclose(RT_np, RT1.cpu().numpy()) is False:\n 
print(\n np.abs(RT_np - RT1.cpu().numpy()).mean(),\n np.abs(RT_np - RT1.cpu().numpy()).max(),\n )\n # 4.28597091408379e-08 1.8986341143722996e-07\n\n\ndef test_axangle2mat_torch():\n B = 8\n device = \"cuda\"\n\n def to_tensor(a):\n return torch.tensor(a, dtype=torch.float32, device=device)\n\n np.random.seed(1)\n axis = np.random.rand(B, 3)\n angle = np.random.rand(B)\n axis_tensor = to_tensor(axis)\n angle_tensor = to_tensor(angle)\n mat_torch = axangle2mat_torch(axis_tensor, angle_tensor, is_normalized=False)\n mat_np = []\n for i in range(B):\n mat_np.append(axangle2mat(axis[i], angle[i]))\n mat_np = np.array(mat_np)\n print(mat_np)\n print(mat_torch)\n print(np.allclose(mat_np, mat_torch.cpu().numpy()))\n\n\ndef test_quat2euler():\n B = 8\n quat = np.random.rand(B, 4)\n euler = []\n for i in range(quat.shape[0]):\n euler.append(quat2euler(quat[i]))\n euler = np.array(euler)\n\n # torch\n quat_torch = torch.from_numpy(quat)\n euler_torch = quat2euler_torch(quat_torch)\n print(euler)\n print(euler_torch)\n print(np.allclose(euler, euler_torch.cpu().numpy()))\n\n\ndef test_euler2quat():\n B = 8\n quat = np.random.rand(B, 4)\n euler = []\n for i in range(quat.shape[0]):\n euler.append(quat2euler(quat[i]))\n euler = np.array(euler)\n\n # torch\n quat_torch = torch.from_numpy(quat).to(\"cuda\")\n euler_torch = quat2euler_torch(quat_torch)\n print(euler_torch)\n print(euler)\n\n ######\n \"\"\"\n torch 0.0002950624704360962\n numpy 0.0001986891746520996\n \"\"\"\n runs = 10000\n import time\n\n t1 = time.perf_counter()\n for _ in range(runs):\n quat_from_euler_torch = euler2quat_torch(euler_torch[:, 0], euler_torch[:, 1], euler_torch[:, 2])\n # print(quat_from_euler_torch.shape)\n print(\"torch \", (time.perf_counter() - t1) / runs)\n\n euler_np = euler_torch.cpu().numpy()\n quat_from_euler_np = torch.zeros_like(quat_torch)\n t1 = time.perf_counter()\n for _ in range(runs):\n for i in range(B):\n quat_from_euler_np[i].copy_(torch.tensor(euler2quat(euler_np[i, 0], euler_np[i, 1], euler_np[i, 2])))\n print(\"numpy \", (time.perf_counter() - t1) / runs)\n print(\n np.allclose(\n quat_from_euler_np.cpu().numpy(),\n quat_from_euler_torch.cpu().numpy(),\n )\n )\n print(quat_from_euler_np)\n print(quat_from_euler_torch)\n\n\ndef test_qrot_points():\n from lib.pysixd.inout import load_ply\n\n data_root = osp.normpath(osp.join(cur_dir, \"../../datasets/BOP_DATASETS/lm/\"))\n models_cad_files = [osp.join(data_root, \"models/obj_{:06d}.ply\".format(i)) for i in range(1, 15 + 1)]\n obj_id = 0\n points = load_ply(models_cad_files[obj_id])[\"pts\"]\n axis = np.array([1, 2, 0])\n rot = axangle2mat(axis, -pi / 3)\n quat = mat2quat(rot)\n points_q = qrot_points_th(torch.from_numpy(quat), torch.from_numpy(points))\n # N = points.shape[0]\n # points_q = qrot_torch(torch.from_numpy(quat).expand(N, 4), torch.from_numpy(points))\n points_r = rot.dot(points.T).T\n print(np.allclose(points_q.numpy(), points_r))\n\n\nif __name__ == \"__main__\":\n import time\n import os\n import os.path as osp\n import sys\n import mmcv\n\n os.environ[\"PYOPENGL_PLATFORM\"] = \"egl\"\n cur_dir = osp.dirname(osp.abspath(__file__))\n sys.path.insert(0, osp.join(cur_dir, \"../../\"))\n test_qrot_points()\n exit(0)\n # test_axangle2mat_torch()\n # exit(0)\n\n # test_quat2euler()\n # exit(0)\n # test_calc_se3_torch()\n # test_pose_vec2mat()\n test_euler2quat()\n exit(0)\n\n from lib.meshrenderer.meshrenderer_color import Renderer\n from lib.vis_utils.image import grid_show\n from lib.pysixd.misc import get_symmetry_transformations\n\n 
IDX2CLASS = {\n 1: \"ape\",\n 2: \"benchvise\",\n 3: \"bowl\",\n 4: \"camera\",\n 5: \"can\",\n 6: \"cat\",\n 7: \"cup\",\n 8: \"driller\",\n 9: \"duck\",\n 10: \"eggbox\",\n 11: \"glue\",\n 12: \"holepuncher\",\n 13: \"iron\",\n 14: \"lamp\",\n 15: \"phone\",\n }\n CLASSES = IDX2CLASS.values()\n CLASSES = sorted(CLASSES)\n CLASS2IDX = {cls_name: idx for idx, cls_name in IDX2CLASS.items()}\n idx2class = IDX2CLASS\n K = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]])\n ZNEAR = 0.25\n ZFAR = 6.0\n width = 640\n height = 480\n\n data_root = osp.normpath(osp.join(cur_dir, \"../../data/BOP_DATASETS/lm_full/\"))\n\n models_cad_files = [osp.join(data_root, \"models/obj_{:06d}.ply\".format(i)) for i in range(1, 15 + 1)]\n models_info = mmcv.load(osp.join(data_root, \"models/models_info.pkl\"))\n renderer = Renderer(\n models_cad_files,\n K,\n samples=1,\n vertex_tmp_store_folder=\".\",\n vertex_scale=0.001,\n height=height,\n width=width,\n near=ZNEAR,\n far=ZFAR,\n )\n\n # obj_id = 10 - 1\n # trans = np.array([-0.0021458883, 0.0804758, 0.78142926])\n # axis_est = np.array([1, 2, 0])\n # axis_gt = np.array([0,2,1])\n # est_rot = axangle2mat(axis_est, -pi/3)\n # gt_rot = axangle2mat(axis_gt, pi)\n # sym_info = axangle2mat([0, 0, 1], pi)\n # print('sym_info', sym_info)\n\n cls_idx = 3\n obj_id = cls_idx - 1\n trans = np.array([-0.0021458883, 0.0804758, 0.78142926])\n axis_est = np.array([1, 2, 0])\n axis_gt = np.array([0, 2, 1])\n est_rot = axangle2mat(axis_est, -pi / 3)\n gt_rot = axangle2mat(axis_gt, pi)\n\n transforms_sym = get_symmetry_transformations(models_info[cls_idx], max_sym_disc_step=0.01)\n # sym_info = axangle2mat([0, 0, 1], pi)\n sym_info = np.array([sym[\"R\"] for sym in transforms_sym])\n print(\"sym_info\", sym_info.shape)\n\n est_pose = np.random.rand(3, 4)\n est_pose[:, :3] = est_rot\n gt_pose = np.random.rand(3, 4)\n gt_pose[:, :3] = gt_rot\n rd_ori = re(est_rot, gt_rot)\n\n t = time.perf_counter()\n for i in range(3000):\n closest_rot = get_closest_rot(est_rot, gt_rot, sym_info)\n print((\"calculate closest rot {}s\".format((time.perf_counter() - t) / 3000)))\n closest_pose = np.copy(gt_pose)\n closest_pose[:, :3] = closest_rot\n\n rd_closest = re(est_rot, closest_pose[:, :3])\n print(\n (\n \"rot_est: {}, rot_gt: {}, closest rot_gt: {}\".format(\n mat2axangle(est_rot),\n mat2axangle(gt_rot),\n mat2axangle(closest_rot),\n )\n )\n )\n print((\"original rot dist: {}, closest rot dist: {}\".format(rd_ori, rd_closest)))\n\n est_img, _ = renderer.render(obj_id, est_rot, trans)\n gt_img, _ = renderer.render(obj_id, gt_rot, trans)\n closest_img, _ = renderer.render(obj_id, closest_rot, trans)\n show_imgs = [\n est_img[:, :, [2, 1, 0]],\n gt_img[:, :, [2, 1, 0]],\n closest_img[:, :, [2, 1, 0]],\n ]\n show_titles = [\"est\", \"gt_ori\", \"gt_closest\"]\n grid_show(show_imgs, show_titles, row=1, col=3)\n\n # import cv2\n # while(1):\n # est_img = render(renderer, est_rot, trans)\n # cv2.imshow('test', cv2.cvtColor(est_img, cv2.COLOR_RGB2BGR))\n # q = cv2.waitKey(16)\n # if q == ord('w'):\n # trans[1] += 0.05\n # elif q == ord('s'):\n # trans[1] -= 0.05\n # elif q == ord('a'):\n # trans[0] -= 0.1\n # elif q == ord('d'):\n # trans[0] += 0.1\n # elif q == ord('q'):\n # trans[2] += 0.01\n # elif q == ord('e'):\n # trans[2] -= 0.01\n # print trans\n\n # import cv2\n # rot = aa2mat([0,0,1], 0)\n # a = aa2mat([0, 0, 1], 5)\n # d = aa2mat([0, 0, 1], -5)\n # q = aa2mat([0, 1, 0], 5)\n # e = aa2mat([0, 1, 0], -5)\n # w = aa2mat([1, 0, 0], 5)\n # s = aa2mat([1, 0, 0], -5)\n 
# while(1):\n # est_img = render(renderer, rot, trans)\n # cv2.imshow('test', cv2.cvtColor(est_img, cv2.COLOR_RGB2BGR))\n # key = cv2.waitKey(16)\n # if key == ord('w'):\n # rot = np.matmul(w, rot)\n # elif key == ord('s'):\n # rot = np.matmul(s, rot)\n # elif key == ord('a'):\n # rot = np.matmul(a, rot)\n # elif key == ord('d'):\n # rot = np.matmul(d, rot)\n # elif key == ord('q'):\n # rot = np.matmul(q, rot)\n # elif key == ord('e'):\n # rot = np.matmul(e, rot)\n # print mat2quat(rot)\n",
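"# --- Editor's note: minimal usage sketch, not part of the original repository. ---\n# It shows how the torch pose utilities above (quat2mat_torch, pose_vec2mat)\n# might be exercised; the import path 'rt_transform_torch' is hypothetical.\nimport torch\n\nfrom rt_transform_torch import pose_vec2mat, quat2mat_torch # hypothetical module name\n\nB = 4\nquats = torch.randn(B, 4) # unnormalized quaternions; quat2mat_torch normalizes internally\nR = quat2mat_torch(quats) # [B, 3, 3]\n# a valid rotation matrix satisfies R @ R^T == I\neye = torch.eye(3).expand(B, 3, 3)\nassert torch.allclose(R @ R.transpose(1, 2), eye, atol=1e-5)\n\n# 7-dof pose vector in 'quat' mode: (qw, qx, qy, qz, tx, ty, tz)\nvec = torch.cat([quats, torch.randn(B, 3)], dim=1)\nT = pose_vec2mat(vec, rotation_mode=\"quat\") # [B, 3, 4]\nassert T.shape == (B, 3, 4)\n",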
"import math\n\nimport torch\nimport torch.nn as nn\n\n\ndef rgb_to_hsv(image: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert an image from RGB to HSV.\n\n The image data is assumed to be in the range of (0, 1).\n\n Args:\n image (torch.Tensor): RGB Image to be converted to HSV with shape of :math:`(*, 3, H, W)`.\n\n Returns:\n torch.Tensor: HSV version of the image with shape of :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = rgb_to_hsv(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(image)))\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(\"Input size must have a shape of (*, 3, H, W). Got {}\".format(image.shape))\n\n # The first or last occurance is not guarenteed before 1.6.0\n # https://github.com/pytorch/pytorch/issues/20414\n maxc, _ = image.max(-3)\n maxc_mask = image == maxc.unsqueeze(-3)\n _, max_indices = ((maxc_mask.cumsum(-3) == 1) & maxc_mask).max(-3)\n minc: torch.Tensor = image.min(-3)[0]\n\n v: torch.Tensor = maxc # brightness\n\n deltac: torch.Tensor = maxc - minc\n s: torch.Tensor = deltac / (v + 1e-31)\n\n # avoid division by zero\n deltac = torch.where(\n deltac == 0,\n torch.ones_like(deltac, device=deltac.device, dtype=deltac.dtype),\n deltac,\n )\n\n maxc_tmp = maxc.unsqueeze(-3) - image\n rc: torch.Tensor = maxc_tmp[..., 0, :, :]\n gc: torch.Tensor = maxc_tmp[..., 1, :, :]\n bc: torch.Tensor = maxc_tmp[..., 2, :, :]\n\n h = torch.stack([bc - gc, 2.0 * deltac + rc - bc, 4.0 * deltac + gc - rc], dim=-3)\n\n h = torch.gather(h, dim=-3, index=max_indices[..., None, :, :])\n h = h.squeeze(-3)\n h = h / deltac\n\n h = (h / 6.0) % 1.0\n\n h = 2 * math.pi * h\n\n return torch.stack([h, s, v], dim=-3)\n\n\ndef hsv_to_rgb(image: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert an image from HSV to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Args:\n image (torch.Tensor): HSV Image to be converted to HSV with shape of :math:`(*, 3, H, W)`.\n\n Returns:\n torch.Tensor: RGB version of the image with shape of :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = hsv_to_rgb(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(image)))\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(\"Input size must have a shape of (*, 3, H, W). 
Got {}\".format(image.shape))\n\n h: torch.Tensor = image[..., 0, :, :] / (2 * math.pi)\n s: torch.Tensor = image[..., 1, :, :]\n v: torch.Tensor = image[..., 2, :, :]\n\n hi: torch.Tensor = torch.floor(h * 6) % 6\n f: torch.Tensor = ((h * 6) % 6) - hi\n one: torch.Tensor = torch.tensor(1.0).to(image.device)\n p: torch.Tensor = v * (one - s)\n q: torch.Tensor = v * (one - f * s)\n t: torch.Tensor = v * (one - (one - f) * s)\n\n hi = hi.long()\n indices: torch.Tensor = torch.stack([hi, hi + 6, hi + 12], dim=-3)\n # yapf: disable\n out = torch.stack(\n (\n v, q, p, p, t, v,\n t, v, v, q, p, p,\n p, p, t, v, v, q\n ),\n dim=-3,\n )\n # yapf: enable\n out = torch.gather(out, -3, indices)\n\n return out\n\n\nclass RgbToHsv(nn.Module):\n r\"\"\"Convert an image from RGB to HSV.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n torch.tensor: HSV version of the image.\n\n Shape:\n - image: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> hsv = RgbToHsv()\n >>> output = hsv(input) # 2x3x4x5\n \"\"\"\n\n def __init__(self) -> None:\n super(RgbToHsv, self).__init__()\n\n def forward(self, image: torch.Tensor) -> torch.Tensor:\n return rgb_to_hsv(image)\n\n\nclass HsvToRgb(nn.Module):\n r\"\"\"Convert an image from HSV to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n torch.Tensor: RGB version of the image.\n\n Shape:\n - image: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> rgb = HsvToRgb()\n >>> output = rgb(input) # 2x3x4x5\n \"\"\"\n\n def __init__(self) -> None:\n super(HsvToRgb, self).__init__()\n\n def forward(self, image: torch.Tensor) -> torch.Tensor:\n return hsv_to_rgb(image)\n",
"# Author: Tomas Hodan ([email protected])\n# Center for Machine Perception, Czech Technical University in Prague\n# Modified\n\"\"\"Sampling of views from a sphere.\"\"\"\n\nimport math\nimport numpy as np\n\nfrom lib.pysixd import transform, inout, misc\nfrom transforms3d.euler import euler2mat\n\n\ndef fibonacci_sampling(n_pts, radius=1.0):\n \"\"\"Samples an odd number of almost equidistant 3D points from the Fibonacci\n lattice on a unit sphere.\n\n Latitude (elevation) represents the rotation angle around the X axis.\n Longitude (azimuth) represents the rotation angle around the Z axis.\n\n Ref:\n [1] https://arxiv.org/pdf/0912.4540.pdf\n [2] http://stackoverflow.com/questions/34302938/map-point-to-closest-point-on-fibonacci-lattice\n [3] http://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere\n [4] https://www.openprocessing.org/sketch/41142\n\n :param n_pts: Number of 3D points to sample (an odd number).\n :param radius: Radius of the sphere.\n :return: List of 3D points on the sphere surface.\n \"\"\"\n # Needs to be an odd number [1].\n assert n_pts % 2 == 1\n n_pts_half = int(n_pts / 2)\n\n phi = (math.sqrt(5.0) + 1.0) / 2.0 # Golden ratio.\n phi_inv = phi - 1.0\n ga = 2.0 * math.pi * phi_inv # Complement to the golden angle.\n\n pts = []\n for i in range(-n_pts_half, n_pts_half + 1):\n lat = math.asin((2 * i) / float(2 * n_pts_half + 1))\n lon = (ga * i) % (2 * math.pi)\n\n # Convert the latitude and longitude angles to 3D coordinates.\n s = math.cos(lat) * radius\n x, y, z = math.cos(lon) * s, math.sin(lon) * s, math.tan(lat) * s\n pts.append([x, y, z])\n\n # Calculate rotation matrix and translation vector.\n # Note: lat,lon=0,0 is a camera looking to the sphere center from\n # (-radius, 0, 0) in the world (i.e. 
sphere) coordinate system.\n # pi_half = 0.5 * math.pi\n # alpha_x = -lat - pi_half\n # alpha_z = lon + pi_half\n # R_x = transform.rotation_matrix(alpha_x, [1, 0, 0])[:3, :3]\n # R_z = transform.rotation_matrix(alpha_z, [0, 0, 1])[:3, :3]\n # R = np.linalg.inv(R_z.dot(R_x))\n # t = -R.dot(np.array([x, y, z]).reshape((3, 1)))\n\n return pts\n\n\ndef hinter_sampling(min_n_pts, radius=1.0):\n \"\"\"Samples 3D points on a sphere surface by refining an icosahedron, as in:\n Hinterstoisser et al., Simultaneous Recognition and Homography Extraction\n of Local Patches with a Simple Linear Classifier, BMVC 2008.\n\n :param min_n_pts: The minimum number of points to sample on the whole sphere.\n :param radius: Radius of the sphere.\n :return: 3D points on the sphere surface and a list with indices of refinement\n levels on which the points were created.\n \"\"\"\n # Vertices and faces of an icosahedron.\n a, b, c = 0.0, 1.0, (1.0 + math.sqrt(5.0)) / 2.0\n pts = [\n (-b, c, a),\n (b, c, a),\n (-b, -c, a),\n (b, -c, a),\n (a, -b, c),\n (a, b, c),\n (a, -b, -c),\n (a, b, -c),\n (c, a, -b),\n (c, a, b),\n (-c, a, -b),\n (-c, a, b),\n ]\n faces = [\n (0, 11, 5),\n (0, 5, 1),\n (0, 1, 7),\n (0, 7, 10),\n (0, 10, 11),\n (1, 5, 9),\n (5, 11, 4),\n (11, 10, 2),\n (10, 7, 6),\n (7, 1, 8),\n (3, 9, 4),\n (3, 4, 2),\n (3, 2, 6),\n (3, 6, 8),\n (3, 8, 9),\n (4, 9, 5),\n (2, 4, 11),\n (6, 2, 10),\n (8, 6, 7),\n (9, 8, 1),\n ]\n\n # Refinement levels on which the points were created.\n pts_level = [0 for _ in range(len(pts))]\n\n ref_level = 0\n while len(pts) < min_n_pts:\n ref_level += 1\n edge_pt_map = {} # Mapping from an edge to a newly added point on the edge.\n faces_new = [] # New set of faces.\n\n # Each face is replaced by four new smaller faces.\n for face in faces:\n pt_inds = list(face) # List of point ID's involved in the new faces.\n for i in range(3):\n\n # Add a new point if this edge has not been processed yet, or get ID of\n # the already added point.\n edge = (face[i], face[(i + 1) % 3])\n edge = (min(edge), max(edge))\n if edge not in edge_pt_map.keys():\n pt_new_id = len(pts)\n edge_pt_map[edge] = pt_new_id\n pt_inds.append(pt_new_id)\n\n pt_new = 0.5 * (np.array(pts[edge[0]]) + np.array(pts[edge[1]]))\n pts.append(pt_new.tolist())\n pts_level.append(ref_level)\n else:\n pt_inds.append(edge_pt_map[edge])\n\n # Replace the current face with four new faces.\n faces_new += [\n (pt_inds[0], pt_inds[3], pt_inds[5]),\n (pt_inds[3], pt_inds[1], pt_inds[4]),\n (pt_inds[3], pt_inds[4], pt_inds[5]),\n (pt_inds[5], pt_inds[4], pt_inds[2]),\n ]\n faces = faces_new\n\n # Project the points to a sphere.\n pts = np.array(pts)\n pts *= np.reshape(radius / np.linalg.norm(pts, axis=1), (pts.shape[0], 1))\n\n # Collect point connections.\n pt_conns = {}\n for face in faces:\n for i in range(len(face)):\n pt_conns.setdefault(face[i], set()).add(face[(i + 1) % len(face)])\n pt_conns[face[i]].add(face[(i + 2) % len(face)])\n\n # Order the points - starting from the top one and adding the connected points\n # sorted by azimuth.\n top_pt_id = np.argmax(pts[:, 2])\n pts_ordered = []\n pts_todo = [top_pt_id]\n pts_done = [False for _ in range(pts.shape[0])]\n\n def calc_azimuth(x, y):\n two_pi = 2.0 * math.pi\n return (math.atan2(y, x) + two_pi) % two_pi\n\n while len(pts_ordered) != pts.shape[0]:\n # Sort by azimuth.\n pts_todo = sorted(pts_todo, key=lambda i: calc_azimuth(pts[i][0], pts[i][1]))\n pts_todo_new = []\n for pt_id in pts_todo:\n pts_ordered.append(pt_id)\n pts_done[pt_id] = True\n pts_todo_new += [i for 
i in pt_conns[pt_id]] # Find the connected points.\n\n # Points to be processed in the next iteration.\n pts_todo = [i for i in set(pts_todo_new) if not pts_done[i]]\n\n # Re-order the points and faces.\n pts = pts[np.array(pts_ordered), :]\n pts_level = [pts_level[i] for i in pts_ordered]\n pts_order = np.zeros((pts.shape[0],))\n pts_order[np.array(pts_ordered)] = np.arange(pts.shape[0])\n for face_id in range(len(faces)):\n faces[face_id] = [pts_order[i] for i in faces[face_id]]\n\n # import inout\n # inout.save_ply('output/hinter_sampling.ply', pts=pts, faces=np.array(faces))\n\n return pts, pts_level\n\n\ndef sample_views(\n min_n_views,\n radius=1.0,\n azimuth_range=(0, 2 * math.pi),\n elev_range=(-0.5 * math.pi, 0.5 * math.pi),\n mode=\"hinterstoisser\",\n):\n \"\"\"Viewpoint sampling from a view sphere.\n\n :param min_n_views: The min. number of points to sample on the whole sphere.\n :param radius: Radius of the sphere.\n :param azimuth_range: Azimuth range from which the viewpoints are sampled.\n :param elev_range: Elevation range from which the viewpoints are sampled.\n :param mode: Type of sampling (options: 'hinterstoisser' or 'fibonacci').\n :return: List of views, each represented by a 3x3 ndarray with a rotation\n matrix and a 3x1 ndarray with a translation vector.\n \"\"\"\n # Get points on a sphere.\n if mode == \"hinterstoisser\":\n pts, pts_level = hinter_sampling(min_n_views, radius=radius)\n elif mode == \"fibonacci\":\n n_views = min_n_views\n if n_views % 2 != 1:\n n_views += 1\n\n pts = fibonacci_sampling(n_views, radius=radius)\n pts_level = [0 for _ in range(len(pts))]\n else:\n raise ValueError(\"Unknown view sampling mode.\")\n\n views = []\n for pt in pts:\n # Azimuth from (0, 2 * pi).\n azimuth = math.atan2(pt[1], pt[0])\n if azimuth < 0:\n azimuth += 2.0 * math.pi\n\n # Elevation from (-0.5 * pi, 0.5 * pi).\n a = np.linalg.norm(pt)\n b = np.linalg.norm([pt[0], pt[1], 0])\n elev = math.acos(b / a)\n if pt[2] < 0:\n elev = -elev\n\n if not (azimuth_range[0] <= azimuth <= azimuth_range[1] and elev_range[0] <= elev <= elev_range[1]):\n continue\n\n # Rotation matrix.\n # Adopted from gluLookAt function (uses OpenGL coordinate system):\n # [1] http://stackoverflow.com/questions/5717654/glulookat-explanation\n # [2] https://www.opengl.org/wiki/GluLookAt_code\n f = -(np.array(pt)) # Forward direction.\n f /= np.linalg.norm(f)\n u = np.array([0.0, 0.0, 1.0]) # Up direction.\n s = np.cross(f, u) # Side direction.\n if np.count_nonzero(s) == 0:\n # f and u are parallel, i.e. 
we are looking along or against Z axis.\n s = np.array([1.0, 0.0, 0.0])\n s /= np.linalg.norm(s)\n u = np.cross(s, f) # Recompute up.\n R = np.array([[s[0], s[1], s[2]], [u[0], u[1], u[2]], [-f[0], -f[1], -f[2]]])\n\n # Convert from OpenGL to OpenCV coordinate system.\n R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3]\n R = R_yz_flip.dot(R)\n\n # Translation vector.\n t = -(R.dot(np.array(pt).reshape((3, 1))))\n\n views.append({\"R\": R, \"t\": t})\n\n return views, pts_level\n\n\ndef save_vis(path, views, views_level=None):\n \"\"\"Creates a PLY file visualizing the views.\n\n :param path: Path to output PLY file.\n :param views: Views as returned by sample_views().\n :param views_level: View levels as returned by sample_views().\n \"\"\"\n pts = []\n normals = []\n colors = []\n for view_id, view in enumerate(views):\n R_inv = np.linalg.inv(view[\"R\"])\n pts += [\n R_inv.dot(-view[\"t\"]).squeeze(),\n R_inv.dot(np.array([[0.01, 0, 0]]).T - view[\"t\"]).squeeze(),\n R_inv.dot(np.array([[0, 0.01, 0]]).T - view[\"t\"]).squeeze(),\n R_inv.dot(np.array([[0, 0, 0.01]]).T - view[\"t\"]).squeeze(),\n ]\n\n normal = R_inv.dot(np.array([0, 0, 1]).reshape((3, 1)))\n normals += [\n normal.squeeze(),\n np.array([0, 0, 0]),\n np.array([0, 0, 0]),\n np.array([0, 0, 0]),\n ]\n\n if views_level:\n max_level = max(1, max(views_level))\n intens = (255 * views_level[view_id]) / float(max_level)\n else:\n intens = 255 * view_id / float(len(views))\n colors += [\n [intens, intens, intens],\n [255, 0, 0],\n [0, 255, 0],\n [0, 0, 255],\n ]\n\n inout.save_ply2(\n path,\n pts=np.array(pts),\n pts_normals=np.array(normals),\n pts_colors=np.array(colors),\n )\n\n\ndef sample_rotations_phere(\n min_n_views,\n radius=0.7,\n azimuth_range=(0, 2 * np.pi),\n elev_range=(-0.5 * np.pi, 0.5 * np.pi),\n num_cyclo=36,\n):\n \"\"\"NOTE: borrowed from AAE\n min_n_views: 2562\n radius: 0.7\n azimuth_range = (0, 2 * np.pi)\n elev_range = (-0.5 * np.pi, 0.5 * np.pi)\n ---------\n return: len(views)xnum_cyclo [[3, 3]]\n \"\"\"\n views, _ = sample_views(min_n_views, radius, azimuth_range, elev_range)\n # Rs = np.empty((len(views) * num_cyclo, 3, 3))\n Rs = []\n for view in views:\n for cyclo in np.linspace(0, 2.0 * np.pi, num_cyclo):\n rot_z = np.array(\n [\n [np.cos(-cyclo), -(np.sin(-cyclo)), 0],\n [np.sin(-cyclo), np.cos(-cyclo), 0],\n [0, 0, 1],\n ]\n )\n Rs.append(rot_z.dot(view[\"R\"]))\n return Rs\n\n\ndef sample_sphere(num_samples, begin_elevation):\n \"\"\"sample angles from the sphere\n reference: https://zhuanlan.zhihu.com/p/25988652?group_id=828963677192491008\n return azimuths, elevations\n \"\"\"\n flat_objects = [\"037_scissors\", \"051_large_clamp\", \"052_extra_large_clamp\"]\n # if self.class_type in flat_objects:\n # begin_elevation = 30\n # else:\n # begin_elevation = 0\n ratio = (begin_elevation + 90) / 180\n num_points = int(num_samples // (1 - ratio))\n phi = (np.sqrt(5) - 1.0) / 2.0 # fibonacci\n azimuths = []\n elevations = []\n for n in range(num_points - num_samples, num_points):\n z = 2.0 * n / num_points - 1.0\n azimuths.append(np.rad2deg(2 * np.pi * n * phi % (2 * np.pi)))\n elevations.append(np.rad2deg(np.arcsin(z)))\n return np.array(azimuths), np.array(elevations)\n\n\ndef sample_poses(num_samples, eulers, translations, begin_elevation):\n \"\"\"sample poses based on existing poses\n from pvnet\n --------\n eulers + translations\n \"\"\"\n from scipy import stats\n\n # eulers, translations = self.get_dataset_poses()\n # num_samples = cfg.NUM_SYN\n azimuths, elevations = 
sample_sphere(num_samples, begin_elevation)\n euler_sampler = stats.gaussian_kde(eulers.T)\n eulers = euler_sampler.resample(num_samples).T\n eulers[:, 0] = azimuths # NOTE: azimuths and elevations are randomly sampled\n eulers[:, 1] = elevations\n print(\"eulers: min {} max {} mean {}\".format(eulers.min(0), eulers.max(0), eulers.mean(0)))\n translation_sampler = stats.gaussian_kde(translations.T)\n translations = translation_sampler.resample(num_samples).T\n poses = np.concatenate([eulers, translations], axis=-1)\n return poses\n # np.save(self.blender_poses_path, np.concatenate([eulers, translations], axis=-1))\n\n\ndef sample_rotations_sphere_and_inplane(num_samples, begin_elevation, in_plane_range=(0, 360)):\n azimuths, elevations = sample_sphere(num_samples, begin_elevation)\n N = len(azimuths)\n in_planes = np.random.uniform(in_plane_range[0], in_plane_range[1], N)\n rotations = [\n euler2mat(\n azimuths[i] * np.pi / 180,\n elevations[i] * np.pi / 180,\n in_planes[i] * np.pi / 180,\n )\n for i in range(N)\n ]\n # # Convert from OpenGL to OpenCV coordinate system.\n # R_transform = np.array(\n # [[-1.00000024e00, -8.74227979e-08, -5.02429621e-15, 8.74227979e-08],\n # [5.02429621e-15, 1.34358856e-07, -1.00000012e00, -1.34358856e-07],\n # [8.74227979e-08, -1.00000012e00, 1.34358856e-07, 1.00000012e00]])[:3,:3]\n # R_transform = np.linalg.inv(R_transform)\n # Convert from OpenGL to OpenCV coordinate system.\n R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3]\n R_transform = R_yz_flip\n rotations = [R_transform.dot(R) for R in rotations]\n return rotations\n\n\ndef angle(u, v):\n c = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)) # -> cosine of the angle\n rad = np.arccos(min(max(c, -1), 1))\n deg = rad / np.pi * 180\n return deg\n\n\ndef vis_rots_on_sphere(rotations):\n pz = np.array([0, 0, 1])\n new_points = []\n for rot in rotations:\n new_pz = np.dot(rot, pz.reshape((-1, 1))).reshape((3,))\n new_points.append(new_pz)\n new_points = np.array(new_points)\n pz_mean = np.mean(new_points, 0)\n\n angles = []\n for p_i in range(new_points.shape[0]):\n deg = angle(pz_mean, new_points[p_i, :])\n angles.append(deg)\n angles = np.array(angles)\n\n print(\n \"angle mean: \",\n np.mean(angles),\n \"angle std: \",\n np.std(angles),\n \"angle max: \",\n np.max(angles),\n )\n print()\n\n def vis_points():\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D # noqa:F401\n\n ax = plt.figure().add_subplot(111, projection=\"3d\")\n ax.scatter(\n new_points[:, 0],\n new_points[:, 1],\n new_points[:, 2],\n c=\"r\",\n marker=\"^\",\n )\n ax.scatter(0, 0, 0, c=\"b\", marker=\"o\")\n ax.scatter(0, 0, 1, c=\"b\", marker=\"o\")\n ax.scatter(0, 1, 0, c=\"b\", marker=\"o\")\n ax.scatter(1, 0, 0, c=\"b\", marker=\"o\")\n ax.quiver(0, 0, 0, 0, 0, 1)\n ax.quiver(0, 0, 0, pz_mean[0], pz_mean[1], pz_mean[2])\n\n ax.scatter(pz_mean[0], pz_mean[1], pz_mean[2], c=\"b\", marker=\"o\")\n ax.set_xlabel(\"X Label\")\n ax.set_ylabel(\"Y Label\")\n ax.set_zlabel(\"Z Label\")\n ax.set_xlim([-1.5, 1.5])\n ax.set_ylim([-1.5, 1.5])\n ax.set_zlim([-1.5, 1.5])\n plt.show()\n\n vis_points()\n return\n\n\ndef sample_rotations_from_euler():\n rotations = []\n x_low = -90\n x_high = 0 # 360\n y_low = -90\n y_high = 0\n z_low = -90\n z_high = 0\n for roll in range(x_low, x_high, 15):\n for pitch in range(y_low, y_high, 15):\n for yaw in range(z_low, z_high, 15):\n rot = euler2mat(roll * np.pi / 180, pitch * np.pi / 180, yaw * np.pi / 180)\n rotations.append(rot)\n\n return rotations\n\n\nif 
__name__ == \"__main__\":\n from lib.utils import logger\n\n # Example of sampling views from a view sphere.\n # views, views_level = sample_views(\n # min_n_views=25,\n # radius=1,\n # azimuth_range=(0, 2 * math.pi),\n # elev_range=(-0.5 * math.pi, 0.5 * math.pi),\n # mode=\"fibonacci\",\n # )\n # misc.log(\"Sampled views: \" + str(len(views)))\n # out_views_vis_path = \"view_sphere.ply\"\n # save_vis(out_views_vis_path, views)\n\n # rotations = sample_rotations_phere(min_n_views=2000, radius=0.7, azimuth_range=(0, 2 * np.pi), elev_range=(-0.5 * np.pi, 0.5 * np.pi), num_cyclo=36)\n # rotations = sample_rotations_from_euler()\n rotations = sample_rotations_sphere_and_inplane(1000, 0, (0, 90))\n print(len(rotations))\n vis_rots_on_sphere(rotations)\n",
"import types\nfrom bisect import bisect_right\nimport warnings\nimport torch\nfrom torch.optim import Optimizer\nfrom math import pi, cos\n\n\ndef build_scheduler(lr_config, optimizer, epoch_length):\n \"\"\"\n total_epochs = 80\n # learning policy\n lr_config = dict(\n policy='flat_and_anneal', #\n warmup_method='linear',\n warmup_iters=800,\n warmup_factor=1.0 / 10,\n target_lr_factor=0.001,\n anneal_method='cosine', # step, linear, poly, exp, cosine\n anneal_point=0.72, # no use when method is step\n steps=[0.5, 0.75],\n step_gamma=0.1,\n poly_power=0.5,\n epochs=total_epochs)\n warmup init lr = base_lr * warmup_factor\n epoch_length: len(train_loader)\n \"\"\"\n policy = lr_config[\"policy\"]\n assert policy in (\n \"flat_and_anneal\",\n \"linear\",\n \"step\",\n \"poly\",\n \"multistep\",\n \"warmup_multistep\",\n )\n total_iters = lr_config[\"epochs\"] * epoch_length\n\n # update_mode = 'epoch' if lr_config.get('by_epoch', False) else 'batch'\n if policy == \"flat_and_anneal\":\n scheduler = flat_and_anneal_lr_scheduler(\n optimizer=optimizer,\n total_iters=total_iters,\n warmup_method=lr_config[\"warmup_method\"],\n warmup_factor=lr_config[\"warmup_factor\"],\n warmup_iters=lr_config[\"warmup_iters\"],\n anneal_method=lr_config[\"anneal_method\"],\n anneal_point=lr_config[\"anneal_point\"],\n target_lr_factor=lr_config[\"target_lr_factor\"],\n poly_power=lr_config[\"poly_power\"],\n step_gamma=lr_config[\"step_gamma\"],\n steps=lr_config[\"steps\"],\n )\n elif policy == \"warmup_multistep\":\n # if update_mode == 'epoch':\n # milestones = [epoch_length * _step for _step in lr_config['steps']]\n # else:\n milestones = [_step * total_iters for _step in lr_config[\"steps\"]]\n scheduler = WarmupMultiStepLR(\n optimizer,\n milestones,\n gamma=lr_config[\"step_gamma\"],\n warmup_factor=lr_config[\"warmup_factor\"],\n warmup_iters=lr_config[\"warmup_iters\"],\n warmup_method=lr_config[\"warmup_method\"],\n last_epoch=-1,\n )\n elif policy == \"linear\":\n # if update_mode == \"batch\":\n # count = epoch_length * lr_config[\"epochs\"]\n # else:\n # count = lr_config[\"epochs\"]\n count = total_iters\n\n beta = float(lr_config[\"from\"])\n alpha = float(lr_config[\"to\"] - beta) / count\n\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda it: it * alpha + beta)\n elif policy == \"step\":\n if len(lr_config[\"steps\"]) != 1:\n raise ValueError(\"step policy only support 1 step. 
got {}\".format(len(lr_config[\"steps\"])))\n step_size = lr_config[\"steps\"][0] * total_iters # by batch/iter\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size, lr_config[\"step_gamma\"])\n elif policy == \"poly\":\n # if update_mode == \"batch\":\n # count = epoch_length * lr_config[\"epochs\"]\n # else:\n # count = lr_config[\"epochs\"]\n count = total_iters\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer,\n lambda it: (1 - float(it) / count) ** lr_config[\"poly_power\"],\n )\n elif policy == \"multistep\":\n milestones = [_step * total_iters for _step in lr_config[\"steps\"]]\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, lr_config[\"step_gamma\"])\n else:\n raise ValueError(\n \"Unrecognized scheduler type {}, \"\n \"valid options: 'flat_and_anneal', 'linear', 'step', 'poly', 'multistep'\".format(policy)\n )\n return scheduler\n\n\nclass WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):\n def __init__(\n self,\n optimizer,\n milestones,\n gamma=0.1,\n warmup_factor=1.0 / 3,\n warmup_iters=5,\n warmup_method=\"linear\",\n last_epoch=-1,\n ):\n if not milestones == sorted(milestones):\n raise ValueError(\n \"Milestones should be a list of\" \" increasing integers. Got {}\",\n milestones,\n )\n\n if warmup_method not in (\"constant\", \"linear\"):\n raise ValueError(\"Only 'constant' or 'linear' warmup_method accepted\" \"got {}\".format(warmup_method))\n self.milestones = milestones\n self.gamma = gamma\n self.warmup_factor = warmup_factor\n self.warmup_iters = warmup_iters\n self.warmup_method = warmup_method\n super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n # to add warmup for other schedulers, we only need to implement warmup_factor\n # another way is to combine the warmup_lr_scheduler function and native scheduler,\n # so that we don't need to reimplement many schedulers\n warmup_factor = 1\n if self.last_epoch < self.warmup_iters:\n if self.warmup_method == \"constant\":\n warmup_factor = self.warmup_factor\n elif self.warmup_method == \"linear\":\n alpha = float(self.last_epoch) / self.warmup_iters\n warmup_factor = self.warmup_factor * (1 - alpha) + alpha\n return [\n base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)\n for base_lr in self.base_lrs\n ]\n\n\ndef flat_and_anneal_lr_scheduler(\n optimizer,\n total_iters,\n warmup_iters=0,\n warmup_factor=0.1,\n warmup_method=\"linear\",\n warmup_pow=2,\n anneal_point=0.72,\n anneal_method=\"cosine\",\n target_lr_factor=0,\n poly_power=1.0,\n step_gamma=0.1,\n steps=[2 / 3.0, 8 / 9.0],\n cyclic=False,\n return_function=False,\n):\n \"\"\"Ref: https://github.com/fastai/fastai/blob/master/fastai/callbacks/flat_cos_anneal.py.\n\n warmup_initial_lr = warmup_factor * base_lr\n target_lr = base_lr * target_lr_factor\n total_iters: cycle length; set to max_iter to get a one cycle schedule.\n \"\"\"\n if warmup_method not in (\"constant\", \"linear\", \"pow\", \"exp\"):\n raise ValueError(\n \"Only 'constant', 'linear', 'pow' or 'exp' warmup_method accepted,\" \"got {}\".format(warmup_method)\n )\n\n if anneal_method not in (\n \"cosine\",\n \"linear\",\n \"poly\",\n \"exp\",\n \"step\",\n \"none\",\n ):\n raise ValueError(\n \"Only 'cosine', 'linear', 'poly', 'exp', 'step' or 'none' anneal_method accepted,\"\n \"got {}\".format(anneal_method)\n )\n\n if anneal_method == \"step\":\n if any([_step < warmup_iters / total_iters or _step > 1 for _step in steps]):\n raise ValueError(\n \"error in steps: {}. 
warmup_iters: {} total_iters: {}.\"\n \"steps should be in ({},1)\".format(\n steps,\n warmup_iters,\n total_iters,\n warmup_iters / total_iters,\n )\n )\n if list(steps) != sorted(steps):\n raise ValueError(\"steps {} is not in ascending order.\".format(steps))\n warnings.warn(\"ignore anneal_point when using step anneal_method\")\n anneal_start = steps[0] * total_iters\n else:\n if anneal_point > 1 or anneal_point < 0:\n raise ValueError(\"anneal_point should be in [0,1], got {}\".format(anneal_point))\n anneal_start = anneal_point * total_iters\n\n def f(x): # x is the iter in lr scheduler, return the lr_factor\n # the final lr is warmup_factor * base_lr\n x = x % total_iters if cyclic else x # cyclic\n if x < warmup_iters:\n if warmup_method == \"linear\":\n alpha = float(x) / warmup_iters\n return (1 - warmup_factor) * alpha + warmup_factor\n elif warmup_method == \"pow\":\n alpha = float(x) / warmup_iters\n return (1 - warmup_factor) * pow(alpha, warmup_pow) + warmup_factor\n elif warmup_method == \"exp\":\n assert warmup_factor > 0, warmup_factor\n alpha = float(x) / warmup_iters\n return warmup_factor ** (1 - alpha)\n elif warmup_method == \"constant\":\n return warmup_factor\n\n if x < anneal_start:\n return 1\n elif x >= anneal_start and x < total_iters:\n if anneal_method == \"step\":\n # ignore anneal_point and target_lr_factor\n milestones = [_step * total_iters for _step in steps]\n lr_factor = step_gamma ** bisect_right(milestones, float(x))\n elif anneal_method == \"cosine\":\n # slow --> fast --> slow\n lr_factor = target_lr_factor + 0.5 * (1 - target_lr_factor) * (\n 1 + cos(pi * ((float(x) - anneal_start) / (total_iters - anneal_start)))\n )\n elif anneal_method == \"linear\":\n # (y-m) / (B-x) = (1-m) / (B-A)\n lr_factor = target_lr_factor + (1 - target_lr_factor) * (total_iters - float(x)) / (\n total_iters - anneal_start\n )\n elif anneal_method == \"poly\":\n # slow --> fast if poly_power < 1\n # fast --> slow if poly_power > 1\n # when poly_power == 1.0, it is the same with linear\n lr_factor = (\n target_lr_factor\n + (1 - target_lr_factor) * ((total_iters - float(x)) / (total_iters - anneal_start)) ** poly_power\n )\n elif anneal_method == \"exp\":\n # fast --> slow\n # do not decay too much, especially if lr_end == 0, lr will be\n # 0 at anneal iter, so we should avoid that\n _target_lr_factor = max(target_lr_factor, 5e-3)\n lr_factor = _target_lr_factor ** ((float(x) - anneal_start) / (total_iters - anneal_start))\n else:\n lr_factor = 1\n return lr_factor\n elif x >= total_iters:\n return target_lr_factor\n\n if return_function:\n return torch.optim.lr_scheduler.LambdaLR(optimizer, f), f\n else:\n return torch.optim.lr_scheduler.LambdaLR(optimizer, f)\n\n\ndef update_learning_rate(optimizer, cur_lr, new_lr):\n # old way of update learning rate\n \"\"\"Update learning rate.\"\"\"\n if cur_lr == new_lr:\n return\n ratio = max((new_lr / max((cur_lr, 1e-10)), cur_lr / max((new_lr, 1e-10))))\n if ratio > 1.1:\n print(\"Changing learning rate {} -> {}\".format(cur_lr, new_lr))\n # Update learning rate, note that different parameter may have different learning rate\n param_keys = []\n for ind, param_group in enumerate(optimizer.param_groups):\n param_group[\"lr\"] = new_lr if ind == 0 else new_lr * 2 # bias params\n param_keys += param_group[\"params\"]\n\n\ndef test_flat_and_anneal():\n import numpy as np\n\n model = resnet18()\n base_lr = 1e-4\n optimizer_cfg = dict(type=\"Adam\", lr=base_lr, weight_decay=0)\n optimizer = obj_from_dict(optimizer_cfg, torch.optim, 
dict(params=model.parameters()))\n\n # learning policy\n total_epochs = 300\n epoch_len = 500\n tail_flat_iters = 15 * epoch_len\n total_iters = epoch_len * total_epochs - tail_flat_iters\n\n # scheduler = build_scheduler(lr_config, optimizer, epoch_length)\n scheduler_cfg = L(flat_and_anneal_lr_scheduler)(\n total_iters=total_iters,\n warmup_method=\"pow\",\n warmup_pow=2,\n warmup_factor=0.0,\n warmup_iters=epoch_len * 5,\n #\n anneal_method=\"cosine\",\n anneal_point=5 / (total_epochs - 15),\n # anneal_point=0.72,\n step_gamma=0.1,\n poly_power=5,\n steps=[0.5, 0.75, 0.9],\n #\n target_lr_factor=0.05,\n )\n scheduler_cfg.optimizer = optimizer\n scheduler = instantiate(scheduler_cfg)\n\n print(\"start lr: {}\".format(scheduler.get_lr()))\n steps = []\n lrs = []\n\n epoch_lrs = []\n global_step = 0\n\n start_epoch = 0\n for epoch in range(start_epoch):\n for batch in range(epoch_len):\n scheduler.step() # when no state_dict availble\n global_step += 1\n\n for epoch in range(start_epoch, total_epochs):\n # if global_step >= lr_config['warmup_iters']:\n # scheduler.step(epoch)\n # print(type(scheduler.get_lr()[0]))\n # import pdb;pdb.set_trace()\n epoch_lrs.append([epoch, scheduler.get_lr()[0]]) # only get the first lr (maybe a group of lrs)\n for batch in range(epoch_len):\n # if global_step < lr_config['warmup_iters']:\n # scheduler.step(global_step)\n cur_lr = scheduler.get_lr()[0]\n if global_step == 0 or (len(lrs) >= 1 and cur_lr != lrs[-1]):\n print(\"epoch {}, batch: {}, global_step:{} lr: {}\".format(epoch, batch, global_step, cur_lr))\n steps.append(global_step)\n lrs.append(cur_lr)\n global_step += 1\n scheduler.step() # usually after optimizer.step()\n # print(epoch_lrs)\n # import pdb;pdb.set_trace()\n # epoch_lrs.append([total_epochs, scheduler.get_lr()[0]])\n\n epoch_lrs = np.asarray(epoch_lrs, dtype=np.float32)\n for i in range(len(epoch_lrs)):\n print(\"{:02d} {}\".format(int(epoch_lrs[i][0]), epoch_lrs[i][1]))\n\n plt.figure(dpi=100)\n # plt.suptitle(\"{}\".format(dict(lr_cfg)), size=4)\n plt.subplot(1, 2, 1)\n plt.plot(steps, lrs, \"-.\")\n # plt.show()\n plt.subplot(1, 2, 2)\n # print(epoch_lrs.dtype)\n plt.plot(epoch_lrs[:, 0], epoch_lrs[:, 1], \"-.\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n from mmcv.runner import obj_from_dict\n import sys\n import os.path as osp\n import torch\n from torchvision.models import resnet18\n import matplotlib.pyplot as plt\n from omegaconf import OmegaConf\n from detectron2.config import LazyCall as L\n from detectron2.config import LazyConfig, instantiate\n\n cur_dir = osp.dirname(osp.abspath(__file__))\n sys.path.insert(0, osp.join(cur_dir, \"../../..\"))\n\n test_flat_and_anneal()\n exit(0)\n\n total_epochs = 24\n model = resnet18()\n optimizer_cfg = dict(type=\"Adam\", lr=6.25e-5, weight_decay=0)\n # learning policy\n lr_config = dict(\n policy=\"warmup_multistep\",\n gamma=0.1,\n warmup=\"linear\",\n warmup_iters=500,\n warmup_ratio=1.0 / 3,\n step=[16, 22],\n epochs=total_epochs,\n )\n optimizer = obj_from_dict(optimizer_cfg, torch.optim, dict(params=model.parameters()))\n epoch_length = 1000\n scheduler = build_scheduler(lr_config, optimizer, epoch_length)\n print(\"start lr: {}\".format(scheduler.get_lr()))\n steps = []\n lrs = []\n epoch_lrs = []\n global_step = 0\n for epoch in range(total_epochs):\n # if global_step >= lr_config['warmup_iters']:\n # scheduler.step(epoch)\n epoch_lrs.append(scheduler.get_lr())\n for batch in range(epoch_length):\n # if global_step < lr_config['warmup_iters']:\n # 
scheduler.step(global_step)\n cur_lr = scheduler.get_lr()\n if global_step == 0 or cur_lr != lrs[-1]:\n print(\"epoch {}, batch: {}, global_step:{} lr: {}\".format(epoch, batch, global_step, cur_lr))\n steps.append(global_step)\n lrs.append(cur_lr)\n global_step += 1\n scheduler.step() # usually after optimizer.step()\n for i, lr in enumerate(epoch_lrs):\n print(\"{:02d} {}\".format(i, lr))\n plt.subplot(1, 2, 1)\n plt.plot(steps, lrs)\n # plt.show()\n plt.subplot(1, 2, 2)\n plt.plot(list(range(len(epoch_lrs))), epoch_lrs)\n plt.show()\n",
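"# --- Editor's note: usage sketch, not part of the original repository. ---\n# It traces the lr-factor curve of flat_and_anneal_lr_scheduler above:\n# linear warmup to 1.0, flat until anneal_point, then cosine decay.\n# The import path 'lr_scheduler' is hypothetical.\nimport torch\nfrom torchvision.models import resnet18\n\nfrom lr_scheduler import flat_and_anneal_lr_scheduler # hypothetical module name\n\nmodel = resnet18()\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\nscheduler, lr_fn = flat_and_anneal_lr_scheduler(\n    optimizer,\n    total_iters=10000,\n    warmup_iters=500,\n    warmup_factor=0.1,\n    warmup_method=\"linear\",\n    anneal_method=\"cosine\",\n    anneal_point=0.72, # annealing starts at 72% of the cycle\n    target_lr_factor=0.0,\n    return_function=True, # also return the raw factor function for inspection\n)\nfor it in (0, 250, 500, 7200, 8600, 9999):\n    print(it, round(lr_fn(it), 4)) # 0.1, 0.55, 1.0, 1.0, 0.5, ~0.0\n",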
"# -*- coding: utf-8 -*-\nimport copy\nimport logging\nimport pickle\n\nimport mmcv\nimport random\nimport numpy as np\nimport torch\nfrom core.base_data_loader import Base_DatasetFromList\nfrom core.utils.data_utils import read_image_mmcv\nfrom core.utils.dataset_utils import (\n filter_invalid_in_dataset_dicts,\n flat_dataset_dicts,\n my_build_batch_data_loader,\n trivial_batch_collator,\n)\nfrom core.utils.my_distributed_sampler import (\n InferenceSampler,\n RepeatFactorTrainingSampler,\n TrainingSampler,\n)\nfrom detectron2.data import get_detection_dataset_dicts\nfrom detectron2.structures import BoxMode\n\nfrom .datasets_misc import letterbox, random_affine, augment_hsv\nfrom det.yolov4.yolo_utils.utils import xyxy2cxcywh\n\nlogger = logging.getLogger(__name__)\n\n\nclass YoloV4_DatasetFromList(Base_DatasetFromList):\n \"\"\"NOTE: we can also use the default DatasetFromList and\n implement a similar custom DataMapper,\n but it is harder to implement some features relying on other dataset dicts\n # https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/common.py\n Wrap a list to a torch Dataset. It produces elements of the list as data.\n \"\"\"\n\n def __init__(\n self, cfg, split, lst: list, *, stride, img_size, copy: bool = True, serialize: bool = True, flatten=False\n ):\n \"\"\"\n Args:\n lst (list): a list which contains elements to produce.\n copy (bool): whether to deepcopy the element when producing it,\n so that the result can be modified in place without affecting the\n source in the list.\n serialize (bool): whether to hold memory using serialized objects, when\n enabled, data loader workers can use shared RAM from master\n process instead of making a copy.\n \"\"\"\n self.cfg = cfg\n self.split = split # train | val | test\n # fmt: off\n self.stride = stride\n self.img_size = img_size\n # load 4 images at a time into a mosaic (only during training)\n self.mosaic = cfg.INPUT.AUG_MOSAIC if split == 'train' else False\n self.mosaic_border = [-img_size // 2, -img_size // 2]\n self.img_format = cfg.INPUT.FORMAT # default BGR\n self.with_depth = cfg.INPUT.WITH_DEPTH\n self.aug_depth = cfg.INPUT.AUG_DEPTH\n # NOTE: color augmentation config\n self.aug_hsv_prob = cfg.INPUT.AUG_HSV_PROB\n self.color_aug_prob = cfg.INPUT.COLOR_AUG_PROB\n self.color_aug_type = cfg.INPUT.COLOR_AUG_TYPE\n self.color_aug_code = cfg.INPUT.COLOR_AUG_CODE\n self.rand_hflip = cfg.INPUT.RAND_HFLIP\n self.rand_vflip = cfg.INPUT.RAND_VFLIP\n # fmt: on\n if split == \"train\" and self.color_aug_prob > 0:\n self.color_augmentor = self._get_color_augmentor(aug_type=self.color_aug_type, aug_code=self.color_aug_code)\n else:\n self.color_augmentor = None\n # ----------------------------------------------------\n self.flatten = flatten\n self._lst = flat_dataset_dicts(lst) if flatten else lst\n # ----------------------------------------------------\n self._copy = copy\n self._serialize = serialize\n\n def _serialize(data):\n buffer = pickle.dumps(data, protocol=-1)\n return np.frombuffer(buffer, dtype=np.uint8)\n\n if self._serialize:\n logger.info(\"Serializing {} elements to byte tensors and concatenating them all ...\".format(len(self._lst)))\n self._lst = [_serialize(x) for x in self._lst]\n self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)\n self._addr = np.cumsum(self._addr)\n self._lst = np.concatenate(self._lst)\n logger.info(\"Serialized dataset takes {:.2f} MiB\".format(len(self._lst) / 1024 ** 2))\n\n def __len__(self):\n if self._serialize:\n return 
len(self._addr)\n else:\n return len(self._lst)\n\n def read_data(self, idx):\n \"\"\"load image and annos random shift & scale bbox; crop, rescale.\"\"\"\n cfg = self.cfg\n net_cfg = cfg.MODEL.YOLO\n loss_cfg = net_cfg.LOSS_CFG\n\n dataset_dict = copy.deepcopy(self._get_sample_dict(idx)) # it will be modified by code below\n\n if self.split == \"train\":\n if self.mosaic:\n image, labels = self.load_mosaic(idx)\n shapes = None\n else:\n (\n image,\n (im_H_ori, im_W_ori),\n (im_H, im_W),\n ) = self.load_resize_image(dataset_dict)\n # Letterbox\n shape = self.img_size # final letterboxed shape\n image, ratio, pad = letterbox(image, shape, auto=False, scaleup=True)\n shapes = (im_H_ori, im_W_ori), (\n (im_W / im_W_ori, im_W / im_W_ori),\n pad,\n ) # for COCO mAP rescaling\n\n # Load labels\n annotations = dataset_dict[\"annotations\"]\n bboxes = [BoxMode.convert(anno[\"bbox\"], anno[\"bbox_mode\"], BoxMode.XYXY_ABS) for anno in annotations]\n bboxes = np.array(bboxes, dtype=\"float32\")\n classes = np.array([anno[\"category_id\"] for anno in annotations])\n labels = np.hstack([classes.reshape(-1, 1), bboxes])\n if labels.size > 0:\n labels[:, 1] = ratio[0] * im_W / im_W_ori * labels[:, 1] + pad[0] # pad width\n labels[:, 2] = ratio[1] * im_H / im_H_ori * labels[:, 2] + pad[1] # pad height\n labels[:, 3] = ratio[0] * im_W / im_W_ori * labels[:, 3] + pad[0]\n labels[:, 4] = ratio[1] * im_H / im_H_ori * labels[:, 4] + pad[1]\n\n image, labels = random_affine(\n image,\n labels,\n degrees=cfg.INPUT.RAND_ROTATE_DEG,\n translate=cfg.INPUT.RAND_TRANSLATE,\n scale=cfg.INPUT.RAND_SCALE,\n shear=cfg.INPUT.RAND_SHEAR,\n )\n else: # load test image\n image, (im_H_ori, im_W_ori), (im_H, im_W) = self.load_resize_image(dataset_dict)\n # Letterbox\n shape = self.img_size # final letterboxed shape\n image, ratio, pad = letterbox(image, shape, auto=False, scaleup=False)\n shapes = (im_H_ori, im_W_ori), (\n (im_W / im_W_ori, im_W / im_W_ori),\n pad,\n ) # for COCO mAP rescaling\n\n if self.split == \"train\":\n # Augment colorspace\n if np.random.rand() < self.aug_hsv_prob:\n augment_hsv(\n image,\n hgain=cfg.INPUT.HSV_H,\n sgain=cfg.INPUT.HSV_S,\n vgain=cfg.INPUT.HSV_V,\n source_format=self.img_format,\n )\n\n # NOTE: maybe add or change color augment here ===================================\n if self.split == \"train\" and self.color_aug_prob > 0 and self.color_augmentor is not None:\n if np.random.rand() < self.color_aug_prob:\n image = self._color_aug(image, self.color_aug_type)\n\n if self.split == \"train\":\n # convert xyxy to cxcywh (rel) --------------------------\n n_label = len(labels)\n if n_label > 0:\n labels[:, 1:5] = xyxy2cxcywh(labels[:, 1:5])\n # Normalize coordinates 0 - 1\n labels[:, [2, 4]] /= image.shape[0] # height\n labels[:, [1, 3]] /= image.shape[1] # width\n\n # random left-right flip\n if self.rand_hflip and random.random() < 0.5:\n image = np.fliplr(image)\n if n_label > 0:\n labels[:, 1] = 1 - labels[:, 1] # flip cx\n\n # random up-down flip\n if self.rand_vflip and random.random() < 0.5:\n image = np.flipud(image)\n if n_label > 0:\n labels[:, 2] = 1 - labels[:, 2] # flip cy\n\n dataset_dict.pop(\"annotations\", None) # no need to keep original annos\n\n # result image: NOTE: yolo was trained in RGB for coco (CHW, 0-1)\n if self.img_format == \"BGR\":\n image_normed = self.normalize_image(cfg, image[:, :, ::-1].transpose(2, 0, 1))\n elif self.img_format == \"RGB\":\n image_normed = self.normalize_image(cfg, image.transpose(2, 0, 1))\n else:\n raise ValueError(\n \"Yolo was 
trained in RGB. In dataloader, RGB or BGR are OK, but got: {}\".format(self.img_format)\n )\n dataset_dict[\"image\"] = torch.as_tensor(image_normed.astype(\"float32\")).contiguous()\n dataset_dict[\"shapes\"] = shapes\n\n #################################################################################\n if self.split != \"train\":\n # don't load annotations at test time\n return dataset_dict\n #######################################################################################\n labels_out = torch.zeros(n_label, 6) # image_idx_in_batch, cls_label, cxcywh\n if n_label > 0:\n labels_out[:, 1:] = torch.from_numpy(labels)\n dataset_dict[\"labels\"] = labels_out\n return dataset_dict\n\n def load_resize_image(self, dataset_dict):\n # loads 1 image from dataset, returns img, original hw, resized hw\n img = read_image_mmcv(dataset_dict[\"file_name\"], format=self.img_format) # BGR\n assert img is not None, \"Image not found: {}\".format(dataset_dict[\"file_name\"])\n h0, w0 = img.shape[:2] # orig hw\n r = self.img_size / max(h0, w0) # resize image to img_size\n if r != 1: # always resize down, only resize up if training with augmentation\n interp = \"area\" if r < 1 and self.split != \"train\" else \"bilinear\"\n img = mmcv.imresize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)\n return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized\n\n def load_mosaic(self, idx):\n # loads images in a mosaic\n labels4 = []\n im_size = self.img_size\n yc, xc = [int(random.uniform(-x, 2 * im_size + x)) for x in self.mosaic_border] # mosaic center x, y\n indices = [idx] + [random.randint(0, self.__len__() - 1) for _ in range(3)] # 3 additional image indices\n\n for i, index in enumerate(indices):\n # Load image (fix: use the sampled index, not idx, so the mosaic mixes 4 different images)\n dataset_dict = self._get_sample_dict(index)\n img, (h0, w0), (h, w) = self.load_resize_image(dataset_dict)\n\n # place img in img4\n if i == 0: # top left\n img4 = np.full(\n (im_size * 2, im_size * 2, img.shape[2]),\n 114,\n dtype=np.uint8,\n ) # base image with 4 tiles\n x1a, y1a, x2a, y2a = (\n max(xc - w, 0),\n max(yc - h, 0),\n xc,\n yc,\n ) # xmin, ymin, xmax, ymax (large image)\n x1b, y1b, x2b, y2b = (\n w - (x2a - x1a),\n h - (y2a - y1a),\n w,\n h,\n ) # xmin, ymin, xmax, ymax (small image)\n elif i == 1: # top right\n x1a, y1a, x2a, y2a = (\n xc,\n max(yc - h, 0),\n min(xc + w, im_size * 2),\n yc,\n )\n x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n elif i == 2: # bottom left\n x1a, y1a, x2a, y2a = (\n max(xc - w, 0),\n yc,\n xc,\n min(im_size * 2, yc + h),\n )\n x1b, y1b, x2b, y2b = (\n w - (x2a - x1a),\n 0,\n max(xc, w),\n min(y2a - y1a, h),\n )\n elif i == 3: # bottom right\n x1a, y1a, x2a, y2a = (\n xc,\n yc,\n min(xc + w, im_size * 2),\n min(im_size * 2, yc + h),\n )\n x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n padw = x1a - x1b\n padh = y1a - y1b\n\n # Labels\n annotations = dataset_dict[\"annotations\"]\n bboxes = [BoxMode.convert(anno[\"bbox\"], anno[\"bbox_mode\"], BoxMode.XYXY_ABS) for anno in annotations]\n bboxes = np.array(bboxes, dtype=\"float32\")\n classes = np.array([anno[\"category_id\"] for anno in annotations])\n labels = np.hstack([classes.reshape(-1, 1), bboxes])\n if labels.size > 0: # old xyxy to new xyxy\n labels[:, 1] = w / w0 * labels[:, 1] + padw\n labels[:, 2] = h / h0 * labels[:, 2] + padh\n labels[:, 3] = w / w0 * labels[:, 3] + padw\n labels[:, 4] = h / h0 * labels[:, 4] + padh\n labels4.append(labels)\n\n # Concat/clip 
labels\n if len(labels4):\n labels4 = np.concatenate(labels4, 0)\n # np.clip(labels4[:, 1:] - im_size / 2, 0, im_size, out=labels4[:, 1:]) # use with center crop\n np.clip(labels4[:, 1:], 0, 2 * im_size, out=labels4[:, 1:]) # use with random_affine\n\n # Replicate\n # img4, labels4 = replicate(img4, labels4)\n\n # Augment\n # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)\n cfg = self.cfg\n img4, labels4 = random_affine(\n img4,\n labels4,\n degrees=cfg.INPUT.RAND_ROTATE_DEG,\n translate=cfg.INPUT.RAND_TRANSLATE,\n scale=cfg.INPUT.RAND_SCALE,\n shear=cfg.INPUT.RAND_SHEAR,\n border=self.mosaic_border,\n ) # border to remove\n\n return img4, labels4\n\n def __getitem__(self, idx):\n if self.split != \"train\":\n return self.read_data(idx)\n\n while True: # return valid data for train\n processed_data = self.read_data(idx)\n if processed_data is None:\n idx = self._rand_another(idx)\n continue\n return processed_data\n\n\ndef build_yolo_train_loader(cfg, dataset_names, *, stride, img_size):\n \"\"\"A data loader is created by the following steps:\n\n 1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.\n 2. Coordinate a random shuffle order shared among all processes (all GPUs)\n 3. Each process spawn another few workers to process the dicts. Each worker will:\n * Map each metadata dict into another format to be consumed by the model.\n * Batch them by simply putting dicts into a list.\n\n The batched ``list[mapped_dict]`` is what this dataloader will yield.\n\n Args:\n cfg (CfgNode): the config\n\n Returns:\n an infinite iterator of training data\n \"\"\"\n dataset_dicts = get_detection_dataset_dicts(\n dataset_names,\n filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,\n min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0,\n proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,\n )\n\n dataset_dicts = filter_invalid_in_dataset_dicts(dataset_dicts, visib_thr=cfg.DATALOADER.FILTER_VISIB_THR)\n\n dataset = YoloV4_DatasetFromList(\n cfg,\n split=\"train\",\n lst=dataset_dicts,\n stride=stride,\n img_size=img_size,\n copy=False,\n )\n\n sampler_name = cfg.DATALOADER.SAMPLER_TRAIN\n logger = logging.getLogger(__name__)\n logger.info(\"Using training sampler {}\".format(sampler_name))\n # TODO avoid if-else?\n if sampler_name == \"TrainingSampler\":\n sampler = TrainingSampler(len(dataset))\n elif sampler_name == \"RepeatFactorTrainingSampler\":\n repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(\n dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD\n )\n sampler = RepeatFactorTrainingSampler(repeat_factors)\n else:\n raise ValueError(\"Unknown training sampler: {}\".format(sampler_name))\n return my_build_batch_data_loader(\n dataset,\n sampler,\n cfg.SOLVER.IMS_PER_BATCH,\n aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,\n num_workers=cfg.DATALOADER.NUM_WORKERS,\n )\n\n\ndef build_yolo_test_loader(cfg, dataset_name, *, stride, img_size):\n \"\"\"Similar to `build_detection_train_loader`. 
But this function uses the\n given `dataset_name` argument (instead of the names in cfg), and uses batch\n size 1.\n\n Args:\n cfg: a detectron2 CfgNode\n dataset_name (str): a name of the dataset that's available in the DatasetCatalog\n\n Returns:\n DataLoader: a torch DataLoader, that loads the given detection\n dataset, with test-time transformation and batching.\n \"\"\"\n dataset_dicts = get_detection_dataset_dicts(\n [dataset_name],\n filter_empty=False,\n proposal_files=[cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]]\n if cfg.MODEL.LOAD_PROPOSALS\n else None,\n )\n\n dataset = YoloV4_DatasetFromList(\n cfg,\n split=\"test\",\n lst=dataset_dicts,\n stride=stride,\n img_size=img_size,\n flatten=False,\n )\n\n sampler = InferenceSampler(len(dataset))\n # Always use 1 image per worker during inference since this is the\n # standard when reporting inference time in papers.\n batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)\n\n num_workers = cfg.DATALOADER.NUM_WORKERS\n # Horovod: limit # of CPU threads to be used per worker.\n # if num_workers > 0:\n # torch.set_num_threads(num_workers)\n\n kwargs = {\"num_workers\": num_workers}\n # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent\n # issues with Infiniband implementations that are not fork-safe\n # https://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_imagenet_resnet50.py\n # if (num_workers > 0 and hasattr(mp, '_supports_context') and\n # mp._supports_context and 'forkserver' in mp.get_all_start_methods()):\n # kwargs['multiprocessing_context'] = 'forkserver'\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_sampler=batch_sampler, collate_fn=trivial_batch_collator, **kwargs\n )\n return data_loader\n",
"import numpy as np\nimport mmcv\nfrom scipy.io import loadmat\nimport os.path as osp\nimport sys\nfrom tqdm import tqdm\nimport setproctitle\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nPROJ_ROOT = osp.normpath(osp.join(cur_dir, \"../../../../\"))\nsys.path.insert(0, PROJ_ROOT)\nfrom lib.pysixd import inout, misc\nfrom lib.pysixd.RT_transform import se3_q2m\n\nsetproctitle.setproctitle(osp.basename(__file__).split(\".\")[0])\n\ndata_root = osp.join(PROJ_ROOT, \"datasets/BOP_DATASETS/lmo\")\ntest_root = osp.join(data_root, \"test\")\nimage_set_dir = osp.join(data_root, \"image_set\")\n\n# original posecnn results\nposecnn_results_dir = osp.join(test_root, \"PoseCNN_Occlusion_results\")\n\n# our format\ninit_pose_dir = osp.join(test_root, \"init_poses\")\nmmcv.mkdir_or_exist(init_pose_dir)\ninit_pose_path = osp.join(init_pose_dir, \"init_pose_posecnn_lmo.json\")\n\nidx2class = {\n 1: \"ape\",\n # 2: \"benchvise\",\n # 3: 'bowl',\n # 4: \"camera\",\n 5: \"can\",\n 6: \"cat\",\n # 7: 'cup',\n 8: \"driller\",\n 9: \"duck\",\n 10: \"eggbox\",\n 11: \"glue\",\n 12: \"holepuncher\",\n # 13: \"iron\",\n # 14: \"lamp\",\n # 15: \"phone\",\n}\nclasses = idx2class.values()\nclasses = sorted(classes)\n\nK = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]])\n\n\nif __name__ == \"__main__\":\n results = {}\n\n idx_file = osp.join(image_set_dir, \"lmo_test.txt\")\n with open(idx_file, \"r\") as f:\n indices = [line.strip(\"\\r\\n\") for line in f]\n\n models = {}\n for obj_id in idx2class:\n models[obj_id] = inout.load_ply(\n osp.join(data_root, f\"models/obj_{obj_id:06d}.ply\"),\n vertex_scale=0.001,\n )\n\n num_not_exist = 0\n num_not_found = 0\n num_total = 0\n\n scene_id = 2\n for im_idx in tqdm(indices):\n int_im_idx = int(im_idx)\n num_total += 1\n posecnn_result_path = osp.join(posecnn_results_dir, f\"{int_im_idx:04d}.mat\")\n\n if not osp.exists(posecnn_result_path):\n print(f\"not result file: {posecnn_result_path}\")\n num_not_exist += 1\n continue\n\n posecnn_res = loadmat(posecnn_result_path)\n pred_obj_ids = posecnn_res[\"rois\"][:, 1]\n if len(pred_obj_ids) < 1:\n print(f\"not detected: {im_idx}\")\n num_not_found += 1\n continue\n\n scene_im_id = f\"{scene_id}/{int_im_idx}\"\n results[scene_im_id] = []\n for pred_i, pred_obj_id in enumerate(pred_obj_ids):\n pred_obj_id = int(pred_obj_id)\n pose_q = posecnn_res[\"poses\"][pred_i]\n pose_m = se3_q2m(pose_q)\n\n model = models[pred_obj_id]\n bbox_from_pose = misc.compute_2d_bbox_xywh_from_pose(\n model[\"pts\"], pose_m, K, width=640, height=480, clip=True\n )\n\n cur_res = {\n \"obj_id\": pred_obj_id,\n \"pose_est\": pose_m.tolist(),\n \"bbox_est\": bbox_from_pose.tolist(),\n \"score\": 1.0,\n }\n results[scene_im_id].append(cur_res)\n\n print(init_pose_path)\n print(f\"num not exist: {num_not_exist}/{num_total}={num_not_exist/num_total*100:.2f}%\")\n print(f\"num not found: {num_not_found}/{num_total}={num_not_found/num_total*100:.2f}%\")\n \"\"\"\n num not exist: 0\n num not found: 0\n \"\"\"\n inout.save_json(init_pose_path, results, sort=False)\n",
"import numpy as np\nimport mmcv\nfrom scipy.io import loadmat\nimport os.path as osp\nimport sys\nfrom tqdm import tqdm\nimport setproctitle\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nPROJ_ROOT = osp.normpath(osp.join(cur_dir, \"../../../../\"))\nsys.path.insert(0, PROJ_ROOT)\nfrom lib.pysixd import inout, misc\nfrom lib.pysixd.RT_transform import se3_q2m\n\nsetproctitle.setproctitle(osp.basename(__file__).split(\".\")[0])\n\ndata_root = osp.join(PROJ_ROOT, \"datasets/BOP_DATASETS/lm\")\ntest_root = osp.join(data_root, \"test\")\nimage_set_dir = osp.join(data_root, \"image_set\")\n\n# original posecnn results\nposecnn_results_dir = osp.join(test_root, \"PoseCNN_LINEMOD_6D_results\")\n\n# our format\ninit_pose_dir = osp.join(test_root, \"init_poses\")\nmmcv.mkdir_or_exist(init_pose_dir)\ninit_pose_path = osp.join(init_pose_dir, \"init_pose_posecnn_lm.json\")\n\nidx2class = {\n 1: \"ape\",\n 2: \"benchvise\",\n # 3: 'bowl',\n 4: \"camera\",\n 5: \"can\",\n 6: \"cat\",\n # 7: 'cup',\n 8: \"driller\",\n 9: \"duck\",\n 10: \"eggbox\",\n 11: \"glue\",\n 12: \"holepuncher\",\n 13: \"iron\",\n 14: \"lamp\",\n 15: \"phone\",\n}\nclasses = idx2class.values()\nclasses = sorted(classes)\n\nK = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]])\n\n\nif __name__ == \"__main__\":\n results = {}\n num_not_exist = 0\n num_not_found = 0\n num_total = 0\n for obj_id, obj_name in tqdm(idx2class.items()):\n scene_id = obj_id # for lm, scene_id is obj_id\n model = inout.load_ply(\n osp.join(data_root, f\"models/obj_{obj_id:06d}.ply\"),\n vertex_scale=0.001,\n )\n\n idx_file = osp.join(image_set_dir, f\"{obj_name}_test.txt\")\n with open(idx_file, \"r\") as f:\n indices = [line.strip(\"\\r\\n\") for line in f]\n for i_res, im_idx in enumerate(tqdm(indices)):\n num_total += 1\n posecnn_obj_name = obj_name if obj_name != \"benchvise\" else \"benchviseblue\"\n posecnn_result_path = osp.join(posecnn_results_dir, posecnn_obj_name, f\"{i_res:04d}.mat\")\n if not osp.exists(posecnn_result_path):\n print(f\"not result file: {posecnn_result_path}\")\n num_not_exist += 1\n continue\n\n posecnn_res = loadmat(posecnn_result_path)\n found = posecnn_res[\"rois\"][:, 1]\n if found == -1: # not detected\n print(f\"not detected: {obj_name} {im_idx}\")\n num_not_found += 1\n continue\n proposal_idx = np.where(found == 1)\n assert len(proposal_idx) == 1\n pose_q = posecnn_res[\"poses\"][proposal_idx].reshape(7)\n pose_m = se3_q2m(pose_q)\n\n bbox_from_pose = misc.compute_2d_bbox_xywh_from_pose(\n model[\"pts\"], pose_m, K, width=640, height=480, clip=True\n )\n\n int_im_idx = int(im_idx)\n scene_im_id = f\"{scene_id}/{int_im_idx}\"\n results[scene_im_id] = []\n cur_res = {\n \"obj_id\": obj_id,\n \"pose_est\": pose_m.tolist(),\n \"bbox_est\": bbox_from_pose.tolist(),\n \"score\": 1.0,\n }\n results[scene_im_id].append(cur_res)\n\n print(init_pose_path)\n print(f\"num not exist: {num_not_exist}/{num_total}={num_not_exist/num_total*100:.2f}%\")\n print(f\"num not found: {num_not_found}/{num_total}={num_not_found/num_total*100:.2f}%\")\n \"\"\"\n num not exist: 0\n num not found: 0\n \"\"\"\n inout.save_json(init_pose_path, results, sort=False)\n"
] | [
[
"torch.sum",
"torch.rand",
"torch.autograd.Variable"
],
[
"numpy.abs",
"numpy.linalg.inv",
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"numpy.abs",
"matplotlib.pyplot.title",
"numpy.median",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.array"
],
[
"torch.sin",
"torch.cat",
"torch.addcdiv",
"torch.allclose",
"numpy.hstack",
"torch.ones",
"numpy.matmul",
"torch.from_numpy",
"torch.tensor",
"numpy.copy",
"torch.rand",
"torch.arange",
"torch.cos",
"torch.empty_like",
"torch.zeros_like",
"torch.cuda.FloatTensor",
"torch.exp",
"numpy.random.rand",
"torch.stack",
"numpy.array",
"torch.atan2",
"numpy.random.seed",
"numpy.linalg.norm",
"torch.matmul",
"torch.nn.functional.grid_sample",
"torch.clamp",
"torch.cross"
],
[
"torch.floor",
"torch.gather",
"torch.tensor",
"torch.stack",
"torch.ones_like"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.rad2deg",
"numpy.concatenate",
"numpy.max",
"scipy.stats.gaussian_kde",
"numpy.mean",
"numpy.cross",
"numpy.arcsin",
"numpy.arange",
"numpy.sin",
"numpy.std",
"numpy.argmax",
"numpy.count_nonzero",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linalg.inv",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.linalg.norm",
"numpy.cos",
"numpy.random.uniform"
],
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.optim.lr_scheduler.StepLR",
"torch.optim.lr_scheduler.LambdaLR",
"numpy.asarray",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"torch.zeros",
"numpy.clip",
"numpy.fliplr",
"numpy.flipud",
"torch.utils.data.DataLoader",
"numpy.cumsum",
"torch.from_numpy",
"numpy.full",
"numpy.concatenate",
"numpy.frombuffer",
"numpy.random.rand",
"numpy.array",
"torch.utils.data.sampler.BatchSampler"
],
[
"numpy.array",
"scipy.io.loadmat"
],
[
"numpy.array",
"scipy.io.loadmat",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
gorff/Toric-Code-Correlated-Error-Decoder | [
"c43cf34c22f03334add078f5d02e6604e5c89cba",
"c43cf34c22f03334add078f5d02e6604e5c89cba",
"c43cf34c22f03334add078f5d02e6604e5c89cba"
] | [
"project/correctiondemos/pythag_test.py",
"project/heatplots/3D_L40N32A15l10000_ROUGH.py",
"project/heatplots/heat_L40N32A15l50000_FINE.py"
] | [
"\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport numpy as np\nimport os,sys,inspect\nimport imageio\n\nsys.path.insert(1, os.path.join(sys.path[0], '..')) #go up a dir to import\nimport CodePy2.funmath as funmath\n#import imageio\nn = 1.0\nsizes = [i/n for i in range(33*int(n))]\n\nxvals = sizes\nfilenames = []\nfor expectedlength in sizes:\n yvals = []\n fig = plt.figure()\n for i in sizes:\n variance = 1\n strength = 1\n yvals.append(funmath.getnormval(i,expectedlength,strength,variance))\n maxval = mlab.normpdf(expectedlength, expectedlength, np.sqrt(variance))\n\n #yvals[-1] = yvals[-1]*strength/maxval\n\n plt.plot(xvals,yvals)\n plt.grid(True)\n plt.ylabel('Adjusted weight (A)')\n plt.xlabel('Manhatten distance (M)')\n plt.axis([0, 30, 0, 30])\n plt.title('Gaussian adjusted matching distances')\n plt.suptitle('variance = '+str(variance)+', w = '+str(expectedlength))\n filename = 'gaussian/'+'gaussian-'+str(int(expectedlength*n))+'.png'\n plt.savefig(filename)\n filenames.append(filename)\n plt.close()\n #plt.show()\n#os.system(\"avconv -y -f image2 -i figs/gaussian-%d.png -r 10 -s 800x600 gaussianvideo.avi\")\n\n#turn into gif\nimages = []\nfor filename in filenames:\n images.append(imageio.imread(filename))\nimageio.mimsave('xbar_demo.gif', images)\n",
"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport pandas as pd\nfrom sys import argv\n\nfilename = 'highMT_l_3400_ac7e166d364445f7bc9d746582ee3392'\ndat = np.loadtxt('data/'+filename+'.txt' , delimiter=',', skiprows=1, unpack=False)\n\nn = 3400 #numsamples\n\n# Load data from CSV\nfig = plt.figure()\nax = fig.gca(projection='3d')\nx = dat[:,1]\ny = dat[:,3]\nz = dat[:,6]\n\n#calc error\np = 0.2 #value to calculate error for\nsigma = 1.96 #confidence interval\nzerror = [1.0*sigma*np.sqrt((1.0/n)*p*(1.0-p)) for p in z]\nxmin = 0\nxmax = 8\nnx = 12 #numer of steps\nymin = 0\nymax = 21\nny = 10\n\ndf = pd.DataFrame({'x': x, 'y': y, 'z': z})\nfig = plt.figure()\nax = Axes3D(fig)\nsurf = ax.plot_trisurf(df.x, df.y, df.z, cmap=cm.jet, linewidth=0.1)\nfig.colorbar(surf, shrink=0.5, aspect=5)\n\n#draw errorbars\nfor i in range(len(x)):\n ax.plot([x[i], x[i]], [y[i], y[i]], [z[i]+zerror[i], z[i]-zerror[i]], 'k',marker=\"_\")\n\nplt.xlabel('expected distance ($\\overline{w}$)')\nplt.ylabel('variacne ($\\sigma^2$)')\n#CS = plt.contour(xi, yi, zi, 15, linewidths=0.5, colors='k')\n# Plot the surface.\n#ax.scatter(x, y, z)#plt.colorbar(CS, orientation='vertical', shrink=0.8)\n\nplt.suptitle('Variance Vs Gaussian location')\nplt.title('L = 40, N = 32, Anyons = 15')\n#plt.grid(True)\n#plt.colorbar()\nplt.savefig('figs/'+'surface_'+filename+'.pdf')\nplt.savefig('figs/'+'surface'+filename+'.png')\nplt.show()\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import griddata\nimport matplotlib.mlab as ml\nfilename = 'highMT_l_50000_93a5d5e5843c4d849a430a5a3885eb24'\ndat = np.loadtxt('data/'+filename+'.txt' , delimiter=',', skiprows=1, unpack=False)\n\nqfail = dat[:,6]\nnumsamples = 50000\n\n# Load data from CSV\n\nx = dat[:,1] #expected val\ny = dat[:,3] #variance\nz = qfail\nxmin = min(x)\nxmax = max(x)\nprint([xmin,xmax])\nxss = 0.2 #step size\nnx = (xmax - xmin )/xss#10 #number of steps\nprint(nx)\nymin = min(y)\nymax = max(y)\nprint([ymin,ymax])\nyss = 0.25 #step size\nny = (ymax - ymin )/yss #10 #number of steps\nprint('min failure: '+str(min(z)))\nxi = np.linspace(xmin, xmax, nx)\nyi = np.linspace(ymin, ymax, ny)\nzi = ml.griddata(x, y, z, xi, yi, 'linear')\nplt.xlabel('expected distance ($\\overline{w}$)')\nplt.ylabel('variacne ($\\sigma^2$)')\n#CS = plt.contour(xi, yi, zi, 15, linewidths=0.5, colors='k')\nCS = plt.pcolormesh(xi, yi, zi, cmap = plt.get_cmap('rainbow'))\nplt.colorbar(CS, orientation='vertical', shrink=0.8)\nplt.suptitle('Variance Vs Gaussian location')\nplt.title('L = 40, N = 32, Anyons = 15')\n#plt.grid(True)\n#plt.colorbar()\n\nplt.savefig('figs/'+'variancevslocation_'+filename+'.pdf')\nplt.savefig('figs/'+'variancevslocation_'+filename+'.png')\nplt.show()\n"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"numpy.sqrt",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.mlab.griddata",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SunYanCN/nlp-experiments-in-pytorch | [
"5d05a53146dffd707e4d037230656f980d7be05c"
] | [
"models/Transformer.py"
] | [
"import copy\nimport math\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.utils import clones\n\n\nclass LayerNormGoogle(nn.Module):\n def __init__(self, features, epsilon=1e-6):\n super(LayerNormGoogle, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.epsilon = epsilon\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.epsilon) + self.b_2\n\n\nclass EncoderBlockGoogle(nn.Module):\n def __init__(self, layer, num_layers):\n super(EncoderBlockGoogle, self).__init__()\n self.layers = clones(layer, num_layers)\n self.norm = LayerNormGoogle(layer.size)\n\n def forward(self, x, mask):\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\n\nclass ResidualConnectionGoogle(nn.Module):\n def __init__(self, size, keep_prob):\n super(ResidualConnectionGoogle, self).__init__()\n self.norm = LayerNormGoogle(size)\n # TODO: Use dropout interface\n self.dropout = nn.Dropout(keep_prob)\n\n def forward(self, input, sublayer):\n return input + self.dropout(sublayer(self.norm(input)))\n\n\nclass EncoderLayerGoogle(nn.Module):\n def __init__(self, size, attention, feed_forward, keep_prob):\n super(EncoderLayerGoogle, self).__init__()\n self.size = size\n self.attention = attention\n self.feed_forward = feed_forward\n # Each encoder layer has two sublayers\n self.sublayer = clones(ResidualConnectionGoogle(size, keep_prob), 2)\n\n def forward(self, x, mask):\n x = self.sublayer[0](x, lambda x: self.attention(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n\n\nclass EncoderClassifier(nn.Module):\n def __init__(self, embedding, encoder, classifier, device, is_average=True):\n super(EncoderClassifier, self).__init__()\n self.embedding = embedding\n self.encoder = encoder\n self.classifier = classifier\n self.device = device\n self.is_average = is_average\n\n def forward(self, x, mask=None):\n kl_loss = torch.Tensor([0.0])\n # Initial x.size() = [length, batch_size]\n x = x.permute(1, 0)\n # After permute x.size = [batch_size, length]\n x = self.embedding(x)\n if \"cuda\" in str(self.device):\n x = x.cuda()\n kl_loss = kl_loss.cuda()\n x = self.encoder(x, mask)\n if self.is_average:\n # Averaged sentence representation\n x = torch.mean(x, dim=1)\n x = self.classifier(x)\n return x, kl_loss\n\n\nclass Classifier(nn.Module):\n def __init__(self, d_model, d_hidden, num_classes, keep_prob):\n super(Classifier, self).__init__()\n self.linear1 = nn.Linear(d_model, d_hidden)\n self.dropout = nn.Dropout(keep_prob)\n self.relu = nn.ReLU()\n self.linear2 = nn.Linear(d_hidden, num_classes)\n\n def forward(self, x):\n x = self.dropout(self.relu(self.linear1(x)))\n x = self.linear2(x)\n return x\n\n\nclass MultiHeadedAttentionGoogle(nn.Module):\n def __init__(self, heads=8, d_model=512, keep_prob=0.1):\n super(MultiHeadedAttentionGoogle, self).__init__()\n assert d_model % heads == 0\n self.d_k = d_model // heads\n self.heads = heads\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(keep_prob)\n\n def attention(self, query, key, value, mask=None):\n # Dot product attention\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n\n p_attn = 
F.softmax(scores, dim=-1)\n\n if self.dropout is not None:\n p_attn = self.dropout(p_attn)\n\n return torch.matmul(p_attn, value), p_attn\n\n def forward(self, query, key, value, mask=None):\n num_batches = query.size(0)\n if mask is not None:\n mask = mask.unsqueeze(1)\n\n # Apply linear projection on the input sequence and split the heads.\n query, key, value = [linear(x).view(num_batches, -1, self.heads, self.d_k).transpose(1, 2)\n for linear, x in zip(self.linears, (query, key, value))]\n\n # Apply attention on the projected and splitted vectors\n x, self.attn = self.attention(query, key, value, mask=mask)\n\n # Concat vectors and apply linear\n x = x.transpose(1, 2).contiguous().view(num_batches, -1, self.heads * self.d_k)\n\n return self.linears[-1](x)\n\n\nclass PositionalFeedForwardGoogle(nn.Module):\n def __init__(self, d_model, d_ff, keep_prob=0.1):\n super(PositionalFeedForwardGoogle, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(keep_prob)\n self.relu = nn.ReLU()\n\n def forward(self, input):\n return self.w_2(self.dropout(self.relu(self.w_1(input))))\n\n\nclass Embeddings(nn.Module):\n def __init__(self, embed_dim, vocab_size, padding_id, use_pretrained_embed, pretrained_weights,\n optional_sqrt_mul=False):\n super(Embeddings, self).__init__()\n # Initialize embeddings\n self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_id).cpu()\n if use_pretrained_embed:\n self.embedding.from_pretrained(pretrained_weights)\n self.embed_dim = embed_dim\n self.optional_sqrt_mul = optional_sqrt_mul\n\n def forward(self, input):\n if self.optional_sqrt_mul:\n return self.embedding(input) * math.sqrt(self.embed_dim)\n else:\n return self.embedding(input)\n\n\nclass PositionalEncodingGoogle(nn.Module):\n def __init__(self, d_model, keep_prob=0.1, max_len=5000):\n super(PositionalEncodingGoogle, self).__init__()\n self.dropout = nn.Dropout(keep_prob)\n\n positional_encoding = torch.zeros(max_len, d_model)\n pos = torch.arange(0., max_len).unsqueeze(1)\n # Log space\n div_term = torch.exp(torch.arange(0., d_model, 2) * (-math.log(10000) / d_model))\n\n positional_encoding[:, 0::2] = torch.sin(pos * div_term)\n positional_encoding[:, 1::2] = torch.cos(pos * div_term)\n\n positional_encoding = positional_encoding.unsqueeze(0)\n self.register_buffer(\"pe\", positional_encoding)\n\n def forward(self, input):\n return self.dropout(input + Variable(self.pe[:, :input.size(1)], requires_grad=False))\n\n\nclass TransformerGoogle:\n def __init__(self, args):\n super(TransformerGoogle, self).__init__()\n\n self.args_common = args[\"common_model_properties\"]\n self.args_specific = args[\"transformer_google\"]\n\n # Device\n self.device = self.args_common[\"device\"]\n\n # Input/Output dimensions\n self.vocab_size = self.args_common[\"vocab_size\"]\n self.embed_dim = self.args_common[\"embed_dim\"]\n self.num_class = self.args_common[\"num_class\"]\n\n # Embedding parameters\n self.padding_id = self.args_common[\"padding_id\"]\n\n # Condition parameters\n self.use_pretrained_embed = self.args_common[\"use_pretrained_embed\"]\n self.use_embed_sqrt_mul = self.args_specific[\"use_embed_sqrt_mul\"]\n\n # Pretrained embedding weights\n self.pretrained_weights = self.args_common[\"pretrained_weights\"]\n\n # Dropout probabilities for each individual part of the full model.\n self.keep_prob_encoder = self.args_specific[\"keep_prob_encoder\"]\n self.keep_prob_pe = self.args_specific[\"keep_prob_pe\"]\n self.kee_prob_pff = 
self.args_specific[\"keep_prob_pff\"]\n self.keep_prob_attn = self.args_specific[\"keep_prob_attn\"]\n self.keep_prob_clf = self.args_specific[\"keep_prob_clf\"]\n\n # Condition parameter for the transformer type (It only supports classification for now)\n self.transformer_type = self.args_specific[\"transformer_type\"]\n\n # Number of parallel attention layers for MultiHeadedAttention\n self.heads = self.args_specific[\"heads\"]\n\n # Number of encoder layers\n self.num_encoder_layers = self.args_specific[\"num_encoder_layers\"]\n\n # Number of hidden count units for Position-Wise Feed-Forward Network\n self.num_hidden_pos_ff = self.args_specific[\"num_hidden_pos_ff\"]\n\n # Maximum length of an input\n self.max_length = self.args_specific[\"max_length\"]\n\n if self.transformer_type == \"classifier\":\n self.model = self.create_classifier_transformer()\n else:\n raise ValueError(\"Transformer can be created as classifier for now!\")\n\n def create_classifier_transformer(self):\n c = copy.deepcopy\n\n # Initialize individual parts of the full model\n # attention = torch.nn.MultiheadAttention(num_heads=self.heads, embed_dim=self.embed_dim,\n # dropout=self.keep_prob_attn)\n attention = MultiHeadedAttentionGoogle(heads=self.heads, d_model=self.embed_dim, keep_prob=self.keep_prob_attn)\n\n ff = PositionalFeedForwardGoogle(d_model=self.embed_dim, d_ff=self.num_hidden_pos_ff,\n keep_prob=self.kee_prob_pff)\n\n embeddings = Embeddings(self.embed_dim, self.vocab_size, self.padding_id, self.use_pretrained_embed,\n self.pretrained_weights, optional_sqrt_mul=self.use_embed_sqrt_mul)\n\n positional_embeddings = PositionalEncodingGoogle(d_model=self.embed_dim, keep_prob=self.keep_prob_pe,\n max_len=self.max_length)\n\n # Initialize the full model\n model = EncoderClassifier(nn.Sequential(embeddings, c(positional_embeddings)),\n EncoderBlockGoogle(\n EncoderLayerGoogle(self.embed_dim, c(attention), c(ff), self.keep_prob_encoder),\n self.num_encoder_layers),\n Classifier(self.embed_dim, d_hidden=self.embed_dim // 2, num_classes=self.num_class,\n keep_prob=self.keep_prob_clf),\n device=self.device)\n\n # Initialize model parameters\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n return model\n\n\nif __name__ == '__main__':\n print(\"Transformer tests\")\n plt.figure(figsize=(15, 5))\n pe = PositionalEncodingGoogle(20, 0)\n y = pe.forward(Variable(torch.zeros(1, 100, 20)))\n plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())\n plt.legend([\"dim %d\" % p for p in [4, 5, 6, 7]])\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.ones",
"torch.mean",
"torch.Tensor",
"torch.zeros",
"torch.sin",
"numpy.arange",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.init.xavier_uniform_",
"torch.arange",
"torch.nn.ReLU",
"matplotlib.pyplot.show",
"torch.cos",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
donyori/2018ccf_bdci_inter_fund_correlation_prediction | [
"6e06a3e192e05ae1e9822111cf323eda3a61bf4e"
] | [
"program/model/version/ver1_2.py"
] | [
"from tensorflow import keras\n\nfrom constants import TRADING_DAYS_PER_WEEK, INDEX_RETURN_INDICATOR_NUMBER\nfrom ..constants import *\n\nMODEL_NAME = 'ifcp_model_ver1_2'\nROLLING_WINDOW_SIZE = TRADING_DAYS_PER_WEEK\n\n\ndef build_model():\n fund1_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND1_RETURN_NAME)\n fund1_benchmark_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND1_BENCHMARK_RETURN_NAME)\n fund2_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND2_RETURN_NAME)\n fund2_benchmark_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND2_BENCHMARK_RETURN_NAME)\n\n fund1_performance = keras.layers.subtract([fund1_return, fund1_benchmark_return], name='fund1_performance')\n fund2_performance = keras.layers.subtract([fund2_return, fund2_benchmark_return], name='fund2_performance')\n\n fund1_attributes = keras.layers.concatenate(\n [fund1_return, fund1_benchmark_return, fund1_performance], name='fund1_attributes')\n fund2_attributes = keras.layers.concatenate(\n [fund2_return, fund2_benchmark_return, fund2_performance], name='fund2_attributes')\n\n fund_attributes_gru = keras.layers.GRU(\n 12,\n kernel_regularizer=keras.regularizers.l2(0.01),\n recurrent_regularizer=keras.regularizers.l2(0.01),\n activity_regularizer=keras.regularizers.l1(0.01),\n name='fund_attributes_gru',\n )\n\n fund1_attributes_after_gru = fund_attributes_gru(fund1_attributes)\n fund2_attributes_after_gru = fund_attributes_gru(fund2_attributes)\n\n fund_attributes_after_gru = keras.layers.concatenate(\n [fund1_attributes_after_gru, fund2_attributes_after_gru], name='fund_attributes_after_gru')\n\n auxiliary_output = keras.layers.Dense(1, activation='sigmoid', name=AUXILIARY_OUTPUT_NAME)(\n fund_attributes_after_gru)\n\n index_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, INDEX_RETURN_INDICATOR_NUMBER), name=INDEX_RETURN_NAME)\n index_return_gru = keras.layers.GRU(\n 35,\n kernel_regularizer=keras.regularizers.l2(0.01),\n recurrent_regularizer=keras.regularizers.l2(0.01),\n activity_regularizer=keras.regularizers.l1(0.01),\n name='index_return_gru',\n )\n index_return_after_gru = index_return_gru(index_return)\n merge = keras.layers.concatenate([fund_attributes_after_gru, index_return_after_gru], name='merge')\n x = keras.layers.Dense(64, activation='relu',\n kernel_regularizer=keras.regularizers.l2(0.01),\n activity_regularizer=keras.regularizers.l1(0.01))(merge)\n x = keras.layers.Dense(64, activation='relu',\n kernel_regularizer=keras.regularizers.l2(0.01),\n activity_regularizer=keras.regularizers.l1(0.01))(x)\n x = keras.layers.Dense(16, activation='relu',\n kernel_regularizer=keras.regularizers.l2(0.01),\n activity_regularizer=keras.regularizers.l1(0.01))(x)\n main_output = keras.layers.Dense(1, activation='sigmoid', name=MAIN_OUTPUT_NAME)(x)\n\n model = keras.Model(inputs=[\n fund1_return, fund1_benchmark_return, fund2_return, fund2_benchmark_return, index_return],\n outputs=[main_output, auxiliary_output])\n return model\n"
] | [
[
"tensorflow.keras.Input",
"tensorflow.keras.regularizers.l1",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.Model",
"tensorflow.keras.layers.subtract"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.2"
]
}
] |
simonfong6/micro-projects | [
"5be195ea72ce117df6da041446f11c18e102b5df"
] | [
"ml_tutorial/test.py"
] | [
"import svm as SVM\nimport numpy as np\n\ndata_dict = {\t-1:np.array(\t[[10,9,1],\n\t\t\t\t[2,8,1],\n\t\t\t\t[3,8,1],]),\n \n\t\t1:np.array(\t[[5,1,1],\n \t [6,-1,1],\n \t [7,3,1],])}\n\nsvm = SVM.Support_Vector_Machine()\nsvm.fit(data=data_dict)\n\npredict_us = [[0,10,1],\n\t [1,3,1],\n\t [3,4,1],\n\t [3,5,1],\n\t [5,5,1],\n\t [5,6,1],\n\t [6,-5,1],\n\t [5,8,1]]\n\nfor p in predict_us:\n\tsvm.predict(p)\nsvm.visualize()\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OnionIoT/tau-lidar-camera | [
"a70b24e18be8e4c5abfe525c6768fbc10a492fd8",
"a70b24e18be8e4c5abfe525c6768fbc10a492fd8"
] | [
"examples/distancePlusAmplitude.py",
"examples/checkCamera.py"
] | [
"import argparse\nimport numpy as np\nimport cv2\n\nfrom TauLidarCommon.frame import FrameType\nfrom TauLidarCamera.camera import Camera\n\ndef setup(serialPort=None):\n port = None\n camera = None\n\n # if no serial port is specified, scan for available Tau Camera devices\n if serialPort is None:\n ports = Camera.scan() ## Scan for available Tau Camera devices\n\n if len(ports) > 0:\n port = ports[0]\n else:\n port = serialPort\n\n if port is not None:\n Camera.setRange(0, 4500) ## points in the distance range to be colored\n\n camera = Camera.open(port) ## Open the first available Tau Camera\n camera.setModulationChannel(0) ## autoChannelEnabled: 0, channel: 0\n camera.setIntegrationTime3d(0, 1000) ## set integration time 0: 1000\n camera.setMinimalAmplitude(0, 10) ## set minimal amplitude 0: 80\n\n cameraInfo = camera.info()\n\n print(\"\\nToF camera opened successfully:\")\n print(\" model: %s\" % cameraInfo.model)\n print(\" firmware: %s\" % cameraInfo.firmware)\n print(\" uid: %s\" % cameraInfo.uid)\n print(\" resolution: %s\" % cameraInfo.resolution)\n print(\" port: %s\" % cameraInfo.port)\n\n print(\"\\nPress Esc key over GUI or Ctrl-c in terminal to shutdown ...\")\n\n\n cv2.namedWindow('Depth Map')\n cv2.namedWindow('Amplitude')\n\n cv2.moveWindow('Depth Map', 20, 20)\n cv2.moveWindow('Amplitude', 20, 360)\n\n return camera\n\n\ndef run(camera):\n while True:\n frame = camera.readFrame(FrameType.DISTANCE_AMPLITUDE)\n\n if frame:\n mat_depth_rgb = np.frombuffer(frame.data_depth_rgb, dtype=np.uint16, count=-1, offset=0).reshape(frame.height, frame.width, 3)\n mat_depth_rgb = mat_depth_rgb.astype(np.uint8)\n\n mat_amplitude = np.frombuffer(frame.data_amplitude, dtype=np.float32, count=-1, offset=0).reshape(frame.height, frame.width)\n mat_amplitude = mat_amplitude.astype(np.uint8)\n\n # Upscalling the image\n upscale = 4\n depth_img = cv2.resize(mat_depth_rgb, (frame.width*upscale, frame.height*upscale))\n amplitude_img = cv2.resize(mat_amplitude, (frame.width*upscale, frame.height*upscale))\n\n cv2.imshow('Depth Map', depth_img)\n cv2.imshow('Amplitude', amplitude_img)\n\n if cv2.waitKey(1) == 27: break\n\n\ndef cleanup(camera):\n print('\\nShutting down ...')\n cv2.destroyAllWindows()\n camera.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Sample program to demonstrate acquiring frames with both distance / depth and amplitude data from the Tau LiDAR Camera')\n parser.add_argument('--port', metavar='<serial port>', default=None,\n help='Specify a serial port for the Tau Camera')\n args = parser.parse_args()\n\n\n camera = setup(args.port)\n\n if camera:\n try:\n run(camera)\n except Exception as e:\n print(e)\n\n cleanup(camera)\n",
"import argparse\nimport numpy as np\nimport cv2\n\nfrom TauLidarCommon.frame import FrameType\nfrom TauLidarCamera.camera import Camera\n\ndef scanPortsAndSetup():\n camera = None\n\n ports = Camera.scan() ## Scan for available Tau Camera devices\n print('\\nFound %d possible device(s)'%len(ports))\n\n for port in ports:\n print('Attempting to connect to device on port \\'%s\\''%port)\n camera = setup(port)\n\n return camera\n\ndef setup(port):\n camera = None\n\n Camera.setRange(0, 4500) ## points in the distance range to be colored\n\n camera = Camera.open(port) ## Open the first available Tau Camera\n camera.setModulationChannel(0) ## autoChannelEnabled: 0, channel: 0\n camera.setIntegrationTime3d(0, 1000) ## set integration time 0: 1000\n camera.setMinimalAmplitude(0, 10) ## set minimal amplitude 0: 80\n\n cameraInfo = camera.info()\n\n print(\"\\nToF camera opened successfully:\")\n print(\" model: %s\" % cameraInfo.model)\n print(\" firmware: %s\" % cameraInfo.firmware)\n print(\" uid: %s\" % cameraInfo.uid)\n print(\" resolution: %s\" % cameraInfo.resolution)\n print(\" port: %s\" % cameraInfo.port)\n\n print(\"\\nPress Esc key over GUI or Ctrl-c in terminal to shutdown ...\")\n\n return camera\n\n\ndef run(camera):\n while True:\n frame = camera.readFrame(FrameType.DISTANCE)\n\n if frame:\n mat_depth_rgb = np.frombuffer(frame.data_depth_rgb, dtype=np.uint16, count=-1, offset=0).reshape(frame.height, frame.width, 3)\n mat_depth_rgb = mat_depth_rgb.astype(np.uint8)\n\n # Upscalling the image\n upscale = 4\n img = cv2.resize(mat_depth_rgb, (frame.width*upscale, frame.height*upscale))\n\n cv2.imshow('Depth Map', img)\n\n if cv2.waitKey(1) == 27: break\n\n\ndef cleanup(camera):\n print('\\nShutting down ...')\n cv2.destroyAllWindows()\n camera.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Try connecting to any available Tau LiDAR Camera.\\nIf no port argument is specified scan all serial ports will be scanned.')\n parser.add_argument('--port', metavar='<serial port>', default=None,\n help='Specify a serial port instead of trying all available Tau LiDAR Cameras')\n args = parser.parse_args()\n\n port = args.port\n\n if port is None:\n camera = scanPortsAndSetup()\n else:\n print('Attempting to connect to device on port \\'%s\\''%port)\n camera = setup(port)\n\n if camera:\n try:\n run(camera)\n except Exception as e:\n print(e)\n\n cleanup(camera)\n"
] | [
[
"numpy.frombuffer"
],
[
"numpy.frombuffer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
R-aryan/Jigsaw-Toxic-Comment-Classification | [
"e5e4da7df379ac1b315f2bde655386180f39c517"
] | [
"backend/services/toxic_comment_jigsaw/application/ai/training/src/train.py"
] | [
"import pandas as pd\nimport numpy as np\nimport torch\n\nfrom sklearn.model_selection import train_test_split\nfrom backend.services.toxic_comment_jigsaw.application.ai.model import BERTClassifier\nfrom backend.services.toxic_comment_jigsaw.application.ai.training.src.dataset import BERTDataset\nfrom backend.services.toxic_comment_jigsaw.application.ai.training.src.preprocess import Preprocess\nfrom backend.services.toxic_comment_jigsaw.application.ai.training.src.engine import Engine\nfrom backend.services.toxic_comment_jigsaw.application.ai.settings import Settings\n\nfrom transformers import AdamW, get_linear_schedule_with_warmup\nfrom torch.utils.data import DataLoader\n\n\nclass Train:\n def __init__(self):\n # initialize required class\n self.settings = Settings\n self.engine = Engine()\n self.preprocess = Preprocess()\n\n # initialize required variables\n self.bert_classifier = None\n self.optimizer = None\n self.scheduler = None\n self.train_data_loader = None\n self.val_data_loader = None\n self.total_steps = None\n self.best_accuracy = 0\n\n def __initialize(self):\n # Instantiate Bert Classifier\n self.bert_classifier = BERTClassifier(freeze_bert=False)\n self.bert_classifier.to(self.settings.DEVICE)\n\n # Create the optimizer\n self.optimizer = AdamW(self.bert_classifier.parameters(),\n lr=5e-5, # Default learning rate\n eps=1e-8 # Default epsilon value\n )\n # Set up the learning rate scheduler\n self.scheduler = get_linear_schedule_with_warmup(self.optimizer,\n num_warmup_steps=0, # Default value\n num_training_steps=self.total_steps)\n\n def crete_data_loaders(self, dataset):\n pass\n\n def load_data(self):\n train_df = pd.read_csv(self.settings.TRAIN_DATA).fillna(\"none\")\n train_df['comment_text'] = train_df['comment_text'].apply(lambda x: self.preprocess.clean_text(x))\n X = list(train_df['comment_text'])\n y = np.array(train_df.loc[:, 'toxic':])\n\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=self.settings.RANDOM_STATE)\n\n # training dataset\n train_dataset = BERTDataset(X_train, y_train)\n\n # validation dataset\n val_dataset = BERTDataset(X_val, y_val)\n\n self.train_data_loader = DataLoader(train_dataset,\n batch_size=self.settings.TRAIN_BATCH_SIZE,\n shuffle=True,\n num_workers=self.settings.TRAIN_NUM_WORKERS)\n\n self.val_data_loader = DataLoader(val_dataset,\n batch_size=self.settings.VALID_BATCH_SIZE,\n shuffle=True,\n num_workers=self.settings.VAL_NUM_WORKERS)\n\n self.total_steps = int(len(X_train) / self.settings.TRAIN_BATCH_SIZE * self.settings.EPOCHS)\n\n def train(self):\n for epochs in range(self.settings.EPOCHS):\n\n # calling the training function in engine.py file\n self.engine.train_fn(data_loader=self.train_data_loader,\n model=self.bert_classifier,\n optimizer=self.optimizer,\n device=self.settings.DEVICE,\n schedular=self.scheduler)\n\n # calling the evaluation function from the engine.py file to compute evaluation\n val_loss, val_accuracy = self.engine.eval_fn(data_loader=self.val_data_loader,\n model=self.bert_classifier,\n device=self.settings.DEVICE)\n\n # updating the accuracy\n if val_accuracy > self.best_accuracy:\n torch.save(self.bert_classifier.state_dict(), self.settings.MODEL_PATH)\n self.best_accuracy = val_accuracy\n\n def run(self):\n try:\n print(\"Loading and Preparing the Dataset-----!! \")\n self.load_data()\n print(\"Dataset Successfully Loaded and Prepared-----!! \")\n print()\n print(\"-\" * 70)\n print(\"Loading and Initializing the Bert Model -----!! 
\")\n self.__initialize()\n print(\"Model Successfully Loaded and Initialized-----!! \")\n print()\n print(\"-\" * 70)\n print(\"------------------Starting Training-----------!!\")\n self.engine.set_seed()\n self.train()\n print(\"Training complete-----!!!\")\n\n except BaseException as ex:\n print(\"Following Exception Occurred---!! \", str(ex))\n\n"
] | [
[
"pandas.read_csv",
"numpy.array",
"torch.utils.data.DataLoader",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
chensnathan/CARAFE_CUDA | [
"33d3d3af69b24fc679f6a3a071a19070dc46664b"
] | [
"carafe_layer/setup.py"
] | [
"from setuptools import setup\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\nsetup(\n name='carafe_layer_cuda',\n ext_modules=[\n CUDAExtension('carafe_layer_cuda', [\n 'src/carafe_layer_cuda.cpp',\n 'src/carafe_layer_kernel.cu',\n ])\n ],\n cmdclass={\n 'build_ext': BuildExtension\n })\n"
] | [
[
"torch.utils.cpp_extension.CUDAExtension"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AU-DATALAB/newsFluxus | [
"20522b2c8c830d2377a9620d149a515baaaa9cf4",
"20522b2c8c830d2377a9620d149a515baaaa9cf4"
] | [
"src/saffine/detrending_coeff.py",
"src/archive/legacy_main_extractor_Ida_incl_comments_grundtvig.py"
] | [
"from numpy import *\r\nimport numpy as np\r\n# from numba import jit\r\n\r\n# @jit\r\n\r\ndef detrending_coeff(win_len , order):\r\n\r\n#win_len = 51\r\n#order = 2\r\n\tn = (win_len-1)/2\r\n\tA = mat(ones((win_len,order+1)))\r\n\tx = np.arange(-n , n+1)\r\n\tfor j in range(0 , order + 1):\r\n\t\tA[:,j] = mat(x ** j).T\r\n\r\n\tcoeff_output = (A.T * A).I * A.T\r\n\treturn coeff_output , A\r\n\r\n# coeff_output,A = detrending_coeff(5,2)\r\n# print(coeff_output)\r\n# print(A)\r\n",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 18 13:29:39 2021\r\n\r\n@author: au685355\r\n\"\"\"\r\n\r\n\"\"\"\r\n1. Lemmatize texts\r\n2. Train LDA model\r\n3. Get topic model distribution per document\r\n4. Extract novelty/resonance\r\n\r\n#To run script in shell console:\r\npython3 src/main_extractor.py\r\n\"\"\"\r\n\r\nimport os\r\nimport pickle\r\nimport glob\r\n\r\nimport pandas as pd\r\nimport spacy\r\nimport re\r\nfrom itertools import islice\r\nimport math\r\n\r\n#add package directory to sys\r\nimport sys\r\nsys.path.insert(1, r'/home/idanissen/newsFluxus-master_Lasse/newsFluxus-master/src')\r\n#check path: type 'sys.path'\r\nfrom tekisuto.preprocessing import CaseFolder\r\nfrom tekisuto.preprocessing import RegxFilter\r\nfrom tekisuto.preprocessing import StopWordFilter\r\nfrom tekisuto.preprocessing import Tokenizer\r\nfrom tekisuto.models import TopicModel\r\nfrom tekisuto.models import InfoDynamics\r\nfrom tekisuto.metrics import jsd\r\nfrom tekisuto.models import LatentSemantics\r\nfrom import_ndjson_files_incl_comments import import_ndjson_files\r\n\r\n\r\ndef spacy_lemmatize(texts, nlp, **kwargs):\r\n docs = nlp.pipe(texts, **kwargs)\r\n\r\n def __lemmatize(doc):\r\n lemmas = []\r\n for sent in doc.sents:\r\n for token in sent:\r\n lemmas.append(token.lemma_)\r\n return lemmas\r\n\r\n return [__lemmatize(doc) for doc in docs]\r\n\r\ndef preprocess_for_topic_models(lemmas: list, lang=\"da\"):\r\n cf = CaseFolder(lower=True)\r\n re0 = RegxFilter(pattern=r\"\\W+\")\r\n re1 = RegxFilter(pattern=r\"\\d+\")\r\n sw = StopWordFilter(path=os.path.join(ROOT_PATH, \"res\", f\"stopwords-{lang}.txt\"))\r\n processors = [cf, re0, re1, sw]\r\n for processor in processors:\r\n lemmas = [processor.preprocess(t) for t in lemmas]\r\n return lemmas\r\n\r\n\r\n\r\n#%% not used\r\ndef train_topic_model(tokens, \r\n estimate_topics: bool,\r\n tune_topic_range=[10,30,50],\r\n plot_topics=False,\r\n **kwargs):\r\n \"\"\"\r\n tokens: list of strings (document)\r\n estimate topics: whether to search a range of topics\r\n tune_topic_range: number of topics to fit\r\n plot_topics: quality check, plot coherence by topics\r\n **kwargs: other arguments to LDAmulticore\r\n \"\"\"\r\n if estimate_topics:\r\n tm = TopicModel(tokens)\r\n n, n_cohers = tm.tune_topic_range(\r\n ntopics=tune_topic_range,\r\n plot_topics=plot_topics)\r\n print(f\"\\n[INFO] Optimal number of topics is {n}\")\r\n tm = TopicModel(tokens)\r\n tm.fit(n, **kwargs)\r\n else:\r\n tm = TopicModel(tokens)\r\n n = 10\r\n tm.fit(10, **kwargs)\r\n return tm, n\r\n#%%\r\n\r\n#added by Ida: use mallet for topic model instead of LDA\r\ndef train_topic_model_mallet(tokens, \r\n estimate_topics: bool,\r\n tune_topic_range=[10,30,50],\r\n plot_topics=False,\r\n **kwargs):\r\n \"\"\"\r\n tokens: list of strings (document)\r\n estimate topics: whether to search a range of topics\r\n tune_topic_range: number of topics to fit\r\n plot_topics: quality check, plot coherence by topics\r\n **kwargs: other arguments to LDAmulticore\r\n \"\"\"\r\n if estimate_topics:\r\n ls = LatentSemantics(tokens)\r\n n, n_cohers = ls.coherence_k(\r\n krange=tune_topic_range,\r\n plot_topics=plot_topics)\r\n print(f\"\\n[INFO] Optimal number of topics is {n}\")\r\n ls = LatentSemantics(tokens, k=n)\r\n ls.fit()\r\n else:\r\n ls = LatentSemantics(tokens, k=10)# change to your preferred default value\r\n n = 10\r\n ls.fit()\r\n return ls, n\r\n\r\n\r\n\r\ndef extract_novelty_resonance(df, theta, dates, window):\r\n idmdl = InfoDynamics(data = theta, time = 
dates, window = window)\r\n idmdl.novelty(meas = jsd)\r\n idmdl.transience(meas = jsd)\r\n idmdl.resonance(meas = jsd)\r\n\r\n df[\"novelty\"] = idmdl.nsignal\r\n df[\"transience\"] = idmdl.tsignal\r\n df[\"resonance\"] = idmdl.rsignal\r\n df[\"nsigma\"] = idmdl.nsigma\r\n df[\"tsigma\"] = idmdl.tsigma\r\n df[\"rsigma\"] = idmdl.rsigma\r\n return df\r\n\r\n\r\n\r\n#%% Plot part\r\n#From news_uncertainty.py\r\n\r\nimport numpy as np\r\nimport saffine.detrending_method as dm\r\nimport scipy as sp\r\nimport scipy.stats as stats\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef normalize(x, lower=-1, upper=1):\r\n \"\"\" transform x to x_ab in range [a, b]\r\n \"\"\"\r\n x_norm = (upper - lower)*((x - np.min(x)) / (np.max(x) - np.min(x))) + lower\r\n return x_norm\r\n\r\n\r\ndef adaptive_filter(y, span=56):\r\n #if len(y) % 2:\r\n # y=y[:-1]\r\n\r\n w = int(4 * np.floor(len(y)/span) + 1)\r\n y_dt = np.mat([float(j) for j in y])\r\n _, y_smooth = dm.detrending_method(y_dt, w, 1)\r\n \r\n return y_smooth.T\r\n\r\ndef plot_ci_manual(t, s_err, n, x, x2, y2, ax=None):\r\n \"\"\"Return an axes of confidence bands using a simple approach.\r\n\r\n \"\"\"\r\n if ax is None:\r\n ax = plt.gca()\r\n\r\n ci = t * s_err * np.sqrt(1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2))\r\n ax.fill_between(x2, y2 + ci, y2 - ci, color=\"#b9cfe7\", edgecolor=\"\")\r\n\r\n return ax\r\n\r\n\r\ndef plot_ci_bootstrap(xs, ys, resid, nboot=500, ax=None):\r\n \"\"\"Return an axes of confidence bands using a bootstrap approach.\r\n Returns\r\n -------\r\n ax : axes\r\n - Cluster of lines\r\n - Upper and Lower bounds (high and low) (optional) Note: sensitive to outliers\r\n \"\"\" \r\n if ax is None:\r\n ax = plt.gca()\r\n\r\n bootindex = sp.random.randint\r\n\r\n for _ in range(nboot):\r\n resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]\r\n # Make coeffs of for polys\r\n pc = np.polyfit(xs, ys + resamp_resid, 1) \r\n # Plot bootstrap cluster\r\n ax.plot(xs, np.polyval(pc, xs), \"r-\", linewidth=2, alpha=3.0 / float(nboot))\r\n\r\n return ax\r\n\r\ndef adaptiveline(x1, x2, fname=\"adaptline.png\"):\r\n _, ax = plt.subplots(2,1,figsize=(14,6),dpi=300)\r\n c = [\"g\", \"r\", \"b\"]\r\n ax[0].plot(normalize(x1, lower=0),c=\"gray\")\r\n for i, span in enumerate([128, 56, 32]):\r\n n_smooth = normalize(adaptive_filter(x1, span=span), lower=0)\r\n ax[0].plot(n_smooth,c=c[i])\r\n ax[0].set_ylabel(\"$\\\\mathbb{N}ovelty$\", fontsize=14)\r\n \r\n ax[1].plot(normalize(x2, lower=-1),c=\"gray\")\r\n for i, span in enumerate([128, 56, 32]):\r\n r_smooth = normalize(adaptive_filter(x2, span=span), lower=-1)\r\n ax[1].plot(r_smooth,c=c[i])\r\n ax[1].set_ylabel(\"$\\\\mathbb{R}esonance$\", fontsize=14)\r\n plt.tight_layout()\r\n plt.savefig(fname)\r\n #plt.close()\r\n \r\n\r\ndef adaptiveline_toptimes(x1, x2, x, y, cond, fname=\"adaptline_top.png\"):\r\n #_, ax = plt.subplots(2,1,figsize=(14,6),dpi=300)\r\n fig, ax = plt.subplots(2,1,figsize=(14,6),dpi=300)\r\n c = [\"g\", \"r\", \"b\"]\r\n ax[0].plot(normalize(x1, lower=0),c=\"gray\")\r\n for i, span in enumerate([128, 56, 32]):\r\n n_smooth = normalize(adaptive_filter(x1, span=span), lower=0)\r\n ax[0].plot(n_smooth,c=c[i])\r\n ax[0].set_ylabel(\"$\\\\mathbb{N}ovelty$\", fontsize=14)\r\n \r\n ax[1].plot(normalize(x2, lower=-1),c=\"gray\")\r\n for i, span in enumerate([128, 56, 32]):\r\n r_smooth = normalize(adaptive_filter(x2, span=span), lower=-1)\r\n ax[1].plot(r_smooth,c=c[i])\r\n ax[1].set_ylabel(\"$\\\\mathbb{R}esonance$\", 
fontsize=14)\r\n \r\n ax[1].scatter(x[cond == True], y[cond == True], c='r') \r\n y2 = y+1\r\n ax[0].scatter(x[cond == True], y2[cond == True], c='r')\r\n \r\n plt.tight_layout()\r\n plt.savefig(fname)\r\n #plt.close()\r\n\r\n\r\ndef regline(x, y, bootstap=True, fname=\"regline.png\"):\r\n p, _ = np.polyfit(x, y, 1, cov=True)\r\n y_model = np.polyval(p, x)\r\n # statistics\r\n n = y.size\r\n m = p.size\r\n dof = n - m\r\n t = stats.t.ppf(0.975, n - m)\r\n # estimates of error\r\n resid = y - y_model \r\n chi2 = np.sum((resid / y_model)**2) \r\n chi2_red = chi2 / dof\r\n s_err = np.sqrt(np.sum(resid**2) / dof) \r\n # plot\r\n fig, ax = plt.subplots(figsize=(8, 7.5),dpi=300)\r\n ax.plot(x, y, \".\", color=\"#b9cfe7\", markersize=8,markeredgewidth=1, markeredgecolor=\"r\", markerfacecolor=\"None\")\r\n ax.plot(x, y_model, \"-\", color=\"0.1\", linewidth=1.5, alpha=0.5, label=\"$\\\\beta_1 = {}$\".format(round(p[0], 2)))\r\n x2 = np.linspace(np.min(x), np.max(x), 100)\r\n y2 = np.polyval(p, x2)\r\n # confidence interval option\r\n if bootstap:\r\n plot_ci_bootstrap(x, y, resid, ax=ax)\r\n else:\r\n plot_ci_manual(t, s_err, n, x, x2, y2, ax=ax)\r\n # prediction interval\r\n pi = t * s_err * np.sqrt(1 + 1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2)) \r\n ax.fill_between(x2, y2 + pi, y2 - pi, color=\"None\", linestyle=\"--\")\r\n ax.plot(x2, y2 - pi, \"--\", color=\"0.5\", label=\"95% Prediction Limits\")\r\n ax.plot(x2, y2 + pi, \"--\", color=\"0.5\")\r\n # borders\r\n ax.spines[\"top\"].set_color(\"0.5\")\r\n ax.spines[\"bottom\"].set_color(\"0.5\")\r\n ax.spines[\"left\"].set_color(\"0.5\")\r\n ax.spines[\"right\"].set_color(\"0.5\")\r\n ax.get_xaxis().set_tick_params(direction=\"out\")\r\n ax.get_yaxis().set_tick_params(direction=\"out\")\r\n ax.xaxis.tick_bottom()\r\n ax.yaxis.tick_left()\r\n # labels\r\n plt.title(\"Classification of Uncertainty State\", fontsize=\"14\", fontweight=\"bold\")\r\n plt.xlabel(\"$\\\\mathbb{N}ovelty_z$\", fontsize=\"14\", fontweight=\"bold\")\r\n plt.ylabel(\"$\\\\mathbb{R}esonance_z$\", fontsize=\"14\", fontweight=\"bold\")\r\n plt.xlim(np.min(x) - .25, np.max(x) + .25)\r\n # custom legend\r\n handles, labels = ax.get_legend_handles_labels()\r\n display = (0, 1)\r\n anyArtist = plt.Line2D((0, 1), (0, 0), color=\"#ea5752\")\r\n legend = plt.legend(\r\n [handle for i, handle in enumerate(handles) if i in display] + [anyArtist],\r\n [label for i, label in enumerate(labels) if i in display] + [\"95% Confidence Limits\"],\r\n loc=9, bbox_to_anchor=(0, -0.21, 1., 0.102), ncol=3, mode=\"expand\"\r\n ) \r\n frame = legend.get_frame().set_edgecolor(\"0.5\")\r\n mpl.rcParams['axes.linewidth'] = 1\r\n # save figure\r\n plt.tight_layout()\r\n plt.savefig(fname, bbox_extra_artists=(legend,), bbox_inches=\"tight\")\r\n #Ida added\r\n beta1 = round(p[0], 2)\r\n return beta1\r\n\r\ndef regline_without_figure(x, y):\r\n p, _ = np.polyfit(x, y, 1, cov=True)\r\n #Ida added\r\n beta1 = round(p[0], 2)\r\n return beta1\r\n\r\n\r\ndef plot_figures(df, OUT_PATH, IN_DATA, window):\r\n time = df['date'].tolist()\r\n novelty = df['novelty'].tolist()\r\n resonance = df['resonance'].tolist()\r\n \r\n # remove window start-end #Ida: had to move window removal above plotting to avoid error messages\r\n time = time[window:-window]\r\n novelty = novelty[window:-window]\r\n resonance = resonance[window:-window]\r\n # trend detection\r\n if not os.path.exists(os.path.join(OUT_PATH, \"fig\")):\r\n os.mkdir(os.path.join(OUT_PATH, \"fig\"))\r\n figname0 = os.path.join(OUT_PATH, \"fig\", 
IN_DATA.split(\".\")[0] + \"_adaptline.png\")\r\n #with open(figname0, \"wb\") as f:\r\n # adaptiveline(novelty, resonance, fname=figname0)\r\n #or (both do not show figure in plot window, but save it)\r\n adaptiveline(novelty, resonance, fname=figname0)\r\n # classification based on z-scores\r\n xz = stats.zscore(novelty)\r\n yz = stats.zscore(resonance)\r\n figname1 = os.path.join(OUT_PATH, \"fig\", IN_DATA.split(\".\")[0] + \"_regline.png\")\r\n regline(xz, yz, fname=figname1)\r\n beta1 = regline(xz, yz, fname=figname1)\r\n return time, novelty, resonance, beta1\r\n\r\n\r\n\r\n#%% Estimate Hurst exponent\r\ndef hurst_exp(resonance, OUT_PATH):\r\n import nolds\r\n \r\n #hurst_n = nolds.hurst_rs(novelty, nvals=None, fit='poly', debug_plot=True, plot_file=None, corrected=True, unbiased=True)\r\n #show figure\r\n nolds.hurst_rs(resonance, nvals=None, fit='poly', debug_plot=True, plot_file=None, corrected=True, unbiased=True)\r\n #save figure\r\n fignameH = os.path.join(OUT_PATH, \"fig\", \"H_plot.png\")\r\n hurst_r = nolds.hurst_rs(resonance, nvals=None, fit='poly', debug_plot=True, plot_file=fignameH, corrected=True, unbiased=True)\r\n \r\n return hurst_r\r\n\r\n\r\n#%% Beta time series\r\ndef sliding_window(seq, n=21):\r\n \"Returns a sliding window (of width n) over data from the iterable\"\r\n \" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... \"\r\n it = iter(seq)\r\n result = tuple(islice(it, n))\r\n if len(result) == n:\r\n yield result\r\n for elem in it:\r\n result = result[1:] + (elem,)\r\n yield result\r\n\r\ndef beta_time_series(time, novelty, resonance, window, OUT_PATH, IN_DATA):\r\n if not os.path.exists(os.path.join(OUT_PATH, \"fig\")):\r\n os.mkdir(os.path.join(OUT_PATH, \"fig\"))\r\n \r\n #convert time series into windows\r\n time_w = list()\r\n for w in sliding_window(time, window):\r\n time_w.append(w)\r\n #print(w) \r\n novelty_w = list()\r\n for w in sliding_window(novelty, window):\r\n novelty_w.append(w)\r\n resonance_w = list()\r\n for w in sliding_window(resonance, window):\r\n resonance_w.append(w) \r\n \r\n #loop over window\r\n beta_w = list()\r\n for i in range(len(time_w)):\r\n # classification based on z-scores\r\n xz = stats.zscore(novelty_w[i])\r\n yz = stats.zscore(resonance_w[i])\r\n #get beta without generating a figure for each window\r\n beta = regline_without_figure(xz, yz)\r\n beta_w.append(beta)\r\n\r\n #old: a figure is generated for each window\r\n #figname1 = os.path.join(OUT_PATH, \"fig\", IN_DATA.split(\".\")[0] + \"_regline_w\" + str(i) + \".png\")\r\n #beta = regline(xz, yz, fname=figname1)\r\n #plt.close()\r\n #beta_w.append(beta)\r\n \r\n #choose middle time point for plot\r\n #later: maybe average instead, as time points are not spaced evenly\r\n time_middle = list()\r\n middle = round((len(time_w[0]) - 1)/2)\r\n for i in range(len(time_w)):\r\n time_middle.append(time_w[i][middle])\r\n time_middle_days = list()\r\n for i in range(len(time_middle)):\r\n time_middle_days.append(time_middle[i][0:10])\r\n \r\n #plot beta over time \r\n #(execute as block)\r\n plt.scatter(time_middle, beta_w)\r\n plt.xticks([]) \r\n plt.ylabel('beta')\r\n plt.xlabel('time')\r\n plt.box(False)\r\n xlabels = list()\r\n for i in range(0, len(time_middle_days)-1, math.floor((len(time_middle_days)-1)/4)):\r\n xlabels.append(time_middle_days[i])\r\n plt.xticks(range(0, len(time_middle_days)-1, math.floor((len(time_middle_days)-1)/4)), xlabels) #, rotation = 45\r\n #save figure\r\n fname = os.path.join(OUT_PATH, \"fig\", IN_DATA.split(\".\")[0] + 
\"_beta_timeseries.png\")\r\n plt.savefig(fname, dpi=300, bbox_inches='tight')\r\n plt.show()\r\n \r\n \r\n \r\n #save beta timeseries\r\n output = pd.DataFrame({'beta': beta_w, 'time middle': time_middle, 'time': time_w})\r\n output.to_csv(os.path.join(OUT_PATH, \"{}_beta_timeseries.csv\".format(IN_DATA.split(\".\")[0])), index=False, encoding='utf-8-sig', sep=';') \r\n \r\n return beta_w, time_w, time_middle, time_middle_days\r\n \r\n\r\ndef timepoints_beta_top(beta_w, time_w, percentage):\r\n \r\n #Find treshold \r\n beta_index = [0]*len(beta_w)\r\n beta_ranked = [0]*len(beta_w)\r\n for i, x in enumerate(sorted(range(len(beta_w)), key=lambda y: beta_w[y], reverse=True)): #sort descending\r\n beta_ranked[i] = beta_w[x]\r\n beta_index[i] = x\r\n \r\n threshold = beta_ranked[round(percentage*len(beta_w))]\r\n treshold_idx = beta_index[round(percentage*len(beta_w))]\r\n \r\n list_top_idx = beta_index[0:round(percentage*len(beta_w))]\r\n \r\n #find time points according to top beta values\r\n time_top = list(time_w[i] for i in list_top_idx)\r\n #put all windowed time points back into one long list\r\n time_top_unpacked = [item for sublist in time_top for item in sublist]\r\n #remove all duplicates\r\n time_top_unpacked = list(set(time_top_unpacked))\r\n time_top_unpacked = sorted(time_top_unpacked)\r\n \r\n #NB: 557/847 time points end up in top 20 list due to windowing (66%)! For top 10% it is 384/847 (45%)\r\n \r\n return time_top_unpacked, threshold, list_top_idx\r\n\r\n\r\ndef line_top_time(size_df, idx_top, WINDOW):\r\n \r\n #preparation to plot selected time points\r\n idx_bin = [0]*len(df) #convert top time points into binary array\r\n for i in range(size_df):\r\n idx_bin[idx_top[i]] = 1\r\n idx_bin = idx_bin[WINDOW:-WINDOW] #shorten by removing first and last window to fit length of novelty and resonace arrays\r\n \r\n cond = np.array(idx_bin) == 1\r\n x = np.array(range(len(idx_bin)))\r\n y = np.array([-1]*len(idx_bin))\r\n #plt.scatter(x[cond == True], y[cond == True], c='r')\r\n\r\n return x, y, cond\r\n\r\n\r\ndef plot_beta_top_time(time_middle, beta_w, time_middle_days, time_top, threshold, list_top_idx, OUT_PATH, IN_DATA):\r\n \r\n #plot beta over time \r\n #(execute as block)\r\n plt.scatter(time_middle, beta_w)\r\n plt.xticks([]) \r\n plt.ylabel('beta')\r\n plt.xlabel('time')\r\n plt.box(False)\r\n xlabels = list()\r\n for i in range(0, len(time_middle_days)-1, math.floor((len(time_middle_days)-1)/4)):\r\n xlabels.append(time_middle_days[i])\r\n plt.xticks(range(0, len(time_middle_days)-1, math.floor((len(time_middle_days)-1)/4)), xlabels) #, rotation = 45\r\n #add threshold line\r\n plt.axhline(y=threshold, color='k')\r\n \r\n top_beta = [0]*len(beta_w) #convert top time points into binary array\r\n for i in range(len(list_top_idx)):\r\n top_beta[list_top_idx[i]] = threshold\r\n \r\n cond2 = np.array(top_beta) == threshold\r\n plt.scatter(np.array(time_middle)[cond2 == True], np.array(top_beta)[cond2 == True], c='r')\r\n \r\n #save figure\r\n fname = os.path.join(OUT_PATH, \"fig\", IN_DATA.split(\".\")[0] + \"_beta_timeseries_top.png\")\r\n plt.savefig(fname, dpi=300, bbox_inches='tight')\r\n plt.show()\r\n\r\n\r\n#def topic_modelling_bert():\r\n #Article: Topic modeling with BERT: BERTopic\r\n #https://towardsdatascience.com/topic-modeling-with-bert-779f7db187e6\r\n \r\n #Embed the sentences\r\n # BERT sentence transformers\r\n # Follow https://github.com/UKPLab/sentence-transformers\r\n #Anaconda terminal in virtual environment 'Newsfluxus': \r\n #pip install -U 
sentence-transformers\r\n #pip install -U torch torchvision (or split the two up if error)\r\n #pip install -U transformers\r\n #Install pytorch 1.6.0 or higher by selecting it in anaconda navigator\r\n #pip install -U umap-learn\r\n #pip install -U hdbscan\r\n #pip install -U hdbscan --no-cache-dir --no-binary :all: --no-build-isolation\r\n \r\n #load top tokens from saved file\r\n \r\n # tokens_top = []\r\n # with open(os.path.join(OUT_PATH, \"mdl\", \"{}_toptokens.txt\".format(IN_DATA.split(\".\")[0]))) as f:\r\n # lines = f.readlines()\r\n \r\n # for line in f:\r\n # tokens_top.append(line)\r\n \r\n \r\n # with open(os.path.join(OUT_PATH, \"mdl\", \"{}_toptokens.txt\".format(IN_DATA.split(\".\")[0]))) as f:\r\n # content = f.readlines()\r\n # # you may also want to remove whitespace characters like `\\n` at the end of each line\r\n # content = [x.strip() for x in content] \r\n \r\n \r\n \r\n \r\n \r\n # infile = open('listtxt.txt','r')\r\n # for line in infile:\r\n # mainlist.append(line.strip().split(','))\r\n \r\n \r\n \r\n # with open(os.path.join(OUT_PATH, \"mdl\", \"{}_toptokens.txt\".format(IN_DATA.split(\".\")[0]))) as f:\r\n # for element in tokens_top:\r\n # f.write(\"{}\\n\".format(element))\r\n \r\n \r\n # from sentence_transformers import SentenceTransformer\r\n \r\n #Choose best pre-trained model from https://www.sbert.net/docs/pretrained_models.html\r\n # model = SentenceTransformer('distilbert-base-nli-mean-tokens') #model used by BERTopic\r\n #run bert on tokens, instead of on the sentences\r\n #modify tokens \r\n # tokens_top_mod = [' '.join(doc) for doc in tokens_top]\r\n # embeddings = model.encode(tokens_top_mod) #NB: maybe switch to sentences intead of using tokens \r\n #Reduce dimensionality with UMAP\r\n #_neighbours and n_components can be changed\r\n # import umap\r\n # umap_embeddings = umap.UMAP(n_neighbors=20, n_components=20, metric='cosine').fit_transform(embeddings) \r\n #Cluster sentences with HDBSAN\r\n # import hdbscan\r\n # cluster = hdbscan.HDBSCAN(min_cluster_size=10, metric='euclidean', cluster_selection_method='eom').fit(umap_embeddings)\r\n\r\n \r\n \r\n \r\n# return\r\n\r\n\r\n#%% Run functions\r\ndef main_module(SUBREDDIT_NAME, ROOT_PATH, REDDIT_DATA):\r\n #Run code in python\r\n #ROOT_PATH = r\"U:\\Python\\Newsfluxus\\newsFluxus-master_Lasse\\newsFluxus-master\"\r\n #IN_DATA= \"subreddit_iexec.csv\"\r\n #SUBREDDIT_NAME = \"FreeAsInFreedom\"\r\n #SUBREDDIT_NAME = \"iexec\"\r\n IN_DATA= SUBREDDIT_NAME + \"_incl_comments.csv\"\r\n #IN_DATA2= \"subreddit_FreeAsInFreedom.xlsx\"\r\n #IN_PATH = os.path.join(ROOT_PATH, \"dat\\\\subreddits_incl_comments\", IN_DATA)\r\n #IN_PATH2 = os.path.join(ROOT_PATH, \"dat\", IN_DATA2)\r\n OUT_PATH = os.path.join(ROOT_PATH, \"dat/subreddits_incl_comments/output\")\r\n OUT_FILE = os.path.join(OUT_PATH, IN_DATA.split(\".\")[0] + \"_theta.csv\")\r\n ESTIMATE_TOPIPCS = True # whether to tune multiple topic model sizes\r\n #TOPIC_TUNE = [10, 30, 50, 80, 100] # number of topics to tune over in topic model #Ida: do not set to only one, then set estimate_topipcs = False.\r\n TOPIC_TUNE = [20, 30, 50, 80] # number of topics to tune over in topic model #Ida: do not set to only one, then set estimate_topipcs = False.\r\n PLOT_TOPICS = True # plot a topic of coherence by number of topics\r\n SAVE_SEMANTIC_TOPICS = True # save the semantic content of the topic model\r\n WINDOW=3 # window for novelty/resonance\r\n LANG=\"en\" # language (english = 'en')\r\n #Ida added\r\n if not os.path.exists(OUT_PATH):\r\n 
os.makedirs(OUT_PATH)\r\n if not os.path.exists(os.path.join(OUT_PATH, \"mdl\")):\r\n os.mkdir(os.path.join(OUT_PATH, \"mdl\"))\r\n \r\n \r\n # Loading the dataset containing all metrics as non-danish speeches\r\n # have been removed\r\n# df = pd.read_csv(IN_PATH)\r\n\r\n #use df from within python directly without csv file\r\n df = import_ndjson_files(SUBREDDIT_NAME, REDDIT_DATA)\r\n\r\n #df2 = pd.read_csv(r\"U:\\Python\\Newsfluxus\\newsFluxus-master_Lasse\\newsFluxus-master\\dat\\subreddit_FreeAsInFreedom.csv\")\r\n #df2 = pd.read_excel(IN_PATH2, engine='openpyxl')\r\n #print(df)\r\n df.head()\r\n #raise SystemExit\r\n \r\n #check if sufficient datapoints\r\n length_df = len(df)\r\n print('length of datapoints in subreddit: ', length_df)\r\n if length_df < 120:\r\n print('not sufficient datapoints')\r\n with open(os.path.join(OUT_PATH, \"{}_not_executed.txt\".format(IN_DATA.split(\".\")[0])), \"w\") as f:\r\n f.write(\"not sufficient datapoints (no datapoints = {})\".format(length_df))\r\n \r\n #do not execute subreddit, return zeros for indicator variables\r\n beta1 = 0\r\n hurst_r = 0\r\n return df, OUT_PATH, IN_DATA, beta1, hurst_r\r\n \r\n # sorting date in descending order for correct calculation\r\n # of novelty and resonance\r\n df = df.sort_values(\"date\")\r\n \r\n print(\"\\n[INFO] lemmatizing...\\n\")\r\n #Pre-step in lemmatizing: remove all urls and numbers and other characters than letters\r\n url_pattern = re.compile(r'(https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})')\r\n remove_more = [r\"\\d+\", r\"\\W+\", r\"[^A-z]\", r\"_\", r\"\\s+\"] #first remove urls, then numbers and other characters, then non-english letters (e.g. 
chinese), then underscores (from user names for example), and last excess spaces \r\n #pre_lemma = url_pattern.sub('',df[\"text\"].iloc[638])\r\n pre_lemma = [url_pattern.sub(' ',x) for x in df[\"text\"].tolist()]\r\n for i in range(len(remove_more)):\r\n remove_more_pattern = re.compile(remove_more[i])\r\n pre_lemma = [remove_more_pattern.sub(' ',x) for x in pre_lemma]\r\n\r\n #lemmas = spacy_lemmatize(df[\"text\"].tolist(), nlp=nlp)\r\n lemmas = spacy_lemmatize(pre_lemma, nlp=nlp)\r\n #print(lemmas)\r\n lemmas = [' '.join(doc) for doc in lemmas]\r\n #Ida: remove -PRON- from text (white spaces are removed in the next step preprocess_for_topic_models)\r\n lemmas = [re.sub('-PRON-', '', lemmas[x]) for x in range(len(lemmas))]\r\n #print(lemmas)\r\n # preprocess\r\n lemmas = preprocess_for_topic_models(lemmas, lang=LANG)\r\n # model training\r\n print(\"\\n[INFO] training model...\\n\")\r\n to = Tokenizer()\r\n tokens = to.doctokenizer(lemmas)\r\n #import pdb; pdb.set_trace()\r\n #print(tokens)\r\n #take out empty lists\r\n invalid_entries = [index for index in range(len(tokens)) if tokens[index] == []]\r\n print(f'Invalid entries removed at {invalid_entries}: {df.iloc[invalid_entries,0]}')\r\n tokens = [x for x in tokens if x]\r\n #also remove line from df\r\n df_orig = df\r\n df = df.drop(labels=invalid_entries)\r\n df = df.reset_index(drop=True)\r\n #raise SystemExit\r\n tm, n = train_topic_model_mallet(tokens,\r\n ESTIMATE_TOPIPCS,\r\n TOPIC_TUNE,\r\n PLOT_TOPICS)\r\n print(tm, n)\r\n #tm, n = train_topic_model(tokens,\r\n # ESTIMATE_TOPIPCS,\r\n # TOPIC_TUNE,\r\n # PLOT_TOPICS)\r\n \r\n \r\n if SAVE_SEMANTIC_TOPICS:\r\n #tm.save_semantic_content(os.path.join(OUT_PATH, \"mdl\", f\"LDA_{n}_topics.txt\"))\r\n # From bow_mdl.py\r\n # static semantic content for model summary\r\n print(\"\\n[INFO] writing content to file...\\n\")\r\n with open(os.path.join(OUT_PATH, \"mdl\", \"{}_content.txt\".format(IN_DATA.split(\".\")[0])), \"w\") as f:\r\n for topic in tm.model.show_topics(num_topics=-1, num_words=10):\r\n f.write(\"{}\\n\\n\".format(topic))\r\n \r\n # Get topic representation for each document\r\n print(\"\\n[INFO] Getting topic distribution per document...\")\r\n print(\"subreddit = \", SUBREDDIT_NAME)\r\n #theta = tm.get_topic_distribution()\r\n #takes a lot of time\r\n theta = list()\r\n for i, doc in enumerate(tm.corpus):\r\n vector = [x[1] for x in tm.model[doc]]\r\n theta.append(vector)\r\n #print(\"[INFO] processed {}/{}\".format(i + 1, len(lemmas)))\r\n print(\"[INFO] processed {}/{}\".format(i + 1, len(tokens))) #Ida: needs to be length of tokens as empty entries were removed\r\n \r\n print(\"[INFO] exporting model...\")\r\n out = dict()\r\n out[\"model\"] = tm.model\r\n out[\"id2word\"] = tm.id2word\r\n out[\"corpus\"] = tm.corpus\r\n #out[\"tokenlists\"] = tm.tokenlists\r\n out[\"tokens\"] = tokens #add?\r\n out[\"theta\"] = theta\r\n out[\"dates\"] = df['date'].tolist()\r\n with open(os.path.join(OUT_PATH, \"mdl\", \"topic_dist_{}.pcl\".format(IN_DATA.split(\".\")[0])), \"wb\") as f:\r\n pickle.dump(out, f, protocol=pickle.HIGHEST_PROTOCOL)\r\n \r\n \r\n df['theta'] = theta\r\n ### Extract novelty and resonance\r\n dates = df[\"date\"].tolist()\r\n # instantiate and call\r\n print(\"[INFO] extracting novelty and resonance...\")\r\n df = extract_novelty_resonance(df, theta, dates, WINDOW)\r\n \r\n df.to_csv(OUT_FILE, index=False) \r\n \r\n #%% Print figures\r\n time, novelty, resonance, beta1 = plot_figures(df, OUT_PATH, IN_DATA, WINDOW)\r\n \r\n \r\n \r\n #%% Hurst 
exponent\r\n hurst_r = hurst_exp(resonance, OUT_PATH)\r\n \r\n #%% Save further output\r\n output = pd.DataFrame({'hurst': [hurst_r], 'beta1': [beta1]})\r\n output.to_csv(os.path.join(OUT_PATH, \"{}_hurst_beta.csv\".format(IN_DATA.split(\".\")[0])), index=False, encoding='utf-8-sig', sep=';')\r\n \r\n \r\n #%% Beta over time\r\n #generate beta time series with sliding window\r\n window = 21\r\n beta_w, time_w, time_middle, time_middle_days = beta_time_series(time, novelty, resonance, window, OUT_PATH, IN_DATA)\r\n\r\n #Analyse posts with top beta values\r\n percentage = 0.1\r\n #find time points according to top beta values\r\n time_top, threshold, list_top_idx = timepoints_beta_top(beta_w, time_w, percentage)\r\n #find indices of those time points using df NB: idx_top follows length of df\r\n idx_top = list(df['date'].index[df['date'] == time_top[i]].tolist() for i in range(len(time_top)))\r\n idx_top = list(idx_top[i][0] for i in range(len(idx_top)))\r\n #select the tokens\r\n tokens_top = list(tokens[i] for i in idx_top)\r\n #save top tokens\r\n with open(os.path.join(OUT_PATH, \"mdl\", \"{}_toptokens.txt\".format(IN_DATA.split(\".\")[0])), \"w\") as f:\r\n for element in tokens_top:\r\n f.write(\"{}\\n\".format(element))\r\n \r\n #topic modeling with BERT on top tokens\r\n \r\n \r\n #%%plot top time points onto other figures\r\n #resonance novelty timeseries plot\r\n figname = os.path.join(OUT_PATH, \"fig\", IN_DATA.split(\".\")[0] + \"_adaptline_top.png\")\r\n size_df = len(idx_top)\r\n x, y, cond = line_top_time(size_df, idx_top, WINDOW)\r\n adaptiveline_toptimes(novelty, resonance, x, y, cond, figname)\r\n \r\n #beta timeseries plot\r\n plot_beta_top_time(time_middle, beta_w, time_middle_days, time_top, threshold, list_top_idx, OUT_PATH, IN_DATA)\r\n \r\n return df, OUT_PATH, IN_DATA, beta1, hurst_r\r\n \r\n\r\n#%% Run functions\r\nif __name__ == '__main__':\r\n \r\n nlp = spacy.load(\"en_core_web_lg\")\r\n # you might need to download the model:\r\n # python -m spacy download en_core_web_lg\r\n \r\n #Run code in python, loop over all files\r\n \r\n ROOT_PATH = r\"/home/idanissen/newsFluxus-master_Lasse/newsFluxus-master\"\r\n #Ida added\r\n #os.mkdir(os.path.join(ROOT_PATH, \"dat\", \"mdl\"))\r\n #os.mkdir(os.path.join(ROOT_PATH, \"dat\", \"fig\"))\r\n REDDIT_DATA = r'/data/datalab/reddit-sample-hv/comments/*.ndjson'\r\n \r\n WINDOW=3 # window for novelty/resonance\r\n \r\n #loop over files\r\n slope_all = []\r\n hurst_all = []\r\n #ex: file = 'U:\\\\Python\\\\Newsfluxus\\\\newsFluxus-master_Lasse\\\\newsFluxus-master\\\\dat\\\\subreddits\\\\subreddit_ACTA.csv'\r\n for file in glob.glob(REDDIT_DATA):\r\n print(file)\r\n fname = file.split(\"/\")\r\n SUBREDDIT_NAME= fname[-1]\r\n SUBREDDIT_NAME = SUBREDDIT_NAME.split(\".\")\r\n SUBREDDIT_NAME = SUBREDDIT_NAME[0]\r\n #print(SUBREDDIT_NAME)\r\n \r\n #check for previous runs if subreddit run through\r\n if os.path.isfile(os.path.join(ROOT_PATH, \"dat\", \"subreddits_incl_comments\", \"output\", (SUBREDDIT_NAME + '_incl_comments_not_executed.txt'))):\r\n continue\r\n if os.path.isfile(os.path.join(ROOT_PATH, \"dat\", \"subreddits_incl_comments\", \"output\", (SUBREDDIT_NAME + '_finished.txt'))):\r\n continue\r\n \r\n #wait with the large subreddits\r\n if not(SUBREDDIT_NAME == 'Bitcoin' or SUBREDDIT_NAME == 'technology' or SUBREDDIT_NAME == 'conspiracy' or SUBREDDIT_NAME == 'ComputerSecurity' or SUBREDDIT_NAME == 'netsec' or SUBREDDIT_NAME == 'privacy' or SUBREDDIT_NAME == 'privacytools' or SUBREDDIT_NAME == 'privacytoolsIO' or 
SUBREDDIT_NAME == 'Stellar'):\r\n \r\n #novelty, resonance and indicator variables (beta and hurst)\r\n df, OUT_PATH, IN_DATA, beta1, hurst_r = main_module(SUBREDDIT_NAME, ROOT_PATH, REDDIT_DATA)\r\n \r\n #store indicator variables\r\n slope_all.append(beta1)\r\n hurst_all.append(hurst_r)\r\n \r\n dict = {'hurst': hurst_all, 'slope': slope_all}\r\n df_ind_var = pd.DataFrame(dict)\r\n df_ind_var.to_csv(os.path.join(OUT_PATH, 'all_subreddits_indicator_variables.csv'), index=False, encoding='utf-8-sig', sep=';')\r\n \r\n with open(os.path.join(OUT_PATH, \"{}_finished.txt\".format(SUBREDDIT_NAME)), \"w\") as f:\r\n f.write(\"finished successfully\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"numpy.arange"
],
[
"numpy.polyfit",
"scipy.stats.zscore",
"pandas.DataFrame",
"numpy.max",
"numpy.mean",
"numpy.polyval",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.box",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.Line2D",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"scipy.stats.t.ppf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
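The `main_extractor.py` cell in the record above derives a rolling slope ("beta") between z-scored novelty and resonance series via `sliding_window`, `stats.zscore`, and a first-order polynomial fit. Below is a minimal, self-contained sketch of just that windowing-and-slope step; the `novelty`/`resonance` arrays are synthetic stand-ins, not data from the record.

# Minimal sketch of the windowed beta computation from the record above.
# The input series are synthetic stand-ins; window width 21 mirrors the
# `window = 21` default used in the record's `beta_time_series`.
import numpy as np
from scipy import stats

def windows(seq, n=21):
    # Overlapping windows of width n, equivalent in effect to the
    # record's islice-based `sliding_window` helper.
    for i in range(len(seq) - n + 1):
        yield seq[i:i + n]

rng = np.random.default_rng(0)
novelty = rng.normal(size=200)
resonance = 0.5 * novelty + rng.normal(scale=0.5, size=200)

beta_w = []
for nov_w, res_w in zip(windows(novelty), windows(resonance)):
    xz, yz = stats.zscore(nov_w), stats.zscore(res_w)
    beta_w.append(round(np.polyfit(xz, yz, 1)[0], 2))  # regression slope

print(len(beta_w), beta_w[:5])  # 180 windows for a series of length 200

Because both series are z-scored within each window, the fitted slope reduces to the per-window Pearson correlation between novelty and resonance.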
RedTachyon/OpenTraj | [
"8277f526d714a4e77d0f9f354259ff5b74e59fd2"
] | [
"opentraj/toolkit/loaders/loader_pets.py"
] | [
"# Author: Javad Amirian\n# Email: [email protected]\n\nimport xml.etree.ElementTree as et\n\nimport numpy as np\nimport pandas as pd\n\nfrom opentraj.toolkit.core.trajdataset import TrajDataset\nfrom opentraj.toolkit.utils.calibration.camera_calibration_tsai import *\n\n\ndef load_pets(path, **kwargs):\n \"\"\"\n :param path: address of annotation file\n :param kwargs:\n :param calib_path: address of calibration file\n :return: TrajectoryDataset object\n \"\"\"\n traj_dataset = TrajDataset()\n\n annot_xtree = et.parse(path)\n annot_xroot = annot_xtree.getroot() # dataset\n\n cp, cc = None, None # calibration parameters\n\n # load calibration\n calib_path = kwargs.get('calib_path', \"\")\n if calib_path:\n cp = CameraParameters()\n cc = CalibrationConstants()\n\n calib_xtree = et.parse(calib_path)\n calib_xroot = calib_xtree.getroot() # Camera\n\n geometry_node = calib_xroot.find(\"Geometry\")\n width = int(geometry_node.attrib[\"width\"])\n height = int(geometry_node.attrib[\"height\"])\n\n cp.Ncx = float(geometry_node.attrib[\"ncx\"])\n cp.Nfx = float(geometry_node.attrib[\"nfx\"])\n cp.dx = float(geometry_node.attrib[\"dx\"])\n cp.dy = float(geometry_node.attrib[\"dy\"])\n cp.dpx = float(geometry_node.attrib[\"dpx\"])\n cp.dpy = float(geometry_node.attrib[\"dpy\"])\n\n intrinsic_node = calib_xroot.find(\"Intrinsic\")\n cc.f = float(intrinsic_node.attrib[\"focal\"])\n cc.kappa1 = float(intrinsic_node.attrib[\"kappa1\"]) # 1st order radial distortion\n\n cp.Cx = float(intrinsic_node.attrib[\"cx\"])\n cp.Cy = float(intrinsic_node.attrib[\"cy\"])\n cp.sx = float(intrinsic_node.attrib[\"sx\"])\n\n extrinsic_node = calib_xroot.find(\"Extrinsic\")\n cc.Tx = float(extrinsic_node.attrib[\"tx\"])\n cc.Ty = float(extrinsic_node.attrib[\"ty\"])\n cc.Tz = float(extrinsic_node.attrib[\"tz\"])\n cc.Rx = float(extrinsic_node.attrib[\"rx\"])\n cc.Ry = float(extrinsic_node.attrib[\"ry\"])\n cc.Rz = float(extrinsic_node.attrib[\"rz\"])\n\n cc.calc_rr() # Calculate Rotation Matrix\n\n loaded_data = [] # frame_id, agent_id, pos_x, pos_y, xc, yc, h, w\n for frame_node in annot_xroot:\n objectlist_node = frame_node.find(\"objectlist\") # .text\n object_nodes = objectlist_node.findall(\"object\")\n frame_id = int(frame_node.attrib.get(\"number\"))\n\n for obj_node in object_nodes:\n agent_id = obj_node.attrib[\"id\"]\n\n box_node = obj_node.find(\"box\")\n xc = float(box_node.attrib[\"xc\"])\n yc = float(box_node.attrib[\"yc\"])\n h = float(box_node.attrib[\"h\"])\n w = float(box_node.attrib[\"w\"])\n\n x_ground = xc\n y_ground = yc + h/2\n\n if cp:\n pos_x, pos_y = image_coord_to_world_coord(x_ground, y_ground, 0, cp, cc)\n else:\n pos_x, pos_y = np.nan, np.nan\n\n loaded_data.append([frame_id, agent_id, pos_x / 1000., pos_y / 1000., xc, yc, h, w])\n\n data_columns = [\"frame_id\", \"agent_id\", \"pos_x\", \"pos_y\",\n \"xc\", \"yc\", \"h\", \"w\"]\n raw_dataset = pd.DataFrame(np.array(loaded_data), columns=data_columns)\n\n traj_dataset.title = kwargs.get('title', \"PETS\")\n\n # copy columns\n traj_dataset.data[[\"frame_id\", \"agent_id\",\n \"pos_x\", \"pos_y\"]] = \\\n raw_dataset[[\"frame_id\", \"agent_id\",\n \"pos_x\", \"pos_y\"]]\n traj_dataset.data[\"scene_id\"] = kwargs.get('scene_id', 0)\n traj_dataset.data[\"label\"] = \"pedestrian\"\n\n # post-process\n fps = kwargs.get('fps', 7)\n sampling_rate = kwargs.get('sampling_rate', 1)\n use_kalman = kwargs.get('use_kalman', False)\n traj_dataset.postprocess(fps=fps, sampling_rate=sampling_rate, use_kalman=use_kalman)\n\n return traj_dataset\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
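`load_pets` in the record above takes the annotation path plus keyword arguments (`calib_path`, `title`, `scene_id`, `fps`, `sampling_rate`, `use_kalman`) and returns a populated `TrajDataset`. A hedged usage sketch, assuming the `opentraj` toolkit is installed and that the annotation and Tsai-calibration XML files exist at the hypothetical paths shown:

# Usage sketch for the loader defined in the record above.
# Both file paths below are hypothetical placeholders.
from opentraj.toolkit.loaders.loader_pets import load_pets

traj_dataset = load_pets(
    "PETS2009/S2L1/annotation.xml",      # hypothetical annotation file
    calib_path="PETS2009/View_001.xml",  # hypothetical Tsai calibration file
    title="PETS-S2L1",
    scene_id=0,
    fps=7,              # the loader's default frame rate
    sampling_rate=1,
    use_kalman=False,
)
# World coordinates (pos_x, pos_y) are in meters only when a calibration
# file is supplied; otherwise the loader fills them with NaN.
print(traj_dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]].head())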
t-brink/pyiron | [
"c07552b54a39e3f036ba395325cd4b372af0f794",
"c07552b54a39e3f036ba395325cd4b372af0f794",
"c07552b54a39e3f036ba395325cd4b372af0f794",
"c07552b54a39e3f036ba395325cd4b372af0f794",
"c07552b54a39e3f036ba395325cd4b372af0f794"
] | [
"pyiron/vasp/potential.py",
"tests/sphinx/test_structure.py",
"tests/atomistics/master/test_murnaghan_master_modal.py",
"pyiron/atomistics/thermodynamics/thermo_bulk.py",
"pyiron/interactive/sxextoptint.py"
] | [
"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nimport os\nimport posixpath\n\nimport numpy as np\nimport pandas\nimport tables\nimport warnings\nfrom pyiron_base import GenericParameters, Settings\nfrom pyiron.atomistics.job.potentials import PotentialAbstract, find_potential_file_base\n\n__author__ = \"Jan Janssen\"\n__copyright__ = (\n \"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - \"\n \"Computational Materials Design (CM) Department\"\n)\n__version__ = \"1.0\"\n__maintainer__ = \"Jan Janssen\"\n__email__ = \"[email protected]\"\n__status__ = \"development\"\n__date__ = \"Sep 1, 2017\"\n\ns = Settings()\n\n\nclass VaspPotentialAbstract(PotentialAbstract):\n \"\"\"\n\n Args:\n potential_df:\n default_df:\n selected_atoms:\n \"\"\"\n\n def __init__(self, potential_df=None, default_df=None, selected_atoms=None):\n if potential_df is None:\n potential_df = self._get_potential_df(\n plugin_name=\"vasp\",\n file_name_lst={\"potentials_vasp.csv\"},\n backward_compatibility_name=\"vasppotentials\",\n )\n super(VaspPotentialAbstract, self).__init__(\n potential_df=potential_df,\n default_df=default_df,\n selected_atoms=selected_atoms,\n )\n\n def default(self):\n if self._default_df is not None:\n return pandas.concat(\n [\n self._potential_df[\n (\n self._potential_df[\"Name\"]\n == self._default_df.loc[atom].values[0]\n )\n ]\n for atom in self._selected_atoms\n ]\n )\n return None\n\n def find_default(self, element):\n if isinstance(element, set):\n element = element\n elif isinstance(element, list):\n element = set(element)\n elif isinstance(element, str):\n element = set([element])\n else:\n raise TypeError(\"Only, str, list and set supported!\")\n element_lst = list(element)\n if self._default_df is not None:\n merged_lst = list(set(self._selected_atoms + element_lst))\n return pandas.concat(\n [\n self._potential_df[\n (\n self._potential_df[\"Name\"]\n == self._default_df.loc[atom].values[0]\n )\n ]\n for atom in merged_lst\n ]\n )\n return None\n\n def find(self, element):\n if isinstance(element, set):\n element = element\n elif isinstance(element, list):\n element = set(element)\n elif isinstance(element, str):\n element = set([element])\n else:\n raise TypeError(\"Only, str, list and set supported!\")\n element_lst = list(element)\n merged_lst = list(set(self._selected_atoms + element_lst))\n return pandas.concat(\n [super(VaspPotentialAbstract, self).find({atom}) for atom in merged_lst]\n )\n\n def list(self):\n if len(self._selected_atoms) != 0:\n return pandas.concat(\n [\n super(VaspPotentialAbstract, self).find({atom})\n for atom in self._selected_atoms\n ]\n )\n else:\n return pandas.DataFrame({})\n\n def list_potential_names(self):\n df = self.list()\n if len(df) != 0:\n return list(self.list()[\"Name\"])\n else:\n return []\n\n @staticmethod\n def _return_potential_file(file_name):\n for resource_path in s.resource_paths:\n resource_path_potcar = os.path.join(\n resource_path, \"vasp\", \"potentials\", file_name\n )\n if os.path.exists(resource_path_potcar):\n return resource_path_potcar\n return None\n\n def __dir__(self):\n return [val.replace(\"-\", \"_\") for val in self.list_potential_names()]\n\n def __getitem__(self, item):\n item_replace = item.replace(\"_gga_pbe\", \"-gga-pbe\").replace(\"_lda\", \"-lda\")\n if item_replace in self.list_potential_names():\n df = self.list()\n return 
self._return_potential_file(\n file_name=list(df[df[\"Name\"] == item_replace][\"Filename\"])[0][0]\n )\n selected_atoms = self._selected_atoms + [item]\n return VaspPotentialAbstract(\n potential_df=self._potential_df,\n default_df=self._default_df,\n selected_atoms=selected_atoms,\n )\n\n\nclass VaspPotentialFile(VaspPotentialAbstract):\n \"\"\"\n The Potential class is derived from the PotentialAbstract class, but instead of loading the potentials from a list,\n the potentials are loaded from a file.\n\n Args:\n xc (str): Exchange correlation functional ['PBE', 'LDA']\n \"\"\"\n\n def __init__(self, xc=None, selected_atoms=None):\n potential_df = self._get_potential_df(\n plugin_name=\"vasp\",\n file_name_lst={\"potentials_vasp.csv\"},\n backward_compatibility_name=\"vasppotentials\",\n )\n if xc == \"PBE\":\n default_df = self._get_potential_default_df(\n plugin_name=\"vasp\",\n file_name_lst={\"potentials_vasp_pbe_default.csv\"},\n backward_compatibility_name=\"defaultvasppbe\",\n )\n potential_df = potential_df[(potential_df[\"Model\"] == \"gga-pbe\")]\n elif xc == \"GGA\":\n default_df = self._get_potential_default_df(\n plugin_name=\"vasp\",\n file_name_lst={\"potentials_vasp_pbe_default.csv\"},\n backward_compatibility_name=\"defaultvasppbe\",\n )\n potential_df = potential_df[(potential_df[\"Model\"] == \"gga-pbe\")]\n elif xc == \"LDA\":\n default_df = self._get_potential_default_df(\n plugin_name=\"vasp\",\n file_name_lst={\"potentials_vasp_lda_default.csv\"},\n backward_compatibility_name=\"defaultvasplda\",\n )\n potential_df = potential_df[(potential_df[\"Model\"] == \"lda\")]\n else:\n raise ValueError(\n 'The exchange correlation functional has to be set and it can either be \"LDA\" or \"PBE\"'\n )\n super(VaspPotentialFile, self).__init__(\n potential_df=potential_df,\n default_df=default_df,\n selected_atoms=selected_atoms,\n )\n\n def add_new_element(self, parent_element, new_element):\n \"\"\"\n Adding a new user defined element with a different POTCAR file. 
It is assumed that the file exists\n\n Args:\n parent_element (str): Parent element\n new_element (str): Name of the new element (the name of the folder where the new POTCAR file exists\n\n \"\"\"\n ds = self.find_default(element=parent_element)\n ds[\"Species\"].values[0][0] = new_element\n path_list = ds[\"Filename\"].values[0][0].split(\"/\")\n path_list[-2] = new_element\n name_list = ds[\"Name\"].values[0].split(\"-\")\n name_list[0] = new_element\n ds[\"Name\"].values[0] = \"-\".join(name_list)\n ds[\"Filename\"].values[0][0] = \"/\".join(path_list)\n self._potential_df = self._potential_df.append(ds)\n if new_element not in self._default_df.index.values:\n ds = pandas.Series()\n ds.name = new_element\n ds[\"Name\"] = \"-\".join(name_list)\n self._default_df = self._default_df.append(ds)\n else:\n self._default_df.loc[new_element] = \"-\".join(name_list)\n\n\nclass VaspPotential(object):\n \"\"\"\n The Potential class is derived from the PotentialAbstract class, but instead of loading the potentials from a list,\n the potentials are loaded from a file.\n\n Args:\n path (str): path to the potential list\n \"\"\"\n\n def __init__(self, selected_atoms=None):\n self.pbe = VaspPotentialFile(xc=\"PBE\", selected_atoms=selected_atoms)\n self.lda = VaspPotentialFile(xc=\"LDA\", selected_atoms=selected_atoms)\n\n\nclass VaspPotentialSetter(object):\n def __init__(self, element_lst):\n super(VaspPotentialSetter, self).__setattr__(\"_element_lst\", element_lst)\n super(VaspPotentialSetter, self).__setattr__(\n \"_potential_dict\", {el: None for el in element_lst}\n )\n\n def __getattr__(self, item):\n if item in self._element_lst:\n return item\n else:\n raise AttributeError\n\n def __setitem__(self, key, value):\n self.__setattr__(key=key, value=value)\n\n def __setattr__(self, key, value):\n if key in self._element_lst:\n self._potential_dict[key] = value\n else:\n raise AttributeError\n\n def to_dict(self):\n return self._potential_dict\n\n def __repr__(self):\n return self._potential_dict.__repr__()\n\n\ndef find_potential_file(path):\n return find_potential_file_base(\n path=path,\n resource_path_lst=s.resource_paths,\n rel_path=os.path.join(\"vasp\", \"potentials\")\n )\n\n\ndef get_enmax_among_species(symbol_lst, return_list=False, xc=\"PBE\"):\n \"\"\"\n DEPRECATED: Please use `get_enmax_among_potentials`.\n\n Given a list of species symbols, finds the largest applicable encut.\n\n Args:\n symbol_lst (list): The list of species symbols.\n return_list (bool): Whether to return the list of all ENMAX values (in the same order as `species_lst` along with\n the largest value). (Default is False.)\n xc (\"GGA\"/\"PBE\"/\"LDA\"): The exchange correlation functional for which the POTCARs were generated. (Default is \"PBE\".)\n\n Returns:\n (float): The largest ENMAX among the POTCAR files for all the species.\n [optional](list): The ENMAX value corresponding to each species.\n \"\"\"\n warnings.warn((\"get_enmax_among_species is deprecated as of v0.3.0. Please use get_enmax_among_potentials and note \"\n + \"the adjustment to the signature (*args instead of list)\"), DeprecationWarning)\n return get_enmax_among_potentials(*symbol_lst, return_list=return_list, xc=xc)\n\n\ndef get_enmax_among_potentials(*names, return_list=False, xc=\"PBE\"):\n \"\"\"\n Given potential names without XC information or elemental symbols, look over all the corresponding POTCAR files and\n find the largest ENMAX value.\n\n e.g. 
`get_enmax_among_potentials('Mg', 'Al_GW', 'Ca_pv', 'Ca_sv', xc='LDA')`\n\n Args:\n *names (str): Names of potentials or elemental symbols\n return_list (bool): Whether to return the list of all ENMAX values (in the same order as `names` as a second\n return value after providing the largest value). (Default is False.)\n xc (\"GGA\"/\"PBE\"/\"LDA\"): The exchange correlation functional for which the POTCARs were generated.\n (Default is \"PBE\".)\n\n Returns:\n (float): The largest ENMAX among the POTCAR files for all the requested names.\n [optional](list): The ENMAX value corresponding to each species.\n \"\"\"\n def _get_just_element_from_name(name):\n return name.split('_')[0]\n\n def _get_index_of_exact_match(name, potential_names):\n try:\n return np.argwhere([name == strip_xc_from_potential_name(pn) for pn in potential_names])[0, 0]\n except IndexError:\n raise ValueError(\"Couldn't find {} among potential names for {}\".format(name,\n _get_just_element_from_name(name)))\n\n def _get_potcar_filename(name, exch_corr):\n potcar_table = VaspPotentialFile(xc=exch_corr).find(_get_just_element_from_name(name))\n return potcar_table['Filename'].values[\n _get_index_of_exact_match(name, potcar_table['Name'].values)\n ][0]\n\n enmax_lst = []\n for n in names:\n with open(find_potential_file(path=_get_potcar_filename(n, xc))) as pf:\n for i, line in enumerate(pf):\n if i == 14:\n encut_str = line.split()[2][:-1]\n enmax_lst.append(float(encut_str))\n break\n\n if return_list:\n return max(enmax_lst), enmax_lst\n else:\n return max(enmax_lst)\n\n\ndef strip_xc_from_potential_name(name):\n return name.split('-')[0]\n\n\nclass Potcar(GenericParameters):\n pot_path_dict = {\"GGA\": \"paw-gga-pbe\", \"PBE\": \"paw-gga-pbe\", \"LDA\": \"paw-lda\"}\n\n def __init__(self, input_file_name=None, table_name=\"potcar\"):\n GenericParameters.__init__(\n self,\n input_file_name=input_file_name,\n table_name=table_name,\n val_only=False,\n comment_char=\"#\",\n )\n self._structure = None\n self.electrons_per_atom_lst = list()\n self.max_cutoff_lst = list()\n self.el_path_lst = list()\n self.el_path_dict = dict()\n self.modified_elements = dict()\n\n def potcar_set_structure(self, structure, modified_elements):\n self._structure = structure\n self._set_default_path_dict()\n self._set_potential_paths()\n self.modified_elements = modified_elements\n\n def modify(self, **modify):\n if \"xc\" in modify:\n xc_type = modify[\"xc\"]\n self._set_default_path_dict()\n if xc_type not in self.pot_path_dict:\n raise ValueError(\"xc type not implemented: \" + xc_type)\n GenericParameters.modify(self, **modify)\n if self._structure is not None:\n self._set_potential_paths()\n\n def _set_default_path_dict(self):\n if self._structure is None:\n return\n vasp_potentials = VaspPotentialFile(xc=self.get(\"xc\"))\n for i, el_obj in enumerate(self._structure.get_species_objects()):\n if isinstance(el_obj.Parent, str):\n el = el_obj.Parent\n else:\n el = el_obj.Abbreviation\n if isinstance(el_obj.tags, dict):\n if \"pseudo_potcar_file\" in el_obj.tags.keys():\n new_element = el_obj.tags[\"pseudo_potcar_file\"]\n vasp_potentials.add_new_element(\n parent_element=el, new_element=new_element\n )\n key = vasp_potentials.find_default(el).Species.values[0][0]\n val = vasp_potentials.find_default(el).Name.values[0]\n self[key] = val\n\n def _set_potential_paths(self):\n element_list = (\n self._structure.get_species_symbols()\n ) # .ElementList.getSpecies()\n object_list = self._structure.get_species_objects()\n s.logger.debug(\"element 
list: {0}\".format(element_list))\n self.el_path_lst = list()\n try:\n xc = self.get(\"xc\")\n except tables.exceptions.NoSuchNodeError:\n xc = self.get(\"xc\")\n s.logger.debug(\"XC: {0}\".format(xc))\n vasp_potentials = VaspPotentialFile(xc=xc)\n for i, el_obj in enumerate(object_list):\n if isinstance(el_obj.Parent, str):\n el = el_obj.Parent\n else:\n el = el_obj.Abbreviation\n if (\n isinstance(el_obj.tags, dict)\n and \"pseudo_potcar_file\" in el_obj.tags.keys()\n ):\n new_element = el_obj.tags[\"pseudo_potcar_file\"]\n vasp_potentials.add_new_element(\n parent_element=el, new_element=new_element\n )\n el_path = find_potential_file(\n path=vasp_potentials.find_default(new_element)[\"Filename\"].values[\n 0\n ][0]\n )\n if not (os.path.isfile(el_path)):\n raise ValueError(\"such a file does not exist in the pp directory\")\n elif el in self.modified_elements.keys():\n new_element = self.modified_elements[el]\n if os.path.isabs(new_element):\n el_path = new_element\n else:\n vasp_potentials.add_new_element(\n parent_element=el, new_element=new_element\n )\n el_path = find_potential_file(\n path=vasp_potentials.find_default(new_element)[\"Filename\"].values[\n 0\n ][0]\n )\n else:\n el_path = find_potential_file(\n path=vasp_potentials.find_default(el)[\"Filename\"].values[0][0]\n )\n\n if not (os.path.isfile(el_path)):\n raise AssertionError()\n pot_name = \"pot_\" + str(i)\n\n if pot_name in self._dataset[\"Parameter\"]:\n try:\n ind = self._dataset[\"Parameter\"].index(pot_name)\n except (ValueError, IndexError):\n indices = np.core.defchararray.find(\n self._dataset[\"Parameter\"], pot_name\n )\n ind = np.where(indices == 0)[0][0]\n self._dataset[\"Value\"][ind] = el_path\n self._dataset[\"Comment\"][ind] = \"\"\n else:\n self._dataset[\"Parameter\"].append(\"pot_\" + str(i))\n self._dataset[\"Value\"].append(el_path)\n self._dataset[\"Comment\"].append(\"\")\n self.el_path_lst.append(el_path)\n\n def write_file(self, file_name, cwd=None):\n \"\"\"\n Args:\n file_name:\n cwd:\n Returns:\n \"\"\"\n self.electrons_per_atom_lst = list()\n self.max_cutoff_lst = list()\n self._set_potential_paths()\n if cwd is not None:\n file_name = posixpath.join(cwd, file_name)\n f = open(file_name, \"w\")\n for el_file in self.el_path_lst:\n with open(el_file) as pot_file:\n for i, line in enumerate(pot_file):\n f.write(line)\n if i == 1:\n self.electrons_per_atom_lst.append(int(float(line)))\n elif i == 14:\n mystr = line.split()[2][:-1]\n self.max_cutoff_lst.append(float(mystr))\n f.close()\n\n def load_default(self):\n file_content = \"\"\"\\\nxc GGA # LDA, GGA\n\"\"\"\n self.load_string(file_content)",
"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nimport posixpath\nimport unittest\nimport os\nimport numpy as np\nimport scipy.constants\nfrom pyiron.atomistics.structure.atoms import Atoms\nfrom pyiron.sphinx.structure import read_atoms\n\n__author__ = \"Sudarsan Surendralal\"\n__copyright__ = (\n \"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - \"\n \"Computational Materials Design (CM) Department\"\n)\n__version__ = \"1.0\"\n__maintainer__ = \"Sudarsan Surendralal\"\n__email__ = \"[email protected]\"\n__status__ = \"production\"\n__date__ = \"Feb 4, 2018\"\n\nBOHR_TO_ANGSTROM = (\n scipy.constants.physical_constants[\"Bohr radius\"][0] / scipy.constants.angstrom\n)\n\n\nclass TestSphinxStructure(unittest.TestCase):\n\n \"\"\"\n Testing routines in the sphinx/structure module.\n \"\"\"\n\n def setUp(self):\n self.file_location = os.path.dirname(os.path.abspath(__file__))\n structure_directory = \"../static/sphinx/sphinx_test_files\"\n file_list = [\"structure_1.sx\", \"structure_2.sx\"]\n self.file_list = [\n posixpath.join(self.file_location, structure_directory, f)\n for f in file_list\n ]\n atom_numbers = np.random.randint(low=1, high=99, size=(1, 3)).flatten()\n cell = 10.0 * np.eye(3)\n pos = 0.5 * np.ones((3, 3)) - 0.5 * np.eye(3)\n self.structure = Atoms(numbers=atom_numbers, cell=cell, positions=pos)\n self.assertIsInstance(self.structure, Atoms)\n self.structure.repeat([2, 2, 2])\n self.element_list = self.structure.get_chemical_elements()\n\n def test_read_atoms(self):\n for i, f in enumerate(self.file_list):\n atoms = read_atoms(filename=f)\n self.assertIsInstance(atoms, Atoms)\n if i == 0:\n self.assertEqual(atoms.get_chemical_formula(), \"Mg5\")\n self.assertTrue(\n np.allclose(\n atoms.cell / BOHR_TO_ANGSTROM,\n [\n [18.0936435257, 0.0, 0.0],\n [-12.0624290171, 20.8927399203, 0.0],\n [0.0, 0.0, 39.1932378013],\n ],\n )\n )\n if i == 1:\n self.assertEqual(atoms.get_chemical_formula(), \"C2Mg5\")\n self.assertTrue(\n np.allclose(\n atoms.cell / BOHR_TO_ANGSTROM,\n [\n [18.09364353, 0.00000000, 0.00000000],\n [-6.03121451, 20.89273992, -0.00000000],\n [0.00000000, 0.00000000, 39.19323780],\n ],\n )\n )\n\n def tearDown(self):\n pass\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nimport os\nfrom pyiron.atomistics.structure.atoms import CrystalStructure\nfrom pyiron_base import Project\nimport unittest\n\n\ndef convergence_goal(self, **qwargs):\n import numpy as np\n\n eps = 0.2\n if \"eps\" in qwargs:\n eps = qwargs[\"eps\"]\n erg_lst = self.get_from_childs(\"output/generic/energy\")\n var = 1000 * np.var(erg_lst)\n # print(var / len(erg_lst))\n if var / len(erg_lst) < eps:\n return True\n ham_prev = self[-1]\n job_name = self.first_child_name() + \"_\" + str(len(self))\n ham_next = ham_prev.restart(job_name=job_name)\n return ham_next\n\n\nclass TestMurnaghan(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.file_location = os.path.dirname(os.path.abspath(__file__))\n cls.project = Project(\n os.path.join(cls.file_location, \"testing_murnaghan_master_modal\")\n )\n cls.basis = CrystalStructure(\n element=\"Fe\", bravais_basis=\"bcc\", lattice_constant=2.8\n )\n cls.project.remove_jobs_silently(recursive=True)\n # cls.project.remove_jobs(recursive=True)\n # self.project.set_logging_level('INFO')\n\n @classmethod\n def tearDownClass(cls):\n file_location = os.path.dirname(os.path.abspath(__file__))\n project = Project(os.path.join(file_location, \"testing_murnaghan_master_modal\"))\n project.remove_jobs_silently(recursive=True)\n project.remove(enable=True, enforce=True)\n\n def test_run(self):\n # Even though the test is completed successful\n ham = self.project.create_job(\n self.project.job_type.AtomisticExampleJob, \"job_test\"\n )\n ham.structure = self.basis\n ham.server.run_mode.non_modal = True\n murn = self.project.create_job(\"Murnaghan\", \"murnaghan\")\n murn.ref_job = ham\n murn.input[\"num_points\"] = 3\n murn.server.run_mode.non_modal = True\n murn.run()\n self.assertFalse(ham.status.finished)\n self.project.wait_for_job(murn, interval_in_s=5, max_iterations=50)\n self.assertTrue(murn.status.finished)\n murn.remove()\n ham.remove()\n self.project.remove(enable=True, enforce=True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nfrom __future__ import print_function\n\nfrom copy import copy\nimport numpy as np\n\n__author__ = \"Joerg Neugebauer, Jan Janssen\"\n__copyright__ = (\n \"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - \"\n \"Computational Materials Design (CM) Department\"\n)\n__version__ = \"1.0\"\n__maintainer__ = \"Jan Janssen\"\n__email__ = \"[email protected]\"\n__status__ = \"development\"\n__date__ = \"Sep 1, 2017\"\n\n\nclass ThermoBulk(object):\n \"\"\"\n Class should provide all tools to compute bulk thermodynamic quantities. Central quantity is the Free Energy F(V,T).\n ToDo: Make it a (light weight) pyiron object (introduce a new tool rather than job object).\n\n Args:\n project:\n name:\n\n \"\"\"\n\n eV_to_J_per_mol = 1.60217662e-19 * 6.022e23\n kB = 1 / 8.6173303e-5\n\n def __init__(self, project=None, name=None):\n # only for compatibility with pyiron objects\n self._project = project\n self._name = name\n\n self._volumes = None\n self._temperatures = None\n self._energies = None\n self._entropy = None\n self._pressure = None\n self._num_atoms = None\n\n self._fit_order = 3\n\n def copy(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n cls = self.__class__\n result = cls.__new__(cls)\n result.__init__()\n result.__dict__[\"_volumes\"] = copy(self._volumes)\n result.__dict__[\"_temperatures\"] = copy(self._temperatures)\n result.__dict__[\"_energies\"] = copy(self._energies)\n result.__dict__[\"_fit_order\"] = self._fit_order\n return result\n\n def _reset_energy(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n if self._volumes is not None:\n if self._temperatures is not None:\n self._energies = np.zeros((len(self._temperatures), len(self._volumes)))\n # self.energies = 0\n\n @property\n def num_atoms(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n if self._num_atoms is None:\n return 1 # normalize per cell if number of atoms unknown\n return self._num_atoms\n\n @num_atoms.setter\n def num_atoms(self, num):\n \"\"\"\n\n Args:\n num:\n\n Returns:\n\n \"\"\"\n self._num_atoms = num\n\n @property\n def _coeff(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return np.polyfit(self._volumes, self._energies.T, deg=self._fit_order)\n\n @property\n def temperatures(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self._temperatures\n\n @property\n def _d_temp(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self.temperatures[1] - self.temperatures[0]\n\n @property\n def _d_vol(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self.volumes[1] - self.volumes[0]\n\n @temperatures.setter\n def temperatures(self, temp_lst):\n \"\"\"\n\n Args:\n temp_lst:\n\n Returns:\n\n \"\"\"\n if not hasattr(temp_lst, \"__len__\"):\n raise ValueError(\"Requires list as input parameter\")\n len_temp = -1\n if self._temperatures is not None:\n len_temp = len(self._temperatures)\n self._temperatures = np.array(temp_lst)\n if len(temp_lst) != len_temp:\n self._reset_energy()\n\n @property\n def volumes(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self._volumes\n\n @volumes.setter\n def volumes(self, volume_lst):\n \"\"\"\n\n Args:\n volume_lst:\n\n Returns:\n\n \"\"\"\n if not hasattr(volume_lst, \"__len__\"):\n raise ValueError(\"Requires list as input parameter\")\n len_vol = -1\n if self._volumes is not None:\n len_vol = len(self._volumes)\n self._volumes = np.array(volume_lst)\n if len(volume_lst) != len_vol:\n 
self._reset_energy()\n\n @property\n def entropy(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n if self._entropy is None:\n self._compute_thermo()\n return self._entropy\n\n @property\n def pressure(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n if self._pressure is None:\n self._compute_thermo()\n return self._pressure\n\n @property\n def energies(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self._energies\n\n @energies.setter\n def energies(self, erg_lst):\n \"\"\"\n\n Args:\n erg_lst:\n\n Returns:\n\n \"\"\"\n if np.ndim(erg_lst) == 2:\n self._energies = erg_lst\n elif np.ndim(erg_lst) == 1:\n if len(erg_lst) == len(self.volumes):\n self._energies = np.tile(erg_lst, (len(self.temperatures), 1))\n else:\n raise ValueError()\n else:\n self._energies = (\n np.ones((len(self.volumes), len(self.temperatures))) * erg_lst\n )\n\n def set_temperatures(\n self, temperature_min=0, temperature_max=1500, temperature_steps=50\n ):\n \"\"\"\n\n Args:\n temperature_min:\n temperature_max:\n temperature_steps:\n\n Returns:\n\n \"\"\"\n self.temperatures = np.linspace(\n temperature_min, temperature_max, temperature_steps\n )\n\n def set_volumes(self, volume_min, volume_max=None, volume_steps=10):\n \"\"\"\n\n Args:\n volume_min:\n volume_max:\n volume_steps:\n\n Returns:\n\n \"\"\"\n if volume_max is None:\n volume_max = 1.1 * volume_min\n self.volumes = np.linspace(volume_min, volume_max, volume_steps)\n\n def meshgrid(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return np.meshgrid(self.volumes, self.temperatures)\n\n def get_minimum_energy_path(self, pressure=None):\n \"\"\"\n\n Args:\n pressure:\n\n Returns:\n\n \"\"\"\n if pressure is not None:\n raise NotImplemented()\n v_min_lst = []\n for c in self._coeff.T:\n v_min = np.roots(np.polyder(c, 1))\n p_der2 = np.polyder(c, 2)\n p_val2 = np.polyval(p_der2, v_min)\n v_m_lst = v_min[p_val2 > 0]\n if len(v_m_lst) > 0:\n v_min_lst.append(v_m_lst[0])\n else:\n v_min_lst.append(np.nan)\n return np.array(v_min_lst)\n\n def get_free_energy(self, vol, pressure=None):\n \"\"\"\n\n Args:\n vol:\n pressure:\n\n Returns:\n\n \"\"\"\n if not pressure:\n return np.polyval(self._coeff, vol)\n else:\n raise NotImplementedError()\n\n def interpolate_volume(self, volumes, fit_order=None):\n \"\"\"\n\n Args:\n volumes:\n fit_order:\n\n Returns:\n\n \"\"\"\n if fit_order is not None:\n self._fit_order = fit_order\n new = self.copy()\n new.volumes = volumes\n new.energies = np.array([np.polyval(self._coeff, v) for v in volumes]).T\n return new\n\n def _compute_thermo(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n self._entropy, self._pressure = np.gradient(\n -self.energies, self._d_temp, self._d_vol\n )\n\n def get_free_energy_p(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n coeff = np.polyfit(self._volumes, self.energies.T, deg=self._fit_order)\n return np.polyval(coeff, self.get_minimum_energy_path())\n\n def get_entropy_p(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n s_coeff = np.polyfit(self._volumes, self.entropy.T, deg=self._fit_order)\n return np.polyval(s_coeff, self.get_minimum_energy_path())\n\n def get_entropy_v(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n eq_volume = self.volumes[0]\n s_coeff = np.polyfit(self.volumes, self.entropy.T, deg=self._fit_order)\n const_v = eq_volume * np.ones(len(s_coeff.T))\n return np.polyval(s_coeff, const_v)\n\n def plot_free_energy(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n try:\n import pylab as plt\n except ImportError:\n import matplotlib.pyplot as plt\n plt.plot(self.temperatures, self.get_free_energy_p() / self.num_atoms)\n plt.xlabel(\"Temperature 
[K]\")\n plt.ylabel(\"Free energy [eV]\")\n\n def plot_entropy(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n try:\n import pylab as plt\n except ImportError:\n import matplotlib.pyplot as plt\n plt.plot(\n self.temperatures,\n self.eV_to_J_per_mol / self.num_atoms * self.get_entropy_p(),\n label=\"S$_p$\",\n )\n plt.plot(\n self.temperatures,\n self.eV_to_J_per_mol / self.num_atoms * self.get_entropy_v(),\n label=\"S$_V$\",\n )\n plt.legend()\n plt.xlabel(\"Temperature [K]\")\n plt.ylabel(\"Entropy [J K$^{-1}$ mol-atoms$^{-1}$]\")\n\n def plot_heat_capacity(self, to_kB=True):\n \"\"\"\n\n Args:\n to_kB:\n\n Returns:\n\n \"\"\"\n try:\n import pylab as plt\n except ImportError:\n import matplotlib.pyplot as plt\n if to_kB:\n units = self.kB / self.num_atoms\n plt.ylabel(\"Heat capacity [kB]\")\n else:\n units = self.eV_to_J_per_mol\n plt.ylabel(\"Heat capacity [J K$^{-1}$ mol-atoms$^{-1}$]\")\n temps = self.temperatures[:-2]\n c_p = temps * np.gradient(self.get_entropy_p(), self._d_temp)[:-2]\n c_v = temps * np.gradient(self.get_entropy_v(), self._d_temp)[:-2]\n plt.plot(temps, units * c_p, label=\"c$_p$\")\n plt.plot(temps, units * c_v, label=\"c$_v$\")\n plt.legend(loc=\"lower right\")\n plt.xlabel(\"Temperature [K]\")\n\n def contour_pressure(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n try:\n import pylab as plt\n except ImportError:\n import matplotlib.pyplot as plt\n x, y = self.meshgrid()\n p_coeff = np.polyfit(self.volumes, self.pressure.T, deg=self._fit_order)\n p_grid = np.array([np.polyval(p_coeff, v) for v in self._volumes]).T\n plt.contourf(x, y, p_grid)\n plt.plot(self.get_minimum_energy_path(), self.temperatures)\n plt.xlabel(\"Volume [$\\AA^3$]\")\n plt.ylabel(\"Temperature [K]\")\n\n def contour_entropy(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n try:\n import pylab as plt\n except ImportError:\n import matplotlib.pyplot as plt\n s_coeff = np.polyfit(self.volumes, self.entropy.T, deg=self._fit_order)\n s_grid = np.array([np.polyval(s_coeff, v) for v in self.volumes]).T\n x, y = self.meshgrid()\n plt.contourf(x, y, s_grid)\n plt.plot(self.get_minimum_energy_path(), self.temperatures)\n plt.xlabel(\"Volume [$\\AA^3$]\")\n plt.ylabel(\"Temperature [K]\")\n\n def plot_contourf(self, ax=None, show_min_erg_path=False):\n \"\"\"\n\n Args:\n ax:\n show_min_erg_path:\n\n Returns:\n\n \"\"\"\n try:\n import pylab as plt\n except ImportError:\n import matplotlib.pyplot as plt\n x, y = self.meshgrid()\n if ax is None:\n fig, ax = plt.subplots(1, 1)\n ax.contourf(x, y, self.energies)\n if show_min_erg_path:\n plt.plot(self.get_minimum_energy_path(), self.temperatures, \"w--\")\n plt.xlabel(\"Volume [$\\AA^3$]\")\n plt.ylabel(\"Temperature [K]\")\n return ax\n\n def plot_min_energy_path(self, *args, ax=None, **qwargs):\n \"\"\"\n\n Args:\n *args:\n ax:\n **qwargs:\n\n Returns:\n\n \"\"\"\n try:\n import pylab as plt\n except ImportError:\n import matplotlib.pyplot as plt\n if ax is None:\n fig, ax = plt.subplots(1, 1)\n ax.xlabel(\"Volume [$\\AA^3$]\")\n ax.ylabel(\"Temperature [K]\")\n ax.plot(self.get_minimum_energy_path(), self.temperatures, *args, **qwargs)\n return ax\n",
"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nimport numpy as np\nimport subprocess\nimport os\nimport time\nimport posixpath\nimport warnings\nfrom pyiron_base import Settings, GenericParameters, Executable\nfrom pyiron.atomistics.job.interactivewrapper import (\n InteractiveWrapper,\n ReferenceJobOutput,\n)\nfrom pyiron.atomistics.job.interactive import InteractiveInterface\nfrom pyiron.sphinx.base import InputWriter\n\n__author__ = \"Jan Janssen, Osamu Waseda\"\n__copyright__ = (\n \"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - \"\n \"Computational Materials Design (CM) Department\"\n)\n__version__ = \"1.0\"\n__maintainer__ = \"Jan Janssen\"\n__email__ = \"[email protected]\"\n__status__ = \"development\"\n__date__ = \"Sep 1, 2018\"\n\n\ns = Settings()\n\n\nclass SxExtOpt(InteractiveInterface):\n def __init__(\n self,\n structure,\n working_directory=None,\n maxDist=5,\n ionic_steps=1000,\n ionic_energy=None,\n ionic_forces=None,\n ionic_energy_tolerance=1.0e-3,\n ionic_force_tolerance=1.0e-2,\n max_step_length=1.0e-1,\n soft_mode_damping=1,\n executable=None,\n ssa=False,\n ):\n if ionic_forces is not None:\n warnings.warn(\n (\n 'ionic_forces is deprecated as of vers. 0.3.0.' +\n 'It is not guaranteed to be in service in vers. 0.4.0.' +\n 'Use ionic_force_tolerance instead'\n ), DeprecationWarning\n )\n ionic_force_tolerance = ionic_forces\n if ionic_energy is not None:\n warnings.warn(\n (\n 'ionic_energy is deprecated as of vers. 0.3.0.' +\n 'It is not guaranteed to be in service in vers. 0.4.0.' +\n 'Use ionic_energy_tolerance instead'\n ), DeprecationWarning\n )\n ionic_energy_tolerance = ionic_energy\n super().__init__()\n self.__name__ = \"SxExtOpt\"\n if working_directory is None:\n warnings.warn(\"WARNING: working_directory not set; current folder is used\")\n working_directory = os.getcwd()\n self._interactive_library = None\n self._interactive_library_read = None\n self.working_directory = working_directory\n if executable is None:\n executable = Executable(\n path_binary_codes=s.resource_paths,\n codename=\"SxExtOptInteractive\",\n module=self.__module__.split(\".\")[1],\n overwrite_nt_flag=False,\n ).executable_path\n self._start_process(\n structure=structure,\n executable=executable,\n maxDist=maxDist,\n ionic_steps=ionic_steps,\n ionic_energy_tolerance=ionic_energy_tolerance,\n ionic_force_tolerance=ionic_force_tolerance,\n max_step_length=max_step_length,\n soft_mode_damping=soft_mode_damping,\n selective_dynamics=\"selective_dynamics\" in structure._tag_list.keys(),\n ssa=ssa,\n )\n self._cell = structure.cell\n if ssa:\n self._elements = structure.get_parent_symbols()\n else:\n magmom = structure.get_initial_magnetic_moments()\n magmom[magmom!=None] = np.round(magmom[magmom!=None], decimals=1)\n magmom = np.char.mod('%s', magmom)\n self._elements = np.char.add(structure.get_parent_symbols(), magmom)\n self._elements = np.char.replace(self._elements, '-', 'm')\n self._elements = np.char.replace(self._elements, '.', 'p')\n self._positions = structure.positions\n self._converged = False\n\n def _start_process(\n self,\n structure,\n executable,\n maxDist=5,\n ionic_steps=1000,\n ionic_energy_tolerance=1.0e-3,\n ionic_force_tolerance=1.0e-2,\n max_step_length=1.0e-1,\n soft_mode_damping=1,\n selective_dynamics=False,\n ssa=False,\n ):\n if selective_dynamics:\n input_writer_obj = InputWriter()\n 
input_writer_obj.structure = structure\n if ssa:\n input_writer_obj.structure.set_initial_magnetic_moments(len(structure)*[None])\n input_writer_obj.write_structure(\n file_name=\"structure.sx\",\n cwd=self.working_directory,\n structure_str=None,\n symmetry_enabled=True,\n keep_angstrom=True,\n )\n self._write_input(\n working_directory=self.working_directory,\n maxDist=maxDist,\n ionic_steps=ionic_steps,\n ionic_energy_tolerance=ionic_energy_tolerance,\n ionic_force_tolerance=ionic_force_tolerance,\n max_step_length=max_step_length,\n soft_mode_damping=soft_mode_damping,\n selective_dynamics=selective_dynamics,\n )\n\n shell = os.name == \"nt\"\n try:\n with open(\n posixpath.join(self.working_directory, \"out.txt\"), mode=\"w\"\n ) as f_out:\n with open(\n posixpath.join(self.working_directory, \"error.txt\"), mode=\"w\"\n ) as f_err:\n self._process = subprocess.Popen(\n [executable],\n cwd=self.working_directory,\n shell=shell,\n stdout=f_out,\n stderr=f_err,\n universal_newlines=True,\n )\n except subprocess.CalledProcessError as e:\n raise ValueError(\"run_job.py crashed\")\n while not self._interactive_pipes_initialized(self.working_directory):\n time.sleep(1)\n self._interactive_initialize_interface()\n\n @staticmethod\n def _write_input(\n working_directory,\n maxDist=5,\n ionic_steps=1000,\n ionic_energy_tolerance=1.0e-3,\n ionic_force_tolerance=1.0e-2,\n max_step_length=1.0e-1,\n soft_mode_damping=1,\n selective_dynamics=False,\n ):\n with open(os.path.join(working_directory, \"optim.sx\"), \"w\") as f:\n content = (\n \"main { ricQN { ric { maxDist = %f; withAngles; } maxSteps = %i; dEnergy = %f; dF = %f; maxStepLength = %f; softModeDamping = %f;}}\"\n % (\n maxDist,\n ionic_steps,\n ionic_energy_tolerance,\n ionic_force_tolerance,\n max_step_length,\n soft_mode_damping,\n )\n )\n if selective_dynamics:\n content += \"structure { include <structure.sx>; }\"\n f.write(content)\n\n @staticmethod\n def _interactive_pipes_initialized(working_directory):\n return os.path.exists(\n os.path.join(working_directory, \"control\")\n ) and os.path.exists(os.path.join(working_directory, \"response\"))\n\n def _interactive_write_line(self, line):\n self._interactive_library.write(\"%s\\n\" % line)\n self._interactive_library.flush()\n\n def _interactive_initialize_interface(self):\n self._interactive_library_read = open(\n os.path.join(self.working_directory, \"control\"), \"r\"\n )\n self._interactive_library = open(\n os.path.join(self.working_directory, \"response\"), \"w\"\n )\n\n def interactive_close(self):\n if self.interactive_is_activated():\n self._interactive_library.close()\n self._interactive_library_read.close()\n self._delete_named_pipes(working_directory=self.working_directory)\n\n @staticmethod \n def _delete_named_pipes(working_directory):\n for file in [\"control\", \"response\"]:\n file_path = posixpath.join(working_directory, file)\n if os.path.exists(file_path):\n os.remove(file_path)\n\n def interactive_is_activated(self):\n if self._interactive_library is None:\n return False\n else:\n return True\n\n def _write_cell(self, cell):\n for c in cell:\n self._interactive_write_line(\"%.16f %.16f %.16f\" % (c[0], c[1], c[2]))\n\n def _write_number_of_atoms(self, count):\n self._interactive_write_line(\"%s\" % count)\n\n def _write_positions(self, positions, elements):\n for pos, el in zip(positions, elements):\n self._interactive_write_line(\n \"%.16f %.16f %.16f %s\" % (pos[0], pos[1], pos[2], str(el))\n )\n\n def _write_energy(self):\n 
self._interactive_write_line(\"0\")\n\n def _write_forces(self, forces):\n for f in forces:\n self._interactive_write_line(\"%.16f %.16f %.16f\" % (f[0], f[1], f[2]))\n\n def _read_positions(self, count):\n return [\n [float(c) for c in self._interactive_library_read.readline().split()]\n for _ in range(count)\n ]\n\n def set_forces(self, forces):\n line = self._interactive_library_read.readline().split()\n if len(line) == 0 or line[0] == \"end\":\n print(\"Ending calculation\")\n self._converged = True\n elif line[0] == \"get\":\n if line[1] == \"cell\":\n self._write_cell(cell=self._cell)\n elif line[1] == \"natoms\":\n self._write_number_of_atoms(count=len(self._positions))\n elif line[1] == \"positions\":\n self._write_positions(\n positions=self._positions, elements=self._elements\n )\n elif line[1] == \"energy\":\n self._write_energy()\n elif line[1] == \"forces\":\n self._write_forces(forces=forces)\n else:\n raise ValueError(\"Unknown command:\", line)\n self.set_forces(forces)\n elif line[0] == \"run\":\n self.set_forces(forces)\n elif line[0] == \"set\":\n if line[1] == \"positions\":\n self._positions = np.array(self._read_positions(count=len(forces)))\n else:\n raise ValueError(\"Unknown command:\", line)\n else:\n raise ValueError(\"Unknown command:\", line)\n\n def get_positions(self):\n return self._positions\n\n def end(self):\n while not self.converged:\n self.set_forces(np.zeros_like(self._positions))\n\n @property\n def converged(self):\n return self._converged\n\n def __del__(self):\n self.end()\n if self.interactive_is_activated():\n self.interactive_close()\n else:\n self._delete_named_pipes(working_directory=self.working_directory)\n\n\nclass SxExtOptInteractive(InteractiveWrapper):\n def __init__(self, project, job_name):\n super(SxExtOptInteractive, self).__init__(project, job_name)\n self.__name__ = \"SxExtOptInteractive\"\n self.__version__ = (\n None\n ) # Reset the version number to the executable is set automatically\n self._executable_activate()\n self.input = Input()\n self.output = SxExtOptOutput(job=self)\n self._interactive_interface = None\n self._interactive_number_of_steps = 0\n\n def set_input_to_read_only(self):\n \"\"\"\n This function enforces read-only mode for the input classes, but it has to be implement in the individual\n classes.\n \"\"\"\n super(SxExtOptInteractive, self).set_input_to_read_only()\n self.input.read_only = True\n\n def write_input(self):\n pass\n\n def run_static(self):\n \"\"\"\n The run if modal function is called by run to execute the simulation, while waiting for the output. 
For this we\n use subprocess.check_output()\n \"\"\"\n self._create_working_directory()\n self._interactive_interface = SxExtOpt(\n structure=self.ref_job.structure,\n working_directory=self.working_directory,\n maxDist=int(self.input[\"maxDist\"]),\n ionic_steps=int(self.input[\"ionic_steps\"]),\n ionic_energy_tolerance=float(self.input[\"ionic_energy_tolerance\"]),\n ionic_force_tolerance=float(self.input[\"ionic_force_tolerance\"]),\n max_step_length=float(self.input[\"max_step_length\"]),\n soft_mode_damping=float(self.input[\"soft_mode_damping\"]),\n executable=self.executable.executable_path,\n ssa=self.input['ssa'],\n )\n self.status.running = True\n self._logger.info(\"job status: %s\", self.status)\n new_positions = self.ref_job.structure.positions\n self.ref_job_initialize()\n while (\n self._interactive_number_of_steps < self.input[\"ionic_steps\"]\n and not self._interactive_interface.converged\n ):\n str_temp = self.ref_job.structure\n str_temp.positions = new_positions\n self.ref_job.structure = str_temp\n if self.ref_job.server.run_mode.interactive:\n self._logger.debug(\"SxExtOpt: step start!\")\n self.ref_job.run()\n self._logger.debug(\"SxExtOpt: step finished!\")\n else:\n self.ref_job.run(delete_existing_job=True)\n self._interactive_interface.set_forces(forces=self.get_forces())\n new_positions = self._interactive_interface.get_positions()\n self._interactive_number_of_steps += 1\n self.status.collect = True\n if self.ref_job.server.run_mode.interactive:\n self.ref_job.interactive_close()\n self._interactive_interface.interactive_close()\n self.run()\n\n def get_forces(self):\n ff = np.array(self.ref_job.output.forces[-1])\n if hasattr(self.ref_job.structure, \"selective_dynamics\"):\n ff[np.array(self.ref_job.structure.selective_dynamics) == False] = 0\n return ff\n\n def convergence_check(self):\n \"\"\"\n Validate the convergence of the calculation.\n\n Returns:\n (bool): If the calculation is converged\n \"\"\"\n if self._interactive_number_of_steps < self.input[\"ionic_steps\"]:\n return True\n else:\n return False\n\n\nclass Input(GenericParameters):\n \"\"\"\n class to control the generic input for a Sphinx calculation.\n\n Args:\n input_file_name (str): name of the input file\n table_name (str): name of the GenericParameters table\n \"\"\"\n\n def __init__(self, input_file_name=None, table_name=\"input\"):\n super(Input, self).__init__(\n input_file_name=input_file_name,\n table_name=table_name,\n comment_char=\"//\",\n separator_char=\"=\",\n end_value_char=\";\",\n )\n\n def load_default(self):\n \"\"\"\n Loads the default file content\n \"\"\"\n file_content = (\n \"ionic_steps = 1000 // maximum number of ionic steps\\n\"\n \"ionic_energy_tolerance = 1.0e-3\\n\"\n \"ionic_force_tolerance = 1.0e-2\\n\"\n \"maxDist = 5 // maximum possible distance for considering neighbors\\n\"\n \"max_step_length = 1.0e-1 // maximum displacement at each step\\n\"\n \"ssa = False // ignore different magnetic moment values when internal symmetries are considered\\n\"\n \"soft_mode_damping = 1.0 // Tikhonov damper\\n\"\n )\n self.load_string(file_content)\n\n\nclass SxExtOptOutput(ReferenceJobOutput):\n def __init__(self, job):\n super(SxExtOptOutput, self).__init__(job=job)\n"
] | [
[
"pandas.concat",
"numpy.core.defchararray.find",
"pandas.Series",
"pandas.DataFrame",
"numpy.where"
],
[
"numpy.eye",
"numpy.ones",
"numpy.allclose",
"numpy.random.randint"
],
[
"numpy.var"
],
[
"numpy.polyder",
"matplotlib.pyplot.legend",
"numpy.polyfit",
"matplotlib.pyplot.contourf",
"numpy.linspace",
"numpy.gradient",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"numpy.ndim",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.polyval",
"matplotlib.pyplot.ylabel"
],
[
"numpy.round",
"numpy.char.mod",
"numpy.char.replace",
"numpy.zeros_like",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MECLabTUDA/ACS | [
"bb418c5479a3585138c48c63112352f5cc8f64b1",
"bb418c5479a3585138c48c63112352f5cc8f64b1"
] | [
"mp/models/continual/model_utils.py",
"mp/data/datasets/ds_mr_hippocampus_harp.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mp.models.segmentation.unet_fepegar import UNet2D\n\n### UNet Wrapper ###\nclass UNet2D_dis(UNet2D):\n r\"\"\"Wrapper for UNet2D to access encoder and decoder seperately.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(UNet2D_dis, self).__init__(*args, **kwargs)\n\n def forward_enc(self, x):\n skip_connections, encoding = self.encoder(x)\n encoding = self.bottom_block(encoding)\n return skip_connections, encoding\n\n def forward_dec(self, skip_connections, encoding):\n x = self.decoder(skip_connections, encoding)\n if self.monte_carlo_layer is not None:\n x = self.monte_carlo_layer(x)\n return self.classifier(x)\n\n### MODULES ###\nclass EncoderStyle(nn.Module):\n r\"\"\"Style Encoder (VAE).\n \"\"\"\n def __init__(self, in_channels):\n super(EncoderStyle, self).__init__()\n\n layers = []\n layers += [ConvBlock(in_channels=in_channels, out_channels=256)]\n layers += [ConvPoolBlock(in_channels=256, out_channels=64, pooling=False)]\n layers += [ConvPoolBlock(in_channels=64, out_channels=128, pooling=True)]\n layers += [ConvPoolBlock(in_channels=128, out_channels=128, pooling=False)]\n layers += [ConvPoolBlock(in_channels=128, out_channels=192, pooling=True)]\n layers += [ConvPoolBlock(in_channels=192, out_channels=192, pooling=False)]\n layers += [ConvPoolBlock(in_channels=192, out_channels=256, pooling=True)]\n\n global_pool = [nn.LeakyReLU(), nn.AdaptiveMaxPool2d(output_size=(3,3))]\n self.global_pool = nn.Sequential(*global_pool)\n\n self.layers = nn.Sequential(*layers)\n\n self.dense_mu = nn.Linear(in_features=3*3*256, out_features=1)\n self.dense_var = nn.Linear(in_features=3*3*256, out_features=1)\n \n def forward(self, x):\n x = self.layers(x)\n x = self.global_pool(x)\n mu = self.dense_mu(x.view(x.shape[0], -1))\n log_var = self.dense_var(x.view(x.shape[0], -1))\n return [mu, log_var]\n\nclass LatentScaler(nn.Module):\n r\"\"\"Scales samples from style encoding to be injected into the generator.\n \"\"\"\n def __init__(self, in_features):\n super(LatentScaler, self).__init__()\n\n layers = [nn.Linear(in_features=in_features, out_features=500), nn.LeakyReLU()]\n layers += [nn.Linear(in_features=500, out_features=1024), nn.LeakyReLU()]\n\n for _ in range(0, 2):\n layers += [nn.Linear(in_features=1024, out_features=1024), nn.LeakyReLU()]\n\n layers += [nn.Linear(in_features=1024, out_features=2560), nn.Tanh()]\n\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.layers(x).reshape(x.shape[0],10,-1) # 10 occurences a 256 filters\n return x\n\nclass Generator(nn.Module):\n r\"\"\"Generator using content encoding, scaled style encoding (see LatentScaler) and domain_code to generate images.\n \"\"\"\n def __init__(self, in_channels, out_channels, domain_code_size):\n super(Generator, self).__init__()\n\n layers_BCIN = [ResBlockBCIN(in_channels=in_channels, out_channels=in_channels, layer_id=0, stride=1, padding=1, domain_code_size=domain_code_size)]\n for i in range(0,4):\n layers_BCIN += [ResBlockBCIN(in_channels=in_channels, out_channels=in_channels, layer_id=i+1, stride=1, padding=1, domain_code_size=domain_code_size)]\n\n layers = [nn.ConvTranspose2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]\n layers += [nn.ConvTranspose2d(in_channels=in_channels, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]\n layers += [nn.ConvTranspose2d(in_channels=128, out_channels=128, 
kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]\n layers += [nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]\n layers += [nn.ConvTranspose2d(in_channels=64, out_channels=out_channels, kernel_size=7, stride=1, padding=3), nn.Sigmoid()]\n \n\n self.layers_BCIN = MultiInSequential(*layers_BCIN)\n self.layers = nn.Sequential(*layers)\n\n def forward(self, content, latent_scale, domain_code):\n content, latent_scale, domain_code = self.layers_BCIN(content, latent_scale, domain_code)\n x = self.layers(content)\n return x\n\nclass DiscriminatorDomain(nn.Module):\n r\"\"\"Domain Discriminator.\n \"\"\"\n def __init__(self, in_channels, domain_code_size, max_channels=512, kernel_size=4, stride=2):\n super(DiscriminatorDomain, self).__init__()\n\n layers = [ConvBlockBCIN(in_channels=in_channels, out_channels=64, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]\n layers += [ConvBlockBCIN(in_channels=64, out_channels=128, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]\n layers += [ConvBlockBCIN(in_channels=128, out_channels=max_channels//2, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]\n layers += [ConvBlockBCIN(in_channels=max_channels//2, out_channels=max_channels, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]\n layers += [ConvBlockBCIN(in_channels=max_channels, out_channels=1, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size, normalization='None')]\n self.layers = MultiInSequential(*layers)\n\n self.linear = nn.Linear(in_features=7**2, out_features=1)\n self.activation = nn.Sigmoid()\n \n def forward(self, x, domain_code):\n x, domain_code = self.layers(x, domain_code)\n x = x.view(x.shape[0],-1)\n x = self.linear(x)\n return x\n\nclass DiscriminatorContent(nn.Module):\n r\"\"\"Unet-style Content Discriminator.\n \"\"\"\n def __init__(self, in_channels, domain_code_size, max_channels=512, kernel_size=3, stride=2):\n super(DiscriminatorContent, self).__init__()\n\n self.in_channels = 16\n self.in_channels_max = 128\n self.out_channels = 32\n self.out_channels_max = 256\n padding = 1\n\n self.conv_0 = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels*2**1, kernel_size=kernel_size, stride=stride, padding=padding)\n self.norm_0 = nn.BatchNorm2d(self.in_channels*2**1)\n self.activation_0 = nn.ReLU()\n self.conv_1 = nn.Conv2d(in_channels=self.in_channels*2**1, out_channels=self.in_channels*2**2, kernel_size=kernel_size, stride=stride, padding=padding)\n self.norm_1 = nn.BatchNorm2d(self.in_channels*2**2)\n self.activation_1 = nn.ReLU()\n self.conv_2 = nn.Conv2d(in_channels=self.in_channels*2**2, out_channels=self.in_channels*2**3, kernel_size=kernel_size, stride=stride, padding=padding)\n self.norm_2 = nn.BatchNorm2d(self.in_channels*2**3)\n self.activation_2 = nn.ReLU()\n self.conv_3 = nn.Conv2d(in_channels=self.in_channels*2**3, out_channels=self.in_channels*2**4, kernel_size=kernel_size, stride=stride, padding=padding)\n self.norm_3 = nn.BatchNorm2d(self.in_channels*2**4)\n self.activation_3 = nn.ReLU()\n self.conv_4 = nn.Conv2d(in_channels=self.in_channels*2**4, out_channels=1, kernel_size=kernel_size, stride=stride, padding=padding)\n self.norm_4 = nn.BatchNorm2d(1)\n self.activation_4 = nn.ReLU()\n \n self.dense = nn.Linear(in_features = 8**2, out_features=domain_code_size)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, skip_connections, 
content_x):\n out = self.conv_0(skip_connections[0])\n out = self.norm_0(out)\n out = self.activation_0(out)\n out = self.conv_1(skip_connections[1] + out)\n out = self.norm_1(out)\n out = self.activation_1(out)\n out = self.conv_2(skip_connections[2] + out)\n out = self.norm_2(out)\n out = self.activation_2(out)\n out = self.conv_3(skip_connections[3] + out)\n out = self.norm_3(out)\n out = self.activation_3(out)\n out = self.conv_4(content_x + out)\n out = self.norm_4(out)\n out = self.activation_4(out)\n out = self.dense(out.reshape(content_x.shape[0], -1))\n out = self.softmax(out)\n return out\n \n def center_crop(self, skip_connection, x):\n skip_shape = torch.tensor(skip_connection.shape)\n x_shape = torch.tensor(x.shape)\n crop = skip_shape[2:] - x_shape[2:]\n half_crop = crop // 2\n # If skip_connection is 10, 20, 30 and x is (6, 14, 12)\n # Then pad will be (-2, -2, -3, -3, -9, -9)\n pad = -torch.stack((half_crop, half_crop)).t().flatten()\n skip_connection = F.pad(skip_connection, pad.tolist())\n return skip_connection\n\n### BUILDING BLOCKS ###\nclass ConvBlock(nn.Module):\n r\"\"\"Convolutional Block with normalization and activation.\n \"\"\"\n def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.LeakyReLU, normalization='Instance'):\n super(ConvBlock, self).__init__() \n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n \n self.normalization = normalization\n if self.normalization == 'Instance':\n self.norm = nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable\n if self.normalization =='BatchNorm':\n self.norm = nn.BatchNorm2d(num_features=out_channels)\n \n self.activation = activation()\n\n def forward(self,x):\n x = self.conv(x)\n if self.normalization in ['Instance', 'BatchNorm']:\n x = self.norm(x)\n x = self.activation(x)\n return x\n\nclass ConvPoolBlock(nn.Module):\n r\"\"\"Convolutional Block with normalization, activation and pooling.\n \"\"\"\n def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, pooling=True, activation=nn.LeakyReLU):\n super(ConvPoolBlock, self).__init__()\n\n self.pooling = pooling\n\n self.norm= nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable\n self.activation = activation()\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n self.pool = nn.AvgPool2d(kernel_size=kernel_size)\n\n def forward(self, x):\n x = self.norm(x)\n x = self.activation(x)\n x = self.conv(x)\n\n if self.pooling:\n x = self.pool(x)\n return x\n\nclass ConvBlockBCIN(nn.Module):\n r\"\"\"Convolutional Block with BCIN normalization and activation.\n \"\"\"\n def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.LeakyReLU, domain_code_size=10, normalization='BCIN'):\n super(ConvBlockBCIN, self).__init__() \n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n self.norm = BCIN(out_channels, domain_code_size) # not learnable\n self.activation = activation()\n\n self.normalization = normalization\n\n def forward(self, x, domain_code):\n x = self.conv(x)\n if self.normalization == 'BCIN': \n x = self.norm(x, domain_code)\n x = self.activation(x)\n return x, domain_code\n\nclass ResBlockIN(nn.Module):\n r\"\"\"Residual Block consisting of two convolutions 
with skip connection, instance normalization and activation.\n \"\"\"\n def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.ReLU):\n super(ResBlockIN, self).__init__()\n self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n self.conv1 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n\n self.norm0 = nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable\n self.norm1 = nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable\n self.activation = activation()\n \n def forward(self, x):\n x_in = x\n x = self.conv0(x)\n x = self.norm0(x)\n x = self.activation(x)\n x = self.conv1(x)\n x = self.norm1(x)\n x += self.center_crop(x_in, x)\n return x\n\n def center_crop(self, skip_connection, x):\n skip_shape = torch.tensor(skip_connection.shape)\n x_shape = torch.tensor(x.shape)\n crop = skip_shape[2:] - x_shape[2:]\n half_crop = crop // 2\n # If skip_connection is 10, 20, 30 and x is (6, 14, 12)\n # Then pad will be (-2, -2, -3, -3, -9, -9)\n pad = -torch.stack((half_crop, half_crop)).t().flatten()\n skip_connection = F.pad(skip_connection, pad.tolist())\n return skip_connection\n\nclass ResBlockBCIN(nn.Module):\n r\"\"\"Residual Block consisting of two convolutions with skip connection, BCIN normalization and activation.\n \"\"\"\n def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.ReLU, domain_code_size=10, layer_id=0):\n super(ResBlockBCIN, self).__init__()\n self.conv0 = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n self.conv1 = nn.ConvTranspose2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n\n self.norm0 = BCIN(num_features=out_channels, domain_code_size=domain_code_size, affine=True) # learnable\n self.norm1 = BCIN(num_features=out_channels, domain_code_size=domain_code_size, affine=True) # learnable\n self.activation = activation()\n\n self.layer_id = layer_id\n\n def forward(self, x, latent_scale, domain_code):\n \n x_in = x\n x = self.conv0(x)\n x = torch.mul(x, latent_scale[:,self.layer_id*2,:][:,:,None,None])\n x = self.norm0(x, domain_code)\n \n x = self.activation(x)\n\n x = self.conv1(x)\n x = torch.mul(x, latent_scale[:,self.layer_id*2+1,:][:,:,None,None])\n x = self.norm1(x, domain_code)\n\n x += self.center_crop(x_in, x)\n\n return x, latent_scale, domain_code\n\n def center_crop(self, skip_connection, x):\n skip_shape = torch.tensor(skip_connection.shape)\n x_shape = torch.tensor(x.shape)\n crop = skip_shape[2:] - x_shape[2:]\n half_crop = crop // 2\n # If skip_connection is 10, 20, 30 and x is (6, 14, 12)\n # Then pad will be (-2, -2, -3, -3, -9, -9)\n pad = -torch.stack((half_crop, half_crop)).t().flatten()\n skip_connection = F.pad(skip_connection, pad.tolist())\n return skip_connection\n\n### NORMALIZATION ###\nclass BCIN(nn.Module):\n r\"\"\"Central Biasing Instance Normalization\n https://arxiv.org/abs/1806.10050\n \"\"\"\n def __init__(self, num_features, domain_code_size, affine=True, instance_norm=False, batch_norm=False):\n super(BCIN, self).__init__()\n self.W = nn.Parameter(torch.rand(domain_code_size), requires_grad=affine)\n self.b = nn.Parameter(torch.rand(1), requires_grad=affine)\n self.activation = nn.Tanh()\n\n 
self.instance_norm = instance_norm\n if self.instance_norm:\n print('Using instance_norm instead of BCIN')\n self.i_norm = torch.nn.InstanceNorm2d(num_features=num_features)\n\n self.batch_norm = batch_norm\n if self.batch_norm:\n print('Using batch_norm instead of BCIN')\n self.b_norm = torch.nn.BatchNorm2d(num_features=num_features)\n\n def forward(self, x, domain_code):\n x_var = torch.sqrt(torch.var(x, (1,2,3))) # instance std\n x_mean = torch.mean(x, (1,2,3)) # instance mean\n bias = torch.matmul(domain_code, self.W) * self.b\n bias_scaled = self.activation(bias)\n\n\n if self.instance_norm:\n return self.i_norm(x)\n if self.batch_norm:\n return self.b_norm(x)\n\n return ((x-x_mean[:,None,None,None]) / x_var[:,None,None,None]) + bias_scaled[:,None,None,None]\n\n### HELPER MODULES ###\nclass MultiInSequential(nn.Sequential):\n r\"\"\"Sequential class that allows multiple inputs for forward function\n \"\"\"\n def forward(self, *input):\n for module in self._modules.values():\n input = module(*input)\n return input\n",
"# ------------------------------------------------------------------------------\n# Hippocampus segmentation task for the HarP dataset\n# (http://www.hippocampal-protocol.net/SOPs/index.php)\n# ------------------------------------------------------------------------------\n\nimport os\nimport re\n\nimport SimpleITK as sitk\nimport nibabel as nib\nimport numpy as np\n\nimport mp.data.datasets.dataset_utils as du\nfrom mp.data.datasets.dataset_segmentation import SegmentationDataset, SegmentationInstance\nfrom mp.paths import storage_data_path\nfrom mp.utils.load_restore import join_path\n\n\nclass HarP(SegmentationDataset):\n r\"\"\"Class for the segmentation of the HarP dataset,\n found at http://www.hippocampal-protocol.net/SOPs/index.php\n with the masks as .nii files and the scans as .mnc files.\n \"\"\"\n\n def __init__(self, subset=None, hold_out_ixs=None, merge_labels=True):\n # Part is either: \"Training\", \"Validation\" or \"All\"\n default = {\"Part\": \"All\"}\n if subset is not None:\n default.update(subset)\n subset = default\n else:\n subset = default\n\n if hold_out_ixs is None:\n hold_out_ixs = []\n\n global_name = 'HarP'\n name = du.get_dataset_name(global_name, subset)\n dataset_path = os.path.join(storage_data_path, global_name)\n original_data_path = du.get_original_data_path(global_name)\n\n # Build instances\n instances = []\n folders = []\n if subset[\"Part\"] in [\"Training\", \"All\"]:\n folders.append((\"100\", \"Training\"))\n if subset[\"Part\"] in [\"Validation\", \"All\"]:\n folders.append((\"35\", \"Validation\"))\n\n for orig_folder, dst_folder in folders:\n # Paths with the sub-folder for the current subset\n dst_folder_path = os.path.join(dataset_path, dst_folder)\n\n # Copy the images if not done already\n if not os.path.isdir(dst_folder_path):\n _extract_images(original_data_path, dst_folder_path, orig_folder)\n\n # Fetch all patient/study names\n study_names = set(file_name.split('.nii')[0].split('_gt')[0] for file_name\n in os.listdir(os.path.join(dataset_path, dst_folder)))\n\n for study_name in study_names:\n instances.append(SegmentationInstance(\n x_path=os.path.join(dataset_path, dst_folder, study_name + '.nii.gz'),\n y_path=os.path.join(dataset_path, dst_folder, study_name + '_gt.nii.gz'),\n name=study_name,\n group_id=None\n ))\n\n label_names = ['background', 'hippocampus']\n\n super().__init__(instances, name=name, label_names=label_names,\n modality='T1w MRI', nr_channels=1, hold_out_ixs=hold_out_ixs)\n\n\ndef _extract_images(source_path, target_path, subset):\n r\"\"\"Extracts images, merges mask labels (if specified) and saves the\n modified images.\n \"\"\"\n\n def bbox_3D(img):\n r = np.any(img, axis=(1, 2))\n c = np.any(img, axis=(0, 2))\n z = np.any(img, axis=(0, 1))\n\n rmin, rmax = np.where(r)[0][[0, -1]]\n cmin, cmax = np.where(c)[0][[0, -1]]\n zmin, zmax = np.where(z)[0][[0, -1]]\n\n return rmin, rmax, cmin, cmax, zmin, zmax\n\n # Folder 100 is for training (100 subjects), 35 subjects are left over for validation\n affine = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n images_path = os.path.join(source_path, subset)\n labels_path = os.path.join(source_path, f'Labels_{subset}_NIFTI')\n\n # Create directories\n os.makedirs(os.path.join(target_path))\n\n # For each MRI, there are 2 segmentation (left and right hippocampus)\n for filename in os.listdir(images_path):\n # Loading the .mnc file and converting it to a .nii.gz file\n minc = nib.load(os.path.join(images_path, filename))\n x = 
nib.Nifti1Image(minc.get_data(), affine=affine)\n\n # We need to recover the study name of the image name to construct the name of the segmentation files\n match = re.match(r\"ADNI_[0-9]+_S_[0-9]+_[0-9]+\", filename)\n if match is None:\n raise Exception(f\"A file ({filename}) does not match the expected file naming format\")\n\n # For each side of the brain\n for side in [\"_L\", \"_R\"]:\n study_name = match[0] + side\n\n y = sitk.ReadImage(os.path.join(labels_path, study_name + \".nii\"))\n y = sitk.GetArrayFromImage(y)\n\n # Shape expected: (189, 233, 197)\n # Average label shape (Training): (27.1, 36.7, 22.0)\n # Average label shape (Validation): (27.7, 35.2, 21.8)\n assert x.shape == y.shape\n # Disclaimer: next part is ugly and not many checks are made\n # BUGFIX: Some segmentation have some weird values eg {26896.988, 26897.988} instead of {0, 1}\n y = (y - np.min(y.flat)).astype(np.uint32)\n\n # So we first compute the bounding box\n rmin, rmax, cmin, cmax, zmin, zmax = bbox_3D(y)\n\n # Compute the start idx for each dim\n dr = (rmax - rmin) // 4\n dc = (cmax - cmin) // 4\n dz = (zmax - zmin) // 4\n\n # Reshaping\n y = y[rmin - dr: rmax + dr,\n cmin - dc: cmax + dc,\n zmin - dz: zmax + dz]\n\n x_cropped = x.get_data()[rmin - dr: rmax + dr,\n cmin - dc: cmax + dc,\n zmin - dz: zmax + dz]\n\n # Save new images so they can be loaded directly\n sitk.WriteImage(sitk.GetImageFromArray(y),\n join_path([target_path, study_name + \"_gt.nii.gz\"]))\n sitk.WriteImage(sitk.GetImageFromArray(x_cropped),\n join_path([target_path, study_name + \".nii.gz\"]))\n"
] | [
[
"torch.nn.Softmax",
"torch.mean",
"torch.nn.AdaptiveMaxPool2d",
"torch.nn.Sigmoid",
"torch.tensor",
"torch.mul",
"torch.rand",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.InstanceNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.stack",
"torch.nn.Tanh",
"torch.matmul",
"torch.var",
"torch.nn.ReLU"
],
[
"numpy.array",
"numpy.where",
"numpy.any",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hashi0203/deep-video-mvs | [
"b3943a9249d522dca3e6cd603e427f611cc7bad5",
"fa14288f149c5af7b2a49092f729f5c4f44517ba",
"fa14288f149c5af7b2a49092f729f5c4f44517ba",
"fa14288f149c5af7b2a49092f729f5c4f44517ba"
] | [
"dataset/7scenes-export/7scenes-export-color.py",
"dvmvs/pairnet/run-testing.py",
"dvmvs/pairnet/model.py",
"dvmvs/baselines/deltas/triangulation.py"
] | [
"import os\nimport shutil\nfrom multiprocessing.pool import Pool\n\nimport cv2\nimport numpy as np\nfrom functools import partial\nfrom path import Path\n\n\ndef process_scene(input_directory, output_folder):\n K = np.array([[525.0, 0.0, 320.0],\n [0.0, 525.0, 240.0],\n [0.0, 0.0, 1.0]])\n print(\"processing\", input_directory)\n image_filenames = sorted(input_directory.files(\"*color.png\"))\n pose_filenames = sorted(input_directory.files(\"*pose.txt\"))\n\n poses = []\n for pose_filename in pose_filenames:\n pose = np.loadtxt(pose_filename)\n poses.append(pose)\n\n scene = input_directory.split(\"/\")[-2]\n seq = input_directory.split(\"/\")[-1]\n current_output_dir = output_folder / scene + \"-\" + seq\n if os.path.isdir(current_output_dir):\n if os.path.exists(\"{}/poses.txt\".format(current_output_dir)) and os.path.exists(\"{}/K.txt\".format(current_output_dir)):\n return scene\n else:\n shutil.rmtree(current_output_dir)\n\n os.mkdir(current_output_dir)\n os.mkdir(os.path.join(current_output_dir, \"images\"))\n\n output_poses = []\n for current_index in range(len(image_filenames)):\n image = cv2.imread(image_filenames[current_index])\n\n output_poses.append(poses[current_index].ravel().tolist())\n\n cv2.imwrite(\"{}/images/{}.png\".format(current_output_dir, str(current_index).zfill(6)), image, [cv2.IMWRITE_PNG_COMPRESSION, 3])\n\n output_poses = np.array(output_poses)\n np.savetxt(\"{}/poses.txt\".format(current_output_dir), output_poses)\n np.savetxt(\"{}/K.txt\".format(current_output_dir), K)\n\n return scene\n\n\ndef main():\n input_folder = Path(\"/home/share/dataset/7scenes\")\n output_folder = Path(\"/home/nhsmt1123/master-thesis/deep-video-mvs/data/7scenes\")\n\n input_directories = [\n input_folder / \"redkitchen/seq-01\",\n input_folder / \"redkitchen/seq-07\",\n input_folder / \"chess/seq-01\",\n input_folder / \"chess/seq-02\",\n input_folder / \"heads/seq-02\",\n input_folder / \"fire/seq-01\",\n input_folder / \"fire/seq-02\",\n input_folder / \"office/seq-01\",\n input_folder / \"office/seq-03\",\n input_folder / \"pumpkin/seq-03\",\n input_folder / \"pumpkin/seq-06\",\n input_folder / \"stairs/seq-02\",\n input_folder / \"stairs/seq-06\", # train\n input_folder / \"redkitchen/seq-03\",\n input_folder / \"chess/seq-03\",\n input_folder / \"heads/seq-01\",\n input_folder / \"fire/seq-03\",\n input_folder / \"fire/seq-04\",\n input_folder / \"office/seq-02\",\n input_folder / \"pumpkin/seq-01\",\n input_folder / \"stairs/seq-01\"] # test\n\n pool = Pool(6)\n for finished_scene in pool.imap_unordered(partial(process_scene, output_folder=output_folder), input_directories):\n print(\"finished\", finished_scene)\n\n pool.join()\n pool.close()\n\n\nif __name__ == '__main__':\n main()\n",
"import cv2\nimport numpy as np\nimport torch\nfrom path import Path\nfrom tqdm import tqdm\n\nfrom dvmvs.config import Config\nfrom dvmvs.dataset_loader import PreprocessImage, load_image\nfrom dvmvs.pairnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, CostVolumeDecoder\nfrom dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_warp_grid_for_cost_volume_calculation\n\n\ndef predict():\n print(\"System: PAIRNET\")\n\n device = torch.device(\"cuda\")\n feature_extractor = FeatureExtractor()\n feature_shrinker = FeatureShrinker()\n cost_volume_encoder = CostVolumeEncoder()\n cost_volume_decoder = CostVolumeDecoder()\n\n feature_extractor = feature_extractor.to(device)\n feature_shrinker = feature_shrinker.to(device)\n cost_volume_encoder = cost_volume_encoder.to(device)\n cost_volume_decoder = cost_volume_decoder.to(device)\n\n model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]\n\n for i in range(len(model)):\n try:\n checkpoint = sorted(Path(\"weights\").files())[i]\n weights = torch.load(checkpoint)\n model[i].load_state_dict(weights)\n model[i].eval()\n print(\"Loaded weights for\", checkpoint)\n except Exception as e:\n print(e)\n print(\"Could not find the checkpoint for module\", i)\n exit(1)\n\n feature_extractor = model[0]\n feature_shrinker = model[1]\n cost_volume_encoder = model[2]\n cost_volume_decoder = model[3]\n\n warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),\n height=int(Config.test_image_height / 2),\n device=device)\n\n scale_rgb = 255.0\n mean_rgb = [0.485, 0.456, 0.406]\n std_rgb = [0.229, 0.224, 0.225]\n\n min_depth = 0.25\n max_depth = 20.0\n n_depth_levels = 64\n\n data_path = Path(Config.test_offline_data_path)\n if Config.test_dataset_name is None:\n keyframe_index_files = sorted((Path(Config.test_offline_data_path) / \"indices\").files())\n else:\n keyframe_index_files = sorted((Path(Config.test_offline_data_path) / \"indices\").files(\"*\" + Config.test_dataset_name + \"*\"))\n for iteration, keyframe_index_file in enumerate(keyframe_index_files):\n keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split(\"/\")[-1].split(\"+\")\n\n scene_folder = data_path / dataset_name / scene_name\n print(\"Predicting for scene:\", dataset_name + \"-\" + scene_name, \" - \", iteration, \"/\", len(keyframe_index_files))\n\n keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter=\"\\n\")\n\n K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)\n poses = np.fromfile(scene_folder / \"poses.txt\", dtype=float, sep=\"\\n \").reshape((-1, 4, 4))\n image_filenames = sorted((scene_folder / 'images').files(\"*.png\"))\n depth_filenames = sorted((scene_folder / 'depth').files(\"*.png\"))\n\n input_filenames = []\n for image_filename in image_filenames:\n input_filenames.append(image_filename.split(\"/\")[-1])\n\n inference_timer = InferenceTimer()\n\n predictions = []\n reference_depths = []\n with torch.no_grad():\n for i in tqdm(range(0, len(keyframe_index_file_lines))):\n\n keyframe_index_file_line = keyframe_index_file_lines[i]\n\n if keyframe_index_file_line == \"TRACKING LOST\":\n continue\n else:\n current_input_filenames = keyframe_index_file_line.split(\" \")\n current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]\n\n reference_index = current_indices[0]\n measurement_indices = current_indices[1:]\n\n 
reference_pose = poses[reference_index]\n reference_image = load_image(image_filenames[reference_index])\n reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0\n\n preprocessor = PreprocessImage(K=K,\n old_width=reference_image.shape[1],\n old_height=reference_image.shape[0],\n new_width=Config.test_image_width,\n new_height=Config.test_image_height,\n distortion_crop=Config.test_distortion_crop,\n perform_crop=Config.test_perform_crop)\n\n reference_image = preprocessor.apply_rgb(image=reference_image,\n scale_rgb=scale_rgb,\n mean_rgb=mean_rgb,\n std_rgb=std_rgb)\n reference_depth = preprocessor.apply_depth(reference_depth)\n reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)\n reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)\n\n measurement_poses_torch = []\n measurement_images_torch = []\n for measurement_index in measurement_indices:\n measurement_image = load_image(image_filenames[measurement_index])\n measurement_image = preprocessor.apply_rgb(image=measurement_image,\n scale_rgb=scale_rgb,\n mean_rgb=mean_rgb,\n std_rgb=std_rgb)\n measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)\n measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)\n measurement_images_torch.append(measurement_image_torch)\n measurement_poses_torch.append(measurement_pose_torch)\n\n full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)\n\n half_K_torch = full_K_torch.clone().cuda()\n half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0\n\n inference_timer.record_start_time()\n\n measurement_feature_halfs = []\n for measurement_image_torch in measurement_images_torch:\n measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))\n measurement_feature_halfs.append(measurement_feature_half)\n\n reference_feature_half, reference_feature_quarter, \\\n reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))\n\n cost_volume = cost_volume_fusion(image1=reference_feature_half,\n image2s=measurement_feature_halfs,\n pose1=reference_pose_torch,\n pose2s=measurement_poses_torch,\n K=half_K_torch,\n warp_grid=warp_grid,\n min_depth=min_depth,\n max_depth=max_depth,\n n_depth_levels=n_depth_levels,\n device=device,\n dot_product=True)\n\n skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,\n features_quarter=reference_feature_quarter,\n features_one_eight=reference_feature_one_eight,\n features_one_sixteen=reference_feature_one_sixteen,\n cost_volume=cost_volume)\n\n prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, bottom)\n\n inference_timer.record_end_time_and_elapsed_time()\n\n prediction = prediction.cpu().numpy().squeeze()\n reference_depths.append(reference_depth)\n predictions.append(prediction)\n\n if Config.test_visualize:\n visualize_predictions(numpy_reference_image=reference_image,\n numpy_measurement_image=measurement_image,\n numpy_predicted_depth=prediction,\n normalization_mean=mean_rgb,\n normalization_std=std_rgb,\n normalization_scale=scale_rgb)\n\n inference_timer.print_statistics()\n\n system_name = \"{}_{}_{}_{}_{}_dvmvs_pairnet\".format(keyframing_type,\n dataset_name,\n Config.test_image_width,\n 
Config.test_image_height,\n n_measurement_frames)\n\n save_results(predictions=predictions,\n groundtruths=reference_depths,\n system_name=system_name,\n scene_name=scene_name,\n save_folder=Config.test_result_folder)\n\n\nif __name__ == '__main__':\n predict()\n",
"from collections import OrderedDict\n\nimport torch\nfrom torchvision import models\nfrom torchvision.ops import FeaturePyramidNetwork\n\nfrom dvmvs.config import Config\nfrom dvmvs.layers import conv_layer, depth_layer_3x3\n\nfpn_output_channels = 32\nhyper_channels = 32\n\n\nclass StandardLayer(torch.nn.Module):\n def __init__(self, channels, kernel_size, apply_bn_relu):\n super(StandardLayer, self).__init__()\n self.conv1 = conv_layer(input_channels=channels,\n output_channels=channels,\n stride=1,\n kernel_size=kernel_size,\n apply_bn_relu=True)\n self.conv2 = conv_layer(input_channels=channels,\n output_channels=channels,\n stride=1,\n kernel_size=kernel_size,\n apply_bn_relu=apply_bn_relu)\n\n def forward(self, inp):\n x = self.conv1(inp)\n x = self.conv2(x)\n return x\n\n\nclass DownconvolutionLayer(torch.nn.Module):\n def __init__(self, input_channels, output_channels, kernel_size):\n super(DownconvolutionLayer, self).__init__()\n self.down_conv = conv_layer(input_channels=input_channels,\n output_channels=output_channels,\n stride=2,\n kernel_size=kernel_size,\n apply_bn_relu=True)\n\n def forward(self, inp):\n x = self.down_conv(inp)\n return x\n\n\nclass UpconvolutionLayer(torch.nn.Module):\n def __init__(self, input_channels, output_channels, kernel_size):\n super(UpconvolutionLayer, self).__init__()\n self.conv = conv_layer(input_channels=input_channels,\n output_channels=output_channels,\n stride=1,\n kernel_size=kernel_size,\n apply_bn_relu=True)\n\n def forward(self, inp):\n x = torch.nn.functional.interpolate(input=inp, scale_factor=2, mode='bilinear', align_corners=True)\n x = self.conv(x)\n return x\n\n\nclass EncoderBlock(torch.nn.Module):\n def __init__(self, input_channels, output_channels, kernel_size):\n super(EncoderBlock, self).__init__()\n self.down_convolution = DownconvolutionLayer(input_channels=input_channels,\n output_channels=output_channels,\n kernel_size=kernel_size)\n\n self.standard_convolution = StandardLayer(channels=output_channels,\n kernel_size=kernel_size,\n apply_bn_relu=True)\n\n def forward(self, inp):\n x = self.down_convolution(inp)\n x = self.standard_convolution(x)\n return x\n\n\nclass DecoderBlock(torch.nn.Module):\n def __init__(self, input_channels, output_channels, kernel_size, apply_bn_relu, plus_one):\n super(DecoderBlock, self).__init__()\n # Upsample the inpput coming from previous layer\n self.up_convolution = UpconvolutionLayer(input_channels=input_channels,\n output_channels=output_channels,\n kernel_size=kernel_size)\n\n if plus_one:\n next_input_channels = input_channels + 1\n else:\n next_input_channels = input_channels\n\n # Aggregate skip and upsampled input\n self.convolution1 = conv_layer(input_channels=next_input_channels,\n output_channels=output_channels,\n kernel_size=kernel_size,\n stride=1,\n apply_bn_relu=True)\n\n # Learn from aggregation\n self.convolution2 = conv_layer(input_channels=output_channels,\n output_channels=output_channels,\n kernel_size=kernel_size,\n stride=1,\n apply_bn_relu=apply_bn_relu)\n\n def forward(self, inp, skip, depth):\n inp = self.up_convolution(inp)\n\n if depth is None:\n x = torch.cat([inp, skip], dim=1)\n else:\n depth = torch.nn.functional.interpolate(depth, scale_factor=2, mode='bilinear', align_corners=True)\n x = torch.cat([inp, skip, depth], dim=1)\n\n x = self.convolution1(x)\n x = self.convolution2(x)\n return x\n\n\nclass FeatureExtractor(torch.nn.Module):\n def __init__(self):\n super(FeatureExtractor, self).__init__()\n backbone_mobile_layers = 
list(models.mnasnet1_0(pretrained=True).layers.children())\n\n self.layer1 = torch.nn.Sequential(*backbone_mobile_layers[0:8])\n self.layer2 = torch.nn.Sequential(*backbone_mobile_layers[8:9])\n self.layer3 = torch.nn.Sequential(*backbone_mobile_layers[9:10])\n self.layer4 = torch.nn.Sequential(*backbone_mobile_layers[10:12])\n self.layer5 = torch.nn.Sequential(*backbone_mobile_layers[12:14])\n\n def forward(self, image):\n layer1 = self.layer1(image)\n layer2 = self.layer2(layer1)\n layer3 = self.layer3(layer2)\n layer4 = self.layer4(layer3)\n layer5 = self.layer5(layer4)\n\n return layer1, layer2, layer3, layer4, layer5\n\n\nclass FeatureShrinker(torch.nn.Module):\n def __init__(self):\n super(FeatureShrinker, self).__init__()\n self.fpn = FeaturePyramidNetwork(in_channels_list=[16, 24, 40, 96, 320],\n out_channels=fpn_output_channels,\n extra_blocks=None)\n\n def forward(self, layer1, layer2, layer3, layer4, layer5):\n fpn_input = OrderedDict()\n fpn_input['layer1'] = layer1\n fpn_input['layer2'] = layer2\n fpn_input['layer3'] = layer3\n fpn_input['layer4'] = layer4\n fpn_input['layer5'] = layer5\n fpn_output = self.fpn(fpn_input)\n\n features_half = fpn_output['layer1']\n features_quarter = fpn_output['layer2']\n features_one_eight = fpn_output['layer3']\n features_one_sixteen = fpn_output['layer4']\n\n return features_half, features_quarter, features_one_eight, features_one_sixteen\n\n\nclass CostVolumeEncoder(torch.nn.Module):\n def __init__(self):\n super(CostVolumeEncoder, self).__init__()\n self.aggregator0 = conv_layer(input_channels=Config.train_n_depth_levels + fpn_output_channels,\n output_channels=hyper_channels,\n kernel_size=5,\n stride=1,\n apply_bn_relu=True)\n self.encoder_block0 = EncoderBlock(input_channels=hyper_channels,\n output_channels=hyper_channels * 2,\n kernel_size=5)\n ###\n self.aggregator1 = conv_layer(input_channels=hyper_channels * 2 + fpn_output_channels,\n output_channels=hyper_channels * 2,\n kernel_size=3,\n stride=1,\n apply_bn_relu=True)\n self.encoder_block1 = EncoderBlock(input_channels=hyper_channels * 2,\n output_channels=hyper_channels * 4,\n kernel_size=3)\n ###\n self.aggregator2 = conv_layer(input_channels=hyper_channels * 4 + fpn_output_channels,\n output_channels=hyper_channels * 4,\n kernel_size=3,\n stride=1,\n apply_bn_relu=True)\n self.encoder_block2 = EncoderBlock(input_channels=hyper_channels * 4,\n output_channels=hyper_channels * 8,\n kernel_size=3)\n\n ###\n self.aggregator3 = conv_layer(input_channels=hyper_channels * 8 + fpn_output_channels,\n output_channels=hyper_channels * 8,\n kernel_size=3,\n stride=1,\n apply_bn_relu=True)\n self.encoder_block3 = EncoderBlock(input_channels=hyper_channels * 8,\n output_channels=hyper_channels * 16,\n kernel_size=3)\n\n def forward(self, features_half, features_quarter, features_one_eight, features_one_sixteen, cost_volume):\n inp0 = torch.cat([features_half, cost_volume], dim=1)\n inp0 = self.aggregator0(inp0)\n out0 = self.encoder_block0(inp0)\n\n inp1 = torch.cat([features_quarter, out0], dim=1)\n inp1 = self.aggregator1(inp1)\n out1 = self.encoder_block1(inp1)\n\n inp2 = torch.cat([features_one_eight, out1], dim=1)\n inp2 = self.aggregator2(inp2)\n out2 = self.encoder_block2(inp2)\n\n inp3 = torch.cat([features_one_sixteen, out2], dim=1)\n inp3 = self.aggregator3(inp3)\n out3 = self.encoder_block3(inp3)\n\n return inp0, inp1, inp2, inp3, out3\n\n\nclass CostVolumeDecoder(torch.nn.Module):\n def __init__(self):\n super(CostVolumeDecoder, self).__init__()\n\n self.inverse_depth_base = 1 / 
Config.train_max_depth\n self.inverse_depth_multiplier = 1 / Config.train_min_depth - 1 / Config.train_max_depth\n\n self.decoder_block1 = DecoderBlock(input_channels=hyper_channels * 16,\n output_channels=hyper_channels * 8,\n kernel_size=3,\n apply_bn_relu=True,\n plus_one=False)\n\n self.decoder_block2 = DecoderBlock(input_channels=hyper_channels * 8,\n output_channels=hyper_channels * 4,\n kernel_size=3,\n apply_bn_relu=True,\n plus_one=True)\n\n self.decoder_block3 = DecoderBlock(input_channels=hyper_channels * 4,\n output_channels=hyper_channels * 2,\n kernel_size=3,\n apply_bn_relu=True,\n plus_one=True)\n\n self.decoder_block4 = DecoderBlock(input_channels=hyper_channels * 2,\n output_channels=hyper_channels,\n kernel_size=5,\n apply_bn_relu=True,\n plus_one=True)\n\n self.refine = torch.nn.Sequential(conv_layer(input_channels=hyper_channels + 4,\n output_channels=hyper_channels,\n kernel_size=5,\n stride=1,\n apply_bn_relu=True),\n conv_layer(input_channels=hyper_channels,\n output_channels=hyper_channels,\n kernel_size=5,\n stride=1,\n apply_bn_relu=True))\n\n self.depth_layer_one_sixteen = depth_layer_3x3(hyper_channels * 8)\n self.depth_layer_one_eight = depth_layer_3x3(hyper_channels * 4)\n self.depth_layer_quarter = depth_layer_3x3(hyper_channels * 2)\n self.depth_layer_half = depth_layer_3x3(hyper_channels)\n self.depth_layer_full = depth_layer_3x3(hyper_channels)\n\n def forward(self, image, skip0, skip1, skip2, skip3, bottom):\n # work on cost volume\n decoder_block1 = self.decoder_block1(bottom, skip3, None)\n sigmoid_depth_one_sixteen = self.depth_layer_one_sixteen(decoder_block1)\n inverse_depth_one_sixteen = self.inverse_depth_multiplier * sigmoid_depth_one_sixteen + self.inverse_depth_base\n\n decoder_block2 = self.decoder_block2(decoder_block1, skip2, sigmoid_depth_one_sixteen)\n sigmoid_depth_one_eight = self.depth_layer_one_eight(decoder_block2)\n inverse_depth_one_eight = self.inverse_depth_multiplier * sigmoid_depth_one_eight + self.inverse_depth_base\n\n decoder_block3 = self.decoder_block3(decoder_block2, skip1, sigmoid_depth_one_eight)\n sigmoid_depth_quarter = self.depth_layer_quarter(decoder_block3)\n inverse_depth_quarter = self.inverse_depth_multiplier * sigmoid_depth_quarter + self.inverse_depth_base\n\n decoder_block4 = self.decoder_block4(decoder_block3, skip0, sigmoid_depth_quarter)\n sigmoid_depth_half = self.depth_layer_half(decoder_block4)\n inverse_depth_half = self.inverse_depth_multiplier * sigmoid_depth_half + self.inverse_depth_base\n\n scaled_depth = torch.nn.functional.interpolate(sigmoid_depth_half, scale_factor=2, mode='bilinear', align_corners=True)\n scaled_decoder = torch.nn.functional.interpolate(decoder_block4, scale_factor=2, mode='bilinear', align_corners=True)\n scaled_combined = torch.cat([scaled_decoder, scaled_depth, image], dim=1)\n scaled_combined = self.refine(scaled_combined)\n inverse_depth_full = self.inverse_depth_multiplier * self.depth_layer_full(scaled_combined) + self.inverse_depth_base\n\n depth_full = 1.0 / inverse_depth_full.squeeze(1)\n depth_half = 1.0 / inverse_depth_half.squeeze(1)\n depth_quarter = 1.0 / inverse_depth_quarter.squeeze(1)\n depth_one_eight = 1.0 / inverse_depth_one_eight.squeeze(1)\n depth_one_sixteen = 1.0 / inverse_depth_one_sixteen.squeeze(1)\n\n return depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen\n",
"import numpy as np\nimport torch\nfrom torch import svd\n\nfrom .base_model import BaseModel\n\n\ndef homogeneous_to_euclidean(points):\n \"\"\"Converts homogeneous points to euclidean\n\n Args:\n points numpy array or torch tensor of shape (N, M + 1): N homogeneous points of dimension M\n\n Returns:\n numpy array or torch tensor of shape (N, M): euclidean points\n \"\"\"\n if isinstance(points, np.ndarray):\n return (points.T[:-1] / points.T[-1]).T\n elif torch.is_tensor(points):\n return (points.transpose(1, 0)[:-1] / points.transpose(1, 0)[-1]).transpose(1, 0)\n else:\n raise TypeError(\"Works only with numpy arrays and PyTorch tensors.\")\n\n\ndef triangulate_point_from_multiple_views_linear_torch_batch(proj_matricies, points, confidences=None):\n \"\"\"Similar as triangulate_point_from_multiple_views_linear() but for PyTorch.\n For more information see its documentation.\n Args:\n proj_matricies torch tensor of shape (N, 3, 4): sequence of projection matricies (3x4)\n points torch tensor of of shape (N, 2): sequence of points' coordinates\n confidences None or torch tensor of shape (N,): confidences of points [0.0, 1.0].\n If None, all confidences are supposed to be 1.0\n Returns:\n point_3d numpy torch tensor of shape (3,): triangulated point\n \"\"\"\n\n assert len(proj_matricies) == len(points)\n\n n_views = len(proj_matricies)\n\n if confidences is None:\n confidences = torch.ones(points.shape[1], n_views, dtype=torch.float32, device=points.device)\n\n ##multiple points\n points_t = points.transpose(0, 1)\n proj_mat = proj_matricies[:, 2:3].expand(n_views, 2, 4).unsqueeze(0)\n points_tview = points_t.view(points_t.size(0), n_views, 2, 1).expand(points_t.size(0), n_views, 2, 4)\n A_all = proj_mat * points_tview\n A_all -= proj_matricies[:, :2].unsqueeze(0)\n\n A_all *= confidences.view(confidences.size(0), n_views, 1, 1)\n\n A_all = A_all.contiguous().view(A_all.size(0), A_all.size(1) * A_all.size(2), 4)\n\n U, S, V = svd(A_all)\n\n points_3d_homo_all = -V[:, :, 3]\n points_3d = homogeneous_to_euclidean(points_3d_homo_all)\n\n return points_3d\n\n\ndef triangulate_batch_of_points(proj_matricies_batch, points_batch, confidences_batch=None):\n \"\"\"Triangulates for a batch of points\"\"\"\n batch_size, n_views = proj_matricies_batch.shape[:2]\n\n points_3d_batch = []\n for batch_i in range(batch_size):\n n_points = points_batch[batch_i].shape[1]\n points = points_batch[batch_i]\n confidences = confidences_batch[batch_i] if confidences_batch is not None else None\n points_3d = triangulate_point_from_multiple_views_linear_torch_batch(proj_matricies_batch[batch_i], points, confidences=confidences)\n points_3d_batch.append(points_3d)\n\n return points_3d_batch\n\n\ndef integrate_tensor_2d(heatmaps, softmax=True): # ,temperature = 1.0):\n \"\"\"Applies softmax to heatmaps and integrates them to get their's \"center of masses\"\n Args:\n heatmaps torch tensor of shape (batch_size, n_heatmaps, h, w): input heatmaps\n Returns:\n coordinates torch tensor of shape (batch_size, n_heatmaps, 2): coordinates of center of masses of all heatmaps\n \"\"\"\n batch_size, n_heatmaps, h, w = heatmaps.shape\n\n heatmaps = heatmaps.reshape((batch_size, n_heatmaps, -1))\n\n if softmax:\n heatmaps = torch.nn.functional.softmax(heatmaps, dim=2)\n else:\n heatmaps = torch.nn.functional.relu(heatmaps)\n\n heatmaps = heatmaps.reshape((batch_size, n_heatmaps, h, w))\n\n mass_x = heatmaps.sum(dim=2)\n mass_y = heatmaps.sum(dim=3)\n\n mass_times_coord_x = mass_x * 
torch.arange(w).type(torch.float).to(mass_x.device)\n mass_times_coord_y = mass_y * torch.arange(h).type(torch.float).to(mass_y.device)\n\n x = mass_times_coord_x.sum(dim=2, keepdim=True)\n y = mass_times_coord_y.sum(dim=2, keepdim=True)\n\n if not softmax:\n x = x / mass_x.sum(dim=2, keepdim=True)\n y = y / mass_y.sum(dim=2, keepdim=True)\n\n coordinates = torch.cat((x, y), dim=2)\n coordinates = coordinates.reshape((batch_size, n_heatmaps, 2))\n\n return coordinates\n\n\ndef unproject_ij(keypoints_2d, z, camera_matrix):\n \"\"\"Unprojects points into 3D using intrinsics\"\"\"\n\n z = z.squeeze(2).squeeze(1)\n x = ((keypoints_2d[:, :, 0] - camera_matrix[:, [0], [2]]) / camera_matrix[:, [0], [0]]) * z\n y = ((keypoints_2d[:, :, 1] - camera_matrix[:, [1], [2]]) / camera_matrix[:, [1], [1]]) * z\n xyz = torch.cat((x.unsqueeze(1), y.unsqueeze(1), z.unsqueeze(1)), dim=1)\n return xyz\n\n\ndef reproject_points(pose, pts, intrinsic, Z):\n \"\"\"Projects 3d points onto 2D image plane\"\"\"\n\n kp_arr = torch.ones((pts.shape[0], pts.shape[1], 3)).to(pts.device)\n kp_arr[:, :, :2] = pts\n\n K = intrinsic.unsqueeze(1)\n R = pose[:, :, :3, :3]\n T = pose[:, :, :3, 3:]\n\n kp_arr = kp_arr.unsqueeze(1)\n reproj_val = ((K @ R) @ (torch.inverse(K))) @ kp_arr.transpose(3, 2)\n\n proj_z = K @ T / Z\n reproj = reproj_val + proj_z\n reproj = reproj / reproj[:, :, 2:, :]\n\n return reproj[:, :, :2, :]\n\n\ndef patch_for_kp(keypoints, ker_size, out_length, roi_patch):\n \"\"\"Creates patch for key-point\"\"\"\n\n keypts_array = keypoints.unsqueeze(1)\n n_view = roi_patch.shape[1]\n keypts_array = keypts_array.repeat(1, n_view, 1, 1)\n\n xc = keypts_array[:, :, :, 0]\n yc = keypts_array[:, :, :, 1]\n\n h = torch.ones((keypts_array.shape[0], n_view, keypts_array.shape[2])).to(roi_patch.device) * ker_size # 3 #kernel_size\n w = ker_size * roi_patch[:, :, :, 3] / out_length\n theta = torch.zeros((keypts_array.shape[0], n_view, keypts_array.shape[2])).to(roi_patch.device)\n\n keypoint_patch = torch.stack((xc, yc, h, w, theta), 3)\n return keypoint_patch\n\n\ndef match_corr(embed_ref, embed_srch):\n \"\"\" Matches the two embeddings using the correlation layer. 
As per usual\n it expects input tensors of the form [B, C, H, W].\n Args:\n embed_ref: (torch.Tensor) The embedding of the reference image, or\n the template of reference (the average of many embeddings for\n example).\n embed_srch: (torch.Tensor) The embedding of the search image.\n Returns:\n match_map: (torch.Tensor) The correlation map between the reference\n and the search embeddings.\n \"\"\"\n\n _, _, k1, k2 = embed_ref.shape\n b, c, h, w = embed_srch.shape\n\n if k1 == 1 and k2 == 1:\n pad_img = (0, 0)\n else:\n pad_img = (0, 1)\n match_map = torch.nn.functional.conv2d(embed_srch.contiguous().view(1, b * c, h, w), embed_ref, groups=b, padding=pad_img)\n\n match_map = match_map.permute(1, 0, 2, 3)\n\n return match_map\n\n\ndef create_transform_matrix(roi_patch):\n \"\"\"Creates a 3x3 transformation matrix for the patches\"\"\"\n transform_matrix = torch.zeros((roi_patch.shape[0], roi_patch.shape[1], roi_patch.shape[2], 3, 3)).to(roi_patch.device)\n transform_matrix[:, :, :, 0, 0] = torch.cos(roi_patch[:, :, :, 4])\n transform_matrix[:, :, :, 0, 1] = -torch.sin(roi_patch[:, :, :, 4])\n transform_matrix[:, :, :, 0, 2] = roi_patch[:, :, :, 0]\n transform_matrix[:, :, :, 1, 0] = torch.sin(roi_patch[:, :, :, 4])\n transform_matrix[:, :, :, 1, 1] = torch.cos(roi_patch[:, :, :, 4])\n transform_matrix[:, :, :, 1, 2] = roi_patch[:, :, :, 1]\n transform_matrix[:, :, :, 2, 2] = 1.0\n\n return transform_matrix\n\n\ndef patch_sampler(roi_patch, out_length=640, distance=2, do_img=True, align_corners=False):\n \"\"\"Creates, scales and aligns the patch\"\"\"\n\n ##create a regular grid centered at xc,yc\n if out_length > 1:\n width_sample = torch.linspace(-0.5, 0.5, steps=out_length)\n else:\n width_sample = torch.tensor([0.])\n\n height_sample = torch.linspace(-distance, distance, steps=2 * distance + 1)\n xv, yv = torch.meshgrid([width_sample, height_sample])\n zv = torch.ones(xv.shape)\n patch_sample = torch.stack((xv, yv, zv), 2).to(roi_patch.device)\n\n arange_array = patch_sample.repeat(roi_patch.shape[0], roi_patch.shape[1], roi_patch.shape[2], 1, 1, 1)\n\n ## scaling the x dimension to ensure uniform sampling\n arange_array[:, :, :, :, :, 0] = (roi_patch[:, :, :, [3]].unsqueeze(4)) * arange_array[:, :, :, :, :, 0]\n aras = arange_array.shape\n arange_array = arange_array.contiguous().view(aras[0], aras[1], aras[2], aras[3] * aras[4], aras[5]).transpose(4, 3)\n\n # create matrix transform\n transform_matrix = create_transform_matrix(roi_patch)\n # transform\n patch_kp = transform_matrix @ arange_array\n\n patch_kp = patch_kp.view(aras[0], aras[1], aras[2], aras[5], aras[3], aras[4])\n patch_kp = patch_kp[:, :, :, :2, :, :].transpose(5, 3)\n return patch_kp, transform_matrix\n\n\ndef patch_for_depth_guided_range(keypoints, pose, intrinsic, img_shape, distance=2, min_depth=0.5, max_depth=10.0, align_corners=False):\n \"\"\"Represents search patch for a key-point using xc,yc, h,w, theta\"\"\"\n\n # get epilines\n n_view = pose.shape[1]\n pts = keypoints\n\n kp_arr = torch.ones((pts.shape[0], pts.shape[1], 3)).to(pts.device)\n kp_arr[:, :, :2] = pts\n kp_arr = kp_arr.unsqueeze(1)\n Fund, _ = get_fundamental_matrix(pose, intrinsic, intrinsic)\n lines_epi = (Fund @ (kp_arr.transpose(3, 2))).transpose(3, 2)\n\n # image shape\n height = img_shape[2]\n width = img_shape[3]\n\n # default intercepts\n array_zeros = torch.zeros((pts.shape[0], n_view, pts.shape[1])).to(pts.device)\n array_ones = torch.ones((pts.shape[0], n_view, pts.shape[1])).to(pts.device)\n\n x2ord = array_zeros.clone().detach()\n y2ord = array_zeros.clone().detach()\n x3ord = 
array_zeros.clone().detach()\n y3ord = array_zeros.clone().detach()\n\n x0_f = array_zeros.clone().detach()\n y0_f = array_zeros.clone().detach()\n x1_f = array_zeros.clone().detach()\n y1_f = array_zeros.clone().detach()\n\n ##get x2,x3 and order\n x2_y2 = reproject_points(pose, keypoints, intrinsic, min_depth)\n x2 = x2_y2[:, :, 0, :]\n y2 = x2_y2[:, :, 1, :]\n x3_y3 = reproject_points(pose, keypoints, intrinsic, max_depth)\n x3 = x3_y3[:, :, 0, :]\n y3 = x3_y3[:, :, 1, :]\n\n x_ord = x3 >= x2\n x2ord[x_ord] = x2[x_ord]\n y2ord[x_ord] = y2[x_ord]\n x3ord[x_ord] = x3[x_ord]\n y3ord[x_ord] = y3[x_ord]\n\n cx_ord = x2 > x3\n x2ord[cx_ord] = x3[cx_ord]\n y2ord[cx_ord] = y3[cx_ord]\n x3ord[cx_ord] = x2[cx_ord]\n y3ord[cx_ord] = y2[cx_ord]\n\n if align_corners:\n x_ord0 = (x2ord >= 0) & (x2ord < width)\n x_ord1 = (x3ord >= 0) & (x3ord < width)\n\n y_ord0 = (y2ord >= 0) & (y2ord < height)\n y_ord1 = (y3ord >= 0) & (y3ord < height)\n else:\n x_ord0 = (x2ord >= -0.5) & (x2ord < (width - 0.5))\n x_ord1 = (x3ord >= -0.5) & (x3ord < (width - 0.5))\n\n y_ord0 = (y2ord >= -0.5) & (y2ord < (height - 0.5))\n y_ord1 = (y3ord >= -0.5) & (y3ord < (height - 0.5))\n\n all_range = x_ord0 & x_ord1 & y_ord0 & y_ord1\n\n x0_f[all_range] = x2ord[all_range]\n y0_f[all_range] = y2ord[all_range]\n\n x1_f[all_range] = x3ord[all_range]\n y1_f[all_range] = y3ord[all_range]\n\n cond_null = ~all_range\n x0_f[cond_null] = array_zeros.clone().detach()[cond_null]\n y0_f[cond_null] = array_zeros.clone().detach()[cond_null]\n x1_f[cond_null] = array_zeros.clone().detach()[cond_null]\n y1_f[cond_null] = array_zeros.clone().detach()[cond_null]\n\n ## find box representation using #xc,yc, h,w, theta\n xc = (x0_f + x1_f) / 2.\n yc = (y0_f + y1_f) / 2.\n h = torch.ones((pts.shape[0], n_view, pts.shape[1])).to(pts.device) * max(2 * distance, 1)\n w = torch.sqrt((x1_f - x0_f) ** 2 + (y1_f - y0_f) ** 2)\n\n theta = torch.atan2(-lines_epi[:, :, :, 0], lines_epi[:, :, :, 1])\n\n if torch.sum(torch.isnan(theta)):\n import pdb;\n pdb.set_trace()\n roi_patch = torch.stack((xc, yc, h, w, theta), 3)\n\n return roi_patch\n\n\ndef sample_descriptors_epi(keypoints, descriptors, s, normalize=True, align_corner=False):\n \"\"\"Samples descriptors at point locations\"\"\"\n\n b, c, h, w = descriptors.shape\n\n keypoints = keypoints - s / 2 + 0.5\n keypoints /= torch.tensor([(w * s - s / 2 - 0.5), (h * s - s / 2 - 0.5)], device=keypoints.device)[None]\n\n keypoints = keypoints * 2 - 1\n if len(keypoints.shape) == 4:\n descriptors = torch.nn.functional.grid_sample(descriptors, keypoints.view(b, keypoints.shape[1], keypoints.shape[2], 2), mode='bilinear',\n align_corners=align_corner) ##pythorch 1.3+\n elif len(keypoints.shape) == 3:\n descriptors = torch.nn.functional.grid_sample(descriptors, keypoints.view(b, 1, -1, 2), mode='bilinear', align_corners=align_corner) ##pythorch 1.3+\n\n if normalize:\n descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1)\n\n return descriptors\n\n\ndef vec_to_skew_symmetric(v):\n \"\"\"Creates skew-symmetric matrix\"\"\"\n zero = torch.zeros_like(v[:, 0])\n M = torch.stack([\n zero, -v[:, 2], v[:, 1],\n v[:, 2], zero, -v[:, 0],\n -v[:, 1], v[:, 0], zero,\n ], dim=1)\n return M.reshape(-1, 3, 3)\n\n\ndef get_fundamental_matrix(T_10, K0, K1):\n \"\"\"Generates fundamental matrix\"\"\"\n\n ##Expects BX3x3 matrix \n k0 = torch.inverse(K0)\n k1 = torch.inverse(K1).transpose(1, 2)\n\n k0 = k0.unsqueeze(1)\n k1 = k1.unsqueeze(1)\n\n T_10 = T_10.view(-1, 4, 4)\n t_skew = vec_to_skew_symmetric(T_10[:, :3, 
3])\n E = t_skew @ T_10[:, :3, :3] ##Essential matrix\n E = E.view(k0.shape[0], -1, 3, 3)\n\n Fu = (k1 @ E) @ k0 ##Fundamental matrix\n F_norm = Fu[:, :, 2:, 2:]\n F_norm[F_norm == 0.] = 1.\n Fu = Fu / F_norm ##normalize it\n return Fu, E\n\n\nclass TriangulationNet(BaseModel):\n \"\"\"Triangulation module\"\"\"\n default_config = {\n\n 'depth_range': True,\n 'arg_max_weight': 1.0,\n\n 'dist_ortogonal': 1,\n 'kernel_size': 1,\n 'out_length': 100,\n 'has_confidence': True,\n\n 'min_depth': 0.5,\n 'max_depth': 10.0,\n 'align_corners': False,\n\n }\n\n def _init(self):\n\n self.relu = torch.nn.ReLU(inplace=False)\n self.bn_match_convD = torch.nn.BatchNorm2d(1)\n\n ##confidence layers\n pool_shape = (self.config['out_length'], 1 + (5 - self.config['kernel_size']))\n pad_shape = (0, 1) if self.config['dist_ortogonal'] == 2 else (1, 1)\n\n if self.config['has_confidence']:\n self.convD_confa = torch.nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=pad_shape)\n self.bnconvD_confa = torch.nn.BatchNorm2d(1)\n self.pool_convD_conf = torch.nn.MaxPool2d(pool_shape, stride=self.config['out_length'], return_indices=False)\n\n def _forward(self, data):\n\n pose = data['pose']\n intrinsic = data['intrinsics']\n img_shape = data['img_shape']\n desc = data['descriptors']\n desc_views = data['descriptors_views']\n sequence_length = data['sequence_length']\n keypoints = data['keypoints']\n depth_all = data['depth']\n depth_ref = data['ref_depths']\n\n del data\n\n st = img_shape[2] // desc.shape[2]\n dist = self.config['dist_ortogonal']\n ker_size = self.config['kernel_size']\n out_length = self.config['out_length']\n\n pred = {}\n pred['keypoints'] = keypoints\n\n ## Creates patches for matching\n depth_at_kp = sample_descriptors_epi(keypoints, depth_all.unsqueeze(1), 1, False, self.config['align_corners'])\n roi_patch = patch_for_depth_guided_range(keypoints, pose, intrinsic, img_shape, distance=dist, min_depth=self.config['min_depth'],\n max_depth=self.config['max_depth'], align_corners=self.config['align_corners'])\n keypoint_patch = patch_for_kp(keypoints, ker_size, out_length, roi_patch)\n\n ## Extract sampled keypoints \n kp_image, transform_matrix = patch_sampler(roi_patch, out_length=out_length, distance=dist, do_img=True, align_corners=self.config['align_corners'])\n kp_anchor, _ = patch_sampler(keypoint_patch, out_length=ker_size, distance=ker_size // 2, do_img=False, align_corners=self.config['align_corners'])\n\n ## Reshape along batch dimenstion\n kp_image_shp = kp_image.shape\n kp_image = kp_image.contiguous().view(kp_image_shp[0] * kp_image_shp[1], kp_image_shp[2], kp_image_shp[3] * kp_image_shp[4], kp_image_shp[5])\n kp_anchor_shp = kp_anchor.shape\n kp_anchor = kp_anchor.contiguous().view(kp_anchor_shp[0] * kp_anchor_shp[1], kp_image_shp[2], kp_anchor_shp[3] * kp_anchor_shp[4], kp_anchor_shp[5])\n\n ## Sample\n desc_views_shp = desc_views.shape\n desc_views = desc_views.reshape(desc_views_shp[0] * desc_views_shp[1], desc_views_shp[2], desc_views_shp[3], desc_views_shp[4])\n descriptor_at_image = sample_descriptors_epi(kp_image.detach(), desc_views, st, True, self.config['align_corners'])\n descriptor_at_anchor = sample_descriptors_epi(kp_anchor.detach(), desc.repeat_interleave(sequence_length, dim=0), st, True,\n self.config['align_corners'])\n\n del kp_image, kp_anchor, keypoint_patch, desc, desc_views\n\n descriptor_at_anchor = descriptor_at_anchor.contiguous().view(descriptor_at_anchor.shape[0], descriptor_at_anchor.shape[1], kp_anchor_shp[2],\n kp_anchor_shp[3], kp_anchor_shp[4])\n 
descriptor_at_image = descriptor_at_image.contiguous().view(descriptor_at_image.shape[0], descriptor_at_image.shape[1], kp_image_shp[2],\n kp_image_shp[3], kp_image_shp[4])\n\n descriptor_at_anchor = descriptor_at_anchor.transpose(2, 1)\n descriptor_at_image = descriptor_at_image.transpose(2, 1)\n\n dancs = descriptor_at_anchor.shape\n dimgs = descriptor_at_image.shape\n\n descriptor_at_anchor = descriptor_at_anchor.contiguous().view(dancs[0] * dancs[1], dancs[2], dancs[3], dancs[4])\n descriptor_at_image = descriptor_at_image.contiguous().view(dimgs[0] * dimgs[1], dimgs[2], dimgs[3], dimgs[4])\n\n ## Do cross correlation\n match_map = match_corr(descriptor_at_anchor, descriptor_at_image)\n match_map = self.bn_match_convD(match_map)\n match_map = self.relu(match_map)\n\n del descriptor_at_anchor, descriptor_at_image\n\n if self.config['has_confidence']:\n conf_da = match_map\n conf_da = torch.nn.functional.adaptive_max_pool2d(conf_da, (1, 1))\n conf_da = conf_da.contiguous().view(kp_image_shp[0], kp_image_shp[1], -1)\n\n sc_factor = 1.0\n conf_da = torch.sigmoid(sc_factor * conf_da)\n conf_damp = roi_patch[:, :, :, 3] > 0.\n conf_da = conf_da * (conf_damp.float() + 0.001)\n\n self_confidence = torch.ones((conf_da.shape[0], 1, conf_da.shape[2])).to(conf_da.device)\n conf_da = torch.cat((self_confidence, conf_da), 1)\n conf_da = conf_da.transpose(2, 1)\n pred['confidence'] = conf_da\n else:\n pred['confidence'] = None\n\n ## SOFTARGMAX\n out_kp_match = integrate_tensor_2d(match_map * self.config['arg_max_weight'], True)\n\n ## Change from local coordinates to image coordinates\n out_kp_match /= torch.tensor([match_map.shape[3] - 1., max(match_map.shape[2] - 1., 1.)], device=out_kp_match.device)[None]\n\n if match_map.shape[2] == 1:\n sub_roi = (torch.tensor([0.5, 0.]).unsqueeze(0).unsqueeze(1)).to(out_kp_match.device)\n else:\n sub_roi = 0.5\n\n out_kp_match -= sub_roi\n out_ones = torch.ones((out_kp_match.shape[0], 1, 1)).to(out_kp_match.device)\n out_kp_match = torch.cat((out_kp_match, out_ones), 2)\n out_kp_match = out_kp_match.view(kp_image_shp[0], kp_image_shp[1], kp_image_shp[2], 3)\n\n ## scale the local x coordinate to match sampling frequency\n mult_0 = roi_patch[:, :, :, [3]]\n mult_1 = torch.ones_like(mult_0)\n mult_1[mult_0 == 0.] 
= 0.\n roi_mult = torch.cat((mult_0, mult_1, mult_1), 3)\n out_kp_match *= roi_mult\n\n range_kp = roi_patch[:, :, :, 3] > 0.\n pred['range_kp'] = range_kp\n\n ##global coordinates\n val_kp_match = ((transform_matrix @ out_kp_match.unsqueeze(4))[:, :, :, :2, :]).squeeze(4)\n pred['multiview_matches'] = val_kp_match\n\n del out_kp_match, transform_matrix, match_map\n\n ## 3d GT\n keypoints_3d_gt = unproject_ij(keypoints, depth_at_kp, intrinsic)\n pred['keypoints3d_gt'] = keypoints_3d_gt.transpose(2, 1)\n\n #### Triangulation\n pose_tiled = pose[:, :, :3, :]\n intrinsic_tiled = intrinsic\n confidence = pred['confidence']\n\n anchor_keypoints = keypoints.unsqueeze(1)\n multiview_matches = torch.cat((anchor_keypoints, val_kp_match), 1)\n\n projection_mat = []\n projection_ref = []\n proj_identity = torch.tensor([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.]])\n if torch.cuda.is_available():\n proj_identity = proj_identity.cuda()\n\n for batch_idx in range(pose_tiled.size(0)):\n proj_ref_idx = torch.mm(intrinsic_tiled[batch_idx], proj_identity).unsqueeze(0)\n projection_ref.append(proj_ref_idx)\n\n projection_mat_view = []\n for j in range(sequence_length):\n proj_mat_idx = torch.mm(intrinsic_tiled[batch_idx], pose_tiled[batch_idx][j]).unsqueeze(0)\n projection_mat_view.append(proj_mat_idx)\n\n projection_mat_view = torch.cat(projection_mat_view, 0).unsqueeze(0)\n projection_mat.append(projection_mat_view)\n\n projection_mat = torch.cat(projection_mat, 0)\n projection_ref = torch.cat(projection_ref, 0).unsqueeze(1)\n\n proj_matrices = torch.cat([projection_ref, projection_mat], 1)\n\n del projection_ref, projection_mat\n\n if self.config['has_confidence']:\n keypoints_3d = triangulate_batch_of_points(proj_matrices, multiview_matches, confidence)\n else:\n keypoints_3d = triangulate_batch_of_points(proj_matrices, multiview_matches)\n\n keypoints_3d = torch.stack(keypoints_3d, 0)\n if torch.sum(torch.isinf(keypoints_3d)) > 0:\n keypoints_3d = torch.clamp(keypoints_3d, min=-1000.0, max=1000.0)\n\n pred['keypoints_3d'] = keypoints_3d\n\n return pred\n\n def loss(self, pred, data):\n raise NotImplementedError\n\n def metrics(self):\n raise NotImplementedError\n"
] | [
[
"numpy.array",
"numpy.loadtxt"
],
[
"numpy.fromfile",
"torch.load",
"torch.from_numpy",
"torch.no_grad",
"numpy.transpose",
"torch.device",
"numpy.loadtxt"
],
[
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.cat"
],
[
"torch.nn.functional.softmax",
"torch.svd",
"torch.sin",
"torch.cat",
"torch.zeros",
"torch.cuda.is_available",
"torch.clamp",
"torch.mm",
"torch.ones",
"torch.sqrt",
"torch.inverse",
"torch.tensor",
"torch.nn.functional.adaptive_max_pool2d",
"torch.nn.functional.relu",
"torch.arange",
"torch.ones_like",
"torch.cos",
"torch.linspace",
"torch.sigmoid",
"torch.isinf",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.is_tensor",
"torch.nn.BatchNorm2d",
"torch.stack",
"torch.atan2",
"torch.nn.functional.normalize",
"torch.isnan",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
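The batched SVD triangulation in `triangulate_point_from_multiple_views_linear_torch_batch` above is easier to follow in its single-point form. The sketch below is illustrative only and is not part of the repository; it assumes a recent PyTorch with `torch.linalg.svd`, and the names `triangulate_single_point` and `project` are hypothetical.

```python
# Minimal single-point DLT sketch of the batched triangulation above.
# Illustrative only: `triangulate_single_point` and `project` are hypothetical names.
import torch

def triangulate_single_point(proj, pts):
    # One point seen from N views: proj is (N, 3, 4), pts is (N, 2).
    # Each view contributes two rows of A: x * P[2] - P[0] and y * P[2] - P[1],
    # the same construction as `proj_mat * points_tview - proj_matricies[:, :2]`.
    A = pts.unsqueeze(-1) * proj[:, 2:3, :] - proj[:, :2, :]  # (N, 2, 4)
    A = A.reshape(-1, 4)                                      # (2N, 4)
    # The homogeneous solution is the right singular vector of the smallest
    # singular value; the sign is irrelevant after dehomogenization.
    _, _, Vh = torch.linalg.svd(A)
    X = Vh[-1]
    return X[:3] / X[3]

# Two synthetic cameras: identity intrinsics, the second shifted along x.
P0 = torch.eye(3, 4)
P1 = torch.eye(3, 4)
P1[0, 3] = -1.0

def project(P, X):
    x = P @ torch.cat([X, torch.ones(1)])
    return x[:2] / x[2]

X_true = torch.tensor([0.5, 0.2, 4.0])
pts = torch.stack([project(P0, X_true), project(P1, X_true)])
print(triangulate_single_point(torch.stack([P0, P1]), pts))  # ~ [0.5, 0.2, 4.0]
```

The batched routine does the same thing for every point at once, additionally weighting each view's two rows by its confidence before the SVD.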
lukasc-ch/QuantLab | [
"7ddcc51ec1131a58269768cd898ce04e8b49beb6"
] | [
"quantlab/COCO/YOLOv3Tiny/postprocess.py"
] | [
"# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n\nimport torch\n\nfrom ..utils.utils import xywh2xyxy, bbox_iou\n\n\ndef clip_boxes(boxes):\n boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=1)\n boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=1)\n\n\ndef postprocess_pr(pr_outs, conf_thres=0.001, overlap_thres=0.5):\n \"\"\"Restructure YOLOv3Tiny tensors into lists, then filter out non-maximal\n (redundant) annotations from the predictions.\"\"\"\n # pr_outs = [[bs, grid_positions, 85], [bs, 4*grid_positions, 85]]\n # when its two components are concatenated, we get a tensor [bs, 5*gridpositions, 85], which `bs` \"slices\"\n # have to be \"stripped\" to remove redundant components\n # strip each slice (corresponding to a single image in the batch) to get sequences of (possibly) different lengths:\n # the natural data structure to use to collect these sequences is a list\n pr_outs = [p.view(p.size(0), -1, p.size(-1)) for p in pr_outs]\n pr_outs = torch.cat(pr_outs, 1).detach().cpu()\n pr_labels = [None] * len(pr_outs)\n for img_id, pr in enumerate(pr_outs):\n # filter out irrelevant predictions\n pr_cls_prob, pr_cls_id = pr[:, 5:].max(1)\n pr[:, 4] *= pr_cls_prob\n i = (pr[:, 4] > conf_thres) & torch.isfinite(pr).all(1)\n pr = pr[i]\n if len(pr) == 0:\n continue\n pr_cls_prob = pr_cls_prob[i]\n pr_cls_id = pr_cls_id[i].unsqueeze(1).float()\n pr[:, :4] = xywh2xyxy(pr[:, :4])\n pr = torch.cat((pr[:, :5], pr_cls_prob.unsqueeze(1), pr_cls_id), 1)\n pr = pr[(-pr[:, 4]).argsort()]\n detections = []\n for c in pr[:, -1].unique():\n pr_anno_c = pr[pr[:, -1] == c]\n n = len(pr_anno_c)\n if n == 1:\n detections.append(pr_anno_c)\n continue\n elif n > 100:\n pr_anno_c = pr_anno_c[:100]\n while len(pr_anno_c) > 0:\n if len(pr_anno_c) == 1:\n detections.append(pr_anno_c)\n break\n redundant = bbox_iou(pr_anno_c[0], pr_anno_c) > overlap_thres\n weights = pr_anno_c[redundant, 4:5]\n pr_anno_c[0, :4] = (weights * pr_anno_c[redundant, 0:4]).sum(0) / weights.sum()\n detections.append(pr_anno_c[0:1]) # keep leading dimension 1 for 1D tensor\n pr_anno_c = pr_anno_c[~redundant]\n if len(detections) > 0:\n detections = torch.cat(detections)\n clip_boxes(detections[:, :4])\n pr_labels[img_id] = detections[(-detections[:, 4]).argsort()]\n return pr_labels\n\n\ndef postprocess_gt(gt_labels):\n gt_labels = gt_labels.detach().cpu()\n bs = gt_labels[0, 0].to(torch.int)\n gt_labels = [gt_labels[gt_labels[:, 1] == i, 2:] for i in range(bs)]\n return gt_labels\n"
] | [
[
"torch.isfinite",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
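The post-processing above leans on two helpers imported from `..utils.utils`, whose bodies are outside this record. Below is a hedged sketch of what they conventionally compute; the repository's actual implementations may differ in details such as broadcasting or epsilon handling.

```python
# Hedged sketch of the imported helpers `xywh2xyxy` and `bbox_iou`;
# the actual quantlab utils may differ.
import torch

def xywh2xyxy(x):
    # (cx, cy, w, h) -> (x1, y1, x2, y2): box centers to corners
    y = x.clone()
    y[..., 0] = x[..., 0] - x[..., 2] / 2
    y[..., 1] = x[..., 1] - x[..., 3] / 2
    y[..., 2] = x[..., 0] + x[..., 2] / 2
    y[..., 3] = x[..., 1] + x[..., 3] / 2
    return y

def bbox_iou(box, boxes, eps=1e-9):
    # IoU of one (x1, y1, x2, y2) box against rows of `boxes`
    # (extra columns such as confidence and class id are ignored).
    x1 = torch.max(box[0], boxes[:, 0])
    y1 = torch.max(box[1], boxes[:, 1])
    x2 = torch.min(box[2], boxes[:, 2])
    y2 = torch.min(box[3], boxes[:, 3])
    inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area_a + area_b - inter + eps)
```

With these in place, the `redundant = bbox_iou(...) > overlap_thres` line in `postprocess_pr` implements a merge-style NMS: same-class boxes that overlap the current best box are averaged, weighted by their confidences, rather than simply discarded.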
NicoSerranoP/PySyft | [
"87fcd566c46fce4c16d363c94396dd26bd82a016",
"87fcd566c46fce4c16d363c94396dd26bd82a016",
"87fcd566c46fce4c16d363c94396dd26bd82a016",
"87fcd566c46fce4c16d363c94396dd26bd82a016"
] | [
"syft/frameworks/torch/mpc/fss.py",
"test/torch/mpc/test_fss.py",
"syft/execution/translation/torchscript.py",
"syft/frameworks/torch/fl/dataloader.py"
] | [
"\"\"\"\nThis is an implementation of Function Secret Sharing\n\nUseful papers are:\n- Function Secret Sharing- Improvements and Extensions, Boyle 2017\n Link: https://eprint.iacr.org/2018/707.pdf\n- Secure Computation with Preprocessing via Function Secret Sharing, Boyle 2019\n Link: https://eprint.iacr.org/2019/1095\n\nNote that the protocols are quite different in aspect from those papers\n\"\"\"\nimport hashlib\n\nimport torch as th\nimport syft as sy\n\n\nλ = 110 # 6 # 110 or 63 # security parameter\nn = 32 # 8 # 32 # bit precision\ndtype = th.int32\n\nno_wrap = {\"no_wrap\": True}\n\n\ndef initialize_crypto_plans(worker):\n \"\"\"\n This is called manually for the moment, to build the plan used to perform\n Function Secret Sharing on a specific worker.\n \"\"\"\n eq_plan_1 = sy.Plan(\n forward_func=lambda x, y: mask_builder(x, y, \"eq\"),\n owner=worker,\n tags=[\"#fss_eq_plan_1\"],\n is_built=True,\n )\n worker.register_obj(eq_plan_1)\n eq_plan_2 = sy.Plan(\n forward_func=eq_eval_plan, owner=worker, tags=[\"#fss_eq_plan_2\"], is_built=True\n )\n worker.register_obj(eq_plan_2)\n\n comp_plan_1 = sy.Plan(\n forward_func=lambda x, y: mask_builder(x, y, \"comp\"),\n owner=worker,\n tags=[\"#fss_comp_plan_1\"],\n is_built=True,\n )\n worker.register_obj(comp_plan_1)\n comp_plan_2 = sy.Plan(\n forward_func=comp_eval_plan, owner=worker, tags=[\"#fss_comp_plan_2\"], is_built=True\n )\n worker.register_obj(comp_plan_2)\n\n xor_add_plan = sy.Plan(\n forward_func=xor_add_convert_1, owner=worker, tags=[\"#xor_add_1\"], is_built=True\n )\n worker.register_obj(xor_add_plan)\n xor_add_plan = sy.Plan(\n forward_func=xor_add_convert_2, owner=worker, tags=[\"#xor_add_2\"], is_built=True\n )\n worker.register_obj(xor_add_plan)\n\n\ndef request_run_plan(worker, plan_tag, location, return_value, args=(), kwargs={}):\n response_ids = (sy.ID_PROVIDER.pop(),)\n args = (args, response_ids)\n\n response = worker.send_command(\n cmd_name=\"run\",\n target=plan_tag,\n recipient=location,\n return_ids=response_ids,\n return_value=return_value,\n kwargs_=kwargs,\n args_=args,\n )\n return response\n\n\ndef fss_op(x1, x2, type_op=\"eq\"):\n \"\"\"\n Define the workflow for a binary operation using Function Secret Sharing\n\n Currently supported operand are = & <=, respectively corresponding to\n type_op = 'eq' and 'comp'\n\n Args:\n x1: first AST\n x2: second AST\n type_op: type of operation to perform, should be 'eq' or 'comp'\n\n Returns:\n shares of the comparison\n \"\"\"\n\n me = sy.local_worker\n locations = x1.locations\n\n shares = []\n for location in locations:\n args = (x1.child[location.id], x2.child[location.id])\n share = request_run_plan(\n me, f\"#fss_{type_op}_plan_1\", location, return_value=True, args=args\n )\n shares.append(share)\n\n mask_value = sum(shares) % 2 ** n\n\n shares = []\n for i, location in enumerate(locations):\n args = (th.IntTensor([i]), mask_value)\n share = request_run_plan(\n me, f\"#fss_{type_op}_plan_2\", location, return_value=False, args=args\n )\n shares.append(share)\n\n if type_op == \"comp\":\n prev_shares = shares\n shares = []\n for prev_share, location in zip(prev_shares, locations):\n share = request_run_plan(\n me, \"#xor_add_1\", location, return_value=True, args=(prev_share,)\n )\n shares.append(share)\n\n masked_value = shares[0] ^ shares[1] # TODO case >2 workers ?\n\n shares = {}\n for i, prev_share, location in zip(range(len(locations)), prev_shares, locations):\n share = request_run_plan(\n me,\n \"#xor_add_2\",\n location,\n return_value=False,\n 
args=(th.IntTensor([i]), masked_value),\n )\n shares[location.id] = share\n else:\n shares = {loc.id: share for loc, share in zip(locations, shares)}\n\n response = sy.AdditiveSharingTensor(shares, **x1.get_class_attributes())\n return response\n\n\n# share level\ndef mask_builder(x1, x2, type_op):\n x = x1 - x2\n # Keep the primitive in store as we use it after\n alpha, s_0, *CW = x1.owner.crypto_store.get_keys(\n f\"fss_{type_op}\", n_instances=x1.numel(), remove=False\n )\n return x + alpha.reshape(x.shape)\n\n\n# share level\ndef eq_eval_plan(b, x_masked):\n alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys(\n type_op=\"fss_eq\", n_instances=x_masked.numel(), remove=True\n )\n result_share = DPF.eval(b, x_masked, s_0, *CW)\n return result_share\n\n\n# share level\ndef comp_eval_plan(b, x_masked):\n alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys(\n type_op=\"fss_comp\", n_instances=x_masked.numel(), remove=True\n )\n result_share = DIF.eval(b, x_masked, s_0, *CW)\n return result_share\n\n\ndef xor_add_convert_1(x):\n xor_share, add_share = x.owner.crypto_store.get_keys(\n type_op=\"xor_add_couple\", n_instances=x.numel(), remove=False\n )\n return x ^ xor_share.reshape(x.shape)\n\n\ndef xor_add_convert_2(b, x):\n xor_share, add_share = x.owner.crypto_store.get_keys(\n type_op=\"xor_add_couple\", n_instances=x.numel(), remove=True\n )\n return add_share.reshape(x.shape) * (1 - 2 * x) + x * b\n\n\ndef eq(x1, x2):\n return fss_op(x1, x2, \"eq\")\n\n\ndef le(x1, x2):\n return fss_op(x1, x2, \"comp\")\n\n\nclass DPF:\n \"\"\"Distributed Point Function - used for equality\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def keygen(n_values=1):\n beta = th.tensor([1], dtype=dtype)\n alpha = th.randint(0, 2 ** n, (n_values,))\n\n α = bit_decomposition(alpha)\n s, t, CW = (\n Array(n + 1, 2, λ, n_values),\n Array(n + 1, 2, n_values),\n Array(n, 2 * (λ + 1), n_values),\n )\n s[0] = randbit(size=(2, λ, n_values))\n t[0] = th.tensor([[0, 1]] * n_values, dtype=th.uint8).t()\n for i in range(0, n):\n g0 = G(s[i, 0])\n g1 = G(s[i, 1])\n # Re-use useless randomness\n sL_0, _, sR_0, _ = split(g0, [λ, 1, λ, 1])\n sL_1, _, sR_1, _ = split(g1, [λ, 1, λ, 1])\n s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i])\n\n cw_i = TruthTableDPF(s_rand, α[i])\n CW[i] = cw_i ^ g0 ^ g1\n\n for b in (0, 1):\n τ = [g0, g1][b] ^ (t[i, b] * CW[i])\n τ = τ.reshape(2, λ + 1, n_values)\n # filtered_τ = τ[𝛼[i]] OLD\n α_i = α[i].unsqueeze(0).expand(λ + 1, n_values).unsqueeze(0).long()\n filtered_τ = th.gather(τ, 0, α_i).squeeze(0)\n s[i + 1, b], t[i + 1, b] = split(filtered_τ, [λ, 1])\n\n CW_n = (-1) ** t[n, 1].to(dtype) * (beta - Convert(s[n, 0]) + Convert(s[n, 1]))\n\n return (alpha,) + s[0].unbind() + (CW, CW_n)\n\n @staticmethod\n def eval(b, x, *k_b):\n original_shape = x.shape\n x = x.reshape(-1)\n n_values = x.shape[0]\n x = bit_decomposition(x)\n s, t = Array(n + 1, λ, n_values), Array(n + 1, 1, n_values)\n s[0] = k_b[0]\n # here k[1:] is (CW, CW_n)\n CW = k_b[1].unbind() + (k_b[2],)\n t[0] = b\n for i in range(0, n):\n τ = G(s[i]) ^ (t[i] * CW[i])\n τ = τ.reshape(2, λ + 1, n_values)\n x_i = x[i].unsqueeze(0).expand(λ + 1, n_values).unsqueeze(0).long()\n filtered_τ = th.gather(τ, 0, x_i).squeeze(0)\n s[i + 1], t[i + 1] = split(filtered_τ, [λ, 1])\n flat_result = (-1) ** b * (Convert(s[n]) + t[n].squeeze() * CW[n])\n return flat_result.reshape(original_shape)\n\n\nclass DIF:\n \"\"\"Distributed Interval Function - used for comparison <=\"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n 
def keygen(n_values=1):\n alpha = th.randint(0, 2 ** n, (n_values,))\n α = bit_decomposition(alpha)\n s, t, CW = (\n Array(n + 1, 2, λ, n_values),\n Array(n + 1, 2, n_values),\n Array(n, 2 + 2 * (λ + 1), n_values),\n )\n s[0] = randbit(size=(2, λ, n_values))\n t[0] = th.tensor([[0, 1]] * n_values, dtype=th.uint8).t()\n for i in range(0, n):\n h0 = H(s[i, 0])\n h1 = H(s[i, 1])\n # Re-use useless randomness\n _, _, sL_0, _, sR_0, _ = split(h0, [1, 1, λ, 1, λ, 1])\n _, _, sL_1, _, sR_1, _ = split(h1, [1, 1, λ, 1, λ, 1])\n s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i])\n cw_i = TruthTableDIF(s_rand, α[i])\n CW[i] = cw_i ^ h0 ^ h1\n\n for b in (0, 1):\n τ = [h0, h1][b] ^ (t[i, b] * CW[i])\n τ = τ.reshape(2, λ + 2, n_values)\n # filtered_τ = τ[𝛼[i]] OLD\n α_i = α[i].unsqueeze(0).expand(λ + 2, n_values).unsqueeze(0).long()\n filtered_τ = th.gather(τ, 0, α_i).squeeze(0)\n σ_leaf, s[i + 1, b], t[i + 1, b] = split(filtered_τ, [1, λ, 1])\n\n return (alpha,) + s[0].unbind() + (CW,)\n\n @staticmethod\n def eval(b, x, *k_b):\n original_shape = x.shape\n x = x.reshape(-1)\n n_values = x.shape[0]\n x = bit_decomposition(x)\n FnOutput = Array(n + 1, n_values)\n s, t = Array(n + 1, λ, n_values), Array(n + 1, 1, n_values)\n s[0] = k_b[0]\n CW = k_b[1].unbind()\n t[0] = b\n for i in range(0, n):\n τ = H(s[i]) ^ (t[i] * CW[i])\n τ = τ.reshape(2, λ + 2, n_values)\n x_i = x[i].unsqueeze(0).expand(λ + 2, n_values).unsqueeze(0).long()\n filtered_τ = th.gather(τ, 0, x_i).squeeze(0)\n σ_leaf, s[i + 1], t[i + 1] = split(filtered_τ, [1, λ, 1])\n FnOutput[i] = σ_leaf\n\n # Last tour, the other σ is also a leaf:\n FnOutput[n] = t[n]\n flat_result = FnOutput.sum(axis=0) % 2\n return flat_result.reshape(original_shape)\n\n\n# PRG\ndef G(seed):\n assert seed.shape[0] == λ\n seed_t = seed.t().tolist()\n gen_list = []\n for seed_bit in seed_t:\n enc_str = str(seed_bit).encode()\n h = hashlib.sha3_256(enc_str)\n r = h.digest()\n binary_str = bin(int.from_bytes(r, byteorder=\"big\"))[2 : 2 + (2 * (λ + 1))]\n gen_list.append(list(map(int, binary_str)))\n\n return th.tensor(gen_list, dtype=th.uint8).t()\n\n\ndef H(seed):\n assert seed.shape[0] == λ\n seed_t = seed.t().tolist()\n gen_list = []\n for seed_bit in seed_t:\n enc_str = str(seed_bit).encode()\n h = hashlib.sha3_256(enc_str)\n r = h.digest()\n binary_str = bin(int.from_bytes(r, byteorder=\"big\"))[2 : 2 + 2 + (2 * (λ + 1))]\n gen_list.append(list(map(int, binary_str)))\n\n return th.tensor(gen_list, dtype=th.uint8).t()\n\n\ndef Convert(bits):\n bit_pow_lambda = th.flip(2 ** th.arange(λ), (0,)).unsqueeze(-1).to(th.long)\n return (bits.to(th.long) * bit_pow_lambda).sum(dim=0).to(dtype)\n\n\ndef Array(*shape):\n return th.empty(shape, dtype=th.uint8)\n\n\nbit_pow_n = th.flip(2 ** th.arange(n), (0,))\n\n\ndef bit_decomposition(x):\n x = x.unsqueeze(-1)\n z = bit_pow_n & x\n z = z.t()\n return (z > 0).to(th.uint8)\n\n\ndef randbit(size):\n return th.randint(2, size=size)\n\n\ndef concat(*args, **kwargs):\n return th.cat(args, **kwargs)\n\n\ndef split(x, idx):\n return th.split(x, idx)\n\n\ndef TruthTableDPF(s, α_i):\n one = th.ones((1, s.shape[1])).to(th.uint8)\n s_one = concat(s, one)\n Table = th.zeros((2, λ + 1, len(α_i)), dtype=th.uint8)\n for j, el in enumerate(α_i):\n Table[el.item(), :, j] = s_one[:, j]\n return Table.reshape(-1, Table.shape[2])\n\n\ndef TruthTableDIF(s, α_i):\n leafTable = th.zeros((2, 1, len(α_i)), dtype=th.uint8)\n # TODO optimize: just put alpha on first line\n leaf_value = α_i\n for j, el in enumerate(α_i):\n leafTable[(1 - 
el).item(), 0, j] = leaf_value[j]\n\n one = th.ones((1, s.shape[1])).to(th.uint8)\n s_one = concat(s, one)\n nextTable = th.zeros((2, λ + 1, len(α_i)), dtype=th.uint8)\n for j, el in enumerate(α_i):\n nextTable[el.item(), :, j] = s_one[:, j]\n\n Table = concat(leafTable, nextTable, axis=1)\n Table = Table.reshape(-1, Table.shape[2])\n return Table\n",
"import pytest\nimport torch as th\n\nfrom syft.frameworks.torch.mpc.fss import DPF, DIF, n\n\n\[email protected](\"op\", [\"eq\", \"le\"])\ndef test_fss_class(op):\n class_ = {\"eq\": DPF, \"le\": DIF}[op]\n th_op = {\"eq\": th.eq, \"le\": th.le}[op]\n gather_op = {\"eq\": \"__add__\", \"le\": \"__xor__\"}[op]\n\n # single value\n primitive = class_.keygen(n_values=1)\n alpha, s_00, s_01, *CW = primitive\n mask = th.randint(0, 2 ** n, alpha.shape)\n k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]\n\n x = th.tensor([0])\n x_masked = x + k0[0] + k1[0]\n y0 = class_.eval(0, x_masked, *k0[1:])\n y1 = class_.eval(1, x_masked, *k1[1:])\n\n assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()\n\n # 1D tensor\n primitive = class_.keygen(n_values=3)\n alpha, s_00, s_01, *CW = primitive\n mask = th.randint(0, 2 ** n, alpha.shape)\n k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]\n\n x = th.tensor([0, 2, -2])\n x_masked = x + k0[0] + k1[0]\n y0 = class_.eval(0, x_masked, *k0[1:])\n y1 = class_.eval(1, x_masked, *k1[1:])\n\n assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()\n\n # 2D tensor\n primitive = class_.keygen(n_values=4)\n alpha, s_00, s_01, *CW = primitive\n mask = th.randint(0, 2 ** n, alpha.shape)\n k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]\n\n x = th.tensor([[0, 2], [-2, 0]])\n x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)\n y0 = class_.eval(0, x_masked, *k0[1:])\n y1 = class_.eval(1, x_masked, *k1[1:])\n\n assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()\n\n # 3D tensor\n primitive = class_.keygen(n_values=8)\n alpha, s_00, s_01, *CW = primitive\n mask = th.randint(0, 2 ** n, alpha.shape)\n k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]\n\n x = th.tensor([[[0, 2], [-2, 0]], [[0, 2], [-2, 0]]])\n x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)\n y0 = class_.eval(0, x_masked, *k0[1:])\n y1 = class_.eval(1, x_masked, *k1[1:])\n\n assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()\n",
"from torch import jit\nfrom syft.execution.placeholder import PlaceHolder\nfrom syft.execution.translation.abstract import AbstractPlanTranslator\n\n\nclass PlanTranslatorTorchscript(AbstractPlanTranslator):\n \"\"\"Performs translation from 'list of ops' Plan into torchscript Plan\"\"\"\n\n def __init__(self, plan):\n super().__init__(plan)\n\n def translate(self):\n translation_plan = self.plan.copy()\n translation_plan.forward = None\n\n args = translation_plan.create_dummy_args()\n\n # jit.trace clones input args and can change their type, so we have to skip types check\n # TODO see if type check can be made less strict,\n # e.g. tensor/custom tensor/nn.Parameter could be considered same type\n translation_plan.validate_input_types = False\n\n # To avoid storing Plan state tensors in torchscript, they will be sent as parameters\n # we trace wrapper func, which accepts state parameters as last arg\n # and sets them into the Plan before executing the Plan\n def wrap_stateful_plan(*args):\n role = translation_plan.role\n state = args[-1]\n if 0 < len(role.state.state_placeholders) == len(state) and isinstance(\n state, (list, tuple)\n ):\n state_placeholders = tuple(\n role.placeholders[ph.id.value] for ph in role.state.state_placeholders\n )\n PlaceHolder.instantiate_placeholders(role.state.state_placeholders, state)\n PlaceHolder.instantiate_placeholders(state_placeholders, state)\n\n return translation_plan(*args[:-1])\n\n plan_params = translation_plan.parameters()\n if len(plan_params) > 0:\n torchscript_plan = jit.trace(wrap_stateful_plan, (*args, plan_params))\n else:\n torchscript_plan = jit.trace(translation_plan, args)\n\n self.plan.torchscript = torchscript_plan\n return self.plan\n\n def remove(self):\n self.plan.torchscript = None\n\n return self.plan\n",
"import torch\nfrom torch.utils.data import SequentialSampler, RandomSampler, BatchSampler\nfrom torch._six import string_classes, int_classes, container_abcs\n\nimport logging\nimport math\n\nnumpy_type_map = {\n \"float64\": torch.DoubleTensor,\n \"float32\": torch.FloatTensor,\n \"float16\": torch.HalfTensor,\n \"int64\": torch.LongTensor,\n \"int32\": torch.IntTensor,\n \"int16\": torch.ShortTensor,\n \"int8\": torch.CharTensor,\n \"uint8\": torch.ByteTensor,\n}\n\n\ndef default_collate(batch):\n \"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n return torch.stack(batch, 0)\n elif (\n elem_type.__module__ == \"numpy\"\n and elem_type.__name__ != \"str_\"\n and elem_type.__name__ != \"string_\"\n ): # pragma: no cover\n elem = batch[0]\n if elem_type.__name__ == \"ndarray\":\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith(\"float\") else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes): # pragma: no cover\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float): # pragma: no cover\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes): # pragma: no cover\n return batch\n elif isinstance(batch[0], container_abcs.Mapping): # pragma: no cover\n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], container_abcs.Sequence): # pragma: no cover\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg.format(type(batch[0]))))\n\n\nclass _DataLoaderIter(object):\n \"\"\"Iterates once over the DataLoader's dataset, as specified by the samplers\"\"\"\n\n def __init__(self, loader, worker_idx):\n self.loader = loader\n self.federated_dataset = loader.federated_dataset\n\n # Assign the first worker to invoke\n self.worker_idx = worker_idx\n # List workers in a dict\n self.workers = {idx: worker for idx, worker in enumerate(loader.workers)}\n\n # The function used to stack all samples together\n self.collate_fn = loader.collate_fn\n\n # Create a sample iterator for each worker\n self.sample_iter = {\n worker: iter(batch_sampler) for worker, batch_sampler in loader.batch_samplers.items()\n }\n\n def __len__(self):\n return len(self.federated_dataset)\n\n def _get_batch(self):\n # If all workers have been used, end the iterator\n if len(self.workers) == 0:\n self.stop()\n\n worker = self.workers[self.worker_idx]\n\n try:\n indices = next(self.sample_iter[worker])\n batch = self.collate_fn([self.federated_dataset[worker][i] for i in indices])\n return batch\n # All the data for this worker has been used\n except StopIteration:\n # Forget this worker\n del self.workers[self.worker_idx]\n # Find another worker which is not busy\n worker_busy_ids = [it.worker_idx for it in self.loader.iterators]\n for idx in self.workers.keys():\n if idx not in worker_busy_ids:\n self.worker_idx = idx\n return self._get_batch()\n\n # If nothing is found, stop the iterator\n self.stop()\n\n def __next__(self):\n batch = self._get_batch()\n return batch\n\n def __iter__(self):\n return self\n\n def stop(self):\n self.worker_idx = -1\n raise StopIteration\n\n\nclass _DataLoaderOneWorkerIter(object):\n \"\"\"Iterates once over the worker's 
dataset, as specified by its sampler\"\"\"\n\n def __init__(self, loader, worker_idx):\n self.loader = loader\n self.federated_dataset = loader.federated_dataset\n\n # Assign the worker to invoke\n self.worker = loader.workers[worker_idx]\n\n # The function used to stack all samples together\n self.collate_fn = loader.collate_fn\n\n # Create a sample iterator for each worker\n self.sample_iter = iter(loader.batch_samplers[self.worker])\n\n def _get_batch(self):\n # If all workers have been used, end the iterator\n if not self.worker:\n self.stop()\n\n try:\n indices = next(self.sample_iter)\n batch = self.collate_fn([self.federated_dataset[self.worker][i] for i in indices])\n return batch\n # All the data for this worker has been used\n except StopIteration:\n # If nothing is found, stop the iterator\n self.stop()\n\n # TODO: implement a length function. It should return the number of elements\n # of the federated dataset that are located at this worker\n # def __len__(self):\n # return len(self.federated_dataset)\n\n def __next__(self):\n return self._get_batch()\n\n def __iter__(self):\n return self\n\n def stop(self):\n self.worker = None\n raise StopIteration\n\n\nclass FederatedDataLoader(object):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single or several iterators over the dataset.\n\n Arguments:\n federated_dataset (FederatedDataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: ``1``).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: ``False``).\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. 
(default: ``False``)\n num_iterators (int): number of workers from which to retrieve data in parallel.\n num_iterators <= len(federated_dataset.workers) - 1\n the effect is to retrieve num_iterators epochs of data but at each step data\n from num_iterators distinct workers is returned.\n iter_per_worker (bool): if set to true, __next__() will return a dictionary\n containing one batch per worker\n \"\"\"\n\n __initialized = False\n\n def __init__(\n self,\n federated_dataset,\n batch_size=8,\n shuffle=False,\n num_iterators=1,\n drop_last=False,\n collate_fn=default_collate,\n iter_per_worker=False,\n **kwargs,\n ):\n if len(kwargs) > 0:\n options = \", \".join([f\"{k}: {v}\" for k, v in kwargs.items()])\n logging.warning(f\"The following options are not supported: {options}\")\n\n try:\n self.workers = federated_dataset.workers\n except AttributeError:\n raise Exception(\n \"Your dataset is not a FederatedDataset, please use \"\n \"torch.utils.data.DataLoader instead.\"\n )\n\n self.federated_dataset = federated_dataset\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.collate_fn = collate_fn\n self.iter_class = _DataLoaderOneWorkerIter if iter_per_worker else _DataLoaderIter\n\n # Build a batch sampler per worker\n self.batch_samplers = {}\n for worker in self.workers:\n data_range = range(len(federated_dataset[worker]))\n if shuffle:\n sampler = RandomSampler(data_range)\n else:\n sampler = SequentialSampler(data_range)\n batch_sampler = BatchSampler(sampler, batch_size, drop_last)\n self.batch_samplers[worker] = batch_sampler\n\n if iter_per_worker:\n self.num_iterators = len(self.workers)\n else:\n # You can't have more iterators than n - 1 workers, because you always\n # need a worker idle in the worker switch process made by iterators\n if len(self.workers) == 1:\n self.num_iterators = 1\n else:\n self.num_iterators = min(num_iterators, len(self.workers) - 1)\n\n def __iter__(self):\n self.iterators = []\n for idx in range(self.num_iterators):\n self.iterators.append(self.iter_class(self, worker_idx=idx))\n return self\n\n def __next__(self):\n if self.num_iterators > 1:\n batches = {}\n for iterator in self.iterators:\n data, target = next(iterator)\n batches[data.location] = (data, target)\n return batches\n else:\n iterator = self.iterators[0]\n data, target = next(iterator)\n return data, target\n\n def __len__(self):\n length = len(self.federated_dataset) / self.batch_size\n if self.drop_last:\n return int(length)\n else:\n return math.ceil(length)\n"
] | [
[
"torch.randint",
"torch.empty",
"torch.ones",
"torch.cat",
"torch.IntTensor",
"torch.arange",
"torch.tensor",
"torch.split",
"torch.gather"
],
[
"torch.randint",
"torch.tensor"
],
[
"torch.jit.trace"
],
[
"torch.LongTensor",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.from_numpy",
"torch.stack",
"torch.DoubleTensor",
"torch.utils.data.BatchSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
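The test file above already exercises `DPF` and `DIF` directly; condensed below is the two-party equality flow those tests encode. This assumes `syft.frameworks.torch.mpc.fss` is importable as in the record above.

```python
# Condensed two-party equality check via DPF, following test_fss_class above.
import torch as th
from syft.frameworks.torch.mpc.fss import DPF, n  # assumes syft is installed

alpha, s_00, s_01, *CW = DPF.keygen(n_values=3)
mask = th.randint(0, 2 ** n, alpha.shape)      # additively share alpha
k0 = ((alpha - mask) % 2 ** n, s_00, *CW)      # party 0's key
k1 = (mask, s_01, *CW)                         # party 1's key

x = th.tensor([0, 2, -2])                      # private input, compared against 0
x_masked = x + k0[0] + k1[0]                   # both parties see only x + alpha
y0 = DPF.eval(0, x_masked, *k0[1:])
y1 = DPF.eval(1, x_masked, *k1[1:])
print(y0 + y1)                                 # 1 exactly where x == 0, else 0
```

In the full protocol (`fss_op`), the masking and evaluation steps run as plans on the two workers, so neither party ever holds `x` or `alpha` in the clear.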
neurodebian/scikits.image-1 | [
"33206f87c5e0208e7ff0d5910ac082b3353fe04e",
"33206f87c5e0208e7ff0d5910ac082b3353fe04e",
"33206f87c5e0208e7ff0d5910ac082b3353fe04e"
] | [
"skimage/exposure/exposure.py",
"skimage/filter/_gabor.py",
"doc/examples/plot_regional_maxima.py"
] | [
"import warnings\nimport numpy as np\n\nfrom skimage import img_as_float\nfrom skimage.util.dtype import dtype_range, dtype_limits\nfrom skimage._shared.utils import deprecated\n\n\n__all__ = ['histogram', 'cumulative_distribution', 'equalize',\n 'rescale_intensity', 'adjust_gamma',\n 'adjust_log', 'adjust_sigmoid']\n\n\ndef histogram(image, nbins=256):\n \"\"\"Return histogram of image.\n\n Unlike `numpy.histogram`, this function returns the centers of bins and\n does not rebin integer arrays. For integer arrays, each integer value has\n its own bin, which improves speed and intensity-resolution.\n\n The histogram is computed on the flattened image: for color images, the\n function should be used separately on each channel to obtain a histogram\n for each color channel.\n\n Parameters\n ----------\n image : array\n Input image.\n nbins : int\n Number of bins used to calculate histogram. This value is ignored for\n integer arrays.\n\n Returns\n -------\n hist : array\n The values of the histogram.\n bin_centers : array\n The values at the center of the bins.\n\n Examples\n --------\n >>> from skimage import data, exposure, util\n >>> image = util.img_as_float(data.camera())\n >>> np.histogram(image, bins=2)\n (array([107432, 154712]), array([ 0. , 0.5, 1. ]))\n >>> exposure.histogram(image, nbins=2)\n (array([107432, 154712]), array([ 0.25, 0.75]))\n \"\"\"\n sh = image.shape\n if len(sh) == 3 and sh[-1] < 4:\n warnings.warn(\"This might be a color image. The histogram will be \"\n \"computed on the flattened image. You can instead \"\n \"apply this function to each color channel.\")\n\n # For integer types, histogramming with bincount is more efficient.\n if np.issubdtype(image.dtype, np.integer):\n offset = 0\n if np.min(image) < 0:\n offset = np.min(image)\n hist = np.bincount(image.ravel() - offset)\n bin_centers = np.arange(len(hist)) + offset\n\n # clip histogram to start with a non-zero bin\n idx = np.nonzero(hist)[0][0]\n return hist[idx:], bin_centers[idx:]\n else:\n hist, bin_edges = np.histogram(image.flat, nbins)\n bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.\n return hist, bin_centers\n\n\ndef cumulative_distribution(image, nbins=256):\n \"\"\"Return cumulative distribution function (cdf) for the given image.\n\n Parameters\n ----------\n image : array\n Image array.\n nbins : int\n Number of bins for image histogram.\n\n Returns\n -------\n img_cdf : array\n Values of cumulative distribution function.\n bin_centers : array\n Centers of bins.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Cumulative_distribution_function\n\n \"\"\"\n hist, bin_centers = histogram(image, nbins)\n img_cdf = hist.cumsum()\n img_cdf = img_cdf / float(img_cdf[-1])\n return img_cdf, bin_centers\n\n\n@deprecated('equalize_hist')\ndef equalize(image, nbins=256):\n return equalize_hist(image, nbins)\n\n\ndef equalize_hist(image, nbins=256):\n \"\"\"Return image after histogram equalization.\n\n Parameters\n ----------\n image : array\n Image array.\n nbins : int\n Number of bins for image histogram.\n\n Returns\n -------\n out : float array\n Image array after histogram equalization.\n\n Notes\n -----\n This function is adapted from [1]_ with the author's permission.\n\n References\n ----------\n .. [1] http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html\n .. 
[2] http://en.wikipedia.org/wiki/Histogram_equalization\n\n \"\"\"\n image = img_as_float(image)\n cdf, bin_centers = cumulative_distribution(image, nbins)\n out = np.interp(image.flat, bin_centers, cdf)\n return out.reshape(image.shape)\n\n\ndef rescale_intensity(image, in_range=None, out_range=None):\n \"\"\"Return image after stretching or shrinking its intensity levels.\n\n The image intensities are uniformly rescaled such that the minimum and\n maximum values given by `in_range` match those given by `out_range`.\n\n Parameters\n ----------\n image : array\n Image array.\n in_range : 2-tuple (float, float)\n Min and max *allowed* intensity values of input image. If None, the\n *allowed* min/max values are set to the *actual* min/max values in the\n input image.\n out_range : 2-tuple (float, float)\n Min and max intensity values of output image. If None, use the min/max\n intensities of the image data type. See `skimage.util.dtype` for\n details.\n\n Returns\n -------\n out : array\n Image array after rescaling its intensity. This image is the same dtype\n as the input image.\n\n Examples\n --------\n By default, intensities are stretched to the limits allowed by the dtype:\n\n >>> image = np.array([51, 102, 153], dtype=np.uint8)\n >>> rescale_intensity(image)\n array([ 0, 127, 255], dtype=uint8)\n\n It's easy to accidentally convert an image dtype from uint8 to float:\n\n >>> 1.0 * image\n array([ 51., 102., 153.])\n\n Use `rescale_intensity` to rescale to the proper range for float dtypes:\n\n >>> image_float = 1.0 * image\n >>> rescale_intensity(image_float)\n array([ 0. , 0.5, 1. ])\n\n To maintain the low contrast of the original, use the `in_range` parameter:\n\n >>> rescale_intensity(image_float, in_range=(0, 255))\n array([ 0.2, 0.4, 0.6])\n\n If the min/max value of `in_range` is more/less than the min/max image\n intensity, then the intensity levels are clipped:\n\n >>> rescale_intensity(image_float, in_range=(0, 102))\n array([ 0.5, 1. , 1. ])\n\n If you have an image with signed integers but want to rescale the image to\n just the positive range, use the `out_range` parameter:\n\n >>> image = np.array([-10, 0, 10], dtype=np.int8)\n >>> rescale_intensity(image, out_range=(0, 127))\n array([ 0, 63, 127], dtype=int8)\n\n \"\"\"\n dtype = image.dtype.type\n\n if in_range is None:\n imin = np.min(image)\n imax = np.max(image)\n else:\n imin, imax = in_range\n\n if out_range is None:\n omin, omax = dtype_range[dtype]\n if imin >= 0:\n omin = 0\n else:\n omin, omax = out_range\n\n image = np.clip(image, imin, imax)\n\n image = (image - imin) / float(imax - imin)\n return dtype(image * (omax - omin) + omin)\n\n\ndef _assert_non_negative(image):\n\n if np.any(image < 0):\n raise ValueError('Image Correction methods work correctly only on '\n 'images with non-negative values. Use '\n 'skimage.exposure.rescale_intensity.')\n\n\ndef adjust_gamma(image, gamma=1, gain=1):\n \"\"\"Performs Gamma Correction on the input image.\n\n Also known as Power Law Transform.\n This function transforms the input image pixelwise according to the\n equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n gamma : float\n Non negative real number. Default value is 1.\n gain : float\n The constant multiplier. 
Default value is 1.\n\n Returns\n -------\n out : ndarray\n Gamma corrected output image.\n\n Notes\n -----\n For gamma greater than 1, the histogram will shift towards the left and\n the output image will be darker than the input image.\n\n For gamma less than 1, the histogram will shift towards the right and\n the output image will be brighter than the input image.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gamma_correction\n\n \"\"\"\n _assert_non_negative(image)\n dtype = image.dtype.type\n\n if gamma < 0:\n raise ValueError(\"Gamma should be a non-negative real number\")\n\n scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])\n\n out = ((image / scale) ** gamma) * scale * gain\n return dtype(out)\n\n\ndef adjust_log(image, gain=1, inv=False):\n \"\"\"Performs Logarithmic correction on the input image.\n\n This function transforms the input image pixelwise according to the\n equation ``O = gain*log(1 + I)`` after scaling each pixel to the range 0 to 1.\n For inverse logarithmic correction, the equation is ``O = gain*(2**I - 1)``.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n gain : float\n The constant multiplier. Default value is 1.\n inv : bool\n If True, it performs inverse logarithmic correction,\n else correction will be logarithmic. Defaults to False.\n\n Returns\n -------\n out : ndarray\n Logarithm corrected output image.\n\n References\n ----------\n .. [1] http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf\n\n \"\"\"\n _assert_non_negative(image)\n dtype = image.dtype.type\n scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])\n\n if inv:\n out = (2 ** (image / scale) - 1) * scale * gain\n return dtype(out)\n\n out = np.log2(1 + image / scale) * scale * gain\n return dtype(out)\n\n\ndef adjust_sigmoid(image, cutoff=0.5, gain=10, inv=False):\n \"\"\"Performs Sigmoid Correction on the input image.\n\n Also known as Contrast Adjustment.\n This function transforms the input image pixelwise according to the\n equation ``O = 1/(1 + exp(gain*(cutoff - I)))`` after scaling each pixel\n to the range 0 to 1.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n cutoff : float\n Cutoff of the sigmoid function that shifts the characteristic curve\n in horizontal direction. Default value is 0.5.\n gain : float\n The constant multiplier in exponential's power of sigmoid function.\n Default value is 10.\n inv : bool\n If True, returns the negative sigmoid correction. Defaults to False.\n\n Returns\n -------\n out : ndarray\n Sigmoid corrected output image.\n\n References\n ----------\n .. [1] Gustav J. Braun, \"Image Lightness Rescaling Using Sigmoidal Contrast\n Enhancement Functions\",\n http://www.cis.rit.edu/fairchild/PDFs/PAP07.pdf\n\n \"\"\"\n _assert_non_negative(image)\n dtype = image.dtype.type\n scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])\n\n if inv:\n out = (1 - 1 / (1 + np.exp(gain * (cutoff - image/scale)))) * scale\n return dtype(out)\n\n out = (1 / (1 + np.exp(gain * (cutoff - image/scale)))) * scale\n return dtype(out)\n",
"import numpy as np\nfrom scipy import ndimage\n\n\n__all__ = ['gabor_kernel', 'gabor_filter']\n\n\ndef _sigma_prefactor(bandwidth):\n b = bandwidth\n # See http://www.cs.rug.nl/~imaging/simplecell.html\n return 1.0 / np.pi * np.sqrt(np.log(2)/2.0) * (2.0**b + 1) / (2.0**b - 1)\n\n\ndef gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None,\n offset=0):\n \"\"\"Return complex 2D Gabor filter kernel.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n g : complex array\n Complex filter kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n \"\"\"\n if sigma_x is None:\n sigma_x = _sigma_prefactor(bandwidth) / frequency\n if sigma_y is None:\n sigma_y = _sigma_prefactor(bandwidth) / frequency\n\n n_stds = 3\n x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),\n np.abs(n_stds * sigma_y * np.sin(theta)), 1))\n y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),\n np.abs(n_stds * sigma_x * np.sin(theta)), 1))\n y, x = np.mgrid[-y0:y0+1, -x0:x0+1]\n\n rotx = x * np.cos(theta) + y * np.sin(theta)\n roty = -x * np.sin(theta) + y * np.cos(theta)\n\n g = np.zeros(y.shape, dtype=np.complex)\n g[:] = np.exp(-0.5 * (rotx**2 / sigma_x**2 + roty**2 / sigma_y**2))\n g /= 2 * np.pi * sigma_x * sigma_y\n g *= np.exp(1j * (2 * np.pi * frequency * rotx + offset))\n\n return g\n\n\ndef gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None,\n sigma_y=None, offset=0, mode='reflect', cval=0):\n \"\"\"Return real and imaginary responses to Gabor filter.\n\n The real and imaginary parts of the Gabor filter kernel are applied to the\n image and the response is returned as a pair of arrays.\n\n Frequency and orientation representations of the Gabor filter are similar\n to those of the human visual system. It is especially suitable for texture\n classification using Gabor filter banks.\n\n Parameters\n ----------\n image : array\n Input image.\n frequency : float\n Frequency of the harmonic function.\n theta : float\n Orientation in radians. If 0, the harmonic is in the x-direction.\n bandwidth : float\n The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`\n and `sigma_y` will decrease with increasing frequency. This value is\n ignored if `sigma_x` and `sigma_y` are set by the user.\n sigma_x, sigma_y : float\n Standard deviation in x- and y-directions. These directions apply to\n the kernel *before* rotation. 
If `theta = pi/2`, then the kernel is\n rotated 90 degrees so that `sigma_x` controls the *vertical* direction.\n offset : float, optional\n Phase offset of harmonic function in radians.\n\n Returns\n -------\n real, imag : arrays\n Filtered images using the real and imaginary parts of the Gabor filter\n kernel.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Gabor_filter\n .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf\n\n \"\"\"\n\n g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, offset)\n\n filtered_real = ndimage.convolve(image, np.real(g), mode=mode, cval=cval)\n filtered_imag = ndimage.convolve(image, np.imag(g), mode=mode, cval=cval)\n\n return filtered_real, filtered_imag\n",
"\"\"\"\n=========================\nFiltering regional maxima\n=========================\n\nHere, we use morphological reconstruction to create a background image, which\nwe can subtract from the original image to isolate bright features (regional\nmaxima).\n\nFirst we try reconstruction by dilation starting at the edges of the image. We\ninitialize a seed image to the minimum intensity of the image, and set its\nborder to be the pixel values in the original image. These maximal pixels will\nget dilated in order to reconstruct the background image.\n\n\"\"\"\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage import img_as_float\nfrom skimage.morphology import reconstruction\n\n# Convert to float: Important for subtraction later which won't work with uint8\nimage = img_as_float(data.coins())\nimage = gaussian_filter(image, 1)\n\nseed = np.copy(image)\nseed[1:-1, 1:-1] = image.min()\nmask = image\n\ndilated = reconstruction(seed, mask, method='dilation')\n\n\"\"\"\nSubtracting the dilated image leaves an image with just the coins and a flat,\nblack background, as shown below.\n\"\"\"\n\nfig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 2.5))\n\nax1.imshow(image)\nax1.set_title('original image')\nax1.axis('off')\n\nax2.imshow(dilated, vmin=image.min(), vmax=image.max())\nax2.set_title('dilated')\nax2.axis('off')\n\nax3.imshow(image - dilated)\nax3.set_title('image - dilated')\nax3.axis('off')\n\nplt.tight_layout()\n\n\"\"\"\n\n.. image:: PLOT2RST.current_figure\n\nAlthough the features (i.e. the coins) are clearly isolated, the coins\nsurrounded by a bright background in the original image are dimmer in the\nsubtracted image. We can attempt to correct this using a different seed image.\n\nInstead of creating a seed image with maxima along the image border, we can use\nthe features of the image itself to seed the reconstruction process. Here, the\nseed image is the original image minus a fixed value, ``h``.\n\"\"\"\n\nh = 0.4\nseed = image - h\ndilated = reconstruction(seed, mask, method='dilation')\nhdome = image - dilated\n\n\"\"\"\nTo get a feel for the reconstruction process, we plot the intensity of the\nmask, seed, and dilated images along a slice of the image (indicated by red\nline).\n\"\"\"\n\nfig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 2.5))\n\nyslice = 197\n\nax1.plot(mask[yslice], '0.5', label='mask')\nax1.plot(seed[yslice], 'k', label='seed')\nax1.plot(dilated[yslice], 'r', label='dilated')\nax1.set_ylim(-0.2, 2)\nax1.set_title('image slice')\nax1.set_xticks([])\nax1.legend()\n\nax2.imshow(dilated, vmin=image.min(), vmax=image.max())\nax2.axhline(yslice, color='r', alpha=0.4)\nax2.set_title('dilated')\nax2.axis('off')\n\nax3.imshow(hdome)\nax3.axhline(yslice, color='r', alpha=0.4)\nax3.set_title('image - dilated')\nax3.axis('off')\n\nplt.tight_layout()\nplt.show()\n\n\"\"\"\n.. image:: PLOT2RST.current_figure\n\nAs you can see in the image slice, each coin is given a different baseline\nintensity in the reconstructed image; this is because we used the local\nintensity (shifted by ``h``) as a seed value. As a result, the coins in the\nsubtracted image have similar pixel intensities. The final result is known as\nthe h-dome of an image since this tends to isolate regional maxima of height\n``h``. This operation is particularly useful when your images are unevenly\nilluminated.\n\"\"\"\n"
] | [
[
"numpy.log2",
"numpy.nonzero",
"numpy.clip",
"numpy.min",
"numpy.issubdtype",
"numpy.max",
"numpy.interp",
"numpy.any",
"numpy.exp",
"numpy.histogram"
],
[
"numpy.log",
"numpy.imag",
"numpy.cos",
"numpy.sin",
"numpy.real",
"numpy.exp",
"numpy.zeros"
],
[
"matplotlib.pyplot.tight_layout",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.subplots",
"numpy.copy",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
daoran/opendr | [
"bca25f6a43244fe9c219a24576181f94a0726923",
"bca25f6a43244fe9c219a24576181f94a0726923",
"bca25f6a43244fe9c219a24576181f94a0726923"
] | [
"tests/sources/tools/perception/object_tracking_2d/deep_sort/test_object_tracking_2d_deep_sort.py",
"tests/sources/tools/perception/heart_anomaly_detection/attention_neural_bag_of_feature/test_attention_neural_bag_of_feature_learner.py",
"src/opendr/perception/object_detection_2d/datasets/transforms.py"
] | [
"# Copyright 2020-2022 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\nimport shutil\nimport torch\nfrom opendr.perception.object_tracking_2d import ObjectTracking2DDeepSortLearner\nfrom opendr.perception.object_tracking_2d import (\n Market1501Dataset,\n Market1501DatasetIterator,\n)\nfrom opendr.perception.object_tracking_2d import (\n MotDataset,\n RawMotWithDetectionsDatasetIterator,\n)\nimport os\n\nDEVICE = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu'\n\nprint(\"Using device:\", DEVICE)\nprint(\"Using device:\", DEVICE, file=sys.stderr)\n\n\ndef rmfile(path):\n try:\n os.remove(path)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\ndef rmdir(_dir):\n try:\n shutil.rmtree(_dir)\n except OSError as e:\n print(\"Error: %s - %s.\" % (e.filename, e.strerror))\n\n\nclass TestObjectTracking2DDeepSortLearner(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.temp_dir = os.path.join(\"tests\", \"sources\", \"tools\",\n \"perception\", \"object_tracking_2d\",\n \"deep_sort\",\n \"deep_sort_temp\")\n\n cls.train_split_paths = {\n \"nano_mot20\": os.path.join(\n \".\", \"src\", \"opendr\", \"perception\", \"object_tracking_2d\",\n \"datasets\", \"splits\", \"nano_mot20.train\"\n )\n }\n\n cls.model_names = [\n \"deep_sort\",\n ]\n\n cls.mot_dataset_path = MotDataset.download_nano_mot20(\n os.path.join(cls.temp_dir, \"mot_dataset\"), True\n ).path\n cls.market1501_dataset_path = Market1501Dataset.download_nano_market1501(\n os.path.join(cls.temp_dir, \"market1501_dataset\"), True\n ).path\n\n print(\"Dataset downloaded\", file=sys.stderr)\n\n for model_name in cls.model_names:\n ObjectTracking2DDeepSortLearner.download(\n model_name, cls.temp_dir\n )\n\n print(\"Models downloaded\", file=sys.stderr)\n\n @classmethod\n def tearDownClass(cls):\n # Clean up downloaded files\n\n rmdir(os.path.join(cls.temp_dir))\n\n def test_fit(self):\n\n def test_model(name):\n dataset = Market1501Dataset(self.market1501_dataset_path)\n\n learner = ObjectTracking2DDeepSortLearner(\n temp_path=self.temp_dir,\n device=DEVICE,\n )\n\n starting_param = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone()\n\n learner.fit(\n dataset,\n epochs=2,\n val_epochs=2,\n verbose=True,\n )\n new_param = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone()\n self.assertFalse(torch.equal(starting_param, new_param))\n\n print(\"Fit\", name, \"ok\", file=sys.stderr)\n\n for name in self.model_names:\n test_model(name)\n\n def test_fit_iterator(self):\n def test_model(name):\n dataset = Market1501DatasetIterator(\n os.path.join(self.market1501_dataset_path, \"bounding_box_train\"),\n )\n eval_dataset = Market1501DatasetIterator(\n os.path.join(self.market1501_dataset_path, \"bounding_box_test\"),\n )\n\n learner = ObjectTracking2DDeepSortLearner(\n checkpoint_after_iter=3,\n temp_path=self.temp_dir,\n device=DEVICE,\n )\n\n starting_param = 
list(learner.tracker.deepsort.extractor.net.parameters())[0].clone()\n\n learner.fit(\n dataset,\n epochs=2,\n val_dataset=eval_dataset,\n val_epochs=2,\n verbose=True,\n )\n new_param = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone()\n self.assertFalse(torch.equal(starting_param, new_param))\n\n print(\"Fit iterator\", name, \"ok\", file=sys.stderr)\n\n for name in self.model_names:\n test_model(name)\n\n def test_eval(self):\n def test_model(name):\n model_path = os.path.join(self.temp_dir, name)\n train_split_paths = {\n \"nano_mot20\": os.path.join(\n \".\", \"src\", \"opendr\", \"perception\", \"object_tracking_2d\",\n \"datasets\", \"splits\", \"nano_mot20.train\"\n )\n }\n\n dataset = RawMotWithDetectionsDatasetIterator(\n self.mot_dataset_path,\n train_split_paths\n )\n\n learner = ObjectTracking2DDeepSortLearner(\n temp_path=self.temp_dir,\n device=DEVICE,\n )\n learner.load(model_path, verbose=True)\n result = learner.eval(dataset)\n\n self.assertGreater(len(result[\"mota\"]), 0)\n\n for name in self.model_names:\n test_model(name)\n\n def test_infer(self):\n def test_model(name):\n model_path = os.path.join(self.temp_dir, name)\n train_split_paths = {\n \"nano_mot20\": os.path.join(\n \".\", \"src\", \"opendr\", \"perception\", \"object_tracking_2d\",\n \"datasets\", \"splits\", \"nano_mot20.train\"\n )\n }\n\n dataset = RawMotWithDetectionsDatasetIterator(\n self.mot_dataset_path,\n train_split_paths\n )\n\n learner = ObjectTracking2DDeepSortLearner(\n temp_path=self.temp_dir,\n device=DEVICE,\n )\n learner.load(model_path, verbose=True)\n result = learner.infer(dataset[0][0], 1)\n\n self.assertTrue(len(result) > 0)\n\n learner.reset()\n\n result = learner.infer([\n dataset[0][0],\n dataset[1][0],\n ])\n\n self.assertTrue(len(result) == 2)\n self.assertTrue(len(result[0]) > 0)\n\n for name in self.model_names:\n test_model(name)\n\n def test_save(self):\n def test_model(name):\n model_path = os.path.join(self.temp_dir, \"test_save_\" + name)\n save_path = os.path.join(model_path, \"save\")\n\n learner = ObjectTracking2DDeepSortLearner(\n temp_path=self.temp_dir,\n device=DEVICE,\n )\n\n learner.save(save_path, True)\n starting_param_1 = list(learner.tracker.deepsort.extractor.net.parameters())[0].clone()\n\n learner2 = ObjectTracking2DDeepSortLearner(\n temp_path=self.temp_dir,\n device=DEVICE,\n )\n learner2.load(save_path)\n\n new_param = list(learner2.tracker.deepsort.extractor.net.parameters())[0].clone()\n self.assertTrue(torch.equal(starting_param_1, new_param))\n\n for name in self.model_names:\n test_model(name)\n\n def test_optimize(self):\n def test_model(name):\n model_path = os.path.join(self.temp_dir, name)\n train_split_paths = {\n \"nano_mot20\": os.path.join(\n \".\", \"src\", \"opendr\", \"perception\", \"object_tracking_2d\",\n \"datasets\", \"splits\", \"nano_mot20.train\"\n )\n }\n\n dataset = RawMotWithDetectionsDatasetIterator(\n self.mot_dataset_path,\n train_split_paths\n )\n\n learner = ObjectTracking2DDeepSortLearner(\n temp_path=self.temp_dir,\n device=DEVICE,\n )\n learner.load(model_path, verbose=True)\n learner.optimize()\n result = learner.eval(dataset)\n\n self.assertGreater(len(result[\"mota\"]), 0)\n\n for name in self.model_names:\n test_model(name)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020-2022 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport torch\nimport tempfile\nimport numpy as np\nimport random\n\n# OpenDR imports\nfrom opendr.perception.heart_anomaly_detection import AttentionNeuralBagOfFeatureLearner\nfrom opendr.engine.datasets import DatasetIterator\nfrom opendr.engine.data import Timeseries\nfrom opendr.engine.target import Category\n\n\nclass DummyDataset(DatasetIterator):\n def __init__(self, in_channels, series_length, n_class, n_sample=4):\n super(DummyDataset, self).__init__()\n self.in_channels = in_channels\n self.series_length = series_length\n self.n_sample = n_sample\n self.n_class = n_class\n\n def __len__(self,):\n return self.n_sample\n\n def __getitem__(self, i):\n x = np.random.rand(self.in_channels, self.series_length)\n y = np.random.randint(low=0, high=self.n_class)\n return Timeseries(x), Category(y)\n\n\nclass TestAttentionNeuralBagOfFeatureLearner(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n print(\"\\n\\n**********************************\\nTEST Attention Neural Bag-of-Feature Learner\\n\"\n \"**********************************\")\n pass\n\n @classmethod\n def tearDownClass(cls):\n return\n\n def test_fit(self):\n in_channels = random.choice([1, 2])\n series_length = random.choice([30 * 300, 40 * 300])\n n_class = np.random.randint(low=2, high=100)\n quantization_type = random.choice(['nbof', 'tnbof'])\n attention_type = random.choice(['spatial', 'temporal', 'spatialsa', 'temporalsa', 'spatiotemporal'])\n\n train_set = DummyDataset(in_channels, series_length, n_class)\n val_set = DummyDataset(in_channels, series_length, n_class)\n test_set = DummyDataset(in_channels, series_length, n_class)\n\n learner = AttentionNeuralBagOfFeatureLearner(in_channels,\n series_length,\n n_class,\n quantization_type=quantization_type,\n attention_type=attention_type,\n iters=1,\n batch_size=4,\n test_mode=True)\n\n old_weight = list(learner.model.parameters())[0].clone()\n learner.fit(train_set, val_set, test_set, silent=True, verbose=False)\n new_weight = list(learner.model.parameters())[0].clone()\n\n self.assertFalse(torch.equal(old_weight, new_weight),\n msg=\"Model parameters did not change after running fit.\")\n\n def test_eval(self):\n in_channels = random.choice([1, 2])\n series_length = random.choice([30 * 300, 40 * 300])\n n_class = np.random.randint(low=2, high=100)\n quantization_type = random.choice(['nbof', 'tnbof'])\n attention_type = random.choice(['spatial', 'temporal', 'spatialsa', 'temporalsa', 'spatiotemporal'])\n\n learner = AttentionNeuralBagOfFeatureLearner(in_channels,\n series_length,\n n_class,\n quantization_type=quantization_type,\n attention_type=attention_type,\n iters=1,\n batch_size=4,\n test_mode=True)\n\n dataset = DummyDataset(in_channels, series_length, n_class)\n performance = learner.eval(dataset, silent=True, verbose=False)\n\n self.assertTrue('cross_entropy' in performance.keys())\n self.assertTrue('acc' in performance.keys())\n 
self.assertTrue('precision' in performance.keys())\n self.assertTrue('recall' in performance.keys())\n self.assertTrue('f1' in performance.keys())\n\n def test_infer(self):\n in_channels = random.choice([1, 2])\n series_length = random.choice([30 * 300, 40 * 300])\n n_class = np.random.randint(low=2, high=100)\n quantization_type = random.choice(['nbof', 'tnbof'])\n attention_type = random.choice(['spatial', 'temporal', 'spatialsa', 'temporalsa', 'spatiotemporal'])\n\n learner = AttentionNeuralBagOfFeatureLearner(in_channels,\n series_length,\n n_class,\n quantization_type=quantization_type,\n attention_type=attention_type,\n iters=1,\n batch_size=4,\n test_mode=True)\n\n series = Timeseries(np.random.rand(in_channels, series_length))\n pred = learner.infer(series)\n self.assertTrue(isinstance(pred, Category))\n self.assertTrue(pred.data < learner.n_class,\n msg=\"Predicted class label must be less than the number of class\")\n\n def test_save_load(self):\n in_channels = random.choice([1, 2])\n series_length = random.choice([30 * 300, 40 * 300])\n n_class = np.random.randint(low=2, high=100)\n quantization_type = random.choice(['nbof', 'tnbof'])\n attention_type = random.choice(['spatial', 'temporal', 'spatialsa', 'temporalsa', 'spatiotemporal'])\n\n learner = AttentionNeuralBagOfFeatureLearner(in_channels,\n series_length,\n n_class,\n quantization_type=quantization_type,\n attention_type=attention_type,\n iters=1,\n batch_size=4,\n test_mode=True)\n\n temp_dir = tempfile.TemporaryDirectory()\n learner.save(temp_dir.name, verbose=False)\n\n new_learner = AttentionNeuralBagOfFeatureLearner(in_channels,\n series_length,\n n_class,\n quantization_type=quantization_type,\n attention_type=attention_type,\n iters=1,\n batch_size=4,\n test_mode=True)\n\n new_learner.load(temp_dir.name, verbose=False)\n series = Timeseries(np.random.rand(in_channels, series_length))\n old_pred = learner.infer(series).confidence\n new_pred = new_learner.infer(series).confidence\n\n self.assertEqual(old_pred, new_pred)\n temp_dir.cleanup()\n\n def test_download(self):\n in_channels = 1\n series_length = 30 * 300\n n_class = 4\n quantization_type = 'nbof'\n attention_type = 'temporal'\n n_codeword = random.choice([256, 512])\n fold_idx = random.choice([0, 1, 2, 3, 4])\n\n learner = AttentionNeuralBagOfFeatureLearner(in_channels,\n series_length,\n n_class,\n quantization_type=quantization_type,\n attention_type=attention_type,\n n_codeword=n_codeword)\n\n temp_dir = tempfile.TemporaryDirectory()\n learner.download(temp_dir.name, fold_idx)\n learner.load(temp_dir.name)\n temp_dir.cleanup()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020-2022 OpenDR European Project\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport mxnet as mx\r\nimport gluoncv.data.transforms.image as timage\r\n\r\n\r\ndef np_to_mx(img_np):\r\n \"\"\"\r\n Convert numpy image to MXNet image.\r\n \"\"\"\r\n img_mx = mx.image.image.nd.from_numpy(np.float32(img_np))\r\n return img_mx\r\n\r\n\r\ndef bbox_to_np(bbox):\r\n \"\"\"\r\n BoundingBox to [xmin, ymin, xmax, ymax, conf, cls] numpy array.\r\n \"\"\"\r\n bbox_np = np.asarray([bbox.left, bbox.top, bbox.left + bbox.width, bbox.top + bbox.height, bbox.confidence, bbox.name])\r\n return bbox_np\r\n\r\n\r\nclass BoundingBoxListToNumpyArray:\r\n \"\"\"\r\n Transform object to convert OpenDR BoundingBoxList to numpy array of [[xmin, ymin, xmax, ymax, score, cls_id],...] format.\r\n \"\"\"\r\n def __call__(self, bbox_list):\r\n return np.asarray([bbox_to_np(bbox) for bbox in bbox_list.data])\r\n\r\n\r\nclass ImageToNDArrayTransform:\r\n \"\"\"\r\n Transform object to convert OpenDR Image to MXNext image.\r\n \"\"\"\r\n def __call__(self, img):\r\n return np_to_mx(img.data)\r\n\r\n\r\nclass ImageToNumpyArrayTransform:\r\n \"\"\"\r\n Transform object to convert OpenDR Image to Numpy array.\r\n \"\"\"\r\n def __call__(self, img):\r\n return img.data\r\n\r\n\r\nclass ResizeImageAndBoxesTransform:\r\n \"\"\"\r\n Resizes a numpy image and corresponding bounding boxes to fit the given dimensions.\r\n \"\"\"\r\n def __init__(self, w, h):\r\n self.w = w\r\n self.h = h\r\n\r\n def __call__(self, img, labels):\r\n h, w, _ = img.shape\r\n w_r = self.w / w\r\n h_r = self.h / h\r\n img = cv2.resize(img, (self.w, self.h), interpolation=cv2.INTER_LINEAR)\r\n labels[:, 0] *= w_r\r\n labels[:, 2] *= w_r\r\n labels[:, 1] *= h_r\r\n labels[:, 3] *= h_r\r\n return img, labels\r\n\r\n\r\ndef transform_test_resize(imgs, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), w=640, h=480):\r\n \"\"\"\r\n Function adapted from gluoncv.data.transforms.presets.ssd, resizes the image to a preset size.\r\n :param imgs:\r\n :type imgs:\r\n :param mean:\r\n :type mean:\r\n :param std:\r\n :type std:\r\n :param w: Desired width of the output tensor.\r\n :type w: int\r\n :param h: Desired height of the output tensor.\r\n :type h: int\r\n :return:\r\n :rtype:\r\n \"\"\"\r\n if isinstance(imgs, mx.nd.NDArray):\r\n imgs = [imgs]\r\n for im in imgs:\r\n assert isinstance(im, mx.nd.NDArray), \"Expect NDArray, got {}\".format(type(im))\r\n\r\n tensors = []\r\n origs = []\r\n for img in imgs:\r\n img = timage.imresize(img, w, h)\r\n orig_img = img.asnumpy().astype('uint8')\r\n img = mx.nd.image.to_tensor(img)\r\n img = mx.nd.image.normalize(img, mean=mean, std=std)\r\n tensors.append(img.expand_dims(0))\r\n origs.append(orig_img)\r\n if len(tensors) == 1:\r\n return tensors[0], origs[0]\r\n return tensors, origs\r\n\r\n\r\ndef transform_test(imgs, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):\r\n \"\"\"\r\n Function dapted from 
gluoncv.data.transforms.presets.ssd, normalizes and converts image to tensor.\r\n :param imgs:\r\n :type imgs:\r\n :param mean:\r\n :type mean:\r\n :param std:\r\n :type std:\r\n :return:\r\n :rtype:\r\n \"\"\"\r\n if isinstance(imgs, mx.nd.NDArray):\r\n imgs = [imgs]\r\n for im in imgs:\r\n assert isinstance(im, mx.nd.NDArray), \"Expect NDArray, got {}\".format(type(im))\r\n\r\n tensors = []\r\n origs = []\r\n for img in imgs:\r\n orig_img = img.asnumpy().astype('uint8')\r\n img = mx.nd.image.to_tensor(img)\r\n img = mx.nd.image.normalize(img, mean=mean, std=std)\r\n tensors.append(img.expand_dims(0))\r\n origs.append(orig_img)\r\n if len(tensors) == 1:\r\n return tensors[0], origs[0]\r\n return tensors, origs\r\n\r\n\r\ndef pad_test(img, min_size=512):\r\n h_pad_size = 0\r\n min_dim = 2 + np.argmin([img.shape[2:4]])\r\n img_padded = img\r\n if img.shape[min_dim] < min_size:\r\n h_pad_size = int((min_size - img.shape[min_dim]) / 2.0)\r\n if min_dim == 2:\r\n img_padded = mx.nd.pad(img, mode=\"constant\", constant_value=0,\r\n pad_width=(0, 0, 0, 0, h_pad_size,\r\n h_pad_size, 0, 0))\r\n else:\r\n img_padded = mx.nd.pad(img, mode=\"constant\", constant_value=0,\r\n pad_width=(0, 0, 0, 0, 0, 0,\r\n h_pad_size, h_pad_size))\r\n return img_padded\r\n"
] | [
[
"torch.equal"
],
[
"numpy.random.rand",
"torch.equal",
"numpy.random.randint"
],
[
"numpy.asarray",
"numpy.argmin",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
maxiaoba/rlk | [
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d",
"3e23473f6bbc59552b6b2bcd97245e024d7ca95d"
] | [
"tests/DifferentialGame/masac_gnn_gaussian.py",
"tests/Simple/SupLstm/visualize_policy.py",
"tests/MultiDifferentialGame/r2g_gnn11_share_gaussian.py",
"tests/Particle/coma_gaussian.py",
"tests/Cartpole/r2g_gnn12_share_gaussian.py",
"rlkit/torch/policies/tanh_policy.py",
"tests/ParticleGNN/r2g_gnn11_gaussian.py",
"tests/RoboSumo/visualize_against.py",
"tests/Traffic/traffic/scenarios/t_intersection_multi.py",
"tests/Traffic/sup_sep_softmax_lstm_policy.py",
"tests/Traffic/traffic/actions/xy_accel_action.py",
"rlkit/torch/vpg/ppo_sup_vanilla.py",
"rlkit/torch/masac/masac_discrete.py",
"rlkit/data_management/simple_replay_buffer.py",
"rlkit/torch/r2g/r2g_gnn12.py",
"tests/Traffic/traffic/scenarios/t_intersection_multi_2.py"
] | [
"import copy\nimport torch.nn as nn\nfrom rlkit.launchers.launcher_util import setup_logger\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.ma_eval_util import get_generic_ma_path_information\n\ndef experiment(variant):\n num_agent = variant['num_agent']\n from differential_game import DifferentialGame\n expl_env = DifferentialGame(game_name=args.exp_name)\n eval_env = DifferentialGame(game_name=args.exp_name)\n obs_dim = eval_env.observation_space.low.size\n action_dim = eval_env.action_space.low.size\n\n from rlkit.torch.networks.graph_builders import FullGraphBuilder\n graph_builder1 = FullGraphBuilder(\n input_node_dim=obs_dim+action_dim,\n num_node=num_agent,\n contain_self_loop=False)\n from rlkit.torch.networks.gnn_networks import GNNNet\n gnn1 = GNNNet(\n graph_builder1,\n node_dim=variant['qf_kwargs']['hidden_dim'],\n conv_type=variant['qf_kwargs']['conv_type'],\n num_conv_layers=1,\n hidden_activation='relu',\n output_activation='relu',\n )\n qf1 = nn.Sequential(\n gnn1,\n nn.Linear(variant['qf_kwargs']['hidden_dim'],1)\n )\n target_qf1 = copy.deepcopy(qf1)\n\n from rlkit.torch.networks.graph_builders import FullGraphBuilder\n graph_builder2 = FullGraphBuilder(\n input_node_dim=obs_dim+action_dim,\n num_node=num_agent,\n contain_self_loop=False)\n from rlkit.torch.networks.gnn_networks import GNNNet\n gnn2 = GNNNet(\n graph_builder2,\n node_dim=variant['qf_kwargs']['hidden_dim'],\n conv_type=variant['qf_kwargs']['conv_type'],\n num_conv_layers=1,\n hidden_activation='relu',\n output_activation='relu',\n )\n qf2 = nn.Sequential(\n gnn2,\n nn.Linear(variant['qf_kwargs']['hidden_dim'],1)\n )\n target_qf2 = copy.deepcopy(qf2)\n\n policy_n, eval_policy_n, expl_policy_n = [], [], []\n for i in range(num_agent):\n from rlkit.torch.networks.layers import SplitLayer\n policy = nn.Sequential(\n nn.Linear(obs_dim,variant['policy_kwargs']['hidden_dim']),\n nn.ReLU(),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],variant['policy_kwargs']['hidden_dim']),\n nn.ReLU(),\n SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy\n policy = TanhGaussianPolicy(module=policy)\n from rlkit.torch.policies.make_deterministic import MakeDeterministic\n eval_policy = MakeDeterministic(policy)\n from rlkit.exploration_strategies.base import PolicyWrappedWithExplorationStrategy\n if variant['random_exploration']:\n from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy\n expl_policy = PolicyWrappedWithExplorationStrategy(\n exploration_strategy=EpsilonGreedy(expl_env.action_space, prob_random_action=1.0),\n policy=policy,\n )\n else:\n expl_policy = policy\n\n policy_n.append(policy)\n eval_policy_n.append(eval_policy)\n expl_policy_n.append(expl_policy)\n\n from rlkit.samplers.data_collector.ma_path_collector import MAMdpPathCollector\n eval_path_collector = MAMdpPathCollector(eval_env, eval_policy_n)\n expl_path_collector = MAMdpPathCollector(expl_env, expl_policy_n)\n\n from rlkit.data_management.ma_env_replay_buffer import MAEnvReplayBuffer\n replay_buffer = MAEnvReplayBuffer(variant['replay_buffer_size'], expl_env, num_agent=num_agent)\n\n from rlkit.torch.masac.masac_gnn import MASACGNNTrainer\n trainer = MASACGNNTrainer(\n env = expl_env,\n qf1=qf1,\n target_qf1=target_qf1,\n qf2=qf2,\n target_qf2=target_qf2,\n policy_n=policy_n,\n **variant['trainer_kwargs']\n )\n\n from rlkit.torch.torch_rl_algorithm 
import TorchBatchRLAlgorithm\n algorithm = TorchBatchRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n log_path_function=get_generic_ma_path_information,\n **variant['algorithm_kwargs']\n )\n algorithm.to(ptu.device)\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', type=str, default='zero_sum')\n parser.add_argument('--log_dir', type=str, default='MASACGNNGaussian')\n parser.add_argument('--conv', type=str, default='GSage')\n parser.add_argument('--hidden', type=int, default=16)\n parser.add_argument('--oa', action='store_true', default=False) # online action\n parser.add_argument('--snl', action='store_true', default=False) # sum n loss\n parser.add_argument('--re', action='store_true', default=False) # random exploration\n parser.add_argument('--alpha', type=float, default=None) # init alpha\n parser.add_argument('--fa', action='store_true', default=False) # fix alpha\n parser.add_argument('--lr', type=float, default=None)\n parser.add_argument('--bs', type=int, default=None)\n parser.add_argument('--epoch', type=int, default=None)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--snapshot_mode', type=str, default=\"gap_and_last\")\n parser.add_argument('--snapshot_gap', type=int, default=500)\n args = parser.parse_args()\n import os.path as osp\n pre_dir = './Data/'+args.exp_name\n main_dir = args.log_dir\\\n +args.conv\\\n +('hidden'+str(args.hidden))\\\n +('oa' if args.oa else '')\\\n +('snl' if args.snl else '')\\\n +('re' if args.re else '')\\\n +(('alpha'+str(args.alpha)) if args.alpha else '')\\\n +('fa' if args.fa else '')\\\n +(('lr'+str(args.lr)) if args.lr else '')\\\n +(('bs'+str(args.bs)) if args.bs else '')\n log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))\n # noinspection PyTypeChecker\n variant = dict(\n num_agent=2,\n random_exploration=args.re,\n algorithm_kwargs=dict(\n num_epochs=(args.epoch if args.epoch else 100),\n num_eval_steps_per_epoch=100,\n num_trains_per_train_loop=100,\n num_expl_steps_per_train_loop=100,\n min_num_steps_before_training=100,\n max_path_length=100,\n batch_size=(args.bs if args.bs else 256),\n ),\n trainer_kwargs=dict(\n use_soft_update=True,\n tau=1e-2,\n discount=0.99,\n qf_learning_rate=(args.lr if args.lr else 1e-3),\n policy_learning_rate=(args.lr if args.lr else 1e-4),\n online_action=args.oa,\n sum_n_loss=args.snl,\n init_alpha=(args.alpha if args.alpha else 1.),\n use_automatic_entropy_tuning=(not args.fa),\n ),\n qf_kwargs=dict(\n conv_type=args.conv,\n hidden_dim=args.hidden,\n ),\n policy_kwargs=dict(\n hidden_dim=args.hidden,\n ),\n replay_buffer_size=int(1E6),\n )\n import os\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n with open(osp.join(log_dir,'variant.json'),'w') as out_json:\n import json\n json.dump(variant,out_json,indent=2)\n import sys\n cmd_input = 'python ' + ' '.join(sys.argv) + '\\n'\n with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:\n f.write(cmd_input)\n setup_logger(args.exp_name+'/'+main_dir, variant=variant,\n snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,\n log_dir=log_dir)\n import numpy as np\n import torch\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n # ptu.set_gpu_mode(True) # optionally set the GPU (default=False)\n experiment(variant)\n",
"import torch\nimport numpy as np\nimport time\nimport pdb\nfrom rlkit.torch.core import eval_np, np_ify\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp_name', type=str, default='SimpleSupLSTM')\nparser.add_argument('--extra_name', type=str, default='obs1int10')\nparser.add_argument('--log_dir', type=str, default='SupLSTMlayer1hidden16')\nparser.add_argument('--file', type=str, default='params')\nparser.add_argument('--epoch', type=int, default=None)\nparser.add_argument('--seed', type=int, default=0)\nargs = parser.parse_args()\n\npre_dir = './Data/'+args.exp_name+args.extra_name\nimport os\ndata_path = '{}/{}/seed{}/{}.pkl'.format(pre_dir,args.log_dir,args.seed,args.file)\ndata = torch.load(data_path,map_location='cpu')\n\npolicy = data['trainer/policy']\n# from rlkit.torch.policies.make_deterministic import MakeDeterministic\n# policy = MakeDeterministic(policy)\n\nif 'trainer/sup_learner' in data.keys():\n\tsup_learner = data['trainer/sup_learner']\nelse:\n\tsup_learner = None\n\nimport sys\nimport json\nwith open('{}/{}/seed{}/variant.json'.format(pre_dir,args.log_dir,args.seed)) as f:\n variant = json.load(f)\nfrom simple_sup_lstm import SimpleSupLSTMEnv\nenv = SimpleSupLSTMEnv(**variant['env_kwargs'])\no = env.reset()\npolicy.reset()\n\nmax_path_length = 10\npath_length = 0\ndone = False\nc_r = 0.\nwhile True:\n\tpath_length += 1\n\ta, agent_info = policy.get_action(o)\n\to, r, done, env_info = env.step(a)\n\n\tif sup_learner:\n\t\tintentions = eval_np(sup_learner, o[None,:])\n\telif hasattr(policy, 'sup_prob'):\n\t\tintentions = eval_np(policy.sup_prob, o[None,:])[0]\n\telse:\n\t\tintentions = None\n\n\tc_r += r\n\tprint(\"step: \",path_length)\n\tprint(\"intentions: \",intentions)\n\tprint(\"a: \",a)\n\tprint(\"env_info: \",env_info)\n\tprint('r: ',r)\n\tprint(done)\n\t# pdb.set_trace()\n\ttime.sleep(0.1)\n\tif path_length > max_path_length or done:\n\t\tprint('c_r: ',c_r)\n\t\tpath_length = 0\n\t\tdone = False\n\t\tc_r = 0.\n\t\tpdb.set_trace()\n\t\to = env.reset()\n\t\tpolicy.reset()",
"import copy\nimport torch.nn as nn\nfrom rlkit.launchers.launcher_util import setup_logger\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.ma_eval_util import get_generic_ma_path_information\n\ndef experiment(variant):\n from multi_differential_game import MultiDifferentialGame\n expl_env = MultiDifferentialGame(**variant['env_kwargs'])\n eval_env = MultiDifferentialGame(**variant['env_kwargs'])\n num_agent = expl_env.agent_num\n obs_dim = eval_env.observation_space.low.size\n action_dim = eval_env.action_space.low.size\n\n from rlkit.torch.networks.graph_builders import FullGraphBuilder\n graph_builder_obs = FullGraphBuilder(\n input_node_dim=obs_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n graph_builder_eval = FullGraphBuilder(\n input_node_dim=graph_builder_obs.output_node_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n if variant['concat_emb']:\n gnn_out_dim = int(obs_dim + variant['graph_kwargs']['node_dim']*variant['graph_kwargs']['num_conv_layers'])\n else:\n gnn_out_dim = variant['graph_kwargs']['node_dim']\n from rlkit.torch.networks.networks import FlattenMlp\n post_mlp1 = FlattenMlp(input_size=gnn_out_dim,\n output_size=1,\n hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*(variant['qf_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n )\n from rlkit.torch.networks.graph_r2g_qnet import R2GQNet\n qf1 = R2GQNet(\n obs_graph_builder=graph_builder_obs,\n eval_graph_builder=graph_builder_eval,\n obs_dim=obs_dim,\n action_dim=action_dim, \n post_mlp=post_mlp1,\n normalize_emb=False,\n output_activation=None,\n concat_emb=variant['concat_emb'],\n **variant['graph_kwargs'],\n )\n target_qf1 = copy.deepcopy(qf1)\n\n post_mlp2 = FlattenMlp(input_size=gnn_out_dim,\n output_size=1,\n hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*(variant['qf_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n )\n from rlkit.torch.networks.graph_r2g_qnet import R2GQNet\n qf2 = R2GQNet(\n obs_graph_builder=graph_builder_obs,\n eval_graph_builder=graph_builder_eval,\n obs_dim=obs_dim,\n action_dim=action_dim, \n post_mlp=post_mlp2,\n normalize_emb=False,\n output_activation=None,\n concat_emb=variant['concat_emb'],\n **variant['graph_kwargs'],\n )\n target_qf2 = copy.deepcopy(qf2)\n\n graph_builder_ca = FullGraphBuilder(\n input_node_dim=obs_dim+action_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n from rlkit.torch.networks.gnn_networks import GNNNet\n cgca = GNNNet(\n graph_builder_ca,\n hidden_activation='lrelu0.2',\n output_activation='lrelu0.2',\n **variant['graph_kwargs'],\n )\n from rlkit.torch.networks.networks import FlattenMlp\n from rlkit.torch.networks.layers import SplitLayer\n from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy\n cactor = nn.Sequential(\n cgca,\n FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],\n output_size=variant['cactor_kwargs']['hidden_dim'],\n hidden_sizes=[variant['cactor_kwargs']['hidden_dim']]*(variant['cactor_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n output_activation=nn.LeakyReLU(negative_slope=0.2),\n ),\n nn.LeakyReLU(negative_slope=0.2),\n SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n cactor = 
TanhGaussianPolicy(module=cactor)\n\n graph_builder_policy = FullGraphBuilder(\n input_node_dim=obs_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n shared_gnn = GNNNet(\n graph_builder_policy,\n hidden_activation='lrelu0.2',\n output_activation='lrelu0.2',\n **variant['graph_kwargs'],\n )\n policy_n, expl_policy_n, eval_policy_n = [], [], []\n for i in range(num_agent):\n policy = nn.Sequential(\n FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],\n output_size=variant['policy_kwargs']['hidden_dim'],\n hidden_sizes=[variant['policy_kwargs']['hidden_dim']]*(variant['policy_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n output_activation=nn.LeakyReLU(negative_slope=0.2),\n ),\n SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n policy = TanhGaussianPolicy(module=policy)\n from rlkit.torch.policies.make_deterministic import MakeDeterministic\n eval_policy = MakeDeterministic(policy)\n if variant['random_exploration']:\n from rlkit.exploration_strategies.base import PolicyWrappedWithExplorationStrategy\n from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy\n expl_policy = PolicyWrappedWithExplorationStrategy(\n exploration_strategy=EpsilonGreedy(expl_env.action_space, prob_random_action=1.0),\n policy=policy,\n )\n else:\n expl_policy = policy\n \n policy_n.append(policy)\n expl_policy_n.append(expl_policy)\n eval_policy_n.append(eval_policy)\n \n from rlkit.samplers.data_collector.ma_path_collector import MAMdpPathCollector\n eval_path_collector = MAMdpPathCollector(eval_env, eval_policy_n, shared_encoder=shared_gnn)\n expl_path_collector = MAMdpPathCollector(expl_env, expl_policy_n, shared_encoder=shared_gnn)\n\n from rlkit.data_management.ma_env_replay_buffer import MAEnvReplayBuffer\n replay_buffer = MAEnvReplayBuffer(variant['replay_buffer_size'], expl_env, num_agent=num_agent)\n\n from rlkit.torch.r2g.r2g_gnn11 import R2GGNNTrainer\n trainer = R2GGNNTrainer(\n env=expl_env,\n qf1=qf1,\n target_qf1=target_qf1,\n qf2=qf2,\n target_qf2=target_qf2,\n cactor=cactor,\n policy_n=policy_n,\n shared_gnn=shared_gnn,\n **variant['trainer_kwargs']\n )\n\n from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm\n algorithm = TorchBatchRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n log_path_function=get_generic_ma_path_information,\n **variant['algorithm_kwargs']\n )\n algorithm.to(ptu.device)\n # save init params\n from rlkit.core import logger\n snapshot = algorithm._get_snapshot()\n file_name = osp.join(logger._snapshot_dir, 'itr_-1.pkl')\n torch.save(snapshot, file_name)\n\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', type=str, default='zero_sum')\n parser.add_argument('--num_ag', type=int, default=2)\n parser.add_argument('--gpu', type=int, default=None)\n parser.add_argument('--log_dir', type=str, default='R2GGNN11ShareGaussian')\n parser.add_argument('--conv', type=str, default='GSage')\n parser.add_argument('--layer', type=int, default=2)\n parser.add_argument('--hidden', type=int, default=16)\n parser.add_argument('--glayer', type=int, default=2)\n parser.add_argument('--hnode', type=int, 
default=16)\n parser.add_argument('--sc', action='store_true', default=False) # skip connection between gnn layers\n parser.add_argument('--ceb', action='store_true', default=False) # concat gnn embeddings from each layer\n parser.add_argument('--ce', action='store_true', default=False) # cactor entropy\n parser.add_argument('--er', action='store_true', default=False) # entropy reward\n parser.add_argument('--re', action='store_true', default=False) # random exploration\n parser.add_argument('--alpha', type=float, default=None) # init alpha\n parser.add_argument('--fa', action='store_true', default=False) # fix alpha\n parser.add_argument('--dcig', action='store_true', default=False) # deterministic cactor in graph\n parser.add_argument('--dna', action='store_true', default=False) # deterministic next action\n parser.add_argument('--lr', type=float, default=None)\n parser.add_argument('--bs', type=int, default=None)\n parser.add_argument('--epoch', type=int, default=None)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--snapshot_mode', type=str, default=\"gap_and_last\")\n parser.add_argument('--snapshot_gap', type=int, default=100)\n args = parser.parse_args()\n import os.path as osp\n pre_dir = './Data/'+args.exp_name+'_p'+str(args.num_ag)\n main_dir = args.log_dir\\\n +args.conv\\\n +('layer'+str(args.layer))\\\n +('hidden'+str(args.hidden))\\\n +('glayer'+str(args.glayer))\\\n +('hnode'+str(args.hnode))\\\n +('sc' if args.sc else '')\\\n +('ceb' if args.ceb else '')\\\n +('ce' if args.ce else '')\\\n +('er' if args.er else '')\\\n +('re' if args.re else '')\\\n +(('alpha'+str(args.alpha)) if args.alpha else '')\\\n +('fa' if args.fa else '')\\\n +('dcig' if args.dcig else '')\\\n +('dna' if args.dna else '')\\\n +(('lr'+str(args.lr)) if args.lr else '')\\\n +(('bs'+str(args.bs)) if args.bs else '')\n log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))\n # noinspection PyTypeChecker\n variant = dict(\n random_exploration=args.re,\n env_kwargs=dict(\n game_name=args.exp_name,\n agent_num=args.num_ag,\n ),\n algorithm_kwargs=dict(\n num_epochs=(args.epoch+1 if args.epoch else 101),\n num_eval_steps_per_epoch=100,\n num_trains_per_train_loop=100*args.num_ag,\n num_expl_steps_per_train_loop=100*args.num_ag,\n min_num_steps_before_training=100*args.num_ag,\n max_path_length=100,\n batch_size=(args.bs if args.bs else 256),\n ),\n trainer_kwargs=dict(\n use_soft_update=True,\n tau=1e-2,\n discount=0.99,\n qf_learning_rate=(args.lr if args.lr else 1e-3),\n cactor_learning_rate=(args.lr if args.lr else 1e-4),\n policy_learning_rate=(args.lr if args.lr else 1e-4),\n use_entropy_loss=True,\n use_entropy_reward=args.er,\n use_cactor_entropy_loss=args.ce,\n init_alpha=(args.alpha if args.alpha else 1.),\n use_automatic_entropy_tuning=(not args.fa),\n deterministic_cactor_in_graph=args.dcig,\n deterministic_next_action=args.dna,\n ),\n graph_kwargs=dict(\n conv_type=args.conv,\n node_dim=args.hnode,\n num_conv_layers=args.glayer,\n skip_connect=args.sc,\n ),\n concat_emb=args.ceb,\n qf_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n cactor_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n policy_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n replay_buffer_size=int(1E6),\n )\n import os\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n with open(osp.join(log_dir,'variant.json'),'w') as out_json:\n import json\n json.dump(variant,out_json,indent=2)\n import sys\n cmd_input = 'python ' + ' 
'.join(sys.argv) + '\\n'\n with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:\n f.write(cmd_input)\n setup_logger(args.exp_name+'/'+main_dir, variant=variant,\n snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,\n log_dir=log_dir)\n import numpy as np\n import torch\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if isinstance(args.gpu, int):\n print('using gpu ',args.gpu)\n ptu.set_gpu_mode(True, gpu_id=args.gpu)\n experiment(variant)\n",
"import copy\nimport torch.nn as nn\nfrom rlkit.launchers.launcher_util import setup_logger\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.ma_eval_util import get_generic_ma_path_information\n\ndef experiment(variant):\n import sys\n sys.path.append(\"./multiagent-particle-envs\")\n from make_env import make_env\n from particle_env_wrapper import ParticleEnv\n expl_env = ParticleEnv(make_env(args.exp_name,discrete_action_space=False,world_args=variant['world_args']))\n eval_env = ParticleEnv(make_env(args.exp_name,discrete_action_space=False,world_args=variant['world_args']))\n num_agent = expl_env.num_agent\n obs_dim = eval_env.observation_space.low.size\n action_dim = eval_env.action_space.low.size\n\n policy_n, qf_n = [], []\n policy_optimizer_n, qf_optimizer_n = None, None\n for i in range(num_agent):\n from rlkit.torch.networks.networks import FlattenMlp\n from rlkit.torch.networks.layers import SplitLayer\n policy = nn.Sequential(\n FlattenMlp(input_size=obs_dim,\n output_size=variant['policy_kwargs']['hidden_dim'],\n hidden_sizes=[variant['policy_kwargs']['hidden_dim']]*(variant['policy_kwargs']['num_layer']-1),\n ),\n SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy\n policy = TanhGaussianPolicy(module=policy,return_raw_action=True)\n \n qf = FlattenMlp(\n input_size=(obs_dim*num_agent+action_dim*num_agent),\n output_size=1,\n hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*variant['qf_kwargs']['num_layer'],\n )\n\n policy_n.append(policy)\n qf_n.append(qf)\n\n from rlkit.torch.policies.make_deterministic import MakeDeterministic\n eval_policy_n = [MakeDeterministic(policy) for policy in policy_n]\n expl_policy_n = policy_n\n\n from rlkit.samplers.data_collector.ma_path_collector import MAMdpPathCollector\n eval_path_collector = MAMdpPathCollector(eval_env, eval_policy_n)\n expl_path_collector = MAMdpPathCollector(expl_env, expl_policy_n, collect_raw_actions=True)\n\n from rlkit.torch.coma.coma import COMATrainer\n trainer = COMATrainer(\n env = expl_env,\n policy_n=policy_n,\n qf_n=qf_n,\n policy_optimizer_n=policy_optimizer_n,\n qf_optimizer_n=qf_optimizer_n,\n **variant['trainer_kwargs']\n )\n\n from rlkit.torch.torch_rl_algorithm import TorchOnlineRLAlgorithm\n algorithm = TorchOnlineRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector,\n log_path_function=get_generic_ma_path_information,\n **variant['algorithm_kwargs']\n )\n algorithm.to(ptu.device)\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', type=str, default='simple')\n parser.add_argument('--boundary', action='store_true', default=False)\n parser.add_argument('--num_ag', type=int, default=None)\n parser.add_argument('--num_adv', type=int, default=None)\n parser.add_argument('--num_l', type=int, default=None)\n parser.add_argument('--mpl', type=int, default=25) # max path length\n parser.add_argument('--log_dir', type=str, default='COMAGaussian')\n parser.add_argument('--layer', type=int, default=2)\n parser.add_argument('--hidden', type=int, default=64)\n parser.add_argument('--mc', type=int, default=1)\n parser.add_argument('--em', type=str, default=None) # entropy method\n parser.add_argument('--ec', 
type=float, default=0.) # entropy coefficient\n parser.add_argument('--lr', type=float, default=None)\n parser.add_argument('--bs', type=int, default=None)\n parser.add_argument('--epoch', type=int, default=None)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--snapshot_mode', type=str, default=\"gap_and_last\")\n parser.add_argument('--snapshot_gap', type=int, default=500)\n args = parser.parse_args()\n import os.path as osp\n pre_dir = './Data/'+args.exp_name\\\n +('bd' if args.boundary else '')\\\n +(('ag'+str(args.num_ag)) if args.num_ag else '')\\\n +(('adv'+str(args.num_adv)) if args.num_adv else '')\\\n +(('l'+str(args.num_l)) if args.num_l else '')\\\n +'_mpl'+str(args.mpl)\n main_dir = args.log_dir\\\n +('layer'+str(args.layer))\\\n +('hidden'+str(args.hidden))\\\n +('mc'+str(args.mc))\\\n +(('em'+str(args.em)+'ec'+str(args.ec)) if args.em else '')\\\n +(('lr'+str(args.lr)) if args.lr else '')\\\n +(('bs'+str(args.bs)) if args.bs else '')\n log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))\n # noinspection PyTypeChecker\n variant = dict(\n world_args=dict(\n num_agents=args.num_ag,\n num_adversaries=args.num_adv,\n num_landmarks=args.num_l,\n boundary=([[-1.,-1.],[1.,1.]] if args.boundary else None)\n ),\n algorithm_kwargs=dict(\n num_epochs=(args.epoch+1 if args.epoch else 1001),\n num_eval_steps_per_epoch=1000,\n num_trains_per_train_loop=1,\n num_expl_steps_per_train_loop=1000,\n max_path_length=args.mpl,\n ),\n trainer_kwargs=dict(\n discount=0.99,\n qf_learning_rate=(args.lr if args.lr else 1e-3),\n policy_learning_rate=(args.lr if args.lr else 1e-4),\n max_path_length=args.mpl,\n entropy_method=args.em,\n policy_ent_coeff=args.ec,\n batch_size=(args.bs if args.bs else 256),\n mc_num=args.mc,\n ),\n qf_kwargs=dict(\n num_layer=args.layer,\n hidden_dim=args.hidden,\n ),\n policy_kwargs=dict(\n num_layer=args.layer,\n hidden_dim=args.hidden,\n ),\n )\n import os\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n with open(osp.join(log_dir,'variant.json'),'w') as out_json:\n import json\n json.dump(variant,out_json,indent=2)\n import sys\n cmd_input = 'python ' + ' '.join(sys.argv) + '\\n'\n with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:\n f.write(cmd_input)\n setup_logger(args.exp_name+'/'+main_dir, variant=variant,\n snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,\n log_dir=log_dir,text_log_file=None)\n import numpy as np\n import torch\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n # ptu.set_gpu_mode(True) # optionally set the GPU (default=False)\n experiment(variant)\n",
"import copy\nimport torch.nn as nn\nfrom rlkit.launchers.launcher_util import setup_logger\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.ma_eval_util import get_generic_ma_path_information\n\ndef experiment(variant):\n from cartpole import CartPoleEnv\n expl_env = CartPoleEnv(mode=3)\n eval_env = CartPoleEnv(mode=3)\n num_agent = expl_env.num_agents\n obs_dim = eval_env.observation_space.low.size\n action_dim = eval_env.action_space.low.size\n\n from rlkit.torch.networks.graph_builders import FullGraphBuilder\n graph_builder_obs = FullGraphBuilder(\n input_node_dim=obs_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n from rlkit.torch.networks.gnn_networks import GNNNet\n obs_gnn_1 = GNNNet(\n graph_builder_obs,\n hidden_activation='lrelu0.2',\n output_activation='lrelu0.2',\n **variant['graph_kwargs'],\n )\n\n graph_builder_eval = FullGraphBuilder(\n input_node_dim=graph_builder_obs.output_node_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n if variant['concat_emb']:\n gnn_out_dim = int(obs_dim + variant['graph_kwargs']['node_dim']*variant['graph_kwargs']['num_conv_layers'])\n else:\n gnn_out_dim = variant['graph_kwargs']['node_dim']\n from rlkit.torch.networks.networks import FlattenMlp\n post_mlp1 = FlattenMlp(input_size=gnn_out_dim,\n output_size=1,\n hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*(variant['qf_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n )\n from rlkit.torch.networks.graph_r2g_qnet2 import R2GQNet\n qf1 = R2GQNet(\n obs_gnn=obs_gnn_1,\n pre_graph_builder=graph_builder_eval,\n obs_dim=obs_dim,\n action_dim=action_dim, \n post_mlp=post_mlp1,\n normalize_emb=False,\n output_activation=None,\n concat_emb=variant['concat_emb'],\n **variant['graph_kwargs'],\n )\n target_qf1 = copy.deepcopy(qf1)\n\n obs_gnn_2 = GNNNet(\n graph_builder_obs,\n hidden_activation='lrelu0.2',\n output_activation='lrelu0.2',\n **variant['graph_kwargs'],\n )\n post_mlp2 = FlattenMlp(input_size=gnn_out_dim,\n output_size=1,\n hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*(variant['qf_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n )\n qf2 = R2GQNet(\n obs_gnn=obs_gnn_2,\n pre_graph_builder=graph_builder_eval,\n obs_dim=obs_dim,\n action_dim=action_dim, \n post_mlp=post_mlp2,\n normalize_emb=False,\n output_activation=None,\n concat_emb=variant['concat_emb'],\n **variant['graph_kwargs'],\n )\n target_qf2 = copy.deepcopy(qf2)\n\n graph_builder_ca = FullGraphBuilder(\n input_node_dim=obs_dim+action_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n from rlkit.torch.networks.gnn_networks import GNNNet\n cgca = GNNNet(\n graph_builder_ca,\n hidden_activation='lrelu0.2',\n output_activation='lrelu0.2',\n **variant['graph_kwargs'],\n )\n from rlkit.torch.networks.networks import FlattenMlp\n from rlkit.torch.networks.layers import SplitLayer\n from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy\n cactor = nn.Sequential(\n cgca,\n FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],\n output_size=variant['cactor_kwargs']['hidden_dim'],\n hidden_sizes=[variant['cactor_kwargs']['hidden_dim']]*(variant['cactor_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n output_activation=nn.LeakyReLU(negative_slope=0.2),\n ),\n nn.LeakyReLU(negative_slope=0.2),\n 
SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n cactor = TanhGaussianPolicy(module=cactor)\n\n graph_builder_policy = FullGraphBuilder(\n input_node_dim=obs_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n policy_n, expl_policy_n, eval_policy_n = [], [], []\n for i in range(num_agent):\n policy = nn.Sequential(\n FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],\n output_size=variant['policy_kwargs']['hidden_dim'],\n hidden_sizes=[variant['policy_kwargs']['hidden_dim']]*(variant['policy_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n output_activation=nn.LeakyReLU(negative_slope=0.2),\n ),\n SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n policy = TanhGaussianPolicy(module=policy)\n from rlkit.torch.policies.make_deterministic import MakeDeterministic\n eval_policy = MakeDeterministic(policy)\n if variant['random_exploration']:\n from rlkit.exploration_strategies.base import PolicyWrappedWithExplorationStrategy\n from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy\n expl_policy = PolicyWrappedWithExplorationStrategy(\n exploration_strategy=EpsilonGreedy(expl_env.action_space, prob_random_action=1.0),\n policy=policy,\n )\n else:\n expl_policy = policy\n \n policy_n.append(policy)\n expl_policy_n.append(expl_policy)\n eval_policy_n.append(eval_policy)\n \n from rlkit.samplers.data_collector.ma_path_collector import MAMdpPathCollector\n eval_path_collector = MAMdpPathCollector(eval_env, eval_policy_n, shared_encoder=obs_gnn_1)\n expl_path_collector = MAMdpPathCollector(expl_env, expl_policy_n, shared_encoder=obs_gnn_1)\n\n from rlkit.data_management.ma_env_replay_buffer import MAEnvReplayBuffer\n replay_buffer = MAEnvReplayBuffer(variant['replay_buffer_size'], expl_env, num_agent=num_agent)\n\n from rlkit.torch.r2g.r2g_gnn12 import R2GGNNTrainer\n trainer = R2GGNNTrainer(\n env=expl_env,\n qf1=qf1,\n target_qf1=target_qf1,\n qf2=qf2,\n target_qf2=target_qf2,\n cactor=cactor,\n policy_n=policy_n,\n **variant['trainer_kwargs']\n )\n\n from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm\n algorithm = TorchBatchRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n log_path_function=get_generic_ma_path_information,\n **variant['algorithm_kwargs']\n )\n algorithm.to(ptu.device)\n # save init params\n from rlkit.core import logger\n snapshot = algorithm._get_snapshot()\n file_name = osp.join(logger._snapshot_dir, 'itr_-1.pkl')\n torch.save(snapshot, file_name)\n\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', type=str, default='Cartpole')\n parser.add_argument('--mpl', type=int, default=100) # max path length\n parser.add_argument('--gpu', type=int, default=None)\n parser.add_argument('--log_dir', type=str, default='R2GGNN12ShareGaussian')\n parser.add_argument('--conv', type=str, default='GSage')\n parser.add_argument('--layer', type=int, default=2)\n parser.add_argument('--hidden', type=int, default=64)\n parser.add_argument('--glayer', type=int, default=2)\n parser.add_argument('--hnode', type=int, 
default=32)\n parser.add_argument('--sc', action='store_true', default=False) # skip connection between gnn layers\n parser.add_argument('--ceb', action='store_true', default=False) # concat gnn embeddings from each layer\n parser.add_argument('--ce', action='store_true', default=False) # cactor entropy\n parser.add_argument('--er', action='store_true', default=False) # entropy reward\n parser.add_argument('--re', action='store_true', default=False) # random exploration\n parser.add_argument('--alpha', type=float, default=None) # init alpha\n parser.add_argument('--fa', action='store_true', default=False) # fix alpha\n parser.add_argument('--dcig', action='store_true', default=False) # deterministic cactor in graph\n parser.add_argument('--dna', action='store_true', default=False) # deterministic next action\n parser.add_argument('--lr', type=float, default=None)\n parser.add_argument('--cg', type=float, default=None)\n parser.add_argument('--bs', type=int, default=None)\n parser.add_argument('--epoch', type=int, default=None)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--snapshot_mode', type=str, default=\"gap_and_last\")\n parser.add_argument('--snapshot_gap', type=int, default=100)\n args = parser.parse_args()\n import os.path as osp\n pre_dir = './Data/'+args.exp_name+'_mpl'+str(args.mpl)\n main_dir = args.log_dir\\\n +args.conv\\\n +('layer'+str(args.layer))\\\n +('hidden'+str(args.hidden))\\\n +('glayer'+str(args.glayer))\\\n +('hnode'+str(args.hnode))\\\n +('sc' if args.sc else '')\\\n +('ceb' if args.ceb else '')\\\n +('ce' if args.ce else '')\\\n +('er' if args.er else '')\\\n +('re' if args.re else '')\\\n +(('alpha'+str(args.alpha)) if args.alpha else '')\\\n +('fa' if args.fa else '')\\\n +('dcig' if args.dcig else '')\\\n +('dna' if args.dna else '')\\\n +(('lr'+str(args.lr)) if args.lr else '')\\\n +(('cg'+str(args.cg)) if args.cg else '')\\\n +(('bs'+str(args.bs)) if args.bs else '')\n log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))\n # noinspection PyTypeChecker\n variant = dict(\n random_exploration=args.re,\n algorithm_kwargs=dict(\n num_epochs=(args.epoch if args.epoch else 200),\n num_eval_steps_per_epoch=200,\n num_trains_per_train_loop=200,\n num_expl_steps_per_train_loop=200,\n min_num_steps_before_training=200,\n max_path_length=args.mpl,\n batch_size=(args.bs if args.bs else 256),\n ),\n trainer_kwargs=dict(\n use_soft_update=True,\n tau=1e-2,\n discount=0.99,\n qf_learning_rate=(args.lr if args.lr else 1e-3),\n cactor_learning_rate=(args.lr if args.lr else 1e-4),\n policy_learning_rate=(args.lr if args.lr else 1e-4),\n clip_gradient=(args.cg if args.cg else 0.),\n use_entropy_loss=True,\n use_entropy_reward=args.er,\n use_cactor_entropy_loss=args.ce,\n init_alpha=(args.alpha if args.alpha else 1.),\n use_automatic_entropy_tuning=(not args.fa),\n deterministic_cactor_in_graph=args.dcig,\n deterministic_next_action=args.dna,\n ),\n graph_kwargs=dict(\n conv_type=args.conv,\n node_dim=args.hnode,\n num_conv_layers=args.glayer,\n skip_connect=args.sc,\n ),\n concat_emb=args.ceb,\n qf_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n cactor_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n policy_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n replay_buffer_size=int(1E6),\n )\n import os\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n with open(osp.join(log_dir,'variant.json'),'w') as out_json:\n import json\n json.dump(variant,out_json,indent=2)\n import 
sys\n cmd_input = 'python ' + ' '.join(sys.argv) + '\\n'\n with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:\n f.write(cmd_input)\n setup_logger(args.exp_name+'/'+main_dir, variant=variant,\n snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,\n log_dir=log_dir)\n import numpy as np\n import torch\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if isinstance(args.gpu, int):\n print('using gpu ',args.gpu)\n ptu.set_gpu_mode(True, gpu_id=args.gpu)\n experiment(variant)\n",
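The cartpole script above configures trainer_kwargs with use_soft_update=True and tau=1e-2 and deep-copies qf1/qf2 into target networks. As a minimal sketch (not rlkit's trainer code) of the Polyak soft target update this implies; soft_update is an illustrative helper name, not an rlkit API:

import torch

def soft_update(target_net: torch.nn.Module, source_net: torch.nn.Module, tau: float = 1e-2) -> None:
    # target <- (1 - tau) * target + tau * source, applied in place after each gradient step
    with torch.no_grad():
        for target_p, source_p in zip(target_net.parameters(), source_net.parameters()):
            target_p.mul_(1.0 - tau).add_(tau * source_p)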
"import numpy as np\nimport torch\nfrom torch import nn as nn\n\nfrom rlkit.policies.base import Policy\nfrom rlkit.torch.core import eval_np\nfrom rlkit.torch.distributions.tanh_normal import TanhNormal\n\nclass TanhPolicy(Policy, nn.Module):\n\n def __init__(\n self,\n module,\n return_raw_action=False,\n ):\n super().__init__()\n self.module = module\n self.return_raw_action = return_raw_action\n\n def get_action(self, obs_np):\n if self.return_raw_action:\n actions, raw_actions = self.get_actions(obs_np[None])\n return actions[0, :], {'raw_action':raw_actions[0,:]}\n else:\n actions = self.get_actions(obs_np[None])\n return actions[0, :], {}\n\n def get_actions(self, obs_np):\n if self.return_raw_action:\n with torch.no_grad():\n actions, info = self.forward(torch.tensor(obs_np).float(), return_info=True)\n raw_actions = info['preactivation']\n return np.array(actions), np.array(raw_actions)\n else:\n return eval_np(self, obs_np)\n\n def forward(\n self,\n obs,\n return_info=False,\n ):\n \"\"\"\n :param obs: Observation\n :param return_info: If True, return info\n \"\"\"\n pre_tanh_value = self.module(obs)\n action = torch.tanh(pre_tanh_value)\n\n info = dict(\n preactivation=pre_tanh_value,\n )\n\n if return_info:\n return action, info\n else:\n return action\n",
"import copy\nimport torch.nn as nn\nfrom rlkit.launchers.launcher_util import setup_logger\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.ma_eval_util import get_generic_ma_path_information\n\ndef experiment(variant):\n import sys\n sys.path.append(\"./particle-graph-envs\")\n from make_env import make_env\n expl_env = make_env(args.exp_name,discrete_action_space=False,world_args=variant['world_args'])\n eval_env = make_env(args.exp_name,discrete_action_space=False,world_args=variant['world_args'])\n num_agent = expl_env.num_agents\n obs_dim = eval_env.observation_space.low.size\n action_dim = eval_env.action_space.low.size\n\n from particle_graph import ParticleGraphBuilder\n graph_builder_obs = ParticleGraphBuilder(\n num_agents=expl_env.scenario.num_agents,\n num_landmarks=expl_env.scenario.num_landmarks,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n append_action=False,\n contain_self_loop=False,\n )\n from rlkit.torch.networks.graph_builders import FullGraphBuilder\n graph_builder_eval = FullGraphBuilder(\n input_node_dim=graph_builder_obs.output_node_dim,\n num_node=num_agent,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n contain_self_loop=False)\n if variant['concat_emb']:\n gnn_out_dim = int(graph_builder_obs.output_node_dim + variant['graph_kwargs']['node_dim']*variant['graph_kwargs']['num_conv_layers'])\n else:\n gnn_out_dim = variant['graph_kwargs']['node_dim']\n from rlkit.torch.networks.networks import FlattenMlp\n post_mlp1 = FlattenMlp(input_size=gnn_out_dim,\n output_size=1,\n hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*variant['qf_kwargs']['num_layer'],\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n )\n from rlkit.torch.networks.graph_r2g_qnet import R2GQNet\n qf1 = R2GQNet(\n obs_graph_builder=graph_builder_obs,\n eval_graph_builder=graph_builder_eval,\n obs_dim=graph_builder_obs.output_node_dim,\n action_dim=action_dim, \n post_mlp=post_mlp1,\n normalize_emb=False,\n output_activation=None,\n concat_emb=variant['concat_emb'],\n num_conv_layers=variant['q_num_conv_layers'],\n **variant['graph_kwargs'],\n )\n target_qf1 = copy.deepcopy(qf1)\n\n post_mlp2 = FlattenMlp(input_size=gnn_out_dim,\n output_size=1,\n hidden_sizes=[variant['qf_kwargs']['hidden_dim']]*variant['qf_kwargs']['num_layer'],\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n )\n from rlkit.torch.networks.graph_r2g_qnet import R2GQNet\n qf2 = R2GQNet(\n obs_graph_builder=graph_builder_obs,\n eval_graph_builder=graph_builder_eval,\n obs_dim=graph_builder_obs.output_node_dim,\n action_dim=action_dim, \n post_mlp=post_mlp2,\n normalize_emb=False,\n output_activation=None,\n concat_emb=variant['concat_emb'],\n num_conv_layers=variant['q_num_conv_layers'],\n **variant['graph_kwargs'],\n )\n target_qf2 = copy.deepcopy(qf2)\n print('qf parameter num: ',sum(p.numel() for p in qf1.parameters() if p.requires_grad))\n\n graph_builder_ca = ParticleGraphBuilder(\n num_agents=expl_env.scenario.num_agents,\n num_landmarks=expl_env.scenario.num_landmarks,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n append_action=True,\n contain_self_loop=False,\n )\n from rlkit.torch.networks.gnn_networks import GNNNet\n from rlkit.torch.networks.layers import SelectLayer\n cgca = nn.Sequential(\n GNNNet(\n graph_builder_ca,\n hidden_activation='lrelu0.2',\n output_activation='lrelu0.2',\n num_conv_layers=variant['ca_num_conv_layers'],\n **variant['graph_kwargs'],\n ),\n SelectLayer(dim=1, index=torch.arange(num_agent)),\n )\n from rlkit.torch.networks.layers import 
SplitLayer\n from rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy\n cactor = nn.Sequential(\n cgca,\n FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],\n output_size=variant['cactor_kwargs']['hidden_dim'],\n hidden_sizes=[variant['cactor_kwargs']['hidden_dim']]*(variant['cactor_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n output_activation=nn.LeakyReLU(negative_slope=0.2),\n ),\n nn.LeakyReLU(negative_slope=0.2),\n SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n cactor = TanhGaussianPolicy(module=cactor)\n print('ca parameter num: ',sum(p.numel() for p in cactor.parameters() if p.requires_grad))\n\n graph_builder_policy = ParticleGraphBuilder(\n num_agents=expl_env.scenario.num_agents,\n num_landmarks=expl_env.scenario.num_landmarks,\n batch_size=variant['algorithm_kwargs']['batch_size'],\n append_action=False,\n contain_self_loop=False,\n )\n\n policy_n, expl_policy_n, eval_policy_n = [], [], []\n for agent in range(num_agent):\n gnn_policy = GNNNet(\n graph_builder_policy,\n hidden_activation='lrelu0.2',\n output_activation='lrelu0.2',\n num_conv_layers=variant['p_num_conv_layers'],\n **variant['graph_kwargs'],\n )\n from rlkit.torch.networks.layers import SplitLayer, FlattenLayer\n policy = nn.Sequential(\n gnn_policy,\n SelectLayer(dim=1, index=agent),\n FlattenLayer(),\n FlattenMlp(input_size=variant['graph_kwargs']['node_dim'],\n output_size=variant['policy_kwargs']['hidden_dim'],\n hidden_sizes=[variant['policy_kwargs']['hidden_dim']]*(variant['policy_kwargs']['num_layer']-1),\n hidden_activation=nn.LeakyReLU(negative_slope=0.2),\n output_activation=nn.LeakyReLU(negative_slope=0.2),\n ),\n SplitLayer(layers=[nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim),\n nn.Linear(variant['policy_kwargs']['hidden_dim'],action_dim)])\n )\n policy = TanhGaussianPolicy(module=policy)\n from rlkit.torch.policies.make_deterministic import MakeDeterministic\n eval_policy = MakeDeterministic(policy)\n if variant['random_exploration']:\n from rlkit.exploration_strategies.base import PolicyWrappedWithExplorationStrategy\n from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy\n expl_policy = PolicyWrappedWithExplorationStrategy(\n exploration_strategy=EpsilonGreedy(expl_env.action_space, prob_random_action=1.0),\n policy=policy,\n )\n else:\n expl_policy = policy\n \n policy_n.append(policy)\n expl_policy_n.append(expl_policy)\n eval_policy_n.append(eval_policy)\n \n from rlkit.samplers.data_collector.ma_path_collector import MAMdpPathCollector\n eval_path_collector = MAMdpPathCollector(eval_env, eval_policy_n, shared_obs=True)\n expl_path_collector = MAMdpPathCollector(expl_env, expl_policy_n, shared_obs=True)\n\n from rlkit.data_management.ma_env_replay_buffer import MAEnvReplayBuffer\n replay_buffer = MAEnvReplayBuffer(variant['replay_buffer_size'], expl_env, num_agent=num_agent, shared_obs=True)\n\n from rlkit.torch.r2g.r2g_gnn11 import R2GGNNTrainer\n trainer = R2GGNNTrainer(\n env=expl_env,\n qf1=qf1,\n target_qf1=target_qf1,\n qf2=qf2,\n target_qf2=target_qf2,\n cactor=cactor,\n policy_n=policy_n,\n shared_obs=True,\n shared_gnn=None,\n **variant['trainer_kwargs']\n )\n\n from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm\n algorithm = TorchBatchRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n 
evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n log_path_function=get_generic_ma_path_information,\n **variant['algorithm_kwargs']\n )\n algorithm.to(ptu.device)\n # save init params\n from rlkit.core import logger\n snapshot = algorithm._get_snapshot()\n file_name = osp.join(logger._snapshot_dir, 'itr_-1.pkl')\n torch.save(snapshot, file_name)\n\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_name', type=str, default='simple')\n parser.add_argument('--boundary', action='store_true', default=False)\n parser.add_argument('--num_ag', type=int, default=None)\n parser.add_argument('--num_adv', type=int, default=None)\n parser.add_argument('--num_l', type=int, default=None)\n parser.add_argument('--ar', type=float, default=None) # agent radius\n parser.add_argument('--mpl', type=int, default=25) # max path length\n parser.add_argument('--gpu', type=int, default=None)\n parser.add_argument('--log_dir', type=str, default='R2GGNN11Gaussian')\n parser.add_argument('--conv', type=str, default='GSage')\n parser.add_argument('--layer', type=int, default=2)\n parser.add_argument('--hidden', type=int, default=64)\n parser.add_argument('--pglayer', type=int, default=2)\n parser.add_argument('--caglayer', type=int, default=2)\n parser.add_argument('--qglayer', type=int, default=2)\n parser.add_argument('--hnode', type=int, default=32)\n parser.add_argument('--sc', action='store_true', default=False) # skip connection between gnn layers\n parser.add_argument('--ceb', action='store_true', default=False) # concat gnn embeddings from each layer\n parser.add_argument('--dka', action='store_true', default=False) # detach k1 actions in policy training\n parser.add_argument('--ce', action='store_true', default=False) # cactor entropy\n parser.add_argument('--er', action='store_true', default=False) # entropy reward\n parser.add_argument('--re', action='store_true', default=False) # random exploration\n parser.add_argument('--alpha', type=float, default=None) # init alpha\n parser.add_argument('--fa', action='store_true', default=False) # fix alpha\n parser.add_argument('--dcig', action='store_true', default=False) # deterministic cactor in graph\n parser.add_argument('--dna', action='store_true', default=False) # deterministic next action\n parser.add_argument('--lr', type=float, default=None)\n parser.add_argument('--cg', type=float, default=None)\n parser.add_argument('--bs', type=int, default=None)\n parser.add_argument('--epoch', type=int, default=None)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--snapshot_mode', type=str, default=\"gap_and_last\")\n parser.add_argument('--snapshot_gap', type=int, default=100)\n args = parser.parse_args()\n import os.path as osp\n pre_dir = './Data/'+args.exp_name\\\n +('bd' if args.boundary else '')\\\n +(('ag'+str(args.num_ag)) if args.num_ag else '')\\\n +(('adv'+str(args.num_adv)) if args.num_adv else '')\\\n +(('l'+str(args.num_l)) if args.num_l else '')\\\n +(('ar'+str(args.ar)) if args.ar else '')\\\n +'_mpl'+str(args.mpl)\n main_dir = args.log_dir\\\n +args.conv\\\n +('layer'+str(args.layer))\\\n +('hidden'+str(args.hidden))\\\n +('pglayer'+str(args.pglayer))\\\n +('caglayer'+str(args.caglayer))\\\n +('qglayer'+str(args.qglayer))\\\n +('hnode'+str(args.hnode))\\\n +('sc' if args.sc else '')\\\n +('ceb' if args.ceb else '')\\\n +('dka' if args.dka else '')\\\n +('ce' if args.ce else '')\\\n +('er' if args.er else 
'')\\\n +('re' if args.re else '')\\\n +(('alpha'+str(args.alpha)) if args.alpha else '')\\\n +('fa' if args.fa else '')\\\n +('dcig' if args.dcig else '')\\\n +('dna' if args.dna else '')\\\n +(('lr'+str(args.lr)) if args.lr else '')\\\n +(('cg'+str(args.cg)) if args.cg else '')\\\n +(('bs'+str(args.bs)) if args.bs else '')\n log_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))\n # noinspection PyTypeChecker\n variant = dict(\n random_exploration=args.re,\n world_args=dict(\n num_agents=args.num_ag,\n num_adversaries=args.num_adv,\n num_landmarks=args.num_l,\n agent_size=args.ar,\n boundary=([[-1.,-1.],[1.,1.]] if args.boundary else None),\n shared_obs=True,\n ),\n algorithm_kwargs=dict(\n num_epochs=(args.epoch+1 if args.epoch else 1001),\n num_eval_steps_per_epoch=1000,\n num_trains_per_train_loop=1000,\n num_expl_steps_per_train_loop=1000,\n min_num_steps_before_training=1000,\n max_path_length=args.mpl,\n batch_size=(args.bs if args.bs else 256),\n ),\n trainer_kwargs=dict(\n use_soft_update=True,\n tau=1e-2,\n discount=0.99,\n qf_learning_rate=(args.lr if args.lr else 1e-3),\n cactor_learning_rate=(args.lr if args.lr else 1e-4),\n policy_learning_rate=(args.lr if args.lr else 1e-4),\n clip_gradient=(args.cg if args.cg else 0.),\n use_entropy_loss=True,\n use_entropy_reward=args.er,\n use_cactor_entropy_loss=args.ce,\n init_alpha=(args.alpha if args.alpha else 1.),\n use_automatic_entropy_tuning=(not args.fa),\n detach_k_action=args.dka,\n deterministic_cactor_in_graph=args.dcig,\n deterministic_next_action=args.dna,\n ),\n graph_kwargs=dict(\n conv_type=args.conv,\n node_dim=args.hnode,\n skip_connect=args.sc,\n ),\n p_num_conv_layers=args.pglayer,\n ca_num_conv_layers=args.caglayer,\n q_num_conv_layers=args.qglayer,\n concat_emb=args.ceb,\n qf_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n cactor_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n policy_kwargs=dict(\n hidden_dim=args.hidden,\n num_layer=args.layer,\n ),\n replay_buffer_size=int(1E6),\n )\n import os\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n with open(osp.join(log_dir,'variant.json'),'w') as out_json:\n import json\n json.dump(variant,out_json,indent=2)\n import sys\n cmd_input = 'python ' + ' '.join(sys.argv) + '\\n'\n with open(osp.join(log_dir, 'cmd_input.txt'), 'a') as f:\n f.write(cmd_input)\n setup_logger(args.exp_name+'/'+main_dir, variant=variant,\n snapshot_mode=args.snapshot_mode, snapshot_gap=args.snapshot_gap,\n log_dir=log_dir)\n import numpy as np\n import torch\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if isinstance(args.gpu, int):\n print('using gpu ',args.gpu)\n ptu.set_gpu_mode(True, gpu_id=args.gpu)\n experiment(variant)\n",
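Both training scripts build FullGraphBuilder instances with contain_self_loop=False. For orientation, this is the directed edge set such a builder names (an illustration of the topology only, not rlkit's graph_builders implementation):

import torch

def full_graph_edge_index(num_node: int) -> torch.Tensor:
    # 2 x E tensor of directed edges (i, j) for every ordered pair with i != j
    src, dst = [], []
    for i in range(num_node):
        for j in range(num_node):
            if i != j:
                src.append(i)
                dst.append(j)
    return torch.tensor([src, dst], dtype=torch.long)

print(full_graph_edge_index(3))  # 3 agents -> 6 directed edges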
"import torch\nimport numpy as np\nimport time\nimport pdb\nfrom rlkit.torch.policies.tanh_gaussian_policy import TanhGaussianPolicy\nfrom rlkit.torch.policies.make_deterministic import MakeDeterministic\nfrom rlkit.torch.policies.gumbel_softmax_policy import GumbelSoftmaxMlpPolicy\nfrom rlkit.policies.argmax import ArgmaxDiscretePolicy\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp_name', type=str, default='Ant') # 'Ant', 'Bug', 'Spider'\nparser.add_argument('--rs', type=float, default=0.01)\nparser.add_argument('--mpl', type=int, default=100) # max path length\nparser.add_argument('--p1', type=str, default='MASAC')\nparser.add_argument('--p2', type=str, default='PRG')\nparser.add_argument('--epoch', type=int, default=None)\nparser.add_argument('--rand', action='store_true', default=False)\nparser.add_argument('--seed', type=str, default=0)\nargs = parser.parse_args()\n\npre_dir = './Data/'+args.exp_name+'_rs'+str(args.rs)+'_mpl'+str(args.mpl)\n\npolicy_n = []\n\nif args.epoch:\n data_path1 = '{}/{}/seed{}/itr_{}.pkl'.format(pre_dir,args.p1,args.seed,args.epoch)\n data_path2 = '{}/{}/seed{}/itr_{}.pkl'.format(pre_dir,args.p2,args.seed,args.epoch)\nelse:\n data_path1 = '{}/{}/seed{}/params.pkl'.format(pre_dir,args.p1,args.seed)\n data_path2 = '{}/{}/seed{}/params.pkl'.format(pre_dir,args.p2,args.seed)\n\ndata1 = torch.load(data_path1,map_location='cpu')\npolicy_n1 = data1['trainer/policy_n']\nif not args.rand:\n if isinstance(policy_n1[0],TanhGaussianPolicy):\n policy_n1 = [MakeDeterministic(policy) for policy in policy_n1]\n elif isinstance(policy_n1[0],GumbelSoftmaxMlpPolicy):\n policy_n1 = [ArgmaxDiscretePolicy(policy,use_preactivation=True) for policy in policy_n1]\n\ndata2 = torch.load(data_path2,map_location='cpu')\npolicy_n2 = data2['trainer/policy_n']\nif not args.rand:\n if isinstance(policy_n2[0],TanhGaussianPolicy):\n policy_n2 = [MakeDeterministic(policy) for policy in policy_n2]\n elif isinstance(policy_n2[0],GumbelSoftmaxMlpPolicy):\n policy_n2 = [ArgmaxDiscretePolicy(policy,use_preactivation=True) for policy in policy_n2]\n\nimport gym\nimport robosumo.envs\nfrom robosumo_env_wrapper import RoboSumoEnv\nworld_args=dict(\n reward_scale=args.rs,\n)\nenv = RoboSumoEnv(gym.make('RoboSumo-{}-vs-{}-v0'.format(args.exp_name,args.exp_name)),\n **world_args)\no_n = env.reset()\nenv.render('human')\nnum_agent = env.num_agent\nfor agent in env.agents:\n agent._adjust_z = -0.5\n\nmax_path_length = args.mpl\npath_length = 0\ndone = np.array([False]*num_agent)\nc_r = np.zeros(num_agent)\nwhile True:\n path_length += 1\n a1, _ = policy_n1[0].get_action(o_n[0])\n a2, _ = policy_n2[1].get_action(o_n[1])\n # a1 = np.zeros_like(a1)\n # a2 = np.zeros_like(a1)\n a_n = [a1, a2]\n o_n, r_n, done, _ = env.step(a_n)\n c_r += r_n\n env.render('human')\n print(\"step: \",path_length)\n print(\"a: \",a_n)\n # print(\"o: \",o_n)\n # print('r: ',r_n)\n print(done)\n time.sleep(0.1)\n if path_length > max_path_length or done.all():\n print('c_r: ',c_r)\n path_length = 0\n done = np.array([False]*num_agent)\n c_r = np.zeros(num_agent)\n o_n = env.reset()\n pdb.set_trace()\n\n",
"import random\nimport itertools\nimport numpy as np\nfrom gym import spaces\n\nfrom traffic.traffic_env import TrafficEnv\nfrom traffic.road import Road, RoadSegment\nfrom traffic.car import Car\nfrom traffic.drivers.driver import Driver, XYSeperateDriver\nfrom traffic.drivers.oned_drivers import IDMDriver, PDDriver\nfrom traffic.actions.trajectory_accel_action import TrajectoryAccelAction\nfrom traffic.constants import *\n\nclass YNYDriver(XYSeperateDriver):\n def __init__(self, yld=True, t1=1.0, t2=0., \n s_min=0., v_min=0.5,\n v_yld=3., v_nyld=6., \n s_yld=3., s_nyld=0.5,\n **kwargs):\n self.yld = yld\n self.t1 = t1\n self.t2 = t2\n self.s_min = s_min\n self.v_min = v_min\n self.v_yld = v_yld\n self.v_nyld = v_nyld\n self.s_yld = s_yld\n self.s_nyld = s_nyld\n self.intention = 0 # 0: noraml drive; 1: yield 2: not yield\n super(YNYDriver, self).__init__(**kwargs)\n\n def set_yld(self, yld):\n self.yld = yld\n\n def observe(self, cars, road):\n s = cars[0].position[0] - self.car.position[0]\n s = s * self.x_driver.direction\n t = self.car.get_distance(cars[0],1)\n ego_vy = cars[0].velocity[1] * np.sign(self.car.position[1]-cars[0].position[1])\n # print(\"t: \",t, self.t1, self.t2)\n if self.yld:\n self.x_driver.set_v_des(self.v_yld)\n self.x_driver.s_des = self.s_yld\n else:\n self.x_driver.set_v_des(self.v_nyld)\n self.x_driver.s_des = self.s_nyld\n if (s < self.s_min) or (t > self.t1)\\\n or ((ego_vy <= self.v_min) and (t > self.t2)): # normal drive\n self.x_driver.observe(cars[1:], road)\n self.intention = 0\n else:\n if self.yld: # yield\n self.x_driver.min_overlap = self.t1\n self.x_driver.observe(cars, road)\n self.intention = 1\n else: # not yield\n self.x_driver.observe(cars[1:], road)\n self.intention = 2\n\n self.y_driver.observe(cars, road)\n\n def setup_render(self, viewer):\n if self.yld:\n self.car._color = [*GREEN_COLORS[0],0.5]\n else:\n self.car._color = [*RED_COLORS[0],0.5]\n self.car._arr_color = [0.8, 0.8, 0.8, 0.5]\n\n def update_render(self, camera_center):\n if self.yld:\n self.car._color = [*GREEN_COLORS[0],0.5]\n else:\n self.car._color = [*RED_COLORS[0],0.5]\n self.car._arr_color = [0.8, 0.8, 0.8, 0.5]\n\nclass EgoTrajectory:\n def xy_to_traj(self, pos):\n x, y = pos[0], pos[1]\n r = 6 # 4.5\n if y < 0.:\n s = y\n t = -x\n theta = np.pi/2\n curv = 0.\n elif x > r:\n s = r*np.pi/2. 
+ x - r\n t = y - r\n theta = 0.\n curv = 0.\n else:\n theta = np.arctan2(r-x ,y)\n curv = 1./r\n s = r*(np.pi/2.-theta)\n t = np.sqrt((r-x)**2+(y)**2) - r\n\n return s, t, theta, curv\n\n def traj_to_xy(self, pos):\n s, t = pos[0], pos[1]\n r = 6 # 4.5\n if s < 0.:\n x = -t\n y = s\n theta = np.pi/2\n curv = 0.\n elif s > r*np.pi/2.:\n x = r + s - r*np.pi/2.\n y = r + t\n theta = 0.\n curv = 0.\n else:\n theta = np.pi/2 - s/r\n curv = 1./r\n x = r - (r+t)*np.sin(theta)\n y = (r+t)*np.cos(theta)\n\n return x, y, theta, curv\n\nclass EgoDriver(Driver):\n def __init__(self, \n trajectory=None, \n v_des=0.0,\n t_des=0.0,\n k_s_p=2.0,\n k_t_p=2.0,\n k_t_d=2.0,\n sigma=0.0, \n as_max=3.0,\n at_max=3.0,\n as_max_safe=6.0,\n at_max_safe=6.0,\n concern_distance=1.0,\n safe_distance=0.5,\n safe_speed=1.0,\n **kwargs):\n\n self.trajectory = trajectory\n self.v_des = v_des\n self.t_des = t_des\n self.k_s_p = k_s_p\n self.k_t_p = k_t_p\n self.k_t_d = k_t_d\n self.as_max = as_max\n self.at_max = at_max\n self.as_max_safe = as_max_safe\n self.at_max_safe = at_max_safe\n\n self.a_s = None\n self.a_t = None\n super(EgoDriver, self).__init__(**kwargs)\n # np.sqrt(self.car.length**2+self.car.width**2)/2\n self.concern_distance = concern_distance\n self.safe_distance = safe_distance\n self.safe_speed = safe_speed\n self.k_d_safe = 5.0\n self.k_v_safe = 5.0 # 2.0\n\n def set_trajectory(self, trajectory):\n self.trajectory = trajectory\n\n def observe(self, cars, road):\n s, t, theta, curv = self.trajectory.xy_to_traj(self.car.position)\n v_x, v_y = self.car.velocity[0], self.car.velocity[1]\n v_s = v_x*np.cos(theta) + v_y*np.sin(theta)\n v_t = -v_x*np.sin(theta) + v_y*np.cos(theta)\n\n self.a_s = self.k_s_p*(self.v_des-v_s)\n self.a_t = self.k_t_p*(self.t_des-t) - self.k_t_d*v_t\n self.a_s = np.clip(self.a_s,-self.as_max,self.as_max)\n self.a_t = np.clip(self.a_t,-self.at_max,self.at_max)\n\n # safety check\n a_x_safe = 0.\n a_y_safe = 0.\n unsafe = False\n for cid, car in enumerate(cars):\n if car is self.car:\n continue\n else:\n p1, p2 = self.car.get_closest_points(car)\n distance = np.linalg.norm(p1-p2)\n direction = (p1-p2)/distance\n v_rel = self.car.velocity - car.velocity\n speed_rel = np.sum(v_rel * direction)\n # print(distance)\n if distance < self.concern_distance:\n if distance < self.safe_distance:\n unsafe = True\n elif speed_rel < -self.safe_speed:\n unsafe = True\n if unsafe:\n # print('unsafe!')\n self.a_s = -self.k_v_safe * v_s\n self.a_t = -self.k_v_safe * v_t\n self.a_s = np.clip(self.a_s,-self.as_max_safe,self.as_max_safe)\n self.a_t = np.clip(self.a_t,-self.at_max_safe,self.at_max_safe)\n\n def get_action(self):\n return TrajectoryAccelAction(self.a_s, self.a_t, self.trajectory)\n\nclass TIntersectionMulti(TrafficEnv):\n def __init__(self,\n yld=0.5,\n observe_mode='full',\n label_mode='full',\n normalize_obs=False,\n vs_actions=[0.,0.5,3.],\n t_actions=[0.],\n desire_speed=3.,\n driver_sigma = 0.,\n speed_cost=0.01,\n t_cost=0.01,\n control_cost=0.01,\n collision_cost=2.,\n outroad_cost=2.,\n survive_reward=0.01,\n goal_reward=2.,\n road=Road([RoadSegment([(-100.,0.),(100.,0.),(100.,8.),(-100.,8.)]),\n RoadSegment([(-2,-10.),(2,-10.),(2,0.),(-2,0.)])]),\n left_bound = -20.,\n right_bound = 20.,\n gap_min = 3.,\n gap_max = 10.,\n max_veh_num = 12,\n num_updates=1,\n dt=0.1,\n **kwargs):\n\n self.yld = yld\n self.observe_mode = observe_mode\n self.label_mode = label_mode\n self.normalize_obs = normalize_obs\n self.vs_actions = vs_actions\n self.t_actions = t_actions\n # we use target 
value instead of target change so system is Markovian\n self.rl_actions = list(itertools.product(vs_actions,t_actions))\n self.num_updates = num_updates\n\n self.desire_speed = desire_speed\n self.speed_cost = speed_cost\n self.t_cost = t_cost\n self.control_cost = control_cost\n self.collision_cost = collision_cost\n self.outroad_cost = outroad_cost\n self.survive_reward = survive_reward\n self.goal_reward = goal_reward\n self._collision = False\n self._outroad = False\n self._goal = False\n self._intentions = []\n\n self.left_bound = left_bound\n self.right_bound = right_bound\n self.gap_min = gap_min\n self.gap_max = gap_max\n self.max_veh_num = max_veh_num\n self.label_dim = 2\n self.label_num = self.max_veh_num\n\n self.car_length=5.0\n self.car_width=2.0\n self.car_max_accel=10.0\n self.car_max_speed=40.0\n self.car_expose_level=4\n self.driver_sigma = driver_sigma\n self.s_des = 3.0\n self.s_min = 3.0\n self.min_overlap = 1.0\n\n super(TIntersectionMulti, self).__init__(\n road=road,\n cars=[],\n drivers=[],\n dt=dt,\n **kwargs,)\n\n def get_sup_labels(self):\n labels = np.array([np.nan]*self.label_num)\n i = 0\n upper_indices, lower_indices = self.get_sorted_indices()\n for indx in lower_indices:\n labels[i] = int(self._drivers[indx].yld)\n i += 1\n i = int(self.max_veh_num/2)\n for indx in upper_indices:\n labels[i] = int(self._drivers[indx].yld)\n i += 1\n return labels\n\n def update(self, action):\n # recorder intentios at the begining\n self._sup_labels = self.get_sup_labels()\n\n rl_action = self.rl_actions[action]\n self._drivers[0].v_des = rl_action[0]\n self._drivers[0].t_des = rl_action[1]\n\n self._goal = False\n self._collision = False\n self._outroad = False\n for _ in range(self.num_updates):\n for driver in self._drivers:\n driver.observe(self._cars, self._road)\n self._actions = [driver.get_action() for driver in self._drivers]\n [action.update(car, self.dt) for (car, action) in zip(self._cars, self._actions)]\n\n ego_car = self._cars[0]\n for car in self._cars[1:]:\n if ego_car.check_collision(car):\n self._collision = True\n return\n\n if not self._road.is_in(ego_car):\n self._outroad = True\n return\n\n if (ego_car.position[0] > 8.) \\\n and (ego_car.position[1] > 5.) \\\n and (ego_car.position[1] < 7.):\n self._goal = True\n return\n\n # remove cars that are out-of bound\n for car, driver in zip(self._cars[1:],self._drivers[1:]):\n if(car.position[1] < 4.) and (car.position[0] < self.left_bound):\n self.remove_car(car, driver)\n elif(car.position[1] > 4.) and (car.position[0] > self.right_bound):\n self.remove_car(car, driver)\n\n # add cars when there is enough space\n min_upper_x = np.inf\n max_lower_x = -np.inf\n for car in self._cars[1:]:\n if (car.position[1] < 4.) and (car.position[0] > max_lower_x):\n max_lower_x = car.position[0]\n if (car.position[1] > 4.) 
and (car.position[0] < min_upper_x):\n min_upper_x = car.position[0]\n if max_lower_x < (self.right_bound - np.random.rand()*(self.gap_max-self.gap_min) - self.gap_min - self.car_length):\n v_des = self.desire_speed\n p_des = 2.\n direction = -1\n x = self.right_bound\n car, driver = self.add_car(0, x, 2., -self.desire_speed, 0., v_des, p_des, direction, np.pi)\n if hasattr(self, 'viewer') and self.viewer:\n car.setup_render(self.viewer)\n driver.setup_render(self.viewer)\n if min_upper_x > (self.left_bound + np.random.rand()*(self.gap_max-self.gap_min) + self.gap_min + self.car_length):\n v_des = self.desire_speed\n p_des = 6.\n direction = 1\n x = self.left_bound\n car, driver = self.add_car(0, x, 6., self.desire_speed, 0., v_des, p_des, direction, 0.)\n if hasattr(self, 'viewer') and self.viewer:\n car.setup_render(self.viewer)\n driver.setup_render(self.viewer)\n\n def is_terminal(self):\n return (self._collision or self._outroad or self._goal)\n\n def get_info(self):\n info = {}\n info['sup_labels'] = np.copy(self._sup_labels)\n\n if self._collision:\n info['event']='collision'\n elif self._outroad:\n info['event']='outroad'\n elif self._goal:\n info['event']='goal'\n else:\n info['event']='nothing'\n\n return info\n\n def observe(self):\n obs = np.zeros(int(4*self.max_veh_num+4))\n obs[:2] = self._cars[0].position\n obs[2:4] = self._cars[0].velocity\n upper_indices, lower_indices = self.get_sorted_indices()\n i = 4\n for indx in lower_indices:\n obs[i:i+2] = self._cars[indx].position - self._cars[0].position\n obs[i+2:i+4] = self._cars[indx].velocity - self._cars[0].velocity\n i += 4\n i = int(4 + self.max_veh_num/2*4)\n for indx in upper_indices:\n obs[i:i+2] = self._cars[indx].position - self._cars[0].position\n obs[i+2:i+4] = self._cars[indx].velocity - self._cars[0].velocity\n i += 4\n if self.normalize_obs:\n obs[0::4] = obs[0::4]/self.right_bound\n obs[1::4] = obs[1::4]/self.right_bound\n obs[2::4] = obs[2::4]/self.desire_speed\n obs[3::4] = obs[3::4]/self.desire_speed\n obs = np.copy(obs)\n return obs\n\n @property\n def observation_space(self):\n low = -np.ones(int(4*self.max_veh_num+4))\n high = np.ones(int(4*self.max_veh_num+4))\n return spaces.Box(low=low, high=high, dtype=np.float32)\n\n @property\n def action_space(self):\n return spaces.Discrete(len(self.rl_actions))\n\n def get_reward(self):\n reward = 0.\n action = self._actions[0]\n ego_car = self._cars[0]\n s, t, theta, curv = self._drivers[0].trajectory.xy_to_traj(ego_car.position)\n v_x, v_y = ego_car.velocity[0], ego_car.velocity[1]\n v_s = v_x*np.cos(theta) + v_y*np.sin(theta)\n v_t = -v_x*np.sin(theta) + v_y*np.cos(theta)\n\n speed_cost = -np.abs(self.desire_speed-v_s)/self.desire_speed\n reward += self.speed_cost*speed_cost\n\n t_cost = -np.abs(t)/(np.max(self.t_actions)+1e-3)\n reward += self.t_cost*t_cost\n\n control_cost = 0. 
# TODO\n reward += self.control_cost*control_cost\n\n if self._collision:\n reward -= self.collision_cost\n elif self._outroad:\n reward -= self.outroad_cost\n elif self._goal:\n reward += self.goal_reward\n else:\n reward += self.survive_reward\n # print(speed_cost, t_cost, control_cost, reward)\n return reward\n\n def remove_car(self, car, driver):\n self._cars.remove(car)\n self._drivers.remove(driver)\n if hasattr(self, 'viewer') and self.viewer:\n car.remove_render(self.viewer)\n driver.remove_render(self.viewer)\n\n def add_car(self, idx, x, y, vx, vy, v_des, p_des, direction, theta):\n car = Car(idx=idx, length=self.car_length, width=self.car_width, color=random.choice(RED_COLORS),\n max_accel=self.car_max_accel, max_speed=self.car_max_speed,\n expose_level=self.car_expose_level)\n driver = YNYDriver(idx=idx, car=car, dt=self.dt,\n x_driver=IDMDriver(idx=idx, car=car, sigma=self.driver_sigma, s_des=self.s_des, s_min=self.s_min, axis=0, min_overlap=self.min_overlap, dt=self.dt), \n y_driver=PDDriver(idx=idx, car=car, sigma=0., axis=1, dt=self.dt)) \n car.set_position(np.array([x, y]))\n car.set_velocity(np.array([vx, vy]))\n car.set_rotation(theta)\n driver.x_driver.set_v_des(v_des)\n driver.x_driver.set_direction(direction)\n driver.y_driver.set_p_des(p_des)\n if np.random.rand() < self.yld:\n driver.set_yld(True)\n else:\n driver.set_yld(False)\n\n self._cars.append(car)\n self._drivers.append(driver)\n return car, driver\n\n def get_sorted_indices(self):\n # return indices of all other vehicles from left to right\n upper_indices, upper_xs = [], []\n lower_indices, lower_xs = [], []\n for indx,car in enumerate(self._cars[1:]):\n if car.position[1] > 4.:\n upper_indices.append(indx+1)\n upper_xs.append(car.position[0])\n else:\n lower_indices.append(indx+1)\n lower_xs.append(car.position[0])\n upper_indices = np.array(upper_indices)[np.argsort(upper_xs)]\n lower_indices = np.array(lower_indices)[np.argsort(lower_xs)]\n return upper_indices, lower_indices\n\n def get_important_indices(self):\n # return indices of 4 other vehicles that are closest to ego\n # on 4 directions\n ego_x = self._cars[0].position[0]\n min_ll, min_lr, min_ul, min_ur = np.inf, np.inf, np.inf, np.inf\n ind_ll, ind_lr, ind_ul, ind_ur = None, None, None, None\n for idx,car in enumerate(self._cars[1:]):\n x, y = car.position\n if y < 4.:\n if (x <= ego_x) and (ego_x - x < min_ll):\n min_ll = ego_x - x\n ind_ll = idx + 1\n elif (x > ego_x) and (x - ego_x < min_lr):\n min_lr = x - ego_x\n ind_lr = idx + 1\n else:\n if (x < ego_x) and (ego_x - x < min_ul):\n min_ul = ego_x - x\n ind_ul = idx + 1\n elif (x >= ego_x) and (x - ego_x < min_ur):\n min_ur = x - ego_x\n ind_ur = idx + 1\n return [ind_ll, ind_lr, ind_ul, ind_ur]\n\n def _reset(self):\n self._collision = False\n self._outroad = False\n self._goal = False\n\n self._cars, self._drivers = [], []\n car = Car(idx=0, length=self.car_length, width=self.car_width, color=random.choice(BLUE_COLORS),\n max_accel=self.car_max_accel, max_speed=self.car_max_speed,\n expose_level=self.car_expose_level)\n driver = EgoDriver(trajectory=EgoTrajectory(),idx=0,car=car,dt=self.dt)\n car.set_position(np.array([0., -5.0]))\n car.set_velocity(np.array([0., 0.]))\n car.set_rotation(np.pi/2.)\n driver.v_des = 0.\n driver.t_des = 0.\n self._cars.append(car)\n self._drivers.append(driver)\n # randomly generate surrounding cars and drivers\n idx = 1\n # upper lane\n x = self.left_bound + np.random.rand()*(self.gap_max-self.gap_min)\n while (x < self.right_bound):\n v_des = 
self.desire_speed\n p_des = 6.\n direction = 1\n self.add_car(idx, x, 6., self.desire_speed, 0., v_des, p_des, direction, 0.)\n x += (np.random.rand()*(self.gap_max-self.gap_min) + self.gap_min + self.car_length)\n idx += 1\n # lower lane\n x = self.right_bound - np.random.rand()*(self.gap_max-self.gap_min)\n while (x > self.left_bound):\n v_des = self.desire_speed\n p_des = 2.\n direction = -1\n self.add_car(idx, x, 2., -self.desire_speed, 0., v_des, p_des, direction, np.pi)\n x -= (np.random.rand()*(self.gap_max-self.gap_min) + self.gap_min + self.car_length)\n idx += 1\n\n self._sup_labels = self.get_sup_labels()\n return None\n\n def setup_viewer(self):\n from traffic import rendering\n self.viewer = rendering.Viewer(1200, 800)\n self.viewer.set_bounds(-30.0, 30.0, -20.0, 20.0)\n\n def update_extra_render(self, extra_input):\n t1 = self._drivers[1].t1\n t2 = self._drivers[1].t2\n \n start = np.array([self.left_bound,1.-t1]) - self.get_camera_center()\n end = np.array([self.right_bound,1.-t1]) - self.get_camera_center()\n attrs = {\"color\":(1.,1.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n start = np.array([self.left_bound,1.-t2]) - self.get_camera_center()\n end = np.array([self.right_bound,1.-t2]) - self.get_camera_center()\n attrs = {\"color\":(1.,0.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n start = np.array([self.left_bound,3.+t1]) - self.get_camera_center()\n end = np.array([self.right_bound,3.+t1]) - self.get_camera_center()\n attrs = {\"color\":(1.,1.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n start = np.array([self.left_bound,3.+t2]) - self.get_camera_center()\n end = np.array([self.right_bound,3.+t2]) - self.get_camera_center()\n attrs = {\"color\":(1.,0.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n\n start = np.array([self.left_bound,5.-t1]) - self.get_camera_center()\n end = np.array([self.right_bound,5.-t1]) - self.get_camera_center()\n attrs = {\"color\":(1.,1.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n start = np.array([self.left_bound,5.-t2]) - self.get_camera_center()\n end = np.array([self.right_bound,5.-t2]) - self.get_camera_center()\n attrs = {\"color\":(1.,0.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n start = np.array([self.left_bound,7.+t1]) - self.get_camera_center()\n end = np.array([self.right_bound,7.+t1]) - self.get_camera_center()\n attrs = {\"color\":(1.,1.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n start = np.array([self.left_bound,7.+t2]) - self.get_camera_center()\n end = np.array([self.right_bound,7.+t2]) - self.get_camera_center()\n attrs = {\"color\":(1.,0.,0.),\"linewidth\":2.}\n self.viewer.draw_line(start, end, **attrs)\n\n if extra_input:\n if ('attention_weight' in extra_input.keys()) and (extra_input['attention_weight'] is not None):\n edge_index = extra_input['attention_weight'][0]\n attention_weight = extra_input['attention_weight'][1]\n upper_indices, lower_indices = self.get_sorted_indices()\n car_indices = [np.nan]*(1+self.max_veh_num)\n car_indices[0] = 0\n car_indices[1:len(lower_indices)+1] = lower_indices[:]\n car_indices[int(self.max_veh_num/2)+1:int(self.max_veh_num/2)+1+len(upper_indices)] = upper_indices[:]\n starts, ends, attentions = [], [], []\n for i in range(edge_index.shape[1]):\n if np.isnan(car_indices[edge_index[0,i]]) or np.isnan(car_indices[edge_index[1,i]]):\n pass\n elif car_indices[edge_index[1,i]] == 0:\n attention = 
attention_weight[i].item()\n attentions.append(attention)\n car_i = car_indices[edge_index[0,i]]\n car_j = car_indices[edge_index[1,i]]\n start = self._cars[car_i].position - self.get_camera_center()\n end = self._cars[car_j].position - self.get_camera_center()\n starts.append(start)\n ends.append(end)\n rank_index = np.argsort(attentions)\n starts = np.array(starts)[rank_index]\n ends = np.array(ends)[rank_index]\n attentions = np.array(attentions)[rank_index]\n assert np.isclose(np.sum(attentions),1.)\n for start, end, attention in zip(starts[-3:],ends[-3:],attentions[-3:]):\n attrs = {\"color\":(1.,0.,1.),\"linewidth\":10.*attention}\n if (start == end).all():\n from traffic.rendering import make_circle, _add_attrs\n circle = make_circle(radius=1., res=15, filled=False, center=start)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle)\n else:\n self.viewer.draw_line(start, end, **attrs)\n if ('intention' in extra_input.keys()) and (extra_input['intention'] is not None):\n car_indices = [np.nan]*self.label_num\n if self.label_mode == 'full':\n upper_indices, lower_indices = self.get_sorted_indices()\n car_indices[0:len(lower_indices)] = lower_indices[:]\n car_indices[int(self.max_veh_num/2):int(self.max_veh_num/2)+len(upper_indices)] = upper_indices[:]\n elif self.label_mode == 'important':\n important_indices = self.get_important_indices()\n car_indices = important_indices\n for car_ind,intention in zip(car_indices,extra_input['intention']):\n if not np.isnan(car_ind):\n from traffic.rendering import make_circle, _add_attrs\n start = self._cars[car_ind].position - self.get_camera_center()\n attrs = {\"color\":(intention[0],intention[1],0.)}\n circle = make_circle(radius=0.5, res=15, filled=True, center=start)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle) \n\nif __name__ == '__main__':\n import time\n import pdb\n env = TIntersectionMulti(num_updates=1, yld=0.5, driver_sigma=0.1, \n normalize_obs=True,\n observe_mode='important',\n label_mode='important')\n obs = env.reset()\n img = env.render()\n done = False\n maximum_step = 200\n t = 0\n cr = 0.\n actions = [0]*(2*maximum_step)\n # actions = np.load('/Users/xiaobaima/Dropbox/SISL/rlkit/tests/Traffic/Data/t_intersection/MyDQNcg0.1expl0.2/seed0/failure1.npy')\n while True: #not done: \n # pdb.set_trace()\n # if t >= actions.shape[0]:\n # action = 7\n # else:\n # action = actions[t][0]\n # action = actions[t]\n # action = np.random.randint(env.action_space.n)\n action = input(\"Action\\n\")\n action = int(action)\n while action < 0:\n t = 0\n cr = 0.\n env.reset()\n env.render()\n action = input(\"Action\\n\")\n action = int(action)\n t += 1\n obs, reward, done, info = env.step(action)\n print('t: ', t)\n print('action: ',action)\n print('obs: ', obs)\n print('reward: ', reward)\n print('info: ', info)\n cr += reward\n env.render()\n time.sleep(0.1)\n if (t > maximum_step) or done:\n print('cr: ',cr)\n pdb.set_trace()\n # if env._collision or env._outroad:\n # pdb.set_trace()\n t = 0\n cr = 0.\n env.reset()\n env.render()\n env.close()\n",
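A quick illustrative consistency check for the EgoTrajectory class above: on the curved segment (r = 6), traj_to_xy and xy_to_traj should be mutual inverses, which is what makes the s/t terms in get_reward well defined.

import numpy as np

traj = EgoTrajectory()                    # as defined above
s, t = 4.0, 0.3                           # a point on the quarter-circle arc
x, y, theta, curv = traj.traj_to_xy((s, t))
s2, t2, theta2, curv2 = traj.xy_to_traj((x, y))
assert np.isclose(s, s2) and np.isclose(t, t2) and np.isclose(theta, theta2)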
"import numpy as np\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\nfrom torch.distributions import Categorical\n\nfrom rlkit.policies.base import Policy\nfrom rlkit.torch import pytorch_util as ptu\nfrom rlkit.torch.core import eval_np, np_ify, torch_ify\nfrom rlkit.torch.data_management.normalizer import TorchFixedNormalizer\nfrom rlkit.torch.modules import LayerNorm\nfrom rlkit.torch.policies.deterministic_policies import MlpPolicy\n\nclass SupSepSoftmaxLSTMPolicy(Policy, nn.Module):\n \"\"\"\n LSTM policy with Categorical distributon using softmax\n \"\"\"\n\n def __init__(\n self,\n obs_dim,\n action_dim,\n policy,\n sup_learner,\n label_num,\n label_dim,\n ):\n super().__init__()\n self.policy = policy\n self.sup_learner = sup_learner\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.label_num = label_num\n self.label_dim = label_dim\n\n def to_onehot_labels(self, labels):\n if labels.shape[-1] != self.label_dim:\n labels_onehot = torch.zeros(*labels.shape,self.label_dim).to(ptu.device)\n labels_onehot.scatter_(-1,labels.unsqueeze(-1).long(),1.)\n else:\n labels_onehot = labels\n return labels_onehot\n\n def to_policy_inputs(self, obs_action, labels, sup_latent, return_info=False):\n obs_flat, prev_actions = obs_action\n obs = torch.reshape(obs_flat,(*obs_flat.shape[:-1], self.label_num+1, -1))\n valid_musk = (torch.sum(torch.abs(obs),dim=-1) != 0)\n valid_musk = torch.index_select(valid_musk,-1,torch.arange(1,self.label_num+1).to(ptu.device))\n\n with torch.no_grad():\n dist, sup_info = self.get_sup_distribution(obs_action, sup_latent=sup_latent, return_info=True)\n if labels is None:\n labels = torch.argmax(dist.probs, -1)\n else:\n labels = labels.clone()\n # valid_musk2 = ~torch.isnan(labels)\n # assert torch.all(torch.eq(valid_musk, valid_musk2)), \"obs label mismacth\"\n # can't check this since out-of-actual-length labels are 0\n\n labels[~valid_musk] = 0\n onehot_labels = self.to_onehot_labels(labels)\n onehot_labels[~valid_musk] = 0.\n ego_labels = torch.zeros(*onehot_labels.shape[:-2],1,self.label_dim).to(ptu.device)\n onehot_labels = torch.cat((ego_labels,onehot_labels),-2)\n\n obs = torch.cat((obs,onehot_labels),dim=-1).reshape(*obs.shape[:-2],-1)\n\n policy_inputs = (obs, prev_actions)\n if return_info:\n return policy_inputs, sup_info\n else:\n return policy_inputs\n\n def forward(self, obs_action, labels, latent=None, sup_latent=None, return_info=False):\n if latent is None:\n latent = self.policy.latent_0\n if sup_latent is None:\n sup_latent = self.sup_learner.latent_0\n policy_inputs, sup_info = self.to_policy_inputs(obs_action, labels=labels, sup_latent=sup_latent, return_info=True)\n pis, policy_info = self.policy(policy_inputs, latent=latent, return_info=True)\n info = policy_info\n info['sup_preactivation'] = sup_info['preactivation']\n info['sup_latent'] = sup_info['latent']\n\n if return_info:\n return pis, info\n else:\n return pis\n\n def get_distribution(self, obs_action, latent=None, sup_latent=None, labels=None):\n _, info = self.forward(obs_action, latent=latent, sup_latent=sup_latent, labels=labels, return_info=True)\n logits = info['preactivation']\n return Categorical(logits=logits)\n\n def log_prob(self, obs_action, action, latent=None, sup_latent=None, labels=None):\n return self.get_distribution(obs_action, latent=latent, sup_latent=sup_latent, labels=labels).log_prob(action.squeeze(-1))\n\n def get_action(self, obs, labels=None, deterministic=False):\n assert len(obs.shape) == 1\n assert 
(self.policy.a_p == self.sup_learner.a_p).all()\n with torch.no_grad():\n obs_action = (torch_ify(obs)[None,None,:], self.policy.a_p[None,None,:])\n if labels is not None:\n labels = torch_ify(labels)[None,None,:]\n pis, info = self.forward(obs_action, labels=labels,\n latent=self.policy.latent_p,\n sup_latent=self.sup_learner.latent_p,\n return_info=True)\n sup_probs = Categorical(logits=info['sup_preactivation']).probs\n pis = np_ify(pis[0,0,:])\n sup_probs = np_ify(sup_probs[0,0,:,:])\n if deterministic:\n action = np.argmax(pis)\n else:\n action = np.random.choice(np.arange(pis.shape[0]),p=pis)\n self.policy.a_p = torch_ify(np.array([action]))\n self.policy.latent_p = info['latent']\n self.sup_learner.a_p = torch_ify(np.array([action]))\n self.sup_learner.latent_p = info['sup_latent']\n \n return action, {'intentions': sup_probs}\n\n # def get_attention_weight(self, obs):\n # if hasattr(self.policy[0], 'attentioner'):\n # with torch.no_grad():\n # policy_inputs = eval_np(self.obs_to_policy_inputs, obs[None])\n # x, attention_weight = eval_np(self.policy[0], policy_inputs, return_attention_weights=True)\n # return attention_weight\n # else:\n # return None\n\n def get_sup_distribution(self, obs_action, sup_latent=None, return_info=False):\n _, info = self.sup_learner(obs_action, latent=sup_latent, return_info=True)\n logits = info['preactivation']\n if return_info:\n return Categorical(logits=logits), info\n else:\n return Categorical(logits=logits)\n\n def get_sup_labels(self, obs_action, sup_latent=None):\n sup_probs = self.sup_prob(obs_action, sup_latent=sup_latent)\n return torch.argmax(sup_probs,-1)\n\n def sup_log_prob(self, obs_action, label, sup_latent=None):\n return self.get_sup_distribution(obs_action, sup_latent=sup_latent).log_prob(label)\n\n def sup_prob(self, obs_action, sup_latent=None):\n return self.get_sup_distribution(obs_action, sup_latent=sup_latent).probs\n\n def reset(self):\n self.policy.reset()\n self.sup_learner.reset()\n",
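A minimal sketch of the scatter-based one-hot conversion performed by to_onehot_labels above; the shapes are illustrative (batch 1, label_num 3, label_dim 2) and the tensors stay on CPU rather than ptu.device:

import torch

labels = torch.tensor([[0, 1, 1]])            # (batch, label_num), int64
onehot = torch.zeros(*labels.shape, 2)        # (batch, label_num, label_dim)
onehot.scatter_(-1, labels.unsqueeze(-1), 1.0)
assert onehot[0, 1].tolist() == [0.0, 1.0]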
"import numpy as np\nfrom traffic.actions.action import Action\n\nclass XYAccelAction(Action):\n def __init__(self,a_x,a_y):\n self.a_x = a_x\n self.a_y = a_y\n\n def update(self,car,dt):\n position_old = car.position\n velocity_old = car.velocity\n rotation_old = car.rotation\n accel = np.array([self.a_x, self.a_y])\n velocity = car.set_velocity(car.velocity + accel * dt)\n car.set_position(position_old+0.5*(velocity_old+velocity)*dt)\n\n # TODO\n rotation = car.heading\n max_ang = car._max_rotation\n if abs(rotation_old) <= max_ang: # rotation close to 0\n if abs(rotation) > max_ang:\n rotation = np.clip(max_ang,-max_ang,max_ang)\n elif (rotation > 0.) and (np.pi-rotation >= max_ang):\n rotation = np.pi - max_ang\n elif (rotation < 0.) and (rotation-(-np.pi) >= max_ang):\n rotation = -np.pi + max_ang\n car.set_rotation(rotation)",
"import copy\nimport torch\nfrom collections import OrderedDict\n\nfrom rlkit.util import tensor_util as tu\nfrom rlkit.torch.vpg.ppo import PPOTrainer\nfrom rlkit.torch.vpg.util import compute_advantages, filter_valids, pad_to_last\nfrom rlkit.torch.core import torch_ify\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nimport rlkit.pythonplusplus as ppp\n\nclass PPOSupVanillaTrainer(PPOTrainer):\n \"\"\"PPO + supervised learning.\n \"\"\"\n\n def __init__(self,\n replay_buffer,\n exploration_bonus,\n sup_lr=1e-3,\n sup_batch_size=64,\n sup_train_num=1,\n **kwargs):\n super().__init__(**kwargs)\n self.replay_buffer = replay_buffer\n self.sup_batch_size = sup_batch_size\n self.sup_train_num = sup_train_num\n self.exploration_bonus = exploration_bonus\n self._sup_optimizer = torch.optim.Adam(\n self.policy.parameters(),\n lr=sup_lr)\n\n def train_once(self, paths):\n \"\"\"Train the algorithm once.\n\n Args:\n itr (int): Iteration number.\n paths (list[dict]): A list of collected paths.\n\n Returns:\n numpy.float64: Calculated mean value of undiscounted returns.\n\n \"\"\"\n\n obs, actions, rewards, returns, valids, baselines, labels = \\\n self.process_samples(paths)\n\n if self._maximum_entropy:\n policy_entropies = self._compute_policy_entropy(obs)\n rewards += self._policy_ent_coeff * policy_entropies\n advs = self._compute_advantage(rewards, valids, baselines)\n\n if self._recurrent:\n pre_actions = actions[:,:-1,:]\n policy_input = (obs,pre_actions)\n obs_input, actions_input, rewards_input, returns_input, advs_input = \\\n obs, actions, rewards, returns, advs\n labels_input = labels\n valid_mask = torch.zeros(obs.shape[0],obs.shape[1]).bool()\n for i, valid in enumerate(valids):\n valid_mask[i,:valid] = True\n else:\n obs_input = torch.cat(filter_valids(obs, valids))\n actions_input = torch.cat(filter_valids(actions, valids))\n rewards_input = torch.cat(filter_valids(rewards, valids))\n returns_input = torch.cat(filter_valids(returns, valids))\n advs_input = torch.cat(filter_valids(advs, valids))\n labels_input = torch.cat(filter_valids(labels, valids))\n policy_input = obs_input\n valid_mask = torch.ones(obs_input.shape[0]).bool()\n # (num of valid samples) x ...\n self.replay_buffer.add_batch(obs_input, actions_input, labels_input, valid_mask)\n\n with torch.no_grad():\n policy_loss_before = self._compute_loss_with_adv(\n policy_input, actions_input, rewards_input, advs_input, valid_mask)\n vf_loss_before = self._compute_vf_loss(\n obs_input, returns_input, valid_mask)\n # kl_before = self._compute_kl_constraint(obs)\n kl_before = self._compute_kl_constraint(policy_input, valid_mask)\n sup_loss_before, sup_accuracy_before = self._compute_sup_loss(obs_input, actions_input, labels_input, valid_mask)\n \n self._train(policy_input, obs_input, actions_input, rewards_input, returns_input,\n advs_input, valid_mask)\n\n for _ in range(self.sup_train_num):\n sup_batch = self.replay_buffer.random_batch(self.sup_batch_size)\n sup_loss = self._train_sup_learner(sup_batch['observations'],sup_batch['actions'],\n sup_batch['labels'],sup_batch['valids'])\n\n with torch.no_grad():\n policy_loss_after = self._compute_loss_with_adv(\n policy_input, actions_input, rewards_input, advs_input, valid_mask)\n vf_loss_after = self._compute_vf_loss(\n obs_input, returns_input, valid_mask)\n # kl_before = self._compute_kl_constraint(obs)\n kl_after = self._compute_kl_constraint(policy_input, valid_mask)\n sup_loss_after, sup_accuracy_after = self._compute_sup_loss(obs_input, actions_input, 
labels_input, valid_mask)\n policy_entropy = self._compute_policy_entropy(policy_input)\n\n if self._need_to_update_eval_statistics:\n self._need_to_update_eval_statistics = False\n self.eval_statistics['LossBefore'] = policy_loss_before.item()\n self.eval_statistics['LossAfter'] = policy_loss_after.item()\n self.eval_statistics['dLoss'] = (policy_loss_before - policy_loss_after).item()\n self.eval_statistics['KLBefore'] = kl_before.item()\n self.eval_statistics['KL'] = kl_after.item()\n self.eval_statistics['Entropy'] = policy_entropy[valid_mask].mean().item()\n\n self.eval_statistics['VF LossBefore'] = vf_loss_before.item()\n self.eval_statistics['VF LossAfter'] = vf_loss_after.item()\n self.eval_statistics['VF dLoss'] = (vf_loss_before - vf_loss_after).item()\n\n self.eval_statistics['SUP LossBefore'] = sup_loss_before.item()\n self.eval_statistics['SUP LossAfter'] = sup_loss_after.item()\n self.eval_statistics['SUP dLoss'] = (sup_loss_before - sup_loss_after).item()\n self.eval_statistics['SUP AccuracyBefore'] = sup_accuracy_before.item()\n self.eval_statistics['SUP AccuracyAfter'] = sup_accuracy_after.item()\n self.eval_statistics['SUP dAccuracy'] = (sup_accuracy_before - sup_accuracy_after).item()\n\n self._old_policy = copy.deepcopy(self.policy)\n\n def _train_sup_learner(self, observations, actions, labels, valids):\n self._sup_optimizer.zero_grad()\n sup_loss, sup_accuracy = self._compute_sup_loss(observations, actions, labels, valids)\n sup_loss.backward()\n self._sup_optimizer.step()\n return sup_loss\n\n def _compute_sup_loss(self, obs, actions, labels, valid_mask):\n obs = torch_ify(obs)\n actions = torch_ify(actions)\n valid_mask = torch_ify(valid_mask).bool()\n labels = torch_ify(labels).clone()\n valids = ~torch.isnan(labels)\n labels[~valids] = 0\n if self._recurrent:\n pre_actions = actions[:,:-1,:] \n policy_input = (obs, pre_actions)\n else:\n policy_input = obs \n # lls = self.policy.sup_log_prob(policy_input, labels)\n\n valid_num = (valid_mask.unsqueeze(-1)*valids).float().sum()\n dists = self.policy.get_sup_distribution(policy_input)\n lls = dists.log_prob(labels)\n lls[~valids] = 0\n lls[~valid_mask] = 0\n loss = -lls.sum()/valid_num\n\n accuracy = (torch.argmax(dists.probs,-1)==labels).float()\n accuracy[~valids] = 0.\n accuracy[~valid_mask] = 0.\n accuracy = accuracy.sum()/valid_num\n\n # return -lls[valid_mask].mean()\n return loss, accuracy\n\n def _add_exploration_bonus(self, paths):\n paths = copy.deepcopy(paths)\n entropy_decreases = []\n with torch.no_grad():\n for path in paths:\n for i in range(len(path['observations'])-1):\n obs1 = path['observations'][i]\n labels1 = torch.tensor(path['env_infos']['sup_labels'][i])\n valid_mask1 = ~torch.isnan(labels1)[None,:]\n entropy_1 = self.policy.get_sup_distribution(torch_ify(obs1)[None,:]).entropy()\n # if self.attention_eb: # todo\n entropy_1 = torch.mean(entropy_1[valid_mask1])\n\n obs2 = path['observations'][i+1]\n labels2 = torch.tensor(path['env_infos']['sup_labels'][i+1])\n valid_mask2 = ~torch.isnan(labels2)[None,:]\n entropy_2 = self.policy.get_sup_distribution(torch_ify(obs2)[None,:]).entropy()\n entropy_2 = torch.mean(entropy_2[valid_mask2])\n\n entropy_decrease = (entropy_1 - entropy_2).item()\n entropy_decreases.append(entropy_decrease)\n path['rewards'][i] += self.exploration_bonus*entropy_decrease\n\n if self._need_to_update_eval_statistics:\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Entropy Decrease',\n entropy_decreases,\n ))\n return paths\n\n def process_samples(self, paths):\n 
r\"\"\"Process sample data based on the collected paths.\n\n Notes: P is the maximum path length (self.max_path_length)\n\n Args:\n paths (list[dict]): A list of collected paths\n\n Returns:\n torch.Tensor: The observations of the environment\n with shape :math:`(N, P, O*)`.\n torch.Tensor: The actions fed to the environment\n with shape :math:`(N, P, A*)`.\n torch.Tensor: The acquired rewards with shape :math:`(N, P)`.\n list[int]: Numbers of valid steps in each paths.\n torch.Tensor: Value function estimation at each step\n with shape :math:`(N, P)`.\n\n \"\"\"\n if self.exploration_bonus > 0.:\n paths = self._add_exploration_bonus(paths)\n valids = torch.Tensor([len(path['actions']) for path in paths]).int()\n obs = torch.stack([\n pad_to_last(path['observations'],\n total_length=self.max_path_length,\n axis=0) for path in paths\n ])\n\n actions = torch.stack([\n pad_to_last(path['actions'],\n total_length=self.max_path_length,\n axis=0) for path in paths\n ])\n\n rewards = torch.stack([\n pad_to_last(path['rewards'].reshape(-1), total_length=self.max_path_length)\n for path in paths\n ])\n returns = torch.stack([\n pad_to_last(tu.discount_cumsum(path['rewards'].reshape(-1),\n self.discount).copy(),\n total_length=self.max_path_length) for path in paths\n ])\n # batch x label_num x label_dim\n env_infos = [ppp.list_of_dicts__to__dict_of_lists(p['env_infos']) for p in paths]\n labels = torch.stack([\n pad_to_last(env_info['sup_labels'],\n total_length=self.max_path_length,\n axis=0) for env_info in env_infos\n ])\n with torch.no_grad():\n baselines = self._value_function(obs).squeeze(-1)\n\n return obs, actions, rewards, returns, valids, baselines, labels\n\n @property\n def networks(self):\n return [\n self._value_function,\n self._old_policy,\n self.policy,\n # self.sup_learner,\n ]\n\n def get_snapshot(self):\n return dict(\n policy=self.policy,\n old_policy=self._old_policy,\n value_function=self._value_function,\n # sup_learner=self.sup_learner,\n )\n",
"from collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch import nn as nn\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.torch_rl_algorithm import TorchTrainer\n\n\nclass MASACDiscreteTrainer(TorchTrainer):\n \"\"\"\n Soft Actor Critic on Discrete Action Space\n \"\"\"\n def __init__(\n self,\n env,\n qf1_n,\n target_qf1_n,\n qf2_n,\n target_qf2_n,\n policy_n,\n online_action,\n clip_gradient=0.,\n\n discount=0.99,\n reward_scale=1.0,\n\n policy_learning_rate=1e-4,\n qf_learning_rate=1e-3,\n qf_weight_decay=0,\n target_hard_update_period=1000,\n tau=1e-2,\n use_soft_update=False,\n qf_criterion=None,\n pre_activation_weight=0.,\n use_automatic_entropy_tuning=True,\n target_entropy=None,\n optimizer_class=optim.Adam,\n\n min_q_value=-np.inf,\n max_q_value=np.inf,\n ):\n super().__init__()\n if qf_criterion is None:\n qf_criterion = nn.MSELoss()\n # qf_criterion = nn.SmoothL1Loss() # Huber Loss\n self.env = env\n self.qf1_n = qf1_n\n self.target_qf1_n = target_qf1_n\n self.qf2_n = qf2_n\n self.target_qf2_n = target_qf2_n\n self.policy_n = policy_n\n self.online_action = online_action\n self.clip_gradient = clip_gradient\n\n self.discount = discount\n self.reward_scale = reward_scale\n\n self.policy_learning_rate = policy_learning_rate\n self.qf_learning_rate = qf_learning_rate\n self.qf_weight_decay = qf_weight_decay\n self.target_hard_update_period = target_hard_update_period\n self.tau = tau\n self.use_soft_update = use_soft_update\n self.qf_criterion = qf_criterion\n self.min_q_value = min_q_value\n self.max_q_value = max_q_value\n\n self.use_automatic_entropy_tuning = use_automatic_entropy_tuning\n if self.use_automatic_entropy_tuning:\n if target_entropy:\n self.target_entropy = target_entropy\n else:\n self.target_entropy = 0.5*np.log(self.env.action_space.n) # heuristic value\n self.log_alpha_n = [ptu.zeros(1, requires_grad=True) for i in range(len(self.policy_n))]\n self.alpha_optimizer_n = [\n optimizer_class(\n [self.log_alpha_n[i]],\n lr=self.policy_learning_rate,\n ) for i in range(len(self.log_alpha_n))]\n\n self.qf1_optimizer_n = [ \n optimizer_class(\n self.qf1_n[i].parameters(),\n lr=self.qf_learning_rate,\n ) for i in range(len(self.qf1_n))]\n self.qf2_optimizer_n = [ \n optimizer_class(\n self.qf2_n[i].parameters(),\n lr=self.qf_learning_rate,\n ) for i in range(len(self.qf2_n))]\n\n self.policy_optimizer_n = [\n optimizer_class(\n self.policy_n[i].parameters(),\n lr=self.policy_learning_rate,\n ) for i in range(len(self.policy_n))]\n\n self.eval_statistics = OrderedDict()\n self._n_train_steps_total = 0\n self._need_to_update_eval_statistics = True\n\n def train_from_torch(self, batch):\n rewards_n = batch['rewards']\n terminals_n = batch['terminals']\n obs_n = batch['observations']\n actions_n = batch['actions']\n next_obs_n = batch['next_observations']\n\n batch_size = rewards_n.shape[0]\n num_agent = rewards_n.shape[1]\n whole_obs = obs_n.view(batch_size, -1)\n whole_actions = actions_n.view(batch_size, -1)\n whole_next_obs = next_obs_n.view(batch_size, -1)\n \n if self.online_action:\n online_actions_n = [self.policy_n[agent].one_hot(obs_n[:,agent,:]).detach() for agent in range(num_agent)]\n online_actions_n = torch.stack(online_actions_n) # num_agent x batch x a_dim\n online_actions_n = online_actions_n.transpose(0,1).contiguous() # batch x num_agent x a_dim \n\n next_actions_n = [self.policy_n[agent].one_hot(next_obs_n[:,agent,:]).detach() 
for agent in range(num_agent)]\n next_actions_n = torch.stack(next_actions_n) # num_agent x batch x a_dim\n next_actions_n = next_actions_n.transpose(0,1).contiguous() # batch x num_agent x a_dim\n\n for agent in range(num_agent):\n \"\"\"\n Policy operations.\n \"\"\"\n pis,info = self.policy_n[agent](obs_n[:,agent,:],return_info=True) # batch x |A|\n logits = info['preactivation']\n if self.use_automatic_entropy_tuning:\n alpha_loss = -(pis.detach() * self.log_alpha_n[agent].exp() * (torch.log(pis+1e-3) + self.target_entropy).detach()).sum(-1).mean()\n self.alpha_optimizer_n[agent].zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer_n[agent].step()\n alpha = self.log_alpha_n[agent].exp()\n else:\n alpha_loss = torch.zeros(1)\n alpha = torch.ones(1)\n\n if self.online_action:\n current_actions = online_actions_n.clone()\n else:\n current_actions = actions_n.clone()\n other_action_index = np.array([i for i in range(num_agent) if i!=agent])\n other_actions = current_actions[:,other_action_index,:]\n q1_output = self.qf1_n[agent](whole_obs, other_actions.view(batch_size, -1))\n q2_output = self.qf2_n[agent](whole_obs, other_actions.view(batch_size, -1))\n min_q_output = torch.min(q1_output,q2_output) # batch x |A|\n policy_loss = (pis*(alpha*torch.log(pis+1e-3) - min_q_output)).sum(-1).mean()\n # policy_loss = (pis*(torch.log(pis+1e-3)-torch.log(torch.softmax(min_q_output/alpha, dim=-1)+1e-3))).sum(-1).mean()\n\n \"\"\"\n Critic operations.\n \"\"\"\n # speed up computation by not backpropping these gradients\n new_pis = self.policy_n[agent](next_obs_n[:,agent,:]).detach()\n next_current_actions = next_actions_n.clone()\n next_other_actions = next_current_actions[:,other_action_index,:].detach()\n other_actions = actions_n[:,other_action_index,:].detach()\n next_target_q1_values = self.target_qf1_n[agent](\n whole_next_obs,\n next_other_actions.view(batch_size,-1),\n )\n next_target_q2_values = self.target_qf2_n[agent](\n whole_next_obs,\n next_other_actions.view(batch_size,-1),\n )\n next_target_min_q_values = torch.min(next_target_q1_values,next_target_q2_values) # batch x |A|\n next_target_q_values = (new_pis*(next_target_min_q_values - alpha * torch.log(new_pis+1e-3))).sum(-1,keepdim=True) # batch\n q_target = self.reward_scale*rewards_n[:,agent,:] + (1. - terminals_n[:,agent,:]) * self.discount * next_target_q_values\n q_target = q_target.detach()\n q_target = torch.clamp(q_target, self.min_q_value, self.max_q_value)\n\n q1_pred = torch.sum(self.qf1_n[agent](whole_obs, other_actions.view(batch_size,-1))*actions_n[:,agent,:].detach(),dim=-1,keepdim=True)\n q2_pred = torch.sum(self.qf2_n[agent](whole_obs, other_actions.view(batch_size,-1))*actions_n[:,agent,:].detach(),dim=-1,keepdim=True)\n\n qf1_loss = self.qf_criterion(q1_pred, q_target)\n qf2_loss = self.qf_criterion(q2_pred, q_target)\n \"\"\"\n Update Networks\n \"\"\"\n\n self.policy_optimizer_n[agent].zero_grad()\n policy_loss.backward()\n if self.clip_gradient > 0.:\n nn.utils.clip_grad_norm_(self.policy_n[agent].parameters(), self.clip_gradient)\n policy_grad_norm = torch.tensor(0.).to(ptu.device) \n for p in self.policy_n[agent].parameters():\n param_norm = p.grad.data.norm(2)\n policy_grad_norm += param_norm.item() ** 2\n policy_grad_norm = policy_grad_norm ** (1. 
/ 2)\n self.policy_optimizer_n[agent].step()\n\n self.qf1_optimizer_n[agent].zero_grad()\n qf1_loss.backward()\n if self.clip_gradient > 0.:\n nn.utils.clip_grad_norm_(self.qf1_n[agent].parameters(), self.clip_gradient)\n qf1_grad_norm = torch.tensor(0.).to(ptu.device) \n for p in self.qf1_n[agent].parameters():\n param_norm = p.grad.data.norm(2)\n qf1_grad_norm += param_norm.item() ** 2\n qf1_grad_norm = qf1_grad_norm ** (1. / 2)\n self.qf1_optimizer_n[agent].step()\n\n self.qf2_optimizer_n[agent].zero_grad()\n qf2_loss.backward()\n if self.clip_gradient > 0.:\n nn.utils.clip_grad_norm_(self.qf2_n[agent].parameters(), self.clip_gradient)\n qf2_grad_norm = torch.tensor(0.).to(ptu.device) \n for p in self.qf2_n[agent].parameters():\n param_norm = p.grad.data.norm(2)\n qf2_grad_norm += param_norm.item() ** 2\n qf2_grad_norm = qf2_grad_norm ** (1. / 2)\n self.qf2_optimizer_n[agent].step()\n\n \"\"\"\n Save some statistics for eval using just one batch.\n \"\"\"\n if self._need_to_update_eval_statistics:\n self.eval_statistics['QF1 Loss {}'.format(agent)] = np.mean(ptu.get_numpy(qf1_loss))\n self.eval_statistics['QF1 Gradient {}'.format(agent)] = np.mean(ptu.get_numpy(\n qf1_grad_norm\n ))\n self.eval_statistics['QF2 Loss {}'.format(agent)] = np.mean(ptu.get_numpy(qf2_loss))\n self.eval_statistics['QF2 Gradient {}'.format(agent)] = np.mean(ptu.get_numpy(\n qf2_grad_norm\n ))\n self.eval_statistics['Policy Loss {}'.format(agent)] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n self.eval_statistics['Policy Gradient {}'.format(agent)] = np.mean(ptu.get_numpy(\n policy_grad_norm\n ))\n self.eval_statistics['Alpha Loss {}'.format(agent)] = np.mean(ptu.get_numpy(\n alpha_loss\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q1 Predictions {}'.format(agent),\n ptu.get_numpy(q1_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q2 Predictions {}'.format(agent),\n ptu.get_numpy(q2_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Targets {}'.format(agent),\n ptu.get_numpy(q_target),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Pis {}'.format(agent),\n ptu.get_numpy(pis),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Alpha {}'.format(agent),\n ptu.get_numpy(alpha),\n ))\n \n self._need_to_update_eval_statistics = False\n self._update_target_networks()\n self._n_train_steps_total += 1\n\n def _update_target_networks(self):\n for qf1, target_qf1, qf2, target_qf2 in \\\n zip(self.qf1_n, self.target_qf1_n, self.qf2_n, self.target_qf2_n):\n if self.use_soft_update:\n ptu.soft_update_from_to(qf1, target_qf1, self.tau)\n ptu.soft_update_from_to(qf2, target_qf2, self.tau)\n else:\n if self._n_train_steps_total % self.target_hard_update_period == 0:\n ptu.copy_model_params_from_to(qf1, target_qf1)\n ptu.copy_model_params_from_to(qf2, target_qf2)\n\n def get_diagnostics(self):\n return self.eval_statistics\n\n def end_epoch(self, epoch):\n self._need_to_update_eval_statistics = True\n\n @property\n def networks(self):\n return [\n *self.policy_n,\n *self.qf1_n,\n *self.qf2_n,\n *self.target_qf1_n,\n *self.target_qf2_n,\n ]\n\n def get_snapshot(self):\n return dict(\n qf1_n=self.qf1_n,\n target_qf1_n=self.target_qf1_n,\n qf2_n=self.qf2_n,\n target_qf2_n=self.target_qf2_n,\n trained_policy_n=self.policy_n,\n )\n",
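`_update_target_networks` above delegates to `ptu.soft_update_from_to` and `ptu.copy_model_params_from_to`. A minimal sketch of the soft (Polyak) variant, assuming plain PyTorch modules, is below; the hard variant simply copies parameters outright every `target_hard_update_period` steps:

```python
# Sketch of a Polyak ("soft") target-network update:
# target <- tau * source + (1 - tau) * target, applied parameter-wise.
import torch
import torch.nn as nn

def soft_update_from_to(source: nn.Module, target: nn.Module, tau: float):
    with torch.no_grad():
        for p_src, p_tgt in zip(source.parameters(), target.parameters()):
            p_tgt.mul_(1.0 - tau).add_(tau * p_src)

qf = nn.Linear(4, 2)
target_qf = nn.Linear(4, 2)
target_qf.load_state_dict(qf.state_dict())  # targets start identical
soft_update_from_to(qf, target_qf, tau=1e-2)  # then track the critic slowly
```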
"from collections import OrderedDict\n\nimport numpy as np\n\nfrom rlkit.data_management.replay_buffer import ReplayBuffer\n\n\nclass SimpleReplayBuffer(ReplayBuffer):\n\n def __init__(\n self,\n max_replay_buffer_size,\n observation_dim,\n action_dim,\n env_info_sizes,\n store_raw_action=False,\n ):\n self._observation_dim = observation_dim\n self._action_dim = action_dim\n self._max_replay_buffer_size = max_replay_buffer_size\n self._observations = np.zeros((max_replay_buffer_size, observation_dim))\n # It's a bit memory inefficient to save the observations twice,\n # but it makes the code *much* easier since you no longer have to\n # worry about termination conditions.\n self._next_obs = np.zeros((max_replay_buffer_size, observation_dim))\n self._actions = np.zeros((max_replay_buffer_size, action_dim))\n # Xiaobai: store raw action if set\n self._store_raw_action = store_raw_action\n if store_raw_action:\n self._raw_actions = np.zeros((max_replay_buffer_size, action_dim))\n # Make everything a 2D np array to make it easier for other code to\n # reason about the shape of the data\n self._rewards = np.zeros((max_replay_buffer_size, 1))\n # self._terminals[i] = a terminal was received at time i\n self._terminals = np.zeros((max_replay_buffer_size, 1), dtype='uint8')\n # Define self._env_infos[key][i] to be the return value of env_info[key]\n # at time i\n self._env_infos = {}\n for key, size in env_info_sizes.items():\n self._env_infos[key] = np.zeros((max_replay_buffer_size, size))\n # self._env_info_keys = env_info_sizes.keys()\n self._env_info_keys = list(env_info_sizes.keys())\n\n self._top = 0\n self._size = 0\n\n def add_sample(self, observation, action, reward, next_observation,\n terminal, env_info, **kwargs):\n self._observations[self._top] = observation\n self._actions[self._top] = action\n self._rewards[self._top] = reward\n self._terminals[self._top] = terminal\n self._next_obs[self._top] = next_observation\n if (self._store_raw_action) and ('agent_info' in kwargs):\n if 'raw_action' in kwargs['agent_info']:\n self._raw_actions[self._top] = kwargs['agent_info']['raw_action']\n\n for key in self._env_info_keys:\n self._env_infos[key][self._top] = env_info[key]\n self._advance()\n\n def terminate_episode(self):\n pass\n\n def _advance(self):\n self._top = (self._top + 1) % self._max_replay_buffer_size\n if self._size < self._max_replay_buffer_size:\n self._size += 1\n\n def random_batch(self, batch_size):\n indices = np.random.randint(0, self._size, batch_size)\n batch = dict(\n observations=self._observations[indices],\n actions=self._actions[indices],\n rewards=self._rewards[indices],\n terminals=self._terminals[indices],\n next_observations=self._next_obs[indices],\n )\n if self._store_raw_action:\n batch['raw_actions']=self._raw_actions[indices]\n for key in self._env_info_keys:\n assert key not in batch.keys()\n batch[key] = self._env_infos[key][indices]\n return batch\n\n def rebuild_env_info_dict(self, idx):\n return {\n key: self._env_infos[key][idx]\n for key in self._env_info_keys\n }\n\n def batch_env_info_dict(self, indices):\n return {\n key: self._env_infos[key][indices]\n for key in self._env_info_keys\n }\n\n def num_steps_can_sample(self):\n return self._size\n\n def get_diagnostics(self):\n return OrderedDict([\n ('size', self._size)\n ])\n",
"from collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch import nn as nn\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.torch_rl_algorithm import TorchTrainer\n\n\nclass R2GGNNTrainer(TorchTrainer):\n \"\"\"\n Recursive Reasoning Graph; n\n Shared GNN cactor and Q;\n Actors share the observation gnn from Q;\n \"\"\"\n def __init__(\n self,\n env,\n qf1,\n target_qf1,\n qf2,\n target_qf2,\n cactor,\n policy_n,\n detach_k_action=False,\n deterministic_cactor_in_graph=True,\n deterministic_next_action=False,\n use_entropy_loss=True,\n use_entropy_reward=True,\n use_cactor_entropy_loss=True,\n use_automatic_entropy_tuning=True,\n target_entropy=None,\n\n discount=0.99,\n reward_scale=1.0,\n\n policy_learning_rate=1e-4,\n qf_learning_rate=1e-3,\n qf_weight_decay=0.,\n init_alpha=1.,\n cactor_learning_rate=1e-4,\n clip_gradient=0.,\n target_hard_update_period=1000,\n tau=1e-2,\n use_soft_update=False,\n qf_criterion=None,\n pre_activation_weight=0.,\n optimizer_class=optim.Adam,\n shared_obs=False,\n\n min_q_value=-np.inf,\n max_q_value=np.inf,\n\n qf1_optimizer=None,\n qf2_optimizer=None,\n cactor_optimizer=None,\n policy_optimizer_n=None,\n alpha_optimizer_n=None,\n calpha_optimizer=None,\n log_alpha_n = None,\n log_calpha_n = None,\n ):\n super().__init__()\n self.env = env\n if qf_criterion is None:\n qf_criterion = nn.MSELoss()\n self.qf1 = qf1\n self.target_qf1 = target_qf1\n self.qf2 = qf2\n self.target_qf2 = target_qf2\n self.policy_n = policy_n\n self.cactor = cactor\n\n self.detach_k_action = detach_k_action\n self.deterministic_cactor_in_graph = deterministic_cactor_in_graph\n self.deterministic_next_action = deterministic_next_action\n\n self.discount = discount\n self.reward_scale = reward_scale\n\n self.policy_learning_rate = policy_learning_rate\n self.qf_learning_rate = qf_learning_rate\n self.qf_weight_decay = qf_weight_decay\n self.cactor_learning_rate = cactor_learning_rate\n self.clip_gradient = clip_gradient\n self.target_hard_update_period = target_hard_update_period\n self.tau = tau\n self.use_soft_update = use_soft_update\n self.qf_criterion = qf_criterion\n self.pre_activation_weight = pre_activation_weight\n self.min_q_value = min_q_value\n self.max_q_value = max_q_value\n self.shared_obs = shared_obs\n\n if qf1_optimizer:\n self.qf1_optimizer = qf1_optimizer\n else:\n self.qf1_optimizer = optimizer_class(\n self.qf1.parameters(),\n lr=self.qf_learning_rate,\n )\n if qf2_optimizer:\n self.qf2_optimizer = qf2_optimizer\n else:\n self.qf2_optimizer = optimizer_class(\n self.qf2.parameters(),\n lr=self.qf_learning_rate,\n )\n if policy_optimizer_n:\n self.policy_optimizer_n = policy_optimizer_n\n else:\n self.policy_optimizer_n = [\n optimizer_class(\n self.policy_n[i].parameters(),\n lr=self.policy_learning_rate,\n ) for i in range(len(self.policy_n))]\n\n if cactor_optimizer:\n self.cactor_optimizer = cactor_optimizer\n else:\n self.cactor_optimizer = optimizer_class(\n self.cactor.parameters(),\n lr=self.cactor_learning_rate,\n )\n\n self.init_alpha = init_alpha\n self.use_entropy_loss = use_entropy_loss\n self.use_entropy_reward = use_entropy_reward\n self.use_cactor_entropy_loss = use_cactor_entropy_loss\n self.use_automatic_entropy_tuning = use_automatic_entropy_tuning\n if self.use_automatic_entropy_tuning:\n if target_entropy:\n self.target_entropy = target_entropy\n else:\n self.target_entropy = 
-np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas\n if self.use_entropy_loss:\n if log_alpha_n:\n self.log_alpha_n = log_alpha_n\n else:\n self.log_alpha_n = [ptu.tensor([np.log(self.init_alpha)], requires_grad=True, dtype=torch.float32) for i in range(len(self.policy_n))]\n if alpha_optimizer_n:\n self.alpha_optimizer_n = alpha_optimizer_n\n else:\n self.alpha_optimizer_n = [\n optimizer_class(\n [self.log_alpha_n[i]],\n lr=self.policy_learning_rate,\n ) for i in range(len(self.log_alpha_n))]\n\n if self.use_cactor_entropy_loss:\n if log_calpha_n:\n self.log_calpha_n = log_calpha_n\n else:\n self.log_calpha_n = [ptu.tensor([np.log(self.init_alpha)], requires_grad=True, dtype=torch.float32) for i in range(len(self.policy_n))]\n if calpha_optimizer:\n self.calpha_optimizer = calpha_optimizer\n else:\n self.calpha_optimizer = \\\n optimizer_class(\n self.log_calpha_n,\n lr=self.policy_learning_rate,\n )\n\n self.eval_statistics = OrderedDict()\n self._n_train_steps_total = 0\n self._need_to_update_eval_statistics = True\n\n def train_from_torch(self, batch):\n rewards_n = batch['rewards'].detach()\n terminals_n = batch['terminals'].detach()\n obs_n = batch['observations'].detach()\n actions_n = batch['actions'].detach()\n next_obs_n = batch['next_observations'].detach()\n\n batch_size = rewards_n.shape[0]\n num_agent = rewards_n.shape[1]\n\n \"\"\"\n Policy operations.\n \"\"\"\n online_actions_n, online_pre_values_n, online_log_pis_n = [], [], []\n shared_obs_emb = self.qf1.obs_gnn(obs_n)\n\n for agent in range(num_agent):\n policy_actions, info = self.policy_n[agent](\n shared_obs_emb[:,agent,:], return_info=True,\n )\n\n online_actions_n.append(policy_actions)\n online_pre_values_n.append(info['preactivation'])\n online_log_pis_n.append(info['log_prob'])\n k0_actions = torch.stack(online_actions_n) # num_agent x batch x a_dim\n k0_actions = k0_actions.transpose(0,1).contiguous() # batch x num_agent x a_dim\n\n if self.shared_obs:\n k0_inputs = torch.cat([obs_n, k0_actions.reshape(*obs_n.shape[:-1],-1)],dim=-1)\n else:\n k0_inputs = torch.cat([obs_n, k0_actions],dim=-1)\n k1_actions = self.cactor(k0_inputs, deterministic=self.deterministic_cactor_in_graph)\n if self.detach_k_action:\n k1_actions = k1_actions.detach()\n\n q1_outputs = self.qf1(obs_n, k1_actions, k0_actions)\n q2_outputs = self.qf2(obs_n, k1_actions, k0_actions)\n\n min_q_outputs = torch.min(q1_outputs, q2_outputs) # batch x num_agent x 1\n\n policy_gradients_n = []\n alpha_n = []\n\n for agent in range(num_agent):\n policy_actions = online_actions_n[agent]\n pre_value = online_pre_values_n[agent]\n log_pi = online_log_pis_n[agent]\n if self.pre_activation_weight > 0.:\n pre_activation_policy_loss = (\n (pre_value**2).sum(dim=1).mean()\n )\n else:\n pre_activation_policy_loss = torch.tensor(0.).to(ptu.device) \n if self.use_entropy_loss:\n if self.use_automatic_entropy_tuning:\n alpha = self.log_alpha_n[agent].exp()\n alpha_loss = -(alpha * (log_pi + self.target_entropy).detach()).mean()\n self.alpha_optimizer_n[agent].zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer_n[agent].step()\n alpha = self.log_alpha_n[agent].exp().detach()\n alpha_n.append(alpha)\n else:\n alpha_loss = torch.tensor(0.).to(ptu.device)\n alpha = torch.tensor(self.init_alpha).to(ptu.device)\n alpha_n.append(alpha)\n entropy_loss = (alpha*log_pi).mean()\n else:\n entropy_loss = torch.tensor(0.).to(ptu.device)\n\n raw_policy_loss = -min_q_outputs[:,agent,:].mean()\n policy_loss = (\n raw_policy_loss +\n 
pre_activation_policy_loss * self.pre_activation_weight +\n entropy_loss\n )\n\n policy_gradients_n.append(torch.autograd.grad(policy_loss, self.policy_n[agent].parameters(),retain_graph=True))\n\n if self._need_to_update_eval_statistics:\n self.eval_statistics['Policy Loss {}'.format(agent)] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n self.eval_statistics['Raw Policy Loss {}'.format(agent)] = np.mean(ptu.get_numpy(\n raw_policy_loss\n ))\n self.eval_statistics['Preactivation Policy Loss {}'.format(agent)] = np.mean(ptu.get_numpy(\n pre_activation_policy_loss\n ))\n self.eval_statistics['Entropy Loss {}'.format(agent)] = np.mean(ptu.get_numpy(\n entropy_loss\n ))\n if self.use_entropy_loss:\n self.eval_statistics['Alpha {} Mean'.format(agent)] = np.mean(ptu.get_numpy(\n alpha\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy Action {}'.format(agent),\n ptu.get_numpy(policy_actions),\n ))\n\n for agent in range(num_agent):\n # self.policy_optimizer_n[agent].zero_grad()\n for pid,p in enumerate(self.policy_n[agent].parameters()):\n p.grad = policy_gradients_n[agent][pid]\n if self.clip_gradient > 0.:\n grad_norm = nn.utils.clip_grad_norm_(self.policy_n[agent].parameters(), self.clip_gradient)\n self.eval_statistics['Policy {} Grad Norm'.format(agent)] = ptu.get_numpy(torch.tensor(grad_norm))\n self.policy_optimizer_n[agent].step()\n\n \"\"\"\n Critic operations.\n \"\"\"\n with torch.no_grad():\n next_shared_obs_emb = self.qf1.obs_gnn(next_obs_n)\n next_actions_n, next_log_pis_n = [], []\n for agent in range(num_agent):\n next_actions, next_info = self.policy_n[agent](\n next_shared_obs_emb[:,agent,:], return_info=True,\n deterministic=self.deterministic_next_action,\n )\n next_actions_n.append(next_actions)\n next_log_pis_n.append(next_info['log_prob'])\n next_k0_actions = torch.stack(next_actions_n) # num_agent x batch x a_dim\n next_k0_actions = next_k0_actions.transpose(0,1).contiguous() # batch x num_agent x a_dim\n\n if self.shared_obs:\n next_k0_inputs = torch.cat([next_obs_n, next_k0_actions.reshape(*next_obs_n.shape[:-1],-1)],dim=-1)\n else:\n next_k0_inputs = torch.cat([next_obs_n, next_k0_actions],dim=-1)\n next_k1_actions = self.cactor(next_k0_inputs, deterministic=self.deterministic_cactor_in_graph)\n\n next_target_q1_values = self.target_qf1(next_obs_n, next_k1_actions, next_k0_actions)\n next_target_q2_values = self.target_qf2(next_obs_n, next_k1_actions, next_k0_actions)\n next_target_q_values = torch.min(next_target_q1_values, next_target_q2_values)\n\n if self.use_entropy_reward:\n next_alphas = torch.stack(alpha_n)[None,:]\n next_log_pis = torch.stack(next_log_pis_n).transpose(0,1).contiguous()\n next_target_q_values = next_target_q_values - next_alphas * next_log_pis\n q_targets = self.reward_scale*rewards_n + (1. 
- terminals_n) * self.discount * next_target_q_values\n q_targets = torch.clamp(q_targets, self.min_q_value, self.max_q_value)\n\n k0_actions = actions_n \n k1_actions = actions_n\n\n q1_preds = self.qf1(obs_n, k1_actions, k0_actions)\n raw_qf1_loss = self.qf_criterion(q1_preds, q_targets)\n\n q2_preds = self.qf2(obs_n, k1_actions, k0_actions)\n raw_qf2_loss = self.qf_criterion(q2_preds, q_targets)\n\n if self.qf_weight_decay > 0:\n reg_loss1 = self.qf_weight_decay * sum(\n torch.sum(param ** 2)\n for param in list(self.qf1.regularizable_parameters())+list(self.cg1.regularizable_parameters())\n )\n\n reg_loss2 = self.qf_weight_decay * sum(\n torch.sum(param ** 2)\n for param in list(self.qf2.regularizable_parameters())+list(self.cg2.regularizable_parameters())\n )\n else:\n reg_loss1, reg_loss2 = torch.tensor(0.).to(ptu.device), torch.tensor(0.).to(ptu.device)\n\n qf1_loss = raw_qf1_loss + reg_loss1\n qf2_loss = raw_qf2_loss + reg_loss2\n\n if self._need_to_update_eval_statistics:\n self.eval_statistics['Qf1 Loss'] = ptu.get_numpy(qf1_loss)\n self.eval_statistics['Qf2 Loss'] = ptu.get_numpy(qf2_loss)\n self.eval_statistics['Raw Qf1 Loss'] = ptu.get_numpy(raw_qf1_loss)\n self.eval_statistics['Raw Qf2 Loss'] = ptu.get_numpy(raw_qf2_loss)\n self.eval_statistics['Reg Qf2 Loss'] = ptu.get_numpy(reg_loss1)\n self.eval_statistics['Reg Qf2 Loss'] = ptu.get_numpy(reg_loss2)\n\n self.qf1_optimizer.zero_grad()\n qf1_loss.backward()\n if self.clip_gradient > 0.:\n grad_norm = nn.utils.clip_grad_norm_(self.qf1.parameters(), self.clip_gradient)\n self.eval_statistics['Qf1 Grad Norm'] = ptu.get_numpy(torch.tensor(grad_norm))\n self.qf1_optimizer.step()\n\n self.qf2_optimizer.zero_grad()\n qf2_loss.backward()\n if self.clip_gradient > 0.:\n grad_norm = nn.utils.clip_grad_norm_(self.qf2.parameters(), self.clip_gradient)\n self.eval_statistics['Qf2 Grad Norm'] = ptu.get_numpy(torch.tensor(grad_norm))\n self.qf2_optimizer.step()\n\n \"\"\"\n Central actor operations.\n \"\"\"\n if self.shared_obs:\n buffer_inputs = torch.cat([obs_n, actions_n.reshape(*obs_n.shape[:-1],-1)],dim=-1)\n else:\n buffer_inputs = torch.cat([obs_n, actions_n],dim=-1)\n cactor_actions, cactor_infos = self.cactor(buffer_inputs,return_info=True)\n\n cactor_pre_values = cactor_infos['preactivation']\n if self.pre_activation_weight > 0:\n pre_activation_cactor_loss = (\n (cactor_pre_values**2).sum(dim=1).mean()\n )\n else:\n pre_activation_cactor_loss = torch.tensor(0.).to(ptu.device)\n if self.use_cactor_entropy_loss:\n cactor_log_pis = cactor_infos['log_prob'] # batch x num_ageng x 1\n if self.use_automatic_entropy_tuning:\n calphas = torch.stack(self.log_calpha_n).exp()[None,:]\n calpha_loss = -(calphas * (cactor_log_pis + self.target_entropy).detach())\n calpha_loss = calpha_loss.mean()\n self.calpha_optimizer.zero_grad()\n calpha_loss.backward()\n self.calpha_optimizer.step()\n calphas = torch.stack(self.log_calpha_n).exp().detach()\n else:\n calpha_loss = torch.tensor(0.).to(ptu.device)\n calphas = torch.stack([torch.tensor(self.init_alpha).to(ptu.device) for i in range(num_agent)])\n cactor_entropy_loss = (calphas[None,:]*cactor_log_pis).mean()\n else:\n cactor_entropy_loss = torch.tensor(0.).to(ptu.device)\n \n q1_outputs = self.qf1(obs_n, actions_n, cactor_actions)\n q2_outputs = self.qf2(obs_n, actions_n, cactor_actions)\n q_outputs = torch.min(q1_outputs,q2_outputs)\n raw_cactor_loss = -q_outputs.mean()\n cactor_loss = (\n raw_cactor_loss +\n pre_activation_cactor_loss * self.pre_activation_weight +\n cactor_entropy_loss\n 
)\n\n if self._need_to_update_eval_statistics:\n if self.use_cactor_entropy_loss:\n self.eval_statistics.update(create_stats_ordered_dict(\n 'CAlpha', ptu.get_numpy(calphas),\n ))\n self.eval_statistics['Cactor Loss'] = ptu.get_numpy(cactor_loss)\n self.eval_statistics['Raw Cactor Loss'] = ptu.get_numpy(raw_cactor_loss)\n self.eval_statistics['Preactivation Cactor Loss'] = ptu.get_numpy(pre_activation_cactor_loss)\n self.eval_statistics['Entropy Cactor Loss'] = ptu.get_numpy(cactor_entropy_loss)\n\n self.cactor_optimizer.zero_grad()\n cactor_loss.backward()\n if self.clip_gradient > 0.:\n grad_norm = nn.utils.clip_grad_norm_(self.cactor.parameters(), self.clip_gradient)\n self.eval_statistics['Cactor Grad Norm'] = ptu.get_numpy(torch.tensor(grad_norm))\n self.cactor_optimizer.step()\n \n self._need_to_update_eval_statistics = False\n self._update_target_networks()\n self._n_train_steps_total += 1\n\n def _update_target_networks(self):\n if self.use_soft_update:\n ptu.soft_update_from_to(self.qf1, self.target_qf1, self.tau)\n ptu.soft_update_from_to(self.qf2, self.target_qf2, self.tau)\n else:\n if self._n_train_steps_total % self.target_hard_update_period == 0:\n ptu.copy_model_params_from_to(self.qf1, self.target_qf1)\n ptu.copy_model_params_from_to(self.qf2, self.target_qf2)\n\n def get_diagnostics(self):\n return self.eval_statistics\n\n def end_epoch(self, epoch):\n self._need_to_update_eval_statistics = True\n\n @property\n def networks(self):\n res = [\n *self.policy_n,\n self.cactor,\n self.qf1,\n self.target_qf1,\n self.qf2,\n self.target_qf2,\n ]\n return res\n\n def get_snapshot(self):\n res = dict(\n qf1=self.qf1,\n target_qf1=self.target_qf1,\n qf2=self.qf2,\n target_qf2=self.target_qf2,\n cactor=self.cactor,\n policy_n=self.policy_n,\n # optimizers\n qf1_optimizer=self.qf1_optimizer,\n qf2_optimizer=self.qf2_optimizer,\n cactor_optimizer=self.cactor_optimizer,\n policy_optimizer_n=self.policy_optimizer_n,\n )\n res['shared_gnn'] = self.qf1.obs_gnn\n\n if self.use_automatic_entropy_tuning:\n res['log_alpha_n'] = self.log_alpha_n\n res['alpha_optimizer_n'] = self.alpha_optimizer_n\n if self.use_cactor_entropy_loss:\n res['log_calpha_n'] = self.log_calpha_n\n res['calpha_optimizer'] = self.calpha_optimizer\n return res\n",
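The critic update in `train_from_torch` above builds a clipped double-Q target: the element-wise minimum of the two target critics, an optional entropy correction, then a Bellman backup with terminal masking and value clamping. A standalone sketch of just that computation (the `td_target` helper is hypothetical, not from the repo):

```python
import torch

def td_target(rewards, terminals, q1_next, q2_next, log_pi_next,
              alpha=0.2, discount=0.99, reward_scale=1.0,
              min_q=-float('inf'), max_q=float('inf')):
    # Clipped double-Q: pessimistic min over the two target critics,
    # minus the entropy term, then the (terminal-masked) Bellman backup.
    next_q = torch.min(q1_next, q2_next) - alpha * log_pi_next
    target = reward_scale * rewards + (1.0 - terminals) * discount * next_q
    return torch.clamp(target, min_q, max_q)

rewards   = torch.tensor([[1.0], [0.0]])
terminals = torch.tensor([[0.0], [1.0]])  # second transition ends the episode
q1_next   = torch.tensor([[2.0], [3.0]])
q2_next   = torch.tensor([[1.5], [3.5]])
log_pi    = torch.tensor([[-0.5], [-0.5]])
print(td_target(rewards, terminals, q1_next, q2_next, log_pi))
# row 0: 1 + 0.99 * (1.5 + 0.2*0.5) = 2.584; row 1: 0.0 (terminal masks bootstrap)
```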
"import random\nimport itertools\nimport numpy as np\nfrom gym import spaces\n\nfrom traffic.traffic_env import TrafficEnv\nfrom traffic.road import Road, RoadSegment\nfrom traffic.car import Car\nfrom traffic.drivers.driver import Driver, XYSeperateDriver\nfrom traffic.drivers.oned_drivers import IDMDriver, PDDriver\nfrom traffic.actions.trajectory_accel_action import TrajectoryAccelAction\nfrom traffic.constants import *\n\nclass YNYDriver(XYSeperateDriver):\n def __init__(self, yld=True, t=1.0, s_min=3.0,\n v_normal=3., v_ny=6., \n s_normal=3., s_ny=1.,\n **kwargs):\n self.yld = yld\n self.t = t \n self.s_min = 3.0\n self.v_normal = v_normal\n self.v_ny = v_ny\n self.s_normal = s_normal\n self.s_ny = s_ny\n self.intention = 0 # 0: noraml drive; 1: yield 2: not yield\n super(YNYDriver, self).__init__(**kwargs)\n\n def set_yld(self, yld):\n self.yld = yld\n\n def observe(self, cars, road):\n s = cars[0].position[0] - self.car.position[0]\n s = s * self.x_driver.direction\n t = self.car.get_distance(cars[0],1)\n # print(\"t: \",t, self.t1, self.t2)\n self.x_driver.set_v_des(self.v_normal)\n self.x_driver.s_des = self.s_normal\n if (s < self.s_min) or (t > self.t): # normal drive\n self.x_driver.observe(cars[1:], road)\n self.intention = 0\n else:\n if self.yld and (t <= self.t): # yield\n self.x_driver.min_overlap = self.t\n self.x_driver.observe(cars, road)\n self.intention = 1\n else: # not yield\n self.x_driver.set_v_des(self.v_ny)\n self.x_driver.s_des = self.s_ny\n self.x_driver.observe(cars[1:], road)\n self.intention = 2\n \n self.y_driver.observe(cars, road)\n\n def setup_render(self, viewer):\n if self.intention == 0:\n self.car._color = [*GREEN_COLORS[0],0.5]\n elif self.intention == 1:\n self.car._color = [*BLUE_COLORS[0],0.5]\n elif self.intention == 2:\n self.car._color = [*RED_COLORS[0],0.5]\n\n def update_render(self, camera_center):\n if self.intention == 0:\n self.car._color = [*GREEN_COLORS[0],0.5]\n elif self.intention == 1:\n self.car._color = [*BLUE_COLORS[0],0.5]\n elif self.intention == 2:\n self.car._color = [*RED_COLORS[0],0.5]\n\nclass EgoTrajectory:\n def xy_to_traj(self, pos):\n x, y = pos[0], pos[1]\n r = 6 # 4.5\n if y < 0.:\n s = y\n t = -x\n theta = np.pi/2\n curv = 0.\n elif x > r:\n s = r*np.pi/2. 
+ x - r\n t = y - r\n theta = 0.\n curv = 0.\n else:\n theta = np.arctan2(r-x ,y)\n curv = 1./r\n s = r*(np.pi/2.-theta)\n t = np.sqrt((r-x)**2+(y)**2) - r\n\n return s, t, theta, curv\n\n def traj_to_xy(self, pos):\n s, t = pos[0], pos[1]\n r = 6 # 4.5\n if s < 0.:\n x = -t\n y = s\n theta = np.pi/2\n curv = 0.\n elif s > r*np.pi/2.:\n x = r + s - r*np.pi/2.\n y = r + t\n theta = 0.\n curv = 0.\n else:\n theta = np.pi/2 - s/r\n curv = 1./r\n x = r - (r+t)*np.sin(theta)\n y = (r+t)*np.cos(theta)\n\n return x, y, theta, curv\n\nclass EgoDriver(Driver):\n def __init__(self, \n trajectory=None, \n v_des=0.0,\n t_des=0.0,\n k_s_p=2.0,\n k_t_p=2.0,\n k_t_d=2.0,\n sigma=0.0, \n as_max=3.0,\n at_max=3.0,\n as_max_safe=6.0,\n at_max_safe=6.0,\n concern_distance=2.0,\n safe_distance=0.5,\n safe_speed=1.0,\n **kwargs):\n\n self.trajectory = trajectory\n self.v_des = v_des\n self.t_des = t_des\n self.k_s_p = k_s_p\n self.k_t_p = k_t_p\n self.k_t_d = k_t_d\n self.as_max = as_max\n self.at_max = at_max\n self.as_max_safe = as_max_safe\n self.at_max_safe = at_max_safe\n\n self.a_s = None\n self.a_t = None\n super(EgoDriver, self).__init__(**kwargs)\n # np.sqrt(self.car.length**2+self.car.width**2)/2\n self.concern_distance = concern_distance\n self.safe_distance = safe_distance\n self.safe_speed = safe_speed\n self.k_d_safe = 5.0\n self.k_v_safe = 5.0 # 2.0\n\n def set_trajectory(self, trajectory):\n self.trajectory = trajectory\n\n def observe(self, cars, road):\n s, t, theta, curv = self.trajectory.xy_to_traj(self.car.position)\n v_x, v_y = self.car.velocity[0], self.car.velocity[1]\n v_s = v_x*np.cos(theta) + v_y*np.sin(theta)\n v_t = -v_x*np.sin(theta) + v_y*np.cos(theta)\n\n self.a_s = self.k_s_p*(self.v_des-v_s)\n self.a_t = self.k_t_p*(self.t_des-t) - self.k_t_d*v_t\n self.a_s = np.clip(self.a_s,-self.as_max,self.as_max)\n self.a_t = np.clip(self.a_t,-self.at_max,self.at_max)\n\n # safety check\n a_x_safe = 0.\n a_y_safe = 0.\n unsafe = False\n for cid, car in enumerate(cars):\n if car is self.car:\n continue\n else:\n p1, p2 = self.car.get_closest_points(car)\n distance = np.linalg.norm(p1-p2)\n direction = (p1-p2)/distance\n v_rel = self.car.velocity - car.velocity\n speed_rel = np.sum(v_rel * direction)\n # print(distance)\n if distance < self.concern_distance:\n if distance < self.safe_distance:\n unsafe = True\n elif speed_rel < -self.safe_speed:\n unsafe = True\n if unsafe:\n self.a_s = -self.k_v_safe * v_s\n self.a_t = -self.k_v_safe * v_t\n self.a_s = np.clip(self.a_s,-self.as_max_safe,self.as_max_safe)\n self.a_t = np.clip(self.a_t,-self.at_max_safe,self.at_max_safe)\n\n def get_action(self):\n return TrajectoryAccelAction(self.a_s, self.a_t, self.trajectory)\n\nclass TIntersectionMulti(TrafficEnv):\n def __init__(self,\n yld=0.5,\n observe_mode='full',\n label_mode='full',\n normalize_obs=False,\n vs_actions=[0.,0.5,3.],\n t_actions=[-1.5,0.,1.5],\n desire_speed=3.,\n driver_sigma = 0.,\n speed_cost=0.01,\n t_cost=0.01,\n control_cost=0.01,\n collision_cost=5.,\n outroad_cost=5.,\n survive_reward=0.01,\n goal_reward=5.,\n road=Road([RoadSegment([(-100.,0.),(100.,0.),(100.,8.),(-100.,8.)]),\n RoadSegment([(-2,-10.),(2,-10.),(2,0.),(-2,0.)])]),\n left_bound = -20.,\n right_bound = 20.,\n gap_min = 3.,\n gap_max = 10.,\n max_veh_num = 12,\n num_updates=1,\n dt=0.1,\n **kwargs):\n\n self.yld = yld\n self.observe_mode = observe_mode\n self.label_mode = label_mode\n self.normalize_obs = normalize_obs\n self.vs_actions = vs_actions\n self.t_actions = t_actions\n # we use target value instead 
of target change so system is Markovian\n self.rl_actions = list(itertools.product(vs_actions,t_actions))\n self.num_updates = num_updates\n\n self.desire_speed = desire_speed\n self.speed_cost = speed_cost\n self.t_cost = t_cost\n self.control_cost = control_cost\n self.collision_cost = collision_cost\n self.outroad_cost = outroad_cost\n self.survive_reward = survive_reward\n self.goal_reward = goal_reward\n self._collision = False\n self._outroad = False\n self._goal = False\n self._intentions = []\n\n self.left_bound = left_bound\n self.right_bound = right_bound\n self.gap_min = gap_min\n self.gap_max = gap_max\n self.max_veh_num = max_veh_num\n self.label_dim = 3\n if self.label_mode == 'full':\n if observe_mode == 'full':\n self.label_num = self.max_veh_num\n elif observe_mode == 'important':\n self.label_num = 4\n elif self.label_mode == 'important':\n self.label_num = 2\n\n self.car_length=5.0\n self.car_width=2.0\n self.car_max_accel=10.0\n self.car_max_speed=40.0\n self.car_expose_level=4\n self.driver_sigma = driver_sigma\n self.s_des = 3.0\n self.s_min = 3.0\n self.min_overlap = 1.0\n\n super(TIntersectionMulti, self).__init__(\n road=road,\n cars=[],\n drivers=[],\n dt=dt,\n **kwargs,)\n\n def get_intentions(self):\n intentions = np.array([np.nan]*self.label_num)\n if self.label_mode == 'full':\n i = 0\n if self.observe_mode == 'full':\n upper_indices, lower_indices = self.get_sorted_indices()\n for indx in lower_indices:\n intentions[i] = self._drivers[indx].intention\n i += 1\n i = int(self.max_veh_num/2)\n for indx in upper_indices:\n intentions[i] = self._drivers[indx].intention\n i += 1\n elif self.observe_mode == 'important':\n important_indices = self.get_important_indices()\n for indx in important_indices:\n if indx is None:\n i += 1\n else:\n intentions[i] = self._drivers[indx].intention\n i += 1\n elif self.label_mode == 'important':\n # [ind_ll, ind_lr, ind_ul, ind_ur]\n ind_ll, ind_lr, ind_ul, ind_ur = self.get_important_indices()\n if ind_lr is not None:\n intentions[0] = self._drivers[ind_lr].intention\n if ind_ul is not None:\n intentions[1] = self._drivers[ind_ul].intention\n return intentions\n\n def update(self, action):\n # recorder intentios at the begining\n self._intentions = self.get_intentions()\n\n rl_action = self.rl_actions[action]\n self._drivers[0].v_des = rl_action[0]\n self._drivers[0].t_des = rl_action[1]\n\n self._goal = False\n self._collision = False\n self._outroad = False\n for _ in range(self.num_updates):\n for driver in self._drivers:\n driver.observe(self._cars, self._road)\n self._actions = [driver.get_action() for driver in self._drivers]\n [action.update(car, self.dt) for (car, action) in zip(self._cars, self._actions)]\n\n ego_car = self._cars[0]\n for car in self._cars[1:]:\n if ego_car.check_collision(car):\n self._collision = True\n return\n\n if not self._road.is_in(ego_car):\n self._outroad = True\n return\n\n if (ego_car.position[0] > 8.) \\\n and (ego_car.position[1] > 5.) \\\n and (ego_car.position[1] < 7.):\n self._goal = True\n return\n\n # remove cars that are out-of bound\n for car, driver in zip(self._cars[1:],self._drivers[1:]):\n if(car.position[1] < 4.) and (car.position[0] < self.left_bound):\n self.remove_car(car, driver)\n elif(car.position[1] > 4.) and (car.position[0] > self.right_bound):\n self.remove_car(car, driver)\n\n # add cars when there is enough space\n min_upper_x = np.inf\n max_lower_x = -np.inf\n for car in self._cars[1:]:\n if (car.position[1] < 4.) 
and (car.position[0] > max_lower_x):\n max_lower_x = car.position[0]\n if (car.position[1] > 4.) and (car.position[0] < min_upper_x):\n min_upper_x = car.position[0]\n if max_lower_x < (self.right_bound - np.random.rand()*(self.gap_max-self.gap_min) - self.gap_min - self.car_length):\n v_des = self.desire_speed\n p_des = 2.\n direction = -1\n x = self.right_bound\n car, driver = self.add_car(0, x, 2., -self.desire_speed, 0., v_des, p_des, direction, np.pi)\n if hasattr(self, 'viewer') and self.viewer:\n car.setup_render(self.viewer)\n driver.setup_render(self.viewer)\n if min_upper_x > (self.left_bound + np.random.rand()*(self.gap_max-self.gap_min) + self.gap_min + self.car_length):\n v_des = self.desire_speed\n p_des = 6.\n direction = 1\n x = self.left_bound\n car, driver = self.add_car(0, x, 6., self.desire_speed, 0., v_des, p_des, direction, 0.)\n if hasattr(self, 'viewer') and self.viewer:\n car.setup_render(self.viewer)\n driver.setup_render(self.viewer)\n\n def is_terminal(self):\n return (self._collision or self._outroad or self._goal)\n\n def get_info(self):\n info = {}\n info['sup_labels'] = np.copy(self._intentions)\n\n if self._collision:\n info['event']='collision'\n elif self._outroad:\n info['event']='outroad'\n elif self._goal:\n info['event']='goal'\n else:\n info['event']='nothing'\n\n return info\n\n def observe(self):\n if self.observe_mode == 'full':\n obs = np.zeros(int(4*self.max_veh_num+4))\n obs[:2] = self._cars[0].position\n obs[2:4] = self._cars[0].velocity\n upper_indices, lower_indices = self.get_sorted_indices()\n i = 4\n for indx in lower_indices:\n obs[i:i+2] = self._cars[indx].position - self._cars[0].position\n obs[i+2:i+4] = self._cars[indx].velocity - self._cars[0].velocity\n i += 4\n i = int(4 + self.max_veh_num/2*4)\n for indx in upper_indices:\n obs[i:i+2] = self._cars[indx].position - self._cars[0].position\n obs[i+2:i+4] = self._cars[indx].velocity - self._cars[0].velocity\n i += 4\n elif self.observe_mode == 'important':\n obs = np.zeros(int(4*4+4))\n obs[:2] = self._cars[0].position\n obs[2:4] = self._cars[0].velocity\n important_indices = self.get_important_indices()\n i = 4\n for indx in important_indices:\n if indx is None:\n obs[i:i+4] = 0.\n else:\n obs[i:i+2] = self._cars[indx].position - self._cars[0].position\n obs[i+2:i+4] = self._cars[indx].velocity - self._cars[0].velocity\n i += 4\n if self.normalize_obs:\n obs[0::4] = obs[0::4]/self.right_bound\n obs[1::4] = obs[1::4]/self.right_bound\n obs[2::4] = obs[2::4]/self.desire_speed\n obs[3::4] = obs[3::4]/self.desire_speed\n obs = np.copy(obs)\n return obs\n\n @property\n def observation_space(self):\n if self.observe_mode == 'full':\n low = -np.ones(int(4*self.max_veh_num+4))\n high = np.ones(int(4*self.max_veh_num+4))\n elif self.observe_mode == 'important':\n low = -np.ones(20)\n high = np.ones(20)\n return spaces.Box(low=low, high=high, dtype=np.float32)\n\n @property\n def action_space(self):\n return spaces.Discrete(len(self.rl_actions))\n\n def get_reward(self):\n reward = 0.\n action = self._actions[0]\n ego_car = self._cars[0]\n s, t, theta, curv = self._drivers[0].trajectory.xy_to_traj(ego_car.position)\n v_x, v_y = ego_car.velocity[0], ego_car.velocity[1]\n v_s = v_x*np.cos(theta) + v_y*np.sin(theta)\n v_t = -v_x*np.sin(theta) + v_y*np.cos(theta)\n\n speed_cost = -np.abs(self.desire_speed-v_s)/self.desire_speed\n reward += self.speed_cost*speed_cost\n\n t_cost = -np.abs(t)/np.max(self.t_actions)\n reward += self.t_cost*t_cost\n\n control_cost = 0. 
# TODO\n reward += self.control_cost*control_cost\n # print(speed_cost, t_cost, control_cost)\n\n if self._collision:\n reward -= self.collision_cost\n elif self._outroad:\n reward -= self.outroad_cost\n elif self._goal:\n reward += self.goal_reward\n else:\n reward += self.survive_reward\n\n return reward\n\n def remove_car(self, car, driver):\n self._cars.remove(car)\n self._drivers.remove(driver)\n if hasattr(self, 'viewer') and self.viewer:\n car.remove_render(self.viewer)\n driver.remove_render(self.viewer)\n\n def add_car(self, idx, x, y, vx, vy, v_des, p_des, direction, theta):\n car = Car(idx=idx, length=self.car_length, width=self.car_width, color=random.choice(RED_COLORS),\n max_accel=self.car_max_accel, max_speed=self.car_max_speed,\n expose_level=self.car_expose_level)\n driver = YNYDriver(idx=idx, car=car, dt=self.dt,\n x_driver=IDMDriver(idx=idx, car=car, sigma=self.driver_sigma, s_des=self.s_des, s_min=self.s_min, axis=0, min_overlap=self.min_overlap, dt=self.dt), \n y_driver=PDDriver(idx=idx, car=car, sigma=0., axis=1, dt=self.dt)) \n car.set_position(np.array([x, y]))\n car.set_velocity(np.array([vx, vy]))\n car.set_rotation(theta)\n driver.x_driver.set_v_des(v_des)\n driver.x_driver.set_direction(direction)\n driver.y_driver.set_p_des(p_des)\n if np.random.rand() < self.yld:\n driver.set_yld(True)\n else:\n driver.set_yld(False)\n\n self._cars.append(car)\n self._drivers.append(driver)\n return car, driver\n\n def get_important_indices(self):\n # return indices of 4 other vehicles that are closest to ego\n # on 4 directions\n ego_x = self._cars[0].position[0]\n min_ll, min_lr, min_ul, min_ur = np.inf, np.inf, np.inf, np.inf\n ind_ll, ind_lr, ind_ul, ind_ur = None, None, None, None\n for idx,car in enumerate(self._cars[1:]):\n x, y = car.position\n if y < 4.:\n if (x <= ego_x) and (ego_x - x < min_ll):\n min_ll = ego_x - x\n ind_ll = idx + 1\n elif (x > ego_x) and (x - ego_x < min_lr):\n min_lr = x - ego_x\n ind_lr = idx + 1\n else:\n if (x < ego_x) and (ego_x - x < min_ul):\n min_ul = ego_x - x\n ind_ul = idx + 1\n elif (x >= ego_x) and (x - ego_x < min_ur):\n min_ur = x - ego_x\n ind_ur = idx + 1\n return [ind_ll, ind_lr, ind_ul, ind_ur]\n\n def get_sorted_indices(self):\n # return indices of all other vehicles from left to right\n upper_indices, upper_xs = [], []\n lower_indices, lower_xs = [], []\n for indx,car in enumerate(self._cars[1:]):\n if car.position[1] > 4.:\n upper_indices.append(indx+1)\n upper_xs.append(car.position[0])\n else:\n lower_indices.append(indx+1)\n lower_xs.append(car.position[0])\n upper_indices = np.array(upper_indices)[np.argsort(upper_xs)]\n lower_indices = np.array(lower_indices)[np.argsort(lower_xs)]\n return upper_indices, lower_indices\n\n def _reset(self):\n self._collision = False\n self._outroad = False\n self._goal = False\n\n self._cars, self._drivers = [], []\n car = Car(idx=0, length=self.car_length, width=self.car_width, color=random.choice(BLUE_COLORS),\n max_accel=self.car_max_accel, max_speed=self.car_max_speed,\n expose_level=self.car_expose_level)\n driver = EgoDriver(trajectory=EgoTrajectory(),idx=0,car=car,dt=self.dt)\n car.set_position(np.array([0., -2.5]))\n car.set_velocity(np.array([0., 0.]))\n car.set_rotation(np.pi/2.)\n driver.v_des = 0.\n driver.t_des = 0.\n self._cars.append(car)\n self._drivers.append(driver)\n # randomly generate surrounding cars and drivers\n idx = 1\n # upper lane\n x = self.left_bound + np.random.rand()*(self.gap_max-self.gap_min)\n while (x < self.right_bound):\n v_des = self.desire_speed\n 
p_des = 6.\n direction = 1\n self.add_car(idx, x, 6., self.desire_speed, 0., v_des, p_des, direction, 0.)\n x += (np.random.rand()*(self.gap_max-self.gap_min) + self.gap_min + self.car_length)\n idx += 1\n # lower lane\n x = self.right_bound - np.random.rand()*(self.gap_max-self.gap_min)\n while (x > self.left_bound):\n v_des = self.desire_speed\n p_des = 2.\n direction = -1\n self.add_car(idx, x, 2., -self.desire_speed, 0., v_des, p_des, direction, np.pi)\n x -= (np.random.rand()*(self.gap_max-self.gap_min) + self.gap_min + self.car_length)\n idx += 1\n\n self._intentions = self.get_intentions()\n return None\n\n def setup_viewer(self):\n from traffic import rendering\n self.viewer = rendering.Viewer(1200, 800)\n self.viewer.set_bounds(-30.0, 30.0, -20.0, 20.0)\n\n def update_extra_render(self, extra_input):\n if self.observe_mode == 'important':\n important_indices = self.get_important_indices()\n for ind in important_indices:\n if ind is None:\n pass\n else:\n center = self._cars[ind].position - self.get_camera_center()\n attrs = {\"color\":(1.,0.,0.),\"linewidth\":1.}\n from traffic.rendering import make_circle, _add_attrs\n circle = make_circle(radius=1., res=15, filled=False, center=center)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle)\n if self.label_mode == 'important':\n ind_ll, ind_lr, ind_ul, ind_ur = self.get_important_indices()\n for ind in [ind_lr, ind_ul]:\n if ind is None:\n pass\n else:\n center = self._cars[ind].position - self.get_camera_center()\n attrs = {\"color\":(0.,0.,1.),\"linewidth\":1.}\n from traffic.rendering import make_circle, _add_attrs\n circle = make_circle(radius=0.8, res=15, filled=False, center=center)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle)\n if extra_input:\n if ('attention_weight' in extra_input.keys()) and (extra_input['attention_weight'] is not None):\n edge_index = extra_input['attention_weight'][0]\n attention_weight = extra_input['attention_weight'][1]\n upper_indices, lower_indices = self.get_sorted_indices()\n car_indices = [np.nan]*(1+self.max_veh_num)\n car_indices[0] = 0\n car_indices[1:len(lower_indices)+1] = lower_indices[:]\n car_indices[int(self.max_veh_num/2)+1:int(self.max_veh_num/2)+1+len(upper_indices)] = upper_indices[:]\n starts, ends, attentions = [], [], []\n for i in range(edge_index.shape[1]):\n if np.isnan(car_indices[edge_index[0,i]]) or np.isnan(car_indices[edge_index[1,i]]):\n pass\n elif car_indices[edge_index[1,i]] == 0:\n attention = attention_weight[i].item()\n attentions.append(attention)\n car_i = car_indices[edge_index[0,i]]\n car_j = car_indices[edge_index[1,i]]\n start = self._cars[car_i].position - self.get_camera_center()\n end = self._cars[car_j].position - self.get_camera_center()\n starts.append(start)\n ends.append(end)\n rank_index = np.argsort(attentions)\n starts = np.array(starts)[rank_index]\n ends = np.array(ends)[rank_index]\n attentions = np.array(attentions)[rank_index]\n for start, end, attention in zip(starts[-3:],ends[-3:],attentions[-3:]):\n attrs = {\"color\":(1.,0.,1.),\"linewidth\":10.*attention}\n if (start == end).all():\n from traffic.rendering import make_circle, _add_attrs\n circle = make_circle(radius=1., res=15, filled=False, center=start)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle)\n else:\n self.viewer.draw_line(start, end, **attrs)\n if ('intention' in extra_input.keys()) and (extra_input['intention'] is not None):\n car_indices = [np.nan]*self.label_num\n if self.label_mode == 'full':\n upper_indices, lower_indices = 
self.get_sorted_indices()\n car_indices[0:len(lower_indices)] = lower_indices[:]\n car_indices[int(self.max_veh_num/2):int(self.max_veh_num/2)+len(upper_indices)] = upper_indices[:]\n elif self.label_mode == 'important':\n ind_ll, ind_lr, ind_ul, ind_ur = self.get_important_indices()\n car_indices[0] = (ind_lr if ind_lr else np.nan)\n car_indices[1] = (ind_ul if ind_ul else np.nan)\n for car_ind,intention in zip(car_indices,extra_input['intention']):\n if not np.isnan(car_ind):\n from traffic.rendering import make_circle, _add_attrs\n start = self._cars[car_ind].position - self.get_camera_center()\n attrs = {\"color\":(intention[2],intention[0],intention[1])}\n circle = make_circle(radius=0.5, res=15, filled=True, center=start)\n _add_attrs(circle, attrs)\n self.viewer.add_onetime(circle) \n\nif __name__ == '__main__':\n import time\n import pdb\n env = TIntersectionMulti(num_updates=1, yld=0.5, driver_sigma=0.1, \n normalize_obs=True,\n observe_mode='important',\n label_mode='important')\n obs = env.reset()\n img = env.render()\n done = False\n maximum_step = 200\n t = 0\n cr = 0.\n # actions = [*[8]*2,*[8]*4,*[7]*20]\n actions = [*[1]*10,*[1]*20,*[1]*200]\n # actions = np.load('/Users/xiaobaima/Dropbox/SISL/rlkit/tests/Traffic/Data/t_intersection/MyDQNcg0.1expl0.2/seed0/failure1.npy')\n while True: #not done: \n # pdb.set_trace()\n # if t >= actions.shape[0]:\n # action = 7\n # else:\n # action = actions[t][0]\n # action = actions[t]\n # action = np.random.randint(env.action_space.n)\n action = input(\"Action\\n\")\n action = int(action)\n while action < 0:\n t = 0\n env.reset()\n env.render()\n action = input(\"Action\\n\")\n action = int(action)\n t += 1\n obs, reward, done, info = env.step(action)\n print('t: ', t)\n print('action: ',action)\n print('obs: ', obs)\n print('reward: ', reward)\n print('info: ', info)\n cr += reward\n env.render()\n time.sleep(0.1)\n if (t > maximum_step) or done:\n print('cr: ',cr)\n pdb.set_trace()\n # if env._collision or env._outroad:\n # pdb.set_trace()\n t = 0\n cr = 0.\n env.reset()\n env.close()\n"
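For reference, the discrete action space of `TIntersectionMulti` above is just the Cartesian product of the speed and lateral-offset targets; a flat index into `rl_actions` recovers the `(v_des, t_des)` pair that `update` hands to the ego driver:

```python
# Mirrors: self.rl_actions = list(itertools.product(vs_actions, t_actions))
import itertools

vs_actions = [0., 0.5, 3.]
t_actions = [-1.5, 0., 1.5]
rl_actions = list(itertools.product(vs_actions, t_actions))

print(len(rl_actions))  # 9 == action_space.n
print(rl_actions[4])    # (0.5, 0.0): index 4 -> v_des=0.5, t_des=0.0
# inside update(): v_des, t_des = rl_actions[action]
```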
] | [
[
"torch.nn.Linear",
"torch.manual_seed",
"torch.nn.ReLU",
"numpy.random.seed"
],
[
"torch.load"
],
[
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.save"
],
[
"torch.nn.Linear",
"torch.manual_seed",
"numpy.random.seed"
],
[
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.save"
],
[
"torch.tanh",
"numpy.array",
"torch.no_grad",
"torch.tensor"
],
[
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.arange",
"torch.save"
],
[
"numpy.array",
"numpy.zeros",
"torch.load"
],
[
"numpy.abs",
"numpy.sqrt",
"numpy.clip",
"numpy.isnan",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.sign",
"numpy.arctan2",
"numpy.copy",
"numpy.max",
"numpy.random.rand",
"numpy.argsort",
"numpy.array",
"numpy.sum"
],
[
"torch.abs",
"torch.cat",
"torch.zeros",
"numpy.arange",
"torch.reshape",
"torch.distributions.Categorical",
"torch.no_grad",
"numpy.argmax",
"torch.arange",
"numpy.array",
"torch.argmax"
],
[
"numpy.array",
"numpy.clip"
],
[
"torch.mean",
"torch.ones",
"torch.isnan",
"torch.zeros",
"torch.tensor",
"torch.no_grad",
"torch.argmax"
],
[
"numpy.log",
"torch.ones",
"torch.zeros",
"torch.min",
"torch.tensor",
"torch.log",
"torch.stack",
"torch.clamp",
"torch.nn.MSELoss"
],
[
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.log",
"torch.cat",
"torch.min",
"torch.sum",
"torch.tensor",
"torch.no_grad",
"numpy.prod",
"torch.stack",
"torch.clamp",
"torch.nn.MSELoss"
],
[
"numpy.abs",
"numpy.sqrt",
"numpy.clip",
"numpy.isnan",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.arctan2",
"numpy.max",
"numpy.copy",
"numpy.ones",
"numpy.random.rand",
"numpy.argsort",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jrsassen/megaman | [
"faccaf267aad0a8b18ec8a705735fd9dd838ca1e",
"6583e462bc05c003c6c5e030ba993c5e30477720"
] | [
"megaman/geometry/tests/test_adjacency.py",
"megaman/embedding/spectral_embedding.py"
] | [
"# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE\n\nfrom nose import SkipTest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_raises, assert_equal\nfrom scipy.sparse import isspmatrix\nfrom scipy.spatial.distance import cdist, pdist, squareform\n\nfrom megaman.geometry import (Geometry, compute_adjacency_matrix, Adjacency,\n adjacency_methods)\n\n\ntry:\n import pyflann as pyf\n NO_PYFLANN = False\nexcept ImportError:\n NO_PYFLANN = True\n\n\ndef test_adjacency_methods():\n assert_equal(set(adjacency_methods()),\n {'auto', 'pyflann', 'ball_tree',\n 'cyflann', 'brute', 'kd_tree'})\n\n\ndef test_adjacency_input_validation():\n X = np.random.rand(20, 3)\n # need to specify radius or n_neighbors\n assert_raises(ValueError, compute_adjacency_matrix, X)\n # cannot specify both radius and n_neighbors\n assert_raises(ValueError, compute_adjacency_matrix, X,\n radius=1, n_neighbors=10)\n\n\ndef test_adjacency():\n rng = np.random.RandomState(36)\n X = rng.rand(100, 3)\n Gtrue = {}\n\n exact_methods = [m for m in Adjacency.methods()\n if not m.endswith('flann')]\n\n def check_kneighbors(n_neighbors, method):\n if method == 'pyflann' and NO_PYFLANN:\n raise SkipTest(\"pyflann not installed\")\n\n G = compute_adjacency_matrix(X, method=method,\n n_neighbors=n_neighbors)\n assert isspmatrix(G)\n assert G.shape == (X.shape[0], X.shape[0])\n if method in exact_methods:\n assert_allclose(G.toarray(), Gtrue[n_neighbors].toarray())\n\n def check_radius(radius, method):\n if method == 'pyflann' and NO_PYFLANN:\n raise SkipTest(\"pyflann not installed\")\n\n G = compute_adjacency_matrix(X, method=method,\n radius=radius)\n assert isspmatrix(G)\n assert G.shape == (X.shape[0], X.shape[0])\n if method in exact_methods:\n assert_allclose(G.toarray(), Gtrue[radius].toarray())\n\n for n_neighbors in [5, 10, 15]:\n Gtrue[n_neighbors] = compute_adjacency_matrix(X, method='brute',\n n_neighbors=n_neighbors)\n for method in Adjacency.methods():\n yield check_kneighbors, n_neighbors, method\n\n for radius in [0.1, 0.5, 1.0]:\n Gtrue[radius] = compute_adjacency_matrix(X, method='brute',\n radius=radius)\n for method in Adjacency.methods():\n yield check_radius, radius, method\n\n\ndef test_unknown_method():\n X = np.arange(20).reshape((10, 2))\n assert_raises(ValueError, compute_adjacency_matrix, X, 'foo')\n\n\ndef test_all_methods_close():\n rand = np.random.RandomState(36)\n X = rand.randn(10, 2)\n D_true = squareform(pdist(X))\n D_true[D_true > 0.5] = 0\n\n def check_method(method):\n kwargs = {}\n if method == 'pyflann':\n try:\n import pyflann as pyf\n except ImportError:\n raise SkipTest(\"pyflann not installed.\")\n flindex = pyf.FLANN()\n flindex.build_index(X, algorithm='kmeans',\n target_precision=0.9)\n kwargs['flann_index'] = flindex\n this_D = compute_adjacency_matrix(X, method=method, radius=0.5,\n **kwargs)\n assert_allclose(this_D.toarray(), D_true, rtol=1E-5)\n\n for method in ['auto', 'cyflann', 'pyflann', 'brute']:\n yield check_method, method\n\n\ndef test_custom_adjacency():\n class CustomAdjacency(Adjacency):\n name = \"custom\"\n def adjacency_graph(self, X):\n return squareform(pdist(X))\n\n rand = np.random.RandomState(42)\n X = rand.rand(10, 2)\n D = compute_adjacency_matrix(X, method='custom', radius=1)\n assert_allclose(D, cdist(X, X))\n\n Adjacency._remove_from_registry(\"custom\")\n\ndef test_cyflann_index_type():\n rand = np.random.RandomState(36)\n X = rand.randn(10, 2)\n D_true = squareform(pdist(X))\n D_true[D_true > 1.5] = 0\n \n 
def check_index_type(index_type):\n radius = 1.5\n cyflann_kwds = {'index_type': index_type}\n adjacency_kwds = {'radius': radius, 'cyflann_kwds': cyflann_kwds}\n this_D = compute_adjacency_matrix(X=X, method='cyflann', **adjacency_kwds)\n assert_allclose(this_D.toarray(), D_true, rtol=1E-5, atol=1E-5)\n\n for index_type in ['kmeans', 'kdtrees']:\n yield check_index_type, index_type",
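The test module above pins down the adjacency API: compute_adjacency_matrix takes exactly one of n_neighbors or radius, accepts a method from the registry, and returns a sparse (n, n) matrix. A minimal usage sketch mirroring those assertions (assumes megaman and scipy are installed; the method names come from the test_adjacency_methods expectations):

import numpy as np
from megaman.geometry import compute_adjacency_matrix

X = np.random.RandomState(0).rand(50, 3)
# k-nearest-neighbor graph: a sparse (50, 50) scipy matrix
G_knn = compute_adjacency_matrix(X, method='brute', n_neighbors=5)
# radius-neighbor graph; passing both radius and n_neighbors raises ValueError
G_rad = compute_adjacency_matrix(X, method='kd_tree', radius=0.5)
print(G_knn.shape, G_rad.nnz)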
"\"\"\"Spectral Embedding\"\"\"\n\n# Author: Marina Meila <[email protected]>\n# James McQueen <[email protected]>\n# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE\n#\n# after the scikit-learn version by\n# Gael Varoquaux <[email protected]>\n# Wei LI <[email protected]>\n#\n# diffusion maps portion after:\n# Satrajit Ghosh <[email protected]> https://github.com/satra/mapalign/blob/master/mapalign/embed.py\n# License: BSD 3 clause\n\nimport warnings\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.sparse.csgraph import connected_components\nfrom ..embedding.base import BaseEmbedding\nfrom ..utils.validation import check_random_state\nfrom ..utils.eigendecomp import eigen_decomposition, check_eigen_solver\nfrom ..geometry.complete_adjacency_matrix import complete_adjacency_matrix\nfrom ..geometry.affinity import compute_affinity_matrix\nfrom ..geometry.laplacian import compute_laplacian_matrix\nfrom ..utils.nystrom_extension import nystrom_extension\n\ndef _graph_connected_component(graph, node_id):\n \"\"\"\n Find the largest graph connected components the contains one\n given node\n\n Parameters\n ----------\n graph : array-like, shape: (n_samples, n_samples)\n adjacency matrix of the graph, non-zero weight means an edge\n between the nodes\n\n node_id : int\n The index of the query node of the graph\n\n Returns\n -------\n connected_components : array-like, shape: (n_samples,)\n An array of bool value indicates the indexes of the nodes\n belong to the largest connected components of the given query\n node\n \"\"\"\n connected_components = np.zeros(shape=(graph.shape[0]), dtype=np.bool)\n connected_components[node_id] = True\n n_node = graph.shape[0]\n for i in range(n_node):\n last_num_component = connected_components.sum()\n _, node_to_add = np.where(graph[connected_components] != 0)\n connected_components[node_to_add] = True\n if last_num_component >= connected_components.sum():\n break\n return connected_components\n\n\ndef _graph_is_connected(graph):\n \"\"\"\n Return whether the graph is connected (True) or Not (False)\n\n Parameters\n ----------\n graph : array-like or sparse matrix, shape: (n_samples, n_samples)\n adjacency matrix of the graph, non-zero weight means an edge\n between the nodes\n\n Returns\n -------\n is_connected : bool\n True means the graph is fully connected and False means not\n \"\"\"\n if sparse.isspmatrix(graph):\n # sparse graph, find all the connected components\n n_connected_components, _ = connected_components(graph)\n return n_connected_components == 1\n else:\n # dense graph, find all connected components start from node 0\n return _graph_connected_component(graph, 0).sum() == graph.shape[0]\n\ndef compute_diffusion_maps(lapl_type, diffusion_map, lambdas, diffusion_time):\n \"\"\" Credit to Satrajit Ghosh (http://satra.cogitatum.org/) for final steps \"\"\"\n # Check that diffusion maps is using the correct laplacian, warn otherwise\n if lapl_type not in ['geometric', 'renormalized']:\n warnings.warn(\"for correct diffusion maps embedding use laplacian type 'geometric' or 'renormalized'.\")\n # Step 5 of diffusion maps:\n vectors = diffusion_map.copy()\n psi = vectors/vectors[:,[0]]\n diffusion_times = diffusion_time\n if diffusion_time == 0:\n lambdas = np.abs(lambdas)\n diffusion_times = np.exp(1. 
- np.log(1 - lambdas[1:])/np.log(lambdas[1:]))\n lambdas = lambdas / (1 - lambdas)\n else:\n lambdas = np.abs(lambdas)\n lambdas = lambdas ** float(diffusion_time)\n diffusion_map = psi * lambdas\n return diffusion_map\n\ndef spectral_embedding(geom, n_components=8, eigen_solver='auto',\n random_state=None, drop_first=True,\n diffusion_maps = False, diffusion_time = 0, solver_kwds = None):\n \"\"\"\n Project the sample on the first eigen vectors of the graph Laplacian.\n\n The adjacency matrix is used to compute a normalized graph Laplacian\n whose principal eigenvectors (associated to the\n smallest eigen values) represent the embedding coordinates of the data.\n\n The ``adjacency`` variable is not strictly the adjacency matrix of a graph but more generally\n an affinity or similarity matrix between samples (for instance the\n heat kernel of a euclidean distance matrix or a k-NN matrix).\n The Laplacian must be symmetric so that the eigen vector decomposition works as expected.\n This is ensured by the default setting (for more details,\n see the documentation in geometry.py).\n\n The data and generic geometric parameters are passed via a Geometry object, which also\n computes the Laplacian. By default, the 'geometric' Laplacian (or \"debiased\", or \"renormalized\" with\n alpha=1) is used. This is the Laplacian construction defined in [Coifman and Lafon, 2006] (see also\n documentation in laplacian.py). Thus, with diffusion_maps=False, spectral embedding is a modification\n of the Laplacian Eigenmaps algorithm of [Belkin and Nyiogi, 2002], with diffusion_maps=False, geom.laplacian_method\n ='symmetricnormalized' it is exactly the Laplacian Eigenmaps, with diffusion_maps=True, diffusion_time>0 it\n is the Diffusion Maps algorithm of [Coifman and Lafon 2006]; diffusion_maps=True and diffusion_time=0 is the same\n as diffusion_maps=False and default geom.laplacian_method.\n\n Parameters\n ----------\n geom : a Geometry object from megaman.embedding.geometry\n n_components : integer, optional\n The dimension of the projection subspace.\n eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', 'amg' or 'samg'}\n 'auto' :\n algorithm will attempt to choose the best method for input data\n 'dense' :\n use standard dense matrix operations for the eigenvalue decomposition.\n For this method, M must be an array or matrix type. This method should be avoided for large problems.\n 'arpack' :\n use arnoldi iteration in shift-invert mode. For this method,\n M may be a dense matrix, sparse matrix, or general linear operator.\n Warning: ARPACK can be unstable for some problems. It is best to\n try several random seeds in order to check results.\n 'lobpcg' :\n Locally Optimal Block Preconditioned Conjugate Gradient Method.\n A preconditioned eigensolver for large symmetric positive definite\n (SPD) generalized eigenproblems.\n 'amg' :\n AMG requires pyamg to be installed. It can be faster on very large,\n sparse problems, but may also lead to instabilities.\n 'samg' :\n Algebraic Multigrid solver from Fraunhofer SCAI (requires\n ``Fraunhofer SAMG`` and ``pysamg`` to be installed). It can be\n significantly faster on very large, sparse problems. Note that SAMG\n is a commercial product and one needs a license to use it. 
For\n licensing (including test or educational licenses)\n contact [email protected]\n random_state : int seed, RandomState instance, or None (default)\n A pseudo random number generator used for the initialization of the\n lobpcg eigen vectors decomposition when eigen_solver == 'amg'\n or eigen_solver == 'samg'.\n By default, arpack is used.\n drop_first : bool, optional, default=True\n Whether to drop the first eigenvector. For spectral embedding, this\n should be True as the first eigenvector should be constant vector for\n connected graph, but for spectral clustering, this should be kept as\n False to retain the first eigenvector.\n diffusion_map : boolean, optional. Whether to return the diffusion map\n version by re-scaling the embedding coordinate by the eigenvalues to the power\n diffusion_time.\n diffusion_time: if diffusion_map=True, the eigenvectors of the Laplacian are rescaled by\n (1-lambda)^diffusion_time, where lambda is the corresponding eigenvalue.\n diffusion_time has the role of scale parameter. One of the main ideas of diffusion framework is\n that running the diffusion forward in time (taking larger and larger\n powers of the Laplacian/transition matrix) reveals the geometric structure of X at larger and\n larger scales (the diffusion process).\n diffusion_time = 0 empirically provides a reasonable balance from a clustering\n perspective. Specifically, the notion of a cluster in the data set\n is quantified as a region in which the probability of escaping this\n region is low (within a certain time t).\n Credit to Satrajit Ghosh (http://satra.cogitatum.org/) for description\n solver_kwds : any additional keyword arguments to pass to the selected eigen_solver\n\n Returns\n -------\n embedding : array, shape=(n_samples, n_components)\n The reduced samples.\n\n Notes\n -----\n Spectral embedding is most useful when the graph has one connected\n component. If there graph has many components, the first few eigenvectors\n will simply uncover the connected components of the graph.\n\n References\n ----------\n * http://en.wikipedia.org/wiki/LOBPCG\n\n * Toward the Optimal Preconditioned Eigensolver: Locally Optimal\n Block Preconditioned Conjugate Gradient Method\n Andrew V. 
Knyazev\n http://dx.doi.org/10.1137%2FS1064827500366124\n \"\"\"\n random_state = check_random_state(random_state)\n\n if geom.affinity_matrix is None:\n geom.compute_affinity_matrix()\n if not _graph_is_connected(geom.affinity_matrix):\n warnings.warn(\"Graph is not fully connected: \"\n \"spectral embedding may not work as expected.\")\n\n if geom.laplacian_matrix is None:\n laplacian = geom.compute_laplacian_matrix(copy=False,\n return_lapsym=True)\n else:\n laplacian = geom.laplacian_matrix\n\n n_nodes = laplacian.shape[0]\n lapl_type = geom.laplacian_method\n eigen_solver, solver_kwds = check_eigen_solver(eigen_solver,solver_kwds,\n size=laplacian.shape[0],\n nvec=n_components + 1)\n re_normalize = False\n PD_solver = False\n if eigen_solver in ['samg', 'amg', 'lobpcg']: # these methods require a symmetric positive definite matrix!\n epsilon = 2\n PD_solver = True\n if lapl_type not in ['symmetricnormalized', 'unnormalized']:\n re_normalize = True\n # If lobpcg (or amg/samg with lobpcg) is chosen and\n # If the Laplacian is non-symmetric then we need to extract:\n # the w (weight) vector from geometry\n # and the symmetric Laplacian = S.\n # The actual Laplacian is L = W^{-1}S (Where W is the diagonal matrix of w)\n # Which has the same spectrum as: L* = W^{-1/2}SW^{-1/2} which is symmetric\n # We calculate the eigen-decomposition of L*: [D, V]\n # then use W^{-1/2}V to compute the eigenvectors of L\n # See (Handbook for Cluster Analysis Chapter 2 Proposition 1).\n # However, since we censor the affinity matrix A at a radius it is not guaranteed\n # to be positive definite. But since L = W^{-1}S has maximum eigenvalue 1 (stochastic matrix)\n # and L* has the same spectrum it also has largest e-value of 1.\n # therefore if we look at I - L* then this has smallest eigenvalue of 0 and so\n # must be positive semi-definite. 
It also has the same spectrum as L* but\n # lambda(I - L*) = 1 - lambda(L*).\n # Finally, since we want positive definite not semi-definite we use (1+epsilon)*I\n # instead of I to make the smallest eigenvalue epsilon.\n if geom.laplacian_weights is None: # a laplacian existed but it wasn't called with return_lapsym = True\n geom.compute_laplacian_matrix(copy = False, return_lapsym = True)\n w = np.array(geom.laplacian_weights)\n symmetrized_laplacian = geom.laplacian_symmetric.copy()\n if sparse.isspmatrix(symmetrized_laplacian):\n symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.row])\n symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.col])\n symmetrized_laplacian = (1+epsilon)*sparse.identity(n_nodes) - symmetrized_laplacian\n else:\n symmetrized_laplacian /= np.sqrt(w)\n symmetrized_laplacian /= np.sqrt(w[:,np.newaxis])\n symmetrized_laplacian = (1+epsilon)*np.identity(n_nodes) - symmetrized_laplacian\n else: # using a symmetric laplacian but adjust to avoid positive definite errors\n symmetrized_laplacian = geom.laplacian_matrix.copy()\n if sparse.isspmatrix(symmetrized_laplacian):\n symmetrized_laplacian = (1+epsilon)*sparse.identity(n_nodes) - symmetrized_laplacian\n else:\n symmetrized_laplacian = (1+epsilon)*np.identity(n_nodes) - symmetrized_laplacian\n\n if PD_solver: # then eI - L was used, fix the eigenvalues\n lambdas, diffusion_map = eigen_decomposition(symmetrized_laplacian, n_components+1, eigen_solver=eigen_solver,\n random_state=random_state, drop_first=drop_first, largest = False,\n solver_kwds=solver_kwds)\n lambdas = -lambdas + epsilon\n else:\n lambdas, diffusion_map = eigen_decomposition(laplacian, n_components+1, eigen_solver=eigen_solver,\n random_state=random_state, drop_first=drop_first, largest = True,\n solver_kwds=solver_kwds)\n if re_normalize:\n diffusion_map /= np.sqrt(w[:, np.newaxis]) # put back on original Laplacian space\n diffusion_map /= np.linalg.norm(diffusion_map, axis = 0) # norm 1 vectors\n # sort the eigenvalues\n ind = np.argsort(lambdas); ind = ind[::-1]\n lambdas = lambdas[ind]; lambdas[0] = 0\n diffusion_map = diffusion_map[:, ind]\n eigenvalues = lambdas.copy()\n eigenvectors = diffusion_map.copy()\n if diffusion_maps:\n diffusion_map = compute_diffusion_maps(lapl_type, diffusion_map, lambdas, diffusion_time)\n if drop_first:\n embedding = diffusion_map[:, 1:(n_components+1)]\n eigenvectors = eigenvectors[:, 1:(n_components+1)]\n eigenvalues = eigenvalues[1:(n_components+1)]\n else:\n embedding = diffusion_map[:, :n_components]\n eigenvectors = eigenvectors[:, :(n_components)]\n eigenvalues = eigenvalues[:(n_components)]\n return embedding, eigenvalues, eigenvectors\n\n\nclass SpectralEmbedding(BaseEmbedding):\n \"\"\"\n Spectral embedding for non-linear dimensionality reduction.\n\n Forms an affinity matrix given by the specified function and\n applies spectral decomposition to the corresponding graph laplacian.\n The resulting transformation is given by the value of the\n eigenvectors for each data point.\n\n Parameters\n ----------\n\n n_components : integer\n number of coordinates for the manifold.\n radius : float (optional)\n radius for adjacency and affinity calculations.
Will be overridden if\n either is set in `geom`\n geom : dict or megaman.geometry.Geometry object\n specification of geometry parameters: keys are\n [\"adjacency_method\", \"adjacency_kwds\", \"affinity_method\",\n \"affinity_kwds\", \"laplacian_method\", \"laplacian_kwds\"]\n eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', 'amg' or 'samg'}\n 'auto' :\n algorithm will attempt to choose the best method for input data\n 'dense' :\n use standard dense matrix operations for the eigenvalue decomposition.\n For this method, M must be an array or matrix type. This method should be avoided for large problems.\n 'arpack' :\n use arnoldi iteration in shift-invert mode. For this method,\n M may be a dense matrix, sparse matrix, or general linear operator.\n Warning: ARPACK can be unstable for some problems. It is best to\n try several random seeds in order to check results.\n 'lobpcg' :\n Locally Optimal Block Preconditioned Conjugate Gradient Method.\n A preconditioned eigensolver for large symmetric positive definite\n (SPD) generalized eigenproblems.\n 'amg' :\n AMG requires pyamg to be installed. It can be faster on very large,\n sparse problems, but may also lead to instabilities.\n 'samg' :\n Algebraic Multigrid solver from Fraunhofer SCAI (requires\n ``Fraunhofer SAMG`` and ``pysamg`` to be installed). It can be\n significantly faster on very large, sparse problems. Note that SAMG\n is a commercial product and one needs a license to use it. For\n licensing (including test or educational licenses)\n contact [email protected]\n random_state : numpy.RandomState or int, optional\n The generator or seed used to determine the starting vector for arpack\n iterations. Defaults to numpy.random.RandomState\n drop_first : bool, optional, default=True\n Whether to drop the first eigenvector. For spectral embedding, this\n should be True as the first eigenvector should be constant vector for\n connected graph, but for spectral clustering, this should be kept as\n False to retain the first eigenvector.\n diffusion_map : boolean, optional. Whether to return the diffusion map\n version by re-scaling the embedding by the eigenvalues.\n solver_kwds : any additional keyword arguments to pass to the selected eigen_solver\n\n References\n ----------\n .. [1] A Tutorial on Spectral Clustering, 2007\n Ulrike von Luxburg\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323\n\n .. [2] On Spectral Clustering: Analysis and an algorithm, 2011\n Andrew Y. Ng, Michael I. Jordan, Yair Weiss\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100\n\n .. [3] Normalized cuts and image segmentation, 2000\n Jianbo Shi, Jitendra Malik\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324\n \"\"\"\n def __init__(self, n_components=2, radius=None, geom=None,\n eigen_solver='auto', random_state=None,\n drop_first=True, diffusion_maps=False, diffusion_time=0,solver_kwds=None):\n self.n_components = n_components\n self.radius = radius\n self.geom = geom\n self.eigen_solver = eigen_solver\n self.random_state = random_state\n self.drop_first = drop_first\n self.diffusion_maps = diffusion_maps\n self.diffusion_time = diffusion_time\n self.solver_kwds = solver_kwds\n\n def fit(self, X, y=None, input_type='data'):\n \"\"\"\n Fit the model from data in X.\n\n Parameters\n ----------\n input_type : string, one of: 'data', 'distance' or 'affinity'.\n The values of input data X. 
(default = 'data')\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples\n and n_features is the number of features.\n\n If self.input_type is distance, or affinity:\n\n X : array-like, shape (n_samples, n_samples),\n Interpret X as precomputed distance or adjacency graph\n computed from samples.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n X = self._validate_input(X, input_type)\n self.fit_geometry(X, input_type)\n random_state = check_random_state(self.random_state)\n self.embedding_, self.eigenvalues_, self.eigenvectors_ = spectral_embedding(self.geom_,\n n_components = self.n_components,\n eigen_solver = self.eigen_solver,\n random_state = random_state,\n drop_first = self.drop_first,\n diffusion_maps = self.diffusion_maps,\n diffusion_time = self.diffusion_time,\n solver_kwds = self.solver_kwds)\n self.affinity_matrix_ = self.geom_.affinity_matrix\n self.laplacian_matrix_ = self.geom_.laplacian_matrix\n self.laplacian_matrix_type_ = self.geom_.laplacian_method\n return self\n\n def predict(self, X_test, y=None):\n \"\"\"\n Predict embedding on new data X_test given the existing embedding on training data\n\n Uses the Nystrom Extension to estimate the eigenvectors.\n\n Currently only works with input_type data (i.e. not affinity or distance)\n \"\"\"\n if not hasattr(self, 'geom_'):\n raise RuntimeError('the .fit() function must be called before the .predict() function')\n if self.geom_.X is None:\n raise NotImplementedError('method only implemented when X passed as data')\n # Complete the adjacency matrix\n adjacency_kwds = self.geom_.adjacency_kwds\n if self.geom_.adjacency_method == 'cyflann':\n if 'cyflann_kwds' in adjacency_kwds.keys():\n cyflann_kwds = adjacency_kwds['cyflann_kwds']\n else:\n cyflann_kwds = {}\n total_adjacency_matrix = complete_adjacency_matrix(self.geom_.adjacency_matrix,\n self.geom_.X,\n X_test, adjacency_kwds)\n # Compute the affinity matrix, check method and kwds\n if self.geom_.affinity_kwds is not None:\n affinity_kwds = self.geom_.affinity_kwds\n else:\n affinity_kwds = {}\n if self.geom_.affinity_method is not None:\n affinity_method = self.geom_.affinity_method\n else:\n affinity_method = 'auto'\n total_affinity_matrix = compute_affinity_matrix(total_adjacency_matrix, affinity_method,\n **affinity_kwds)\n # Compute the laplacian matrix, check method and kwds\n if self.geom_.laplacian_kwds is not None:\n laplacian_kwds = self.geom_.laplacian_kwds\n else:\n laplacian_kwds = {}\n if self.geom_.laplacian_method is not None:\n laplacian_method = self.geom_.laplacian_method\n else:\n laplacian_method = 'auto'\n total_laplacian_matrix = compute_laplacian_matrix(total_affinity_matrix, laplacian_method,\n **laplacian_kwds)\n # Take the columns of Laplacian and existing embedding and pass to Nystrom Extension\n n_sample_train = self.geom_.adjacency_matrix.shape[0]\n total_laplacian_matrix = total_laplacian_matrix.tocsr()\n C = total_laplacian_matrix[:, :n_sample_train]\n eigenvalues, eigenvectors = nystrom_extension(C, self.eigenvectors_, self.eigenvalues_)\n # If diffusion maps compute diffusion time etc\n if self.diffusion_maps:\n embedding = compute_diffusion_maps(laplacian_method, eigenvectors, eigenvalues, self.diffusion_time)\n else:\n embedding = eigenvectors\n n_sample_test = X_test.shape[0]\n embedding_test = embedding[-n_sample_test:, :]\n return embedding_test, embedding\n"
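A minimal end-to-end sketch of the class defined above (hedged: the package-level import path and the exact geom keyword values are assumptions; the constructor arguments, fit(), and the fitted attributes come straight from the code):

import numpy as np
from megaman.embedding import SpectralEmbedding  # assumed package-level export

X = np.random.RandomState(0).rand(100, 3)
geom = {'adjacency_method': 'brute', 'adjacency_kwds': {'radius': 0.5},
        'affinity_kwds': {'radius': 0.5}, 'laplacian_method': 'geometric'}
model = SpectralEmbedding(n_components=2, eigen_solver='arpack', geom=geom)
model.fit(X)           # sets embedding_, eigenvalues_, eigenvectors_
Y = model.embedding_   # shape (100, 2)
# diffusion-maps variant: eigenvectors rescaled by lambdas ** diffusion_time
dm = SpectralEmbedding(n_components=2, geom=geom, diffusion_maps=True,
                       diffusion_time=1.0).fit(X).embedding_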
] | [
[
"scipy.sparse.isspmatrix",
"numpy.arange",
"scipy.spatial.distance.cdist",
"scipy.spatial.distance.pdist",
"numpy.testing.assert_raises",
"numpy.random.rand",
"numpy.random.RandomState"
],
[
"scipy.sparse.isspmatrix",
"numpy.log",
"numpy.abs",
"numpy.sqrt",
"numpy.linalg.norm",
"scipy.sparse.identity",
"numpy.identity",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
scopatz/PyTables | [
"05a74def785688abd802224a5ba44393a701ebc7",
"05a74def785688abd802224a5ba44393a701ebc7",
"05a74def785688abd802224a5ba44393a701ebc7",
"05a74def785688abd802224a5ba44393a701ebc7",
"05a74def785688abd802224a5ba44393a701ebc7",
"05a74def785688abd802224a5ba44393a701ebc7"
] | [
"bench/create-large-number-objects.py",
"tables/tests/common.py",
"examples/tutorial2.py",
"tables/file.py",
"examples/particles.py",
"bench/poly.py"
] | [
"\"This creates an HDF5 file with a potentially large number of objects\"\n\nimport sys\nimport numpy\nimport tables\n\nfilename = sys.argv[1]\n\n# Open a new empty HDF5 file\nfileh = tables.open_file(filename, mode=\"w\")\n\n# nlevels -- Number of levels in hierarchy\n# ngroups -- Number of groups on each level\n# ndatasets -- Number of arrays on each group\n# LR: Low ratio groups/datasets\n#nlevels, ngroups, ndatasets = (3, 1, 1000)\n# MR: Medium ratio groups/datasets\nnlevels, ngroups, ndatasets = (3, 10, 100)\n#nlevels, ngroups, ndatasets = (3, 5, 10)\n# HR: High ratio groups/datasets\n#nlevels, ngroups, ndatasets = (30, 10, 10)\n\n# Create an Array to save on disk\na = numpy.array([-1, 2, 4], numpy.int16)\n\ngroup = fileh.root\ngroup2 = fileh.root\nfor k in range(nlevels):\n for j in range(ngroups):\n for i in range(ndatasets):\n # Save the array on the HDF5 file\n fileh.create_array(group2, 'array' + str(i),\n a, \"Signed short array\")\n # Create a new group\n group2 = fileh.create_group(group, 'group' + str(j))\n # Create a new group\n group3 = fileh.create_group(group, 'ngroup' + str(k))\n # Iterate over this new group (group3)\n group = group3\n group2 = group3\n\nfileh.close()\n",
"# -*- coding: utf-8 -*-\n\n########################################################################\n#\n# License: BSD\n# Created: 2005-05-24\n# Author: Ivan Vilata i Balaguer - [email protected]\n#\n# $Id$\n#\n########################################################################\n\n\"\"\"Utilities for PyTables' test suites.\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport re\nimport sys\nimport time\nimport locale\nimport platform\nimport tempfile\nimport warnings\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n if sys.version_info < (2, 7):\n raise\n else:\n import unittest\n\nimport numpy\nimport numexpr\n\nimport tables\nfrom tables.utils import detect_number_of_cores\n\nverbose = False\n\"\"\"Show detailed output of the testing process.\"\"\"\n\nheavy = False\n\"\"\"Run all tests even when they take long to complete.\"\"\"\n\nshow_memory = False\n\"\"\"Show the progress of memory consumption.\"\"\"\n\n\ndef parse_argv(argv):\n global verbose, heavy\n\n if 'verbose' in argv:\n verbose = True\n argv.remove('verbose')\n\n if 'silent' in argv: # take care of old flag, just in case\n verbose = False\n argv.remove('silent')\n\n if '--heavy' in argv:\n heavy = True\n argv.remove('--heavy')\n\n return argv\n\n\nzlib_avail = tables.which_lib_version(\"zlib\") is not None\nlzo_avail = tables.which_lib_version(\"lzo\") is not None\nbzip2_avail = tables.which_lib_version(\"bzip2\") is not None\nblosc_avail = tables.which_lib_version(\"blosc\") is not None\n\n\ndef print_heavy(heavy):\n if heavy:\n print(\"\"\"Performing the complete test suite!\"\"\")\n else:\n print(\"\"\"\\\nPerforming only a light (yet comprehensive) subset of the test suite.\nIf you want a more complete test, try passing the --heavy flag to this script\n(or set the 'heavy' parameter in case you are using tables.test() call).\nThe whole suite will take more than 4 hours to complete on a relatively\nmodern CPU and around 512 MB of main memory.\"\"\")\n print('-=' * 38)\n\n\ndef print_versions():\n \"\"\"Print all the versions of software that PyTables relies on.\"\"\"\n\n print('-=' * 38)\n print(\"PyTables version: %s\" % tables.__version__)\n print(\"HDF5 version: %s\" % tables.which_lib_version(\"hdf5\")[1])\n print(\"NumPy version: %s\" % numpy.__version__)\n tinfo = tables.which_lib_version(\"zlib\")\n if numexpr.use_vml:\n # Get only the main version number and strip out all the rest\n vml_version = numexpr.get_vml_version()\n vml_version = re.findall(\"[0-9.]+\", vml_version)[0]\n vml_avail = \"using VML/MKL %s\" % vml_version\n else:\n vml_avail = \"not using Intel's VML/MKL\"\n print(\"Numexpr version: %s (%s)\" % (numexpr.__version__, vml_avail))\n if tinfo is not None:\n print(\"Zlib version: %s (%s)\" % (tinfo[1],\n \"in Python interpreter\"))\n tinfo = tables.which_lib_version(\"lzo\")\n if tinfo is not None:\n print(\"LZO version: %s (%s)\" % (tinfo[1], tinfo[2]))\n tinfo = tables.which_lib_version(\"bzip2\")\n if tinfo is not None:\n print(\"BZIP2 version: %s (%s)\" % (tinfo[1], tinfo[2]))\n tinfo = tables.which_lib_version(\"blosc\")\n if tinfo is not None:\n blosc_date = tinfo[2].split()[1]\n print(\"Blosc version: %s (%s)\" % (tinfo[1], blosc_date))\n blosc_cinfo = tables.blosc_get_complib_info()\n blosc_cinfo = [\n \"%s (%s)\" % (k, v[1]) for k, v in sorted(blosc_cinfo.items())\n ]\n print(\"Blosc compressors: %s\" % ', '.join(blosc_cinfo))\n try:\n from Cython import __version__ as cython_version\n print('Cython version: %s' % cython_version)\n except:\n pass\n print('Python 
version: %s' % sys.version)\n print('Platform: %s' % platform.platform())\n #if os.name == 'posix':\n # (sysname, nodename, release, version, machine) = os.uname()\n # print('Platform: %s-%s' % (sys.platform, machine))\n print('Byte-ordering: %s' % sys.byteorder)\n print('Detected cores: %s' % detect_number_of_cores())\n print('Default encoding: %s' % sys.getdefaultencoding())\n print('Default FS encoding: %s' % sys.getfilesystemencoding())\n print('Default locale: (%s, %s)' % locale.getdefaultlocale())\n print('-=' * 38)\n\n # This should improve readability whan tests are run by CI tools\n sys.stdout.flush()\n\n\ndef verbosePrint(string, nonl=False):\n \"\"\"Print out the `string` if verbose output is enabled.\"\"\"\n if not verbose:\n return\n if nonl:\n print(string, end=' ')\n else:\n print(string)\n\n\ndef allequal(a, b, flavor=\"numpy\"):\n \"\"\"Checks if two numerical objects are equal.\"\"\"\n\n # print(\"a-->\", repr(a))\n # print(\"b-->\", repr(b))\n if not hasattr(b, \"shape\"):\n # Scalar case\n return a == b\n\n if ((not hasattr(a, \"shape\") or a.shape == ()) and\n (not hasattr(b, \"shape\") or b.shape == ())):\n return a == b\n\n if a.shape != b.shape:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # Way to check the type equality without byteorder considerations\n if hasattr(b, \"dtype\") and a.dtype.str[1:] != b.dtype.str[1:]:\n if verbose:\n print(\"dtype is not equal:\", a.dtype, \"!=\", b.dtype)\n return 0\n\n # Rank-0 case\n if len(a.shape) == 0:\n if a[()] == b[()]:\n return 1\n else:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # null arrays\n if a.size == 0: # len(a) is not correct for generic shapes\n if b.size == 0:\n return 1\n else:\n if verbose:\n print(\"length is not equal\")\n print(\"len(a.data) ==>\", len(a.data))\n print(\"len(b.data) ==>\", len(b.data))\n return 0\n\n # Multidimensional case\n result = (a == b)\n result = numpy.all(result)\n if not result and verbose:\n print(\"Some of the elements in arrays are not equal\")\n\n return result\n\n\ndef areArraysEqual(arr1, arr2):\n \"\"\"Are both `arr1` and `arr2` equal arrays?\n\n Arguments can be regular NumPy arrays, chararray arrays or\n structured arrays (including structured record arrays). 
They are\n checked for type and value equality.\n\n \"\"\"\n\n t1 = type(arr1)\n t2 = type(arr2)\n\n if not ((hasattr(arr1, 'dtype') and arr1.dtype == arr2.dtype) or\n issubclass(t1, t2) or issubclass(t2, t1)):\n return False\n\n return numpy.all(arr1 == arr2)\n\n\n# COMPATIBILITY: assertWarns is new in Python 3.2\n# Code copied from the standard unittest.case module (Python 3.4)\nif not hasattr(unittest.TestCase, 'assertWarns'):\n class _BaseTestCaseContext:\n def __init__(self, test_case):\n self.test_case = test_case\n\n def _raiseFailure(self, standardMsg):\n msg = self.test_case._formatMessage(self.msg, standardMsg)\n raise self.test_case.failureException(msg)\n\n class _AssertRaisesBaseContext(_BaseTestCaseContext):\n def __init__(self, expected, test_case, callable_obj=None,\n expected_regex=None):\n _BaseTestCaseContext.__init__(self, test_case)\n self.expected = expected\n self.test_case = test_case\n if callable_obj is not None:\n try:\n self.obj_name = callable_obj.__name__\n except AttributeError:\n self.obj_name = str(callable_obj)\n else:\n self.obj_name = None\n if expected_regex is not None:\n expected_regex = re.compile(expected_regex)\n self.expected_regex = expected_regex\n self.msg = None\n\n def handle(self, name, callable_obj, args, kwargs):\n \"\"\"\n If callable_obj is None, assertRaises/Warns is being used as a\n context manager, so check for a 'msg' kwarg and return self.\n If callable_obj is not None, call it passing args and kwargs.\n \"\"\"\n if callable_obj is None:\n self.msg = kwargs.pop('msg', None)\n return self\n with self:\n callable_obj(*args, **kwargs)\n\n class _AssertWarnsContext(_AssertRaisesBaseContext):\n def __enter__(self):\n for v in sys.modules.values():\n if getattr(v, '__warningregistry__', None):\n v.__warningregistry__ = {}\n self.warnings_manager = warnings.catch_warnings(record=True)\n self.warnings = self.warnings_manager.__enter__()\n warnings.simplefilter(\"always\", self.expected)\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.warnings_manager.__exit__(exc_type, exc_value, tb)\n if exc_type is not None:\n # let unexpected exceptions pass through\n return\n try:\n exc_name = self.expected.__name__\n except AttributeError:\n exc_name = str(self.expected)\n first_matching = None\n for m in self.warnings:\n w = m.message\n if not isinstance(w, self.expected):\n continue\n if first_matching is None:\n first_matching = w\n if (self.expected_regex is not None and\n not self.expected_regex.search(str(w))):\n continue\n # store warning for later retrieval\n self.warning = w\n self.filename = m.filename\n self.lineno = m.lineno\n return\n # Now we simply try to choose a helpful failure message\n if first_matching is not None:\n self._raiseFailure(\n '\"{0}\" does not match \"{1}\"'.format(\n self.expected_regex.pattern, str(first_matching)))\n if self.obj_name:\n self._raiseFailure(\"{0} not triggered by {1}\".format(\n exc_name, self.obj_name))\n else:\n self._raiseFailure(\"{0} not triggered\".format(exc_name))\n\n\nclass PyTablesTestCase(unittest.TestCase):\n def tearDown(self):\n super(PyTablesTestCase, self).tearDown()\n for key in self.__dict__:\n if self.__dict__[key].__class__.__name__ not in ('instancemethod'):\n self.__dict__[key] = None\n\n def _getName(self):\n \"\"\"Get the name of this test case.\"\"\"\n return self.id().split('.')[-2]\n\n def _getMethodName(self):\n \"\"\"Get the name of the method currently running in the test case.\"\"\"\n return self.id().split('.')[-1]\n\n def _verboseHeader(self):\n 
\"\"\"Print a nice header for the current test method if verbose.\"\"\"\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = \"Running %s.%s\" % (name, methodName)\n print('%s\\n%s' % (title, '-' * len(title)))\n\n @classmethod\n def _testFilename(class_, filename):\n \"\"\"Returns an absolute version of the `filename`, taking care of the\n location of the calling test case class.\"\"\"\n modname = class_.__module__\n # When the definitive switch to ``setuptools`` is made,\n # this should definitely use the ``pkg_resouces`` API::\n #\n # return pkg_resources.resource_filename(modname, filename)\n #\n modfile = sys.modules[modname].__file__\n dirname = os.path.dirname(modfile)\n return os.path.join(dirname, filename)\n\n # COMPATIBILITY: assertWarns is new in Python 3.2\n if not hasattr(unittest.TestCase, 'assertWarns'):\n def assertWarns(self, expected_warning, callable_obj=None,\n *args, **kwargs):\n context = _AssertWarnsContext(expected_warning, self, callable_obj)\n return context.handle('assertWarns', callable_obj, args, kwargs)\n\n def _checkEqualityGroup(self, node1, node2, hardlink=False):\n if verbose:\n print(\"Group 1:\", node1)\n print(\"Group 2:\", node2)\n if hardlink:\n self.assertTrue(\n node1._v_pathname != node2._v_pathname,\n \"node1 and node2 have the same pathnames.\")\n else:\n self.assertTrue(\n node1._v_pathname == node2._v_pathname,\n \"node1 and node2 does not have the same pathnames.\")\n self.assertTrue(\n node1._v_children == node2._v_children,\n \"node1 and node2 does not have the same children.\")\n\n def _checkEqualityLeaf(self, node1, node2, hardlink=False):\n if verbose:\n print(\"Leaf 1:\", node1)\n print(\"Leaf 2:\", node2)\n if hardlink:\n self.assertTrue(\n node1._v_pathname != node2._v_pathname,\n \"node1 and node2 have the same pathnames.\")\n else:\n self.assertTrue(\n node1._v_pathname == node2._v_pathname,\n \"node1 and node2 does not have the same pathnames.\")\n self.assertTrue(\n areArraysEqual(node1[:], node2[:]),\n \"node1 and node2 does not have the same values.\")\n\n\nclass TestFileMixin(object):\n h5fname = None\n open_kwargs = {}\n\n def setUp(self):\n super(TestFileMixin, self).setUp()\n #self.h5fname = self._testFilename(self.testfname)\n self.h5file = tables.open_file(\n self.h5fname, title=self._getName(), **self.open_kwargs)\n\n def tearDown(self):\n \"\"\"Close ``h5file``.\"\"\"\n\n self.h5file.close()\n super(TestFileMixin, self).tearDown()\n\n\nclass TempFileMixin(object):\n open_mode = 'w'\n open_kwargs = {}\n\n def _getTempFileName(self):\n return tempfile.mktemp(prefix=self._getName(), suffix='.h5')\n\n def setUp(self):\n \"\"\"Set ``h5file`` and ``h5fname`` instance attributes.\n\n * ``h5fname``: the name of the temporary HDF5 file.\n * ``h5file``: the writable, empty, temporary HDF5 file.\n\n \"\"\"\n\n super(TempFileMixin, self).setUp()\n self.h5fname = self._getTempFileName()\n self.h5file = tables.open_file(\n self.h5fname, self.open_mode, title=self._getName(),\n **self.open_kwargs)\n\n def tearDown(self):\n \"\"\"Close ``h5file`` and remove ``h5fname``.\"\"\"\n\n self.h5file.close()\n self.h5file = None\n os.remove(self.h5fname) # comment this for debugging purposes only\n super(TempFileMixin, self).tearDown()\n\n def _reopen(self, mode='r', **kwargs):\n \"\"\"Reopen ``h5file`` in the specified ``mode``.\n\n Returns a true or false value depending on whether the file was\n reopenend or not. 
If not, nothing is changed.\n\n \"\"\"\n\n self.h5file.close()\n self.h5file = tables.open_file(self.h5fname, mode, **kwargs)\n return True\n\n\nclass ShowMemTime(PyTablesTestCase):\n tref = time.time()\n \"\"\"Test for showing memory and time consumption.\"\"\"\n\n def test00(self):\n \"\"\"Showing memory and time consumption.\"\"\"\n\n # Obtain memory info (only for Linux 2.6.x)\n for line in open(\"/proc/self/status\"):\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"\\nWallClock time:\", time.time() - self.tref)\n print(\"Memory usage: ******* %s *******\" % self._getName())\n print(\"VmSize: %7s kB\\tVmRSS: %7s kB\" % (vmsize, vmrss))\n print(\"VmData: %7s kB\\tVmStk: %7s kB\" % (vmdata, vmstk))\n print(\"VmExe: %7s kB\\tVmLib: %7s kB\" % (vmexe, vmlib))\n\n\n## Local Variables:\n## mode: python\n## py-indent-offset: 4\n## tab-width: 4\n## fill-column: 72\n## End:\n",
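A hypothetical test case (not in the suite) showing how the pieces above compose: TempFileMixin supplies a writable temporary h5file, _reopen() round-trips it, and allequal() does the comparison:

import numpy
from tables.tests import common


class ArrayRoundTripTestCase(common.TempFileMixin, common.PyTablesTestCase):
    def test00_roundtrip(self):
        a = numpy.arange(10)
        self.h5file.create_array('/', 'a', a)  # write into the temp file
        self._reopen('r')                      # close, then reopen read-only
        self.assertTrue(common.allequal(a, self.h5file.root.a[:]))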
"\"\"\"This program shows the different protections that PyTables offer to the user\nin order to insure a correct data injection in tables.\n\nExample to be used in the second tutorial in the User's Guide.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport tables\nimport numpy as np\n\n# Describe a particle record\n\n\nclass Particle(tables.IsDescription):\n name = tables.StringCol(itemsize=16) # 16-character string\n lati = tables.Int32Col() # integer\n longi = tables.Int32Col() # integer\n pressure = tables.Float32Col(shape=(2, 3)) # array of floats\n # (single-precision)\n temperature = tables.Float64Col(shape=(2, 3)) # array of doubles\n # (double-precision)\n\n# Native NumPy dtype instances are also accepted\nEvent = np.dtype([\n (\"name\", \"S16\"),\n (\"TDCcount\", np.uint8),\n (\"ADCcount\", np.uint16),\n (\"xcoord\", np.float32),\n (\"ycoord\", np.float32)\n])\n\n# And dictionaries too (this defines the same structure as above)\n# Event = {\n# \"name\" : StringCol(itemsize=16),\n# \"TDCcount\" : UInt8Col(),\n# \"ADCcount\" : UInt16Col(),\n# \"xcoord\" : Float32Col(),\n# \"ycoord\" : Float32Col(),\n# }\n\n# Open a file in \"w\"rite mode\nfileh = tables.open_file(\"tutorial2.h5\", mode=\"w\")\n# Get the HDF5 root group\nroot = fileh.root\n# Create the groups:\nfor groupname in (\"Particles\", \"Events\"):\n group = fileh.create_group(root, groupname)\n# Now, create and fill the tables in Particles group\ngparticles = root.Particles\n# Create 3 new tables\nfor tablename in (\"TParticle1\", \"TParticle2\", \"TParticle3\"):\n # Create a table\n table = fileh.create_table(\"/Particles\", tablename, Particle,\n \"Particles: \" + tablename)\n # Get the record object associated with the table:\n particle = table.row\n # Fill the table with 257 particles\n for i in range(257):\n # First, assign the values to the Particle record\n particle['name'] = 'Particle: %6d' % (i)\n particle['lati'] = i\n particle['longi'] = 10 - i\n # Detectable errors start here. Play with them!\n particle['pressure'] = np.array(\n i * np.arange(2 * 3)).reshape((2, 4)) # Incorrect\n # particle['pressure'] = array(i*arange(2*3)).reshape((2,3)) # Correct\n # End of errors\n particle['temperature'] = (i ** 2) # Broadcasting\n # This injects the Record values\n particle.append()\n # Flush the table buffers\n table.flush()\n\n# Now, go for Events:\nfor tablename in (\"TEvent1\", \"TEvent2\", \"TEvent3\"):\n # Create a table in Events group\n table = fileh.create_table(root.Events, tablename, Event,\n \"Events: \" + tablename)\n # Get the record object associated with the table:\n event = table.row\n # Fill the table with 257 events\n for i in range(257):\n # First, assign the values to the Event record\n event['name'] = 'Event: %6d' % (i)\n event['TDCcount'] = i % (1 << 8) # Correct range\n # Detectable errors start here. 
Play with them!\n event['xcoor'] = float(i ** 2) # Wrong spelling\n # event['xcoord'] = float(i**2) # Correct spelling\n event['ADCcount'] = \"sss\" # Wrong type\n # event['ADCcount'] = i * 2 # Correct type\n # End of errors\n event['ycoord'] = float(i) ** 4\n # This injects the Record values\n event.append()\n # Flush the buffers\n table.flush()\n\n# Read the records from table \"/Events/TEvent3\" and select some\ntable = root.Events.TEvent3\ne = [p['TDCcount'] for p in table\n if p['ADCcount'] < 20 and 4 <= p['TDCcount'] < 15]\nprint(\"Last record ==>\", p)\nprint(\"Selected values ==>\", e)\nprint(\"Total selected records ==> \", len(e))\n# Finally, close the file (this also will flush all the remaining buffers!)\nfileh.close()\n",
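The tutorial keeps its broken assignments in on purpose; this tiny standalone sketch (hypothetical file name) shows the same row-level protection with the error caught instead of aborting the run. The assumption here is the behavior the tutorial demonstrates: a misspelled column name raises KeyError at assignment time:

import numpy as np
import tables

Event = np.dtype([("xcoord", np.float32)])
with tables.open_file("rowcheck.h5", "w") as fileh:
    table = fileh.create_table("/", "events", Event)
    row = table.row
    try:
        row['xcoor'] = 1.0   # misspelled column -> KeyError
    except KeyError as exc:
        print("caught:", exc)
    row['xcoord'] = 1.0      # the correct name is accepted
    row.append()
    table.flush()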
"# -*- coding: utf-8 -*-\n\n########################################################################\n#\n# License: BSD\n# Created: September 4, 2002\n# Author: Francesc Alted - [email protected]\n#\n# $Id$\n#\n########################################################################\n\n\"\"\"Create PyTables files and the object tree.\n\nThis module support importing generic HDF5 files, on top of which\nPyTables files are created, read or extended. If a file exists, an\nobject tree mirroring their hierarchical structure is created in memory.\nFile class offer methods to traverse the tree, as well as to create new\nnodes.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport time\nimport weakref\nimport warnings\nimport collections\n\nimport numexpr\nimport numpy\n\nimport tables.misc.proxydict\nfrom tables import hdf5extension\nfrom tables import utilsextension\nfrom tables import parameters\nfrom tables.exceptions import (ClosedFileError, FileModeError, NodeError,\n NoSuchNodeError, UndoRedoError, ClosedNodeError,\n PerformanceWarning)\nfrom tables.registry import get_class_by_name\nfrom tables.path import join_path, split_path\nfrom tables import undoredo\nfrom tables.description import (IsDescription, UInt8Col, StringCol,\n descr_from_dtype, dtype_from_descr)\nfrom tables.filters import Filters\nfrom tables.node import Node, NotLoggedMixin\nfrom tables.group import Group, RootGroup\nfrom tables.group import TransactionGroupG, TransactionG, MarkG\nfrom tables.leaf import Leaf\nfrom tables.array import Array\nfrom tables.carray import CArray\nfrom tables.earray import EArray\nfrom tables.vlarray import VLArray\nfrom tables.table import Table\nfrom tables import linkextension\nfrom tables.utils import detect_number_of_cores\nfrom tables import lrucacheextension\nfrom tables.flavor import flavor_of, array_as_internal\nfrom tables.atom import Atom\n\nfrom tables.link import SoftLink, ExternalLink\n\nfrom tables._past import previous_api, previous_api_property\n\n\n# format_version = \"1.0\" # Initial format\n# format_version = \"1.1\" # Changes in ucl compression\n# format_version = \"1.2\" # Support for enlargeable arrays and VLA's\n# # 1.2 was introduced in PyTables 0.8\n# format_version = \"1.3\" # Support for indexes in Tables\n# # 1.3 was introduced in PyTables 0.9\n# format_version = \"1.4\" # Support for multidimensional attributes\n# # 1.4 was introduced in PyTables 1.1\n# format_version = \"1.5\" # Support for persistent defaults in tables\n# # 1.5 was introduced in PyTables 1.2\n# format_version = \"1.6\" # Support for NumPy objects and new flavors for\n# # objects.\n# # 1.6 was introduced in pytables 1.3\n#format_version = \"2.0\" # Pickles are not used anymore in system attrs\n# # 2.0 was introduced in PyTables 2.0\nformat_version = \"2.1\" # Numeric and numarray flavors are gone.\n\ncompatible_formats = [] # Old format versions we can read\n # Empty means that we support all the old formats\n\n\nclass _FileRegistry(object):\n def __init__(self):\n self._name_mapping = collections.defaultdict(set)\n self._handlers = set()\n\n @property\n def filenames(self):\n return self._name_mapping.keys()\n\n @property\n def handlers(self):\n #return set(self._handlers) # return a copy\n return self._handlers\n\n def __len__(self):\n return len(self._handlers)\n\n def __contains__(self, filename):\n return filename in self.filenames\n\n def add(self, handler):\n self._name_mapping[handler.filename].add(handler)\n self._handlers.add(handler)\n\n def remove(self, 
handler):\n filename = handler.filename\n self._name_mapping[filename].remove(handler)\n # remove enpty keys\n if not self._name_mapping[filename]:\n del self._name_mapping[filename]\n self._handlers.remove(handler)\n\n def get_handlers_by_name(self, filename):\n #return set(self._name_mapping[filename]) # return a copy\n return self._name_mapping[filename]\n\n def close_all(self):\n are_open_files = len(self._handlers) > 0\n if are_open_files:\n sys.stderr.write(\"Closing remaining open files:\")\n handlers = list(self._handlers) # make a copy\n for fileh in handlers:\n sys.stderr.write(\"%s...\" % fileh.filename)\n fileh.close()\n sys.stderr.write(\"done\")\n if are_open_files:\n sys.stderr.write(\"\\n\")\n\n\n# Dict of opened files (keys are filenames and values filehandlers)\n_open_files = _FileRegistry()\n\n# Opcodes for do-undo actions\n_op_to_code = {\n \"MARK\": 0,\n \"CREATE\": 1,\n \"REMOVE\": 2,\n \"MOVE\": 3,\n \"ADDATTR\": 4,\n \"DELATTR\": 5,\n}\n\n_code_to_op = [\"MARK\", \"CREATE\", \"REMOVE\", \"MOVE\", \"ADDATTR\", \"DELATTR\"]\n\n\n# Paths and names for hidden nodes related with transactions.\n_trans_version = '1.0'\n\n_trans_group_parent = '/'\n_trans_group_name = '_p_transactions'\n_trans_group_path = join_path(_trans_group_parent, _trans_group_name)\n\n_action_log_parent = _trans_group_path\n_action_log_name = 'actionlog'\n_action_log_path = join_path(_action_log_parent, _action_log_name)\n\n_trans_parent = _trans_group_path\n_trans_name = 't%d' # %d -> transaction number\n_trans_path = join_path(_trans_parent, _trans_name)\n\n_markParent = _trans_path\n_markName = 'm%d' # %d -> mark number\n_markPath = join_path(_markParent, _markName)\n\n_shadow_parent = _markPath\n_shadow_name = 'a%d' # %d -> action number\n_shadow_path = join_path(_shadow_parent, _shadow_name)\n\n\ndef _checkfilters(filters):\n if not (filters is None or\n isinstance(filters, Filters)):\n raise TypeError(\"filter parameter has to be None or a Filter \"\n \"instance and the passed type is: '%s'\" %\n type(filters))\n\n\ndef copy_file(srcfilename, dstfilename, overwrite=False, **kwargs):\n \"\"\"An easy way of copying one PyTables file to another.\n\n This function allows you to copy an existing PyTables file named\n srcfilename to another file called dstfilename. The source file\n must exist and be readable. The destination file can be\n overwritten in place if existing by asserting the overwrite\n argument.\n\n This function is a shorthand for the :meth:`File.copy_file` method,\n which acts on an already opened file. kwargs takes keyword\n arguments used to customize the copying process. 
See the\n documentation of :meth:`File.copy_file` for a description of those\n arguments.\n\n \"\"\"\n\n # Open the source file.\n srcfileh = open_file(srcfilename, mode=\"r\")\n\n try:\n # Copy it to the destination file.\n srcfileh.copy_file(dstfilename, overwrite=overwrite, **kwargs)\n finally:\n # Close the source file.\n srcfileh.close()\n\ncopyFile = previous_api(copy_file)\n\n\nif tuple(map(int, utilsextension.get_hdf5_version().split('-')[0].split('.'))) \\\n < (1, 8, 7):\n _FILE_OPEN_POLICY = 'strict'\nelse:\n _FILE_OPEN_POLICY = 'default'\n\n\ndef open_file(filename, mode=\"r\", title=\"\", root_uep=\"/\", filters=None,\n **kwargs):\n \"\"\"Open a PyTables (or generic HDF5) file and return a File object.\n\n Parameters\n ----------\n filename : str\n The name of the file (supports environment variable expansion).\n It is suggested that file names have any of the .h5, .hdf or\n .hdf5 extensions, although this is not mandatory.\n mode : str\n The mode to open the file. It can be one of the\n following:\n\n * *'r'*: Read-only; no data can be modified.\n * *'w'*: Write; a new file is created (an existing file\n with the same name would be deleted).\n * *'a'*: Append; an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n * *'r+'*: It is similar to 'a', but the file must already\n exist.\n\n title : str\n If the file is to be created, a TITLE string attribute will be\n set on the root group with the given value. Otherwise, the\n title will be read from disk, and this will not have any effect.\n root_uep : str\n The root User Entry Point. This is a group in the HDF5 hierarchy\n which will be taken as the starting point to create the object\n tree. It can be whatever existing group in the file, named by\n its HDF5 path. If it does not exist, an HDF5ExtError is issued.\n Use this if you do not want to build the *entire* object tree,\n but rather only a *subtree* of it.\n\n .. versionchanged:: 3.0\n The *rootUEP* parameter has been renamed into *root_uep*.\n\n filters : Filters\n An instance of the Filters (see :ref:`FiltersClassDescr`) class\n that provides information about the desired I/O filters\n applicable to the leaves that hang directly from the *root group*,\n unless other filter properties are specified for these leaves.\n Besides, if you do not specify filter properties for child groups,\n they will inherit these ones, which will in turn propagate to\n child nodes.\n\n Notes\n -----\n In addition, it recognizes the (lowercase) names of parameters\n present in :file:`tables/parameters.py` as additional keyword\n arguments.\n See :ref:`parameter_files` for a detailed info on the supported\n parameters.\n\n .. note::\n\n If you need to deal with a large number of nodes in an\n efficient way, please see :ref:`LRUOptim` for more info and\n advices about the integrated node cache engine.\n\n \"\"\"\n\n # XXX filename normalization ??\n\n # Check already opened files\n if _FILE_OPEN_POLICY == 'strict':\n # This policy do not allows to open the same file multiple times\n # even in read-only mode\n if filename in _open_files:\n raise ValueError(\n \"The file '%s' is already opened. \"\n \"Please close it before reopening. 
\"\n \"HDF5 v.%s, FILE_OPEN_POLICY = '%s'\" % (\n filename, utilsextension.get_hdf5_version(),\n _FILE_OPEN_POLICY))\n else:\n for filehandle in _open_files.get_handlers_by_name(filename):\n omode = filehandle.mode\n # 'r' is incompatible with everything except 'r' itself\n if mode == 'r' and omode != 'r':\n raise ValueError(\n \"The file '%s' is already opened, but \"\n \"not in read-only mode (as requested).\" % filename)\n # 'a' and 'r+' are compatible with everything except 'r'\n elif mode in ('a', 'r+') and omode == 'r':\n raise ValueError(\n \"The file '%s' is already opened, but \"\n \"in read-only mode. Please close it before \"\n \"reopening in append mode.\" % filename)\n # 'w' means that we want to destroy existing contents\n elif mode == 'w':\n raise ValueError(\n \"The file '%s' is already opened. Please \"\n \"close it before reopening in write mode.\" % filename)\n\n # Finally, create the File instance, and return it\n return File(filename, mode, title, root_uep, filters, **kwargs)\n\nopenFile = previous_api(open_file)\n\n\n# A dumb class that doesn't keep nothing at all\nclass _NoCache(object):\n def __len__(self):\n return 0\n\n def __contains__(self, key):\n return False\n\n def __iter__(self):\n return iter([])\n\n def __setitem__(self, key, value):\n pass\n\n __marker = object()\n\n def pop(self, key, d=__marker):\n if d is not self.__marker:\n return d\n raise KeyError(key)\n\n\nclass _DictCache(dict):\n def __init__(self, nslots):\n if nslots < 1:\n raise ValueError(\"Invalid number of slots: %d\" % nslots)\n self.nslots = nslots\n super(_DictCache, self).__init__()\n\n def __setitem__(self, key, value):\n # Check if we are running out of space\n if len(self) > self.nslots:\n warnings.warn(\n \"the dictionary of node cache is exceeding the recommended \"\n \"maximum number (%d); be ready to see PyTables asking for \"\n \"*lots* of memory and possibly slow I/O.\" % (\n self.nslots), PerformanceWarning)\n super(_DictCache, self).__setitem__(key, value)\n\n\nclass NodeManager(object):\n def __init__(self, nslots=64, node_factory=None):\n super(NodeManager, self).__init__()\n\n self.registry = weakref.WeakValueDictionary()\n\n if nslots > 0:\n cache = lrucacheextension.NodeCache(nslots)\n elif nslots == 0:\n cache = _NoCache()\n else:\n # nslots < 0\n cache = _DictCache(-nslots)\n\n self.cache = cache\n\n # node_factory(node_path)\n self.node_factory = node_factory\n\n def register_node(self, node, key):\n if key is None:\n key = node._v_pathname\n\n if key in self.registry:\n if not self.registry[key]._v_isopen:\n del self.registry[key]\n elif self.registry[key] is not node:\n raise RuntimeError('trying to register a node with an '\n 'existing key: ``%s``' % key)\n else:\n self.registry[key] = node\n\n def cache_node(self, node, key=None):\n if key is None:\n key = node._v_pathname\n\n self.register_node(node, key)\n if key in self.cache:\n oldnode = self.cache.pop(key)\n if oldnode is not node and oldnode._v_isopen:\n raise RuntimeError('trying to cache a node with an '\n 'existing key: ``%s``' % key)\n\n self.cache[key] = node\n\n def get_node(self, key):\n node = self.cache.pop(key, None)\n if node is not None:\n if node._v_isopen:\n self.cache_node(node, key)\n return node\n else:\n # this should not happen\n warnings.warn(\"a closed node found in the cache: ``%s``\" % key)\n\n if key in self.registry:\n node = self.registry[key]\n if node is None:\n # this should not happen since WeakValueDictionary drops all\n # dead weakrefs\n warnings.warn(\"None is stored in the 
registry for key: \"\n \"``%s``\" % key)\n elif node._v_isopen:\n self.cache_node(node, key)\n return node\n else:\n # this should not happen\n warnings.warn(\"a closed node found in the registry: \"\n \"``%s``\" % key)\n del self.registry[key]\n node = None\n\n if self.node_factory:\n node = self.node_factory(key)\n self.cache_node(node, key)\n\n return node\n\n def rename_node(self, oldkey, newkey):\n for cache in (self.cache, self.registry):\n if oldkey in cache:\n node = cache.pop(oldkey)\n cache[newkey] = node\n\n def drop_from_cache(self, nodepath):\n '''Remove the node from cache'''\n\n # Remove the node from the cache.\n self.cache.pop(nodepath, None)\n\n def drop_node(self, node, check_unregistered=True):\n \"\"\"Drop the `node`.\n\n Remove the node from the cache and, if it has no more references,\n close it.\n\n \"\"\"\n\n # Remove all references to the node.\n nodepath = node._v_pathname\n\n self.drop_from_cache(nodepath)\n\n if nodepath in self.registry:\n if not node._v_isopen:\n del self.registry[nodepath]\n elif check_unregistered:\n # If the node is not in the registry (this should never happen)\n # we close it forcibly since it is not ensured that the __del__\n # method is called for object that are still alive when the\n # interpreter is shut down\n if node._v_isopen:\n warnings.warn(\"dropping a node that is not in the registry: \"\n \"``%s``\" % nodepath)\n\n node._g_pre_kill_hook()\n node._f_close()\n\n def flush_nodes(self):\n # Only iter on the nodes in the registry since nodes in the cahce\n # should always have an entry in the registry\n closed_keys = []\n for path, node in self.registry.items():\n if not node._v_isopen:\n closed_keys.append(path)\n elif '/_i_' not in path: # Indexes are not necessary to be flushed\n if isinstance(node, Leaf):\n node.flush()\n\n for path in closed_keys:\n # self.cache.pop(path, None)\n if path in self.cache:\n warnings.warn(\"closed node the cache: ``%s``\" % path)\n self.cache.pop(path, None)\n self.registry.pop(path)\n\n @staticmethod\n def _close_nodes(nodepaths, get_node):\n for nodepath in nodepaths:\n try:\n node = get_node(nodepath)\n except KeyError:\n pass\n else:\n if not node._v_isopen or node._v__deleting:\n continue\n\n try:\n # Avoid descendent nodes to also iterate over\n # their descendents, which are already to be\n # closed by this loop.\n if hasattr(node, '_f_get_child'):\n node._g_close()\n else:\n node._f_close()\n del node\n except ClosedNodeError:\n #import traceback\n #type_, value, tb = sys.exc_info()\n #exception_dump = ''.join(\n # traceback.format_exception(type_, value, tb))\n #warnings.warn(\n # \"A '%s' exception occurred trying to close a node \"\n # \"that was supposed to be open.\\n\"\n # \"%s\" % (type_.__name__, exception_dump))\n pass\n\n def close_subtree(self, prefix='/'):\n if not prefix.endswith('/'):\n prefix = prefix + '/'\n\n cache = self.cache\n registry = self.registry\n\n # Ensure tables are closed before their indices\n paths = [\n path for path in cache\n if path.startswith(prefix) and '/_i_' not in path\n ]\n self._close_nodes(paths, cache.pop)\n\n # Close everything else (i.e. indices)\n paths = [path for path in cache if path.startswith(prefix)]\n self._close_nodes(paths, cache.pop)\n\n # Ensure tables are closed before their indices\n paths = [\n path for path in registry\n if path.startswith(prefix) and '/_i_' not in path\n ]\n self._close_nodes(paths, registry.pop)\n\n # Close everything else (i.e. 
indices)\n paths = [path for path in registry if path.startswith(prefix)]\n self._close_nodes(paths, registry.pop)\n\n def shutdown(self):\n registry = self.registry\n cache = self.cache\n\n #self.close_subtree('/')\n\n keys = list(cache) # copy\n for key in keys:\n node = cache.pop(key)\n if node._v_isopen:\n registry.pop(node._v_pathname, None)\n node._f_close()\n\n while registry:\n key, node = registry.popitem()\n if node._v_isopen:\n node._f_close()\n\n\nclass File(hdf5extension.File, object):\n \"\"\"The in-memory representation of a PyTables file.\n\n An instance of this class is returned when a PyTables file is\n opened with the :func:`tables.open_file` function. It offers methods\n to manipulate (create, rename, delete...) nodes and handle their\n attributes, as well as methods to traverse the object tree.\n The *user entry point* to the object tree attached to the HDF5 file\n is represented in the root_uep attribute.\n Other attributes are available.\n\n File objects support an *Undo/Redo mechanism* which can be enabled\n with the :meth:`File.enable_undo` method. Once the Undo/Redo\n mechanism is enabled, explicit *marks* (with an optional unique\n name) can be set on the state of the database using the\n :meth:`File.mark`\n method. There are two implicit marks which are always available:\n the initial mark (0) and the final mark (-1). Both the identifier\n of a mark and its name can be used in *undo* and *redo* operations.\n\n Hierarchy manipulation operations (node creation, movement and\n removal) and attribute handling operations (setting and deleting)\n made after a mark can be undone by using the :meth:`File.undo`\n method, which returns the database to the state of a past mark.\n If undo() is not followed by operations that modify the hierarchy\n or attributes, the :meth:`File.redo` method can be used to return\n the database to the state of a future mark. Else, future states of\n the database are forgotten.\n\n Note that data handling operations can not be undone nor redone by\n now. Also, hierarchy manipulation operations on nodes that do not\n support the Undo/Redo mechanism issue an UndoRedoWarning *before*\n changing the database.\n\n The Undo/Redo mechanism is persistent between sessions and can\n only be disabled by calling the :meth:`File.disable_undo` method.\n\n File objects can also act as context managers when using the with\n statement introduced in Python 2.5. When exiting a context, the\n file is automatically closed.\n\n Parameters\n ----------\n filename : str\n The name of the file (supports environment variable expansion).\n It is suggested that file names have any of the .h5, .hdf or\n .hdf5 extensions, although this is not mandatory.\n\n mode : str\n The mode to open the file. It can be one of the\n following:\n\n * *'r'*: Read-only; no data can be modified.\n * *'w'*: Write; a new file is created (an existing file\n with the same name would be deleted).\n * *'a'*: Append; an existing file is opened for reading\n and writing, and if the file does not exist it is created.\n * *'r+'*: It is similar to 'a', but the file must already\n exist.\n\n title : str\n If the file is to be created, a TITLE string attribute will be\n set on the root group with the given value. Otherwise, the\n title will be read from disk, and this will not have any effect.\n\n root_uep : str\n The root User Entry Point. This is a group in the HDF5 hierarchy\n which will be taken as the starting point to create the object\n tree. 
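For example, a hypothetical call like\n        ``tables.open_file('data.h5', root_uep='/experiment1')`` would expose\n        only that subtree through the *root* attribute. 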
It can be whatever existing group in the file, named by\n its HDF5 path. If it does not exist, an HDF5ExtError is issued.\n Use this if you do not want to build the *entire* object tree,\n but rather only a *subtree* of it.\n\n .. versionchanged:: 3.0\n The *rootUEP* parameter has been renamed into *root_uep*.\n\n filters : Filters\n An instance of the Filters (see :ref:`FiltersClassDescr`) class that\n provides information about the desired I/O filters applicable to the\n leaves that hang directly from the *root group*, unless other filter\n properties are specified for these leaves. Besides, if you do not\n specify filter properties for child groups, they will inherit these\n ones, which will in turn propagate to child nodes.\n\n Notes\n -----\n In addition, it recognizes the (lowercase) names of parameters\n present in :file:`tables/parameters.py` as additional keyword\n arguments.\n See :ref:`parameter_files` for a detailed info on the supported\n parameters.\n\n\n .. rubric:: File attributes\n\n .. attribute:: filename\n\n The name of the opened file.\n\n .. attribute:: format_version\n\n The PyTables version number of this file.\n\n .. attribute:: isopen\n\n True if the underlying file is open, false otherwise.\n\n .. attribute:: mode\n\n The mode in which the file was opened.\n\n .. attribute:: root\n\n The *root* of the object tree hierarchy (a Group instance).\n\n .. attribute:: root_uep\n\n The UEP (user entry point) group name in the file (see\n the :func:`open_file` function).\n\n .. versionchanged:: 3.0\n The *rootUEP* attribute has been renamed into *root_uep*.\n\n \"\"\"\n\n # The top level kinds. Group must go first!\n _node_kinds = ('Group', 'Leaf', 'Link', 'Unknown')\n rootUEP = previous_api_property('root_uep')\n _v_objectId = previous_api_property('_v_objectid')\n\n def _gettitle(self):\n return self.root._v_title\n\n def _settitle(self, title):\n self.root._v_title = title\n\n def _deltitle(self):\n del self.root._v_title\n\n title = property(\n _gettitle, _settitle, _deltitle,\n \"The title of the root group in the file.\")\n\n def _getfilters(self):\n return self.root._v_filters\n\n def _setfilters(self, filters):\n self.root._v_filters = filters\n\n def _delfilters(self):\n del self.root._v_filters\n\n filters = property(\n _getfilters, _setfilters, _delfilters,\n (\"Default filter properties for the root group \"\n \"(see :ref:`FiltersClassDescr`).\"))\n\n open_count = property(\n lambda self: self._open_count, None, None,\n \"\"\"The number of times this file handle has been opened.\n\n .. versionchanged:: 3.1\n The mechanism for caching and sharing file handles has been\n removed in PyTables 3.1. Now this property should always\n be 1 (or 0 for closed files).\n\n .. deprecated:: 3.1\n\n \"\"\")\n\n def __init__(self, filename, mode=\"r\", title=\"\",\n root_uep=\"/\", filters=None, **kwargs):\n\n self.filename = filename\n \"\"\"The name of the opened file.\"\"\"\n\n self.mode = mode\n \"\"\"The mode in which the file was opened.\"\"\"\n\n if mode not in ('r', 'r+', 'a', 'w'):\n raise ValueError(\"invalid mode string ``%s``. 
Allowed modes are: \"\n                             \"'r', 'r+', 'a' and 'w'\" % mode)\n\n        # Get all the parameters in parameter file(s)\n        params = dict([(k, v) for k, v in parameters.__dict__.iteritems()\n                       if k.isupper() and not k.startswith('_')])\n        # Update them with possible keyword arguments\n        if [k for k in kwargs if k.isupper()]:\n            warnings.warn(\"The use of uppercase keyword parameters is \"\n                          \"deprecated\", DeprecationWarning)\n\n        kwargs = dict([(k.upper(), v) for k, v in kwargs.iteritems()])\n        params.update(kwargs)\n\n        # If MAX_ * _THREADS is not set yet, set it to the number of cores\n        # on this machine.\n\n        if params['MAX_NUMEXPR_THREADS'] is None:\n            params['MAX_NUMEXPR_THREADS'] = detect_number_of_cores()\n\n        if params['MAX_BLOSC_THREADS'] is None:\n            params['MAX_BLOSC_THREADS'] = detect_number_of_cores()\n\n        self.params = params\n\n        # Now, it is time to initialize the File extension\n        self._g_new(filename, mode, **params)\n\n        # Check filters and set PyTables format version for new files.\n        new = self._v_new\n        if new:\n            _checkfilters(filters)\n            self.format_version = format_version\n            \"\"\"The PyTables version number of this file.\"\"\"\n\n        # The node manager must be initialized before the root group\n        # initialization but the node_factory attribute is set only later\n        # because it is a bound method of the root group itself.\n        node_cache_slots = params['NODE_CACHE_SLOTS']\n        self._node_manager = NodeManager(nslots=node_cache_slots)\n\n        # For the moment Undo/Redo is not enabled.\n        self._undoEnabled = False\n\n        # Set the flag to indicate that the file has been opened.\n        # It must be set before opening the root group\n        # to allow some basic access to its attributes.\n        self.isopen = 1\n        \"\"\"True if the underlying file is open, False otherwise.\"\"\"\n\n        # Append the name of the file to the global dict of files opened.\n        _open_files.add(self)\n\n        # Set the number of times this file has been opened to 1\n        self._open_count = 1\n\n        # Get the root group from this file\n        self.root = root = self.__get_root_group(root_uep, title, filters)\n        \"\"\"The *root* of the object tree hierarchy (a Group instance).\"\"\"\n        # Complete the creation of the root node\n        # (see the explanation in ``RootGroup.__init__()``).\n        root._g_post_init_hook()\n        self._node_manager.node_factory = self.root._g_load_child\n\n        # Save the PyTables format version for this file.\n        if new:\n            if params['PYTABLES_SYS_ATTRS']:\n                root._v_attrs._g__setattr(\n                    'PYTABLES_FORMAT_VERSION', format_version)\n\n        # If the file is old, and not opened in \"read-only\" mode,\n        # check if it has a transaction log\n        if not new and self.mode != \"r\" and _trans_group_path in self:\n            # It does. 
Enable the undo.\n self.enable_undo()\n\n # Set the maximum number of threads for Numexpr\n numexpr.set_vml_num_threads(params['MAX_NUMEXPR_THREADS'])\n\n def __get_root_group(self, root_uep, title, filters):\n \"\"\"Returns a Group instance which will act as the root group in the\n hierarchical tree.\n\n If file is opened in \"r\", \"r+\" or \"a\" mode, and the file already\n exists, this method dynamically builds a python object tree\n emulating the structure present on file.\n\n \"\"\"\n\n self._v_objectid = self._get_file_id()\n\n if root_uep in [None, \"\"]:\n root_uep = \"/\"\n # Save the User Entry Point in a variable class\n self.root_uep = root_uep\n\n new = self._v_new\n\n # Get format version *before* getting the object tree\n if not new:\n # Firstly, get the PyTables format version for this file\n self.format_version = utilsextension.read_f_attr(\n self._v_objectid, 'PYTABLES_FORMAT_VERSION')\n if not self.format_version:\n # PYTABLES_FORMAT_VERSION attribute is not present\n self.format_version = \"unknown\"\n self._isPTFile = False\n elif not isinstance(self.format_version, str):\n # system attributes should always be str\n if sys.version_info[0] < 3:\n self.format_version = self.format_version.encode()\n else:\n self.format_version = self.format_version.decode('utf-8')\n\n # Create new attributes for the root Group instance and\n # create the object tree\n return RootGroup(self, root_uep, title=title, new=new, filters=filters)\n\n __getRootGroup = previous_api(__get_root_group)\n\n def _get_or_create_path(self, path, create):\n \"\"\"Get the given `path` or create it if `create` is true.\n\n If `create` is true, `path` *must* be a string path and not a\n node, otherwise a `TypeError`will be raised.\n\n \"\"\"\n\n if create:\n return self._create_path(path)\n else:\n return self.get_node(path)\n\n _getOrCreatePath = previous_api(_get_or_create_path)\n\n def _create_path(self, path):\n \"\"\"Create the groups needed for the `path` to exist.\n\n The group associated with the given `path` is returned.\n\n \"\"\"\n\n if not hasattr(path, 'split'):\n raise TypeError(\"when creating parents, parent must be a path\")\n\n if path == '/':\n return self.root\n\n parent, create_group = self.root, self.create_group\n for pcomp in path.split('/')[1:]:\n try:\n child = parent._f_get_child(pcomp)\n except NoSuchNodeError:\n child = create_group(parent, pcomp)\n parent = child\n return parent\n\n _createPath = previous_api(_create_path)\n\n def create_group(self, where, name, title=\"\", filters=None,\n createparents=False):\n \"\"\"Create a new group.\n\n Parameters\n ----------\n where : str or Group\n The parent group from which the new group will hang. It can be a\n path string (for example '/level1/leaf5'), or a Group instance\n (see :ref:`GroupClassDescr`).\n name : str\n The name of the new group.\n title : str, optional\n A description for this node (it sets the TITLE HDF5 attribute on\n disk).\n filters : Filters\n An instance of the Filters class (see :ref:`FiltersClassDescr`)\n that provides information about the desired I/O filters applicable\n to the leaves that hang directly from this new group (unless other\n filter properties are specified for these leaves). 
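For instance, a\n            hypothetical ``filters=tables.Filters(complevel=5, complib='zlib')``\n            would compress every leaf created below the new group. 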
Besides, if you\n do not specify filter properties for its child groups, they will\n inherit these ones.\n createparents : bool\n Whether to create the needed groups for the parent\n path to exist (not done by default).\n\n See Also\n --------\n Group : for more information on groups\n\n \"\"\"\n\n parentnode = self._get_or_create_path(where, createparents)\n _checkfilters(filters)\n return Group(parentnode, name,\n title=title, new=True, filters=filters)\n\n createGroup = previous_api(create_group)\n\n def create_table(self, where, name, description=None, title=\"\",\n filters=None, expectedrows=10000,\n chunkshape=None, byteorder=None,\n createparents=False, obj=None):\n \"\"\"Create a new table with the given name in where location.\n\n Parameters\n ----------\n where : str or Group\n The parent group from which the new table will hang. It can be a\n path string (for example '/level1/leaf5'), or a Group instance\n (see :ref:`GroupClassDescr`).\n name : str\n The name of the new table.\n description : Description\n This is an object that describes the table, i.e. how\n many columns it has, their names, types, shapes, etc. It\n can be any of the following:\n\n * *A user-defined class*: This should inherit from the\n IsDescription class (see :ref:`IsDescriptionClassDescr`)\n where table fields are specified.\n * *A dictionary*: For example, when you do not know\n beforehand which structure your table will have).\n * *A Description instance*: You can use the description\n attribute of another table to create a new one with the\n same structure.\n * *A NumPy dtype*: A completely general structured NumPy\n dtype.\n * *A NumPy (structured) array instance*: The dtype of\n this structured array will be used as the description.\n Also, in case the array has actual data, it will be\n injected into the newly created table.\n\n .. versionchanged:: 3.0\n The *description* parameter can be None (default) if *obj* is\n provided. In that case the structure of the table is deduced\n by *obj*.\n\n title : str\n A description for this node (it sets the TITLE HDF5 attribute\n on disk).\n filters : Filters\n An instance of the Filters class (see :ref:`FiltersClassDescr`)\n that provides information about the desired I/O filters to be\n applied during the life of this object.\n expectedrows : int\n A user estimate of the number of records that will be in the table.\n If not provided, the default value is EXPECTED_ROWS_TABLE (see\n :file:`tables/parameters.py`). If you plan to create a bigger\n table try providing a guess; this will optimize the HDF5 B-Tree\n creation and management process time and memory used.\n chunkshape\n The shape of the data chunk to be read or written in a\n single HDF5 I/O operation. Filters are applied to those\n chunks of data. The rank of the chunkshape for tables must\n be 1. If None, a sensible value is calculated based on the\n expectedrows parameter (which is recommended).\n byteorder : str\n The byteorder of data *on disk*, specified as 'little' or 'big'.\n If this is not specified, the byteorder is that of the platform,\n unless you passed an array as the description, in which case\n its byteorder will be used.\n createparents : bool\n Whether to create the needed groups for the parent path to exist\n (not done by default).\n obj : python object\n The recarray to be saved. 
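For example, a structured array\n            created with ``numpy.zeros(10, dtype=[('x', 'f8'), ('y', 'i4')])``\n            (hypothetical column names) would do. 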
Accepted types are NumPy record\n arrays.\n\n The *obj* parameter is optional and it can be provided in\n alternative to the *description* parameter.\n If both *obj* and *description* are provided they must\n be consistent with each other.\n\n .. versionadded:: 3.0\n\n See Also\n --------\n Table : for more information on tables\n\n \"\"\"\n\n if obj is not None:\n if not isinstance(obj, numpy.ndarray):\n raise TypeError('invalid obj parameter %r' % obj)\n\n descr, _ = descr_from_dtype(obj.dtype)\n if (description is not None and\n dtype_from_descr(description) != obj.dtype):\n raise TypeError('the desctiption parameter is not consistent '\n 'with the data type of the obj parameter')\n elif description is None:\n description = descr\n\n parentnode = self._get_or_create_path(where, createparents)\n if description is None:\n raise ValueError(\"invalid table description: None\")\n _checkfilters(filters)\n\n ptobj = Table(parentnode, name,\n description=description, title=title,\n filters=filters, expectedrows=expectedrows,\n chunkshape=chunkshape, byteorder=byteorder)\n\n if obj is not None:\n ptobj.append(obj)\n\n return ptobj\n\n createTable = previous_api(create_table)\n\n def create_array(self, where, name, obj=None, title=\"\",\n byteorder=None, createparents=False,\n atom=None, shape=None):\n \"\"\"Create a new array.\n\n Parameters\n ----------\n where : str or Group\n The parent group from which the new array will hang. It can be a\n path string (for example '/level1/leaf5'), or a Group instance\n (see :ref:`GroupClassDescr`).\n name : str\n The name of the new array\n obj : python object\n The array or scalar to be saved. Accepted types are NumPy\n arrays and scalars, as well as native Python sequences and\n scalars, provided that values are regular (i.e. they are\n not like ``[[1,2],2]``) and homogeneous (i.e. all the\n elements are of the same type).\n\n Also, objects that have some of their dimensions equal to 0\n are not supported (use an EArray node (see\n :ref:`EArrayClassDescr`) if you want to store an array with\n one of its dimensions equal to 0).\n\n .. versionchanged:: 3.0\n The *Object parameter has been renamed into *obj*.*\n\n title : str\n A description for this node (it sets the TITLE HDF5 attribute on\n disk).\n byteorder : str\n The byteorder of the data *on disk*, specified as 'little' or\n 'big'. If this is not specified, the byteorder is that of the\n given object.\n createparents : bool, optional\n Whether to create the needed groups for the parent path to exist\n (not done by default).\n atom : Atom\n An Atom (see :ref:`AtomClassDescr`) instance representing\n the *type* and *shape* of the atomic objects to be saved.\n\n .. versionadded:: 3.0\n\n shape : tuple of ints\n The shape of the stored array.\n\n .. versionadded:: 3.0\n\n See Also\n --------\n Array : for more information on arrays\n create_table : for more information on the rest of parameters\n\n \"\"\"\n\n if obj is None:\n if atom is None or shape is None:\n raise TypeError('if the obj parameter is not specified '\n '(or None) then both the atom and shape '\n 'parametes should be provided.')\n else:\n # Making strides=(0,...) below is a trick to create the\n # array fast and without memory consumption\n dflt = numpy.zeros((), dtype=atom.dtype)\n obj = numpy.ndarray(shape, dtype=atom.dtype, buffer=dflt,\n strides=(0,)*len(shape))\n else:\n flavor = flavor_of(obj)\n # use a temporary object because converting obj at this stage\n # breaks some test. 
This solution performs a double,\n            # potentially expensive, conversion of the obj parameter.\n            _obj = array_as_internal(obj, flavor)\n\n            if shape is not None and shape != _obj.shape:\n                raise TypeError('the shape parameter does not match obj.shape')\n\n            if atom is not None and atom.dtype != _obj.dtype:\n                raise TypeError('the atom parameter is not consistent with '\n                                'the data type of the obj parameter')\n\n        parentnode = self._get_or_create_path(where, createparents)\n        return Array(parentnode, name,\n                     obj=obj, title=title, byteorder=byteorder)\n\n    createArray = previous_api(create_array)\n\n    def create_carray(self, where, name, atom=None, shape=None, title=\"\",\n                      filters=None, chunkshape=None,\n                      byteorder=None, createparents=False, obj=None):\n        \"\"\"Create a new chunked array.\n\n        Parameters\n        ----------\n        where : str or Group\n            The parent group from which the new array will hang. It can\n            be a path string (for example '/level1/leaf5'), or a Group\n            instance (see :ref:`GroupClassDescr`).\n        name : str\n            The name of the new array\n        atom : Atom\n            An Atom (see :ref:`AtomClassDescr`) instance representing\n            the *type* and *shape* of the atomic objects to be saved.\n\n            .. versionchanged:: 3.0\n               The *atom* parameter can be None (default) if *obj* is\n               provided.\n\n        shape : tuple\n            The shape of the new array.\n\n            .. versionchanged:: 3.0\n               The *shape* parameter can be None (default) if *obj* is\n               provided.\n\n        title : str, optional\n            A description for this node (it sets the TITLE HDF5 attribute\n            on disk).\n        filters : Filters, optional\n            An instance of the Filters class (see :ref:`FiltersClassDescr`)\n            that provides information about the desired I/O filters to\n            be applied during the life of this object.\n        chunkshape : tuple or number or None, optional\n            The shape of the data chunk to be read or written in a\n            single HDF5 I/O operation. Filters are applied to those\n            chunks of data. The dimensionality of chunkshape must be\n            the same as that of shape. If None, a sensible value is\n            calculated (which is recommended).\n        byteorder : str, optional\n            The byteorder of the data *on disk*, specified as 'little'\n            or 'big'. If this is not specified, the byteorder is that\n            of the given object.\n        createparents : bool, optional\n            Whether to create the needed groups for the parent path to\n            exist (not done by default).\n        obj : python object\n            The array or scalar to be saved. Accepted types are NumPy\n            arrays and scalars, as well as native Python sequences and\n            scalars, provided that values are regular (i.e. they are\n            not like ``[[1,2],2]``) and homogeneous (i.e. all the\n            elements are of the same type).\n\n            Also, objects that have some of their dimensions equal to 0\n            are not supported. Please use an EArray node (see\n            :ref:`EArrayClassDescr`) if you want to store an array with\n            one of its dimensions equal to 0.\n\n            The *obj* parameter is optional and it can be provided in\n            alternative to the *atom* and *shape* parameters.\n            If both *obj* and *atom* and/or *shape* are provided they must\n            be consistent with each other.\n\n        .. 
versionadded:: 3.0\n\n See Also\n --------\n CArray : for more information on chunked arrays\n\n \"\"\"\n\n if obj is not None:\n flavor = flavor_of(obj)\n obj = array_as_internal(obj, flavor)\n\n if shape is not None and shape != obj.shape:\n raise TypeError('the shape parameter do not match obj.shape')\n else:\n shape = obj.shape\n\n if atom is not None and atom.dtype != obj.dtype:\n raise TypeError('the atom parameter is not consistent with '\n 'the data type of the obj parameter')\n elif atom is None:\n atom = Atom.from_dtype(obj.dtype)\n\n parentnode = self._get_or_create_path(where, createparents)\n _checkfilters(filters)\n ptobj = CArray(parentnode, name,\n atom=atom, shape=shape, title=title, filters=filters,\n chunkshape=chunkshape, byteorder=byteorder)\n\n if obj is not None:\n ptobj[...] = obj\n\n return ptobj\n\n createCArray = previous_api(create_carray)\n\n def create_earray(self, where, name, atom=None, shape=None, title=\"\",\n filters=None, expectedrows=1000,\n chunkshape=None, byteorder=None,\n createparents=False, obj=None):\n \"\"\"Create a new enlargeable array.\n\n Parameters\n ----------\n where : str or Group\n The parent group from which the new array will hang. It can be a\n path string (for example '/level1/leaf5'), or a Group instance\n (see :ref:`GroupClassDescr`).\n name : str\n The name of the new array\n atom : Atom\n An Atom (see :ref:`AtomClassDescr`) instance representing the\n *type* and *shape* of the atomic objects to be saved.\n\n .. versionchanged:: 3.0\n The *atom* parameter can be None (default) if *obj* is\n provided.\n\n shape : tuple\n The shape of the new array. One (and only one) of the shape\n dimensions *must* be 0. The dimension being 0 means that the\n resulting EArray object can be extended along it. Multiple\n enlargeable dimensions are not supported right now.\n\n .. versionchanged:: 3.0\n The *shape* parameter can be None (default) if *obj* is\n provided.\n\n title : str, optional\n A description for this node (it sets the TITLE HDF5 attribute on\n disk).\n expectedrows : int, optional\n A user estimate about the number of row elements that will be added\n to the growable dimension in the EArray node. If not provided, the\n default value is EXPECTED_ROWS_EARRAY (see tables/parameters.py).\n If you plan to create either a much smaller or a much bigger array\n try providing a guess; this will optimize the HDF5 B-Tree creation\n and management process time and the amount of memory used.\n chunkshape : tuple, numeric, or None, optional\n The shape of the data chunk to be read or written in a single HDF5\n I/O operation. Filters are applied to those chunks of data. The\n dimensionality of chunkshape must be the same as that of shape\n (beware: no dimension should be 0 this time!). If None, a sensible\n value is calculated based on the expectedrows parameter (which is\n recommended).\n byteorder : str, optional\n The byteorder of the data *on disk*, specified as 'little' or\n 'big'. If this is not specified, the byteorder is that of the\n platform.\n createparents : bool, optional\n Whether to create the needed groups for the parent path to exist\n (not done by default).\n obj : python object\n The array or scalar to be saved. Accepted types are NumPy\n arrays and scalars, as well as native Python sequences and\n scalars, provided that values are regular (i.e. they are\n not like ``[[1,2],2]``) and homogeneous (i.e. 
all the\n elements are of the same type).\n\n The *obj* parameter is optional and it can be provided in\n alternative to the *atom* and *shape* parameters.\n If both *obj* and *atom* and/or *shape* are provided they must\n be consistent with each other.\n\n .. versionadded:: 3.0\n\n See Also\n --------\n EArray : for more information on enlargeable arrays\n\n \"\"\"\n\n if obj is not None:\n flavor = flavor_of(obj)\n obj = array_as_internal(obj, flavor)\n\n earray_shape = (0,) + obj.shape[1:]\n\n if shape is not None and shape != earray_shape:\n raise TypeError('the shape parameter is not compatible '\n 'with obj.shape.')\n else:\n shape = earray_shape\n\n if atom is not None and atom.dtype != obj.dtype:\n raise TypeError('the atom parameter is not consistent with '\n 'the data type of the obj parameter')\n elif atom is None:\n atom = Atom.from_dtype(obj.dtype)\n\n parentnode = self._get_or_create_path(where, createparents)\n _checkfilters(filters)\n ptobj = EArray(parentnode, name,\n atom=atom, shape=shape, title=title,\n filters=filters, expectedrows=expectedrows,\n chunkshape=chunkshape, byteorder=byteorder)\n\n if obj is not None:\n ptobj.append(obj)\n\n return ptobj\n\n createEArray = previous_api(create_earray)\n\n def create_vlarray(self, where, name, atom=None, title=\"\",\n filters=None, expectedrows=None,\n chunkshape=None, byteorder=None,\n createparents=False, obj=None):\n \"\"\"Create a new variable-length array.\n\n Parameters\n ----------\n where : str or Group\n The parent group from which the new array will hang. It can\n be a path string (for example '/level1/leaf5'), or a Group\n instance (see :ref:`GroupClassDescr`).\n name : str\n The name of the new array\n atom : Atom\n An Atom (see :ref:`AtomClassDescr`) instance representing\n the *type* and *shape* of the atomic objects to be saved.\n\n .. versionchanged:: 3.0\n The *atom* parameter can be None (default) if *obj* is\n provided.\n\n title : str, optional\n A description for this node (it sets the TITLE HDF5 attribute\n on disk).\n filters : Filters\n An instance of the Filters class (see :ref:`FiltersClassDescr`)\n that provides information about the desired I/O filters to\n be applied during the life of this object.\n expectedrows : int, optional\n A user estimate about the number of row elements that will\n be added to the growable dimension in the `VLArray` node.\n If not provided, the default value is ``EXPECTED_ROWS_VLARRAY``\n (see ``tables/parameters.py``). If you plan to create either\n a much smaller or a much bigger `VLArray` try providing a guess;\n this will optimize the HDF5 B-Tree creation and management\n process time and the amount of memory used.\n\n .. versionadded:: 3.0\n\n chunkshape : int or tuple of int, optional\n The shape of the data chunk to be read or written in a\n single HDF5 I/O operation. Filters are applied to those\n chunks of data. The dimensionality of chunkshape must be 1.\n If None, a sensible value is calculated (which is recommended).\n byteorder : str, optional\n The byteorder of the data *on disk*, specified as 'little' or\n 'big'. If this is not specified, the byteorder is that of the\n platform.\n createparents : bool, optional\n Whether to create the needed groups for the parent path to\n exist (not done by default).\n obj : python object\n The array or scalar to be saved. Accepted types are NumPy\n arrays and scalars, as well as native Python sequences and\n scalars, provided that values are regular (i.e. they are\n not like ``[[1,2],2]``) and homogeneous (i.e. 
all the\n            elements are of the same type).\n\n            The *obj* parameter is optional and it can be provided in\n            alternative to the *atom* parameter.\n            If both *obj* and *atom* are provided they must\n            be consistent with each other.\n\n            .. versionadded:: 3.0\n\n        See Also\n        --------\n        VLArray : for more information on variable-length arrays\n\n        .. versionchanged:: 3.0\n           The *expectedsizeinMB* parameter has been replaced by\n           *expectedrows*.\n\n        \"\"\"\n\n        if obj is not None:\n            flavor = flavor_of(obj)\n            obj = array_as_internal(obj, flavor)\n\n            if atom is not None and atom.dtype != obj.dtype:\n                raise TypeError('the atom parameter is not consistent with '\n                                'the data type of the obj parameter')\n            if atom is None:\n                atom = Atom.from_dtype(obj.dtype)\n        elif atom is None:\n            raise ValueError('atom parameter cannot be None')\n\n        parentnode = self._get_or_create_path(where, createparents)\n        _checkfilters(filters)\n        ptobj = VLArray(parentnode, name,\n                        atom=atom, title=title, filters=filters,\n                        expectedrows=expectedrows,\n                        chunkshape=chunkshape, byteorder=byteorder)\n\n        if obj is not None:\n            ptobj.append(obj)\n\n        return ptobj\n\n    createVLArray = previous_api(create_vlarray)\n\n    def create_hard_link(self, where, name, target, createparents=False):\n        \"\"\"Create a hard link.\n\n        Create a hard link to a `target` node with the given `name` in\n        `where` location. `target` can be a node object or a path\n        string. If `createparents` is true, the intermediate groups\n        required for reaching `where` are created (the default is not\n        doing so).\n\n        The returned node is a regular `Group` or `Leaf` instance.\n\n        \"\"\"\n\n        targetnode = self.get_node(target)\n        parentnode = self._get_or_create_path(where, createparents)\n        linkextension._g_create_hard_link(parentnode, name, targetnode)\n        # Refresh children names in link's parent node\n        parentnode._g_add_children_names()\n        # Return the target node\n        return self.get_node(parentnode, name)\n\n    createHardLink = previous_api(create_hard_link)\n\n    def create_soft_link(self, where, name, target, createparents=False):\n        \"\"\"Create a soft link (aka symbolic link) to a `target` node.\n\n        Create a soft link (aka symbolic link) to a `target` node with\n        the given `name` in `where` location. `target` can be a node\n        object or a path string. If `createparents` is true, the\n        intermediate groups required for reaching `where` are created\n        (the default is not doing so).\n\n        The returned node is a SoftLink instance. See the SoftLink\n        class (in :ref:`SoftLinkClassDescr`) for more information on\n        soft links.\n\n        \"\"\"\n\n        if not isinstance(target, str):\n            if hasattr(target, '_v_pathname'):  # quacks like a Node\n                target = target._v_pathname\n            else:\n                raise ValueError(\n                    \"`target` has to be a string or a node object\")\n        parentnode = self._get_or_create_path(where, createparents)\n        slink = SoftLink(parentnode, name, target)\n        # Refresh children names in link's parent node\n        parentnode._g_add_children_names()\n        return slink\n\n    createSoftLink = previous_api(create_soft_link)\n\n    def create_external_link(self, where, name, target, createparents=False):\n        \"\"\"Create an external link.\n\n        Create an external link to a *target* node with the given *name*\n        in *where* location. *target* can be a node object in another\n        file or a path string in the form 'file:/path/to/node'. 
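A minimal\n        sketch, using hypothetical file and node names::\n\n            h5file.create_external_link('/links', 'ext1',\n                                        'other.h5:/arrays/a1')\n\n        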
If\n        *createparents* is true, the intermediate groups required for\n        reaching *where* are created (the default is not doing so).\n\n        The returned node is an :class:`ExternalLink` instance.\n\n        \"\"\"\n\n        if not isinstance(target, str):\n            if hasattr(target, '_v_pathname'):  # quacks like a Node\n                target = target._v_file.filename + ':' + target._v_pathname\n            else:\n                raise ValueError(\n                    \"`target` has to be a string or a node object\")\n        elif target.find(':/') == -1:\n            raise ValueError(\n                \"`target` must be expressed as 'file:/path/to/node'\")\n        parentnode = self._get_or_create_path(where, createparents)\n        elink = ExternalLink(parentnode, name, target)\n        # Refresh children names in link's parent node\n        parentnode._g_add_children_names()\n        return elink\n\n    createExternalLink = previous_api(create_external_link)\n\n    def _get_node(self, nodepath):\n        # The root node is always at hand.\n        if nodepath == '/':\n            return self.root\n\n        node = self._node_manager.get_node(nodepath)\n        assert node is not None, \"unable to instantiate node ``%s``\" % nodepath\n\n        return node\n\n    _getNode = previous_api(_get_node)\n\n    def get_node(self, where, name=None, classname=None):\n        \"\"\"Get the node under where with the given name.\n\n        where can be a Node instance (see :ref:`NodeClassDescr`) or a\n        path string leading to a node. If no name is specified, that\n        node is returned.\n\n        If a name is specified, this must be a string with the name of\n        a node under where. In this case the where argument can only\n        lead to a Group (see :ref:`GroupClassDescr`) instance (else a\n        TypeError is raised). The node called name under the group\n        where is returned.\n\n        In both cases, if the node to be returned does not exist, a\n        NoSuchNodeError is raised. Please note that hidden nodes are\n        also considered.\n\n        If the classname argument is specified, it must be the name of\n        a class derived from Node. 
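For example, passing\n        ``classname='Table'`` asks for a Table instance at that location. 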
If the node is found but it is not\n an instance of that class, a NoSuchNodeError is also raised.\n\n \"\"\"\n\n self._check_open()\n\n # For compatibility with old default arguments.\n if name == '':\n name = None\n\n # Get the parent path (and maybe the node itself).\n if isinstance(where, Node):\n node = where\n node._g_check_open() # the node object must be open\n nodepath = where._v_pathname\n elif isinstance(where, (basestring, numpy.str_)):\n node = None\n if where.startswith('/'):\n nodepath = where\n else:\n raise NameError(\n \"``where`` must start with a slash ('/')\")\n else:\n raise TypeError(\n \"``where`` is not a string nor a node: %r\" % (where,))\n\n # Get the name of the child node.\n if name is not None:\n node = None\n nodepath = join_path(nodepath, name)\n\n assert node is None or node._v_pathname == nodepath\n\n # Now we have the definitive node path, let us try to get the node.\n if node is None:\n node = self._get_node(nodepath)\n\n # Finally, check whether the desired node is an instance\n # of the expected class.\n if classname:\n class_ = get_class_by_name(classname)\n if not isinstance(node, class_):\n npathname = node._v_pathname\n nclassname = node.__class__.__name__\n # This error message is right since it can never be shown\n # for ``classname in [None, 'Node']``.\n raise NoSuchNodeError(\n \"could not find a ``%s`` node at ``%s``; \"\n \"instead, a ``%s`` node has been found there\"\n % (classname, npathname, nclassname))\n\n return node\n\n getNode = previous_api(get_node)\n\n def is_visible_node(self, path):\n \"\"\"Is the node under `path` visible?\n\n If the node does not exist, a NoSuchNodeError is raised.\n\n \"\"\"\n\n # ``util.isvisiblepath()`` is still recommended for internal use.\n return self.get_node(path)._f_isvisible()\n\n isVisibleNode = previous_api(is_visible_node)\n\n def rename_node(self, where, newname, name=None, overwrite=False):\n \"\"\"Change the name of the node specified by where and name to newname.\n\n Parameters\n ----------\n where, name\n These arguments work as in\n :meth:`File.get_node`, referencing the node to be acted upon.\n newname : str\n The new name to be assigned to the node (a string).\n overwrite : bool\n Whether to recursively remove a node with the same\n newname if it already exists (not done by default).\n\n \"\"\"\n\n obj = self.get_node(where, name=name)\n obj._f_rename(newname, overwrite)\n\n renameNode = previous_api(rename_node)\n\n def move_node(self, where, newparent=None, newname=None, name=None,\n overwrite=False, createparents=False):\n \"\"\"Move the node specified by where and name to newparent/newname.\n\n Parameters\n ----------\n where, name : path\n These arguments work as in\n :meth:`File.get_node`, referencing the node to be acted upon.\n newparent\n The destination group the node will be moved into (a\n path name or a Group instance). If it is\n not specified or None, the current parent\n group is chosen as the new parent.\n newname\n The new name to be assigned to the node in its\n destination (a string). 
If it is not specified or\n None, the current name is chosen as the\n new name.\n\n Notes\n -----\n The other arguments work as in :meth:`Node._f_move`.\n\n \"\"\"\n\n obj = self.get_node(where, name=name)\n obj._f_move(newparent, newname, overwrite, createparents)\n\n moveNode = previous_api(move_node)\n\n def copy_node(self, where, newparent=None, newname=None, name=None,\n overwrite=False, recursive=False, createparents=False,\n **kwargs):\n \"\"\"Copy the node specified by where and name to newparent/newname.\n\n Parameters\n ----------\n where : str\n These arguments work as in\n :meth:`File.get_node`, referencing the node to be acted\n upon.\n newparent : str or Group\n The destination group that the node will be copied\n into (a path name or a Group\n instance). If not specified or None, the\n current parent group is chosen as the new parent.\n newname : str\n The name to be assigned to the new copy in its\n destination (a string). If it is not specified or\n None, the current name is chosen as the\n new name.\n name : str\n These arguments work as in\n :meth:`File.get_node`, referencing the node to be acted\n upon.\n overwrite : bool, optional\n If True, the destination group will be overwritten if it already\n exists. Defaults to False.\n recursive : bool, optional\n If True, all descendant nodes of srcgroup are recursively copied.\n Defaults to False.\n createparents : bool, optional\n If True, any necessary parents of dstgroup will be created.\n Defaults to False.\n kwargs\n Additional keyword arguments can be used to customize the copying\n process. See the documentation of :meth:`Group._f_copy`\n for a description of those arguments.\n\n Returns\n -------\n node : Node\n The newly created copy of the source node (i.e. the destination\n node). See :meth:`.Node._f_copy` for further details on the\n semantics of copying nodes.\n\n \"\"\"\n\n obj = self.get_node(where, name=name)\n if obj._v_depth == 0 and newparent and not newname:\n npobj = self.get_node(newparent)\n if obj._v_file is not npobj._v_file:\n # Special case for copying file1:/ --> file2:/path\n self.root._f_copy_children(npobj, overwrite=overwrite,\n recursive=recursive, **kwargs)\n return npobj\n else:\n raise IOError(\n \"You cannot copy a root group over the same file\")\n return obj._f_copy(newparent, newname,\n overwrite, recursive, createparents, **kwargs)\n\n copyNode = previous_api(copy_node)\n\n def remove_node(self, where, name=None, recursive=False):\n \"\"\"Remove the object node *name* under *where* location.\n\n Parameters\n ----------\n where, name\n These arguments work as in\n :meth:`File.get_node`, referencing the node to be acted upon.\n recursive : bool\n If not supplied or false, the node will be removed\n only if it has no children; if it does, a\n NodeError will be raised. If supplied\n with a true value, the node and all its descendants will be\n completely removed.\n\n \"\"\"\n\n obj = self.get_node(where, name=name)\n obj._f_remove(recursive)\n\n removeNode = previous_api(remove_node)\n\n def get_node_attr(self, where, attrname, name=None):\n \"\"\"Get a PyTables attribute from the given node.\n\n Parameters\n ----------\n where, name\n These arguments work as in :meth:`File.get_node`, referencing the\n node to be acted upon.\n attrname\n The name of the attribute to retrieve. 
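For example, a\n            hypothetical ``h5file.get_node_attr('/', 'TITLE')`` would return\n            the title attribute of the root group, if it is set. 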
If the named\n attribute does not exist, an AttributeError is raised.\n\n \"\"\"\n\n obj = self.get_node(where, name=name)\n return obj._f_getattr(attrname)\n\n getNodeAttr = previous_api(get_node_attr)\n\n def set_node_attr(self, where, attrname, attrvalue, name=None):\n \"\"\"Set a PyTables attribute for the given node.\n\n Parameters\n ----------\n where, name\n These arguments work as in\n :meth:`File.get_node`, referencing the node to be acted upon.\n attrname\n The name of the attribute to set.\n attrvalue\n The value of the attribute to set. Any kind of Python\n object (like strings, ints, floats, lists, tuples, dicts,\n small NumPy objects ...) can be stored as an attribute.\n However, if necessary, pickle is automatically used so as\n to serialize objects that you might want to save.\n See the :class:`AttributeSet` class for details.\n\n Notes\n -----\n If the node already has a large number of attributes, a\n PerformanceWarning is issued.\n\n \"\"\"\n\n obj = self.get_node(where, name=name)\n obj._f_setattr(attrname, attrvalue)\n\n setNodeAttr = previous_api(set_node_attr)\n\n def del_node_attr(self, where, attrname, name=None):\n \"\"\"Delete a PyTables attribute from the given node.\n\n Parameters\n ----------\n where, name\n These arguments work as in :meth:`File.get_node`, referencing the\n node to be acted upon.\n attrname\n The name of the attribute to delete. If the named\n attribute does not exist, an AttributeError is raised.\n\n \"\"\"\n\n obj = self.get_node(where, name=name)\n obj._f_delattr(attrname)\n\n delNodeAttr = previous_api(del_node_attr)\n\n def copy_node_attrs(self, where, dstnode, name=None):\n \"\"\"Copy PyTables attributes from one node to another.\n\n Parameters\n ----------\n where, name\n These arguments work as in :meth:`File.get_node`, referencing the\n node to be acted upon.\n dstnode\n The destination node where the attributes will be copied to. It can\n be a path string or a Node instance (see :ref:`NodeClassDescr`).\n\n \"\"\"\n\n srcobject = self.get_node(where, name=name)\n dstobject = self.get_node(dstnode)\n srcobject._v_attrs._f_copy(dstobject)\n\n copyNodeAttrs = previous_api(copy_node_attrs)\n\n def copy_children(self, srcgroup, dstgroup,\n overwrite=False, recursive=False,\n createparents=False, **kwargs):\n \"\"\"Copy the children of a group into another group.\n\n Parameters\n ----------\n srcgroup : str\n The group to copy from.\n dstgroup : str\n The destination group.\n overwrite : bool, optional\n If True, the destination group will be overwritten if it already\n exists. Defaults to False.\n recursive : bool, optional\n If True, all descendant nodes of srcgroup are recursively copied.\n Defaults to False.\n createparents : bool, optional\n If True, any necessary parents of dstgroup will be created.\n Defaults to False.\n kwargs : dict\n Additional keyword arguments can be used to customize the copying\n process. See the documentation of :meth:`Group._f_copy_children`\n for a description of those arguments.\n\n \"\"\"\n\n srcgroup = self.get_node(srcgroup) # Does the source node exist?\n self._check_group(srcgroup) # Is it a group?\n\n srcgroup._f_copy_children(\n dstgroup, overwrite, recursive, createparents, **kwargs)\n\n copyChildren = previous_api(copy_children)\n\n def copy_file(self, dstfilename, overwrite=False, **kwargs):\n \"\"\"Copy the contents of this file to dstfilename.\n\n Parameters\n ----------\n dstfilename : str\n A path string indicating the name of the destination file. 
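For\n            example, ``h5file.copy_file('backup.h5')`` (a hypothetical name)\n            writes a compacted copy of the whole file. 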
If\n it already exists, the copy will fail with an IOError, unless\n the overwrite argument is true.\n overwrite : bool, optional\n If true, the destination file will be overwritten if it already\n exists. In this case, the destination file must be closed, or\n errors will occur. Defaults to False.\n kwargs\n Additional keyword arguments discussed below.\n\n Notes\n -----\n Additional keyword arguments may be passed to customize the\n copying process. For instance, title and filters may be changed,\n user attributes may be or may not be copied, data may be\n sub-sampled, stats may be collected, etc. Arguments unknown to\n nodes are simply ignored. Check the documentation for copying\n operations of nodes to see which options they support.\n\n In addition, it recognizes the names of parameters present in\n :file:`tables/parameters.py` as additional keyword arguments.\n See :ref:`parameter_files` for a detailed info on the supported\n parameters.\n\n Copying a file usually has the beneficial side effect of\n creating a more compact and cleaner version of the original\n file.\n\n \"\"\"\n\n self._check_open()\n\n # Check that we are not treading our own shoes\n if os.path.abspath(self.filename) == os.path.abspath(dstfilename):\n raise IOError(\"You cannot copy a file over itself\")\n\n # Compute default arguments.\n # These are *not* passed on.\n filters = kwargs.pop('filters', None)\n if filters is None:\n # By checking the HDF5 attribute, we avoid setting filters\n # in the destination file if not explicitly set in the\n # source file. Just by assigning ``self.filters`` we would\n # not be able to tell.\n filters = getattr(self.root._v_attrs, 'FILTERS', None)\n copyuserattrs = kwargs.get('copyuserattrs', True)\n title = kwargs.pop('title', self.title)\n\n if os.path.isfile(dstfilename) and not overwrite:\n raise IOError((\"file ``%s`` already exists; \"\n \"you may want to use the ``overwrite`` \"\n \"argument\") % dstfilename)\n\n # Create destination file, overwriting it.\n dstfileh = open_file(\n dstfilename, mode=\"w\", title=title, filters=filters, **kwargs)\n\n try:\n # Maybe copy the user attributes of the root group.\n if copyuserattrs:\n self.root._v_attrs._f_copy(dstfileh.root)\n\n # Copy the rest of the hierarchy.\n self.root._f_copy_children(dstfileh.root, recursive=True, **kwargs)\n finally:\n dstfileh.close()\n\n copyFile = previous_api(copy_file)\n\n def list_nodes(self, where, classname=None):\n \"\"\"Return a *list* with children nodes hanging from where.\n\n This is a list-returning version of :meth:`File.iter_nodes`.\n\n \"\"\"\n\n group = self.get_node(where) # Does the parent exist?\n self._check_group(group) # Is it a group?\n\n return group._f_list_nodes(classname)\n\n listNodes = previous_api(list_nodes)\n\n def iter_nodes(self, where, classname=None):\n \"\"\"Iterate over children nodes hanging from where.\n\n Parameters\n ----------\n where\n This argument works as in :meth:`File.get_node`, referencing the\n node to be acted upon.\n classname\n If the name of a class derived from\n Node (see :ref:`NodeClassDescr`) is supplied, only instances of\n that class (or subclasses of it) will be returned.\n\n Notes\n -----\n The returned nodes are alphanumerically sorted by their name.\n This is an iterator version of :meth:`File.list_nodes`.\n\n \"\"\"\n\n group = self.get_node(where) # Does the parent exist?\n self._check_group(group) # Is it a group?\n\n return group._f_iter_nodes(classname)\n\n iterNodes = previous_api(iter_nodes)\n\n def __contains__(self, path):\n 
\"\"\"Is there a node with that path?\n\n Returns True if the file has a node with the given path (a\n string), False otherwise.\n\n \"\"\"\n\n try:\n self.get_node(path)\n except NoSuchNodeError:\n return False\n else:\n return True\n\n def __iter__(self):\n \"\"\"Recursively iterate over the nodes in the tree.\n\n This is equivalent to calling :meth:`File.walk_nodes` with no\n arguments.\n\n Examples\n --------\n\n ::\n\n # Recursively list all the nodes in the object tree.\n h5file = tables.open_file('vlarray1.h5')\n print(\"All nodes in the object tree:\")\n for node in h5file:\n print(node)\n\n \"\"\"\n\n return self.walk_nodes('/')\n\n def walk_nodes(self, where=\"/\", classname=None):\n \"\"\"Recursively iterate over nodes hanging from where.\n\n Parameters\n ----------\n where : str or Group, optional\n If supplied, the iteration starts from (and includes)\n this group. It can be a path string or a\n Group instance (see :ref:`GroupClassDescr`).\n classname\n If the name of a class derived from\n Node (see :ref:`GroupClassDescr`) is supplied, only instances of\n that class (or subclasses of it) will be returned.\n\n Notes\n -----\n This version iterates over the leaves in the same group in order\n to avoid having a list referencing to them and thus, preventing\n the LRU cache to remove them after their use.\n\n Examples\n --------\n\n ::\n\n # Recursively print all the nodes hanging from '/detector'.\n print(\"Nodes hanging from group '/detector':\")\n for node in h5file.walk_nodes('/detector', classname='EArray'):\n print(node)\n\n \"\"\"\n\n class_ = get_class_by_name(classname)\n\n if class_ is Group: # only groups\n for group in self.walk_groups(where):\n yield group\n elif class_ is Node: # all nodes\n yield self.get_node(where)\n for group in self.walk_groups(where):\n for leaf in self.iter_nodes(group):\n yield leaf\n else: # only nodes of the named type\n for group in self.walk_groups(where):\n for leaf in self.iter_nodes(group, classname):\n yield leaf\n\n walkNodes = previous_api(walk_nodes)\n\n def walk_groups(self, where=\"/\"):\n \"\"\"Recursively iterate over groups (not leaves) hanging from where.\n\n The where group itself is listed first (preorder), then each of its\n child groups (following an alphanumerical order) is also traversed,\n following the same procedure. 
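A minimal sketch, assuming a\n        hypothetical '/detector' group exists::\n\n            for group in h5file.walk_groups('/detector'):\n                print(group)\n\n        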
If where is not supplied, the root\n group is used.\n\n The where argument can be a path string\n or a Group instance (see :ref:`GroupClassDescr`).\n\n \"\"\"\n\n group = self.get_node(where) # Does the parent exist?\n self._check_group(group) # Is it a group?\n return group._f_walk_groups()\n\n walkGroups = previous_api(walk_groups)\n\n def _check_open(self):\n \"\"\"Check the state of the file.\n\n If the file is closed, a `ClosedFileError` is raised.\n\n \"\"\"\n\n if not self.isopen:\n raise ClosedFileError(\"the file object is closed\")\n\n _checkOpen = previous_api(_check_open)\n\n def _iswritable(self):\n \"\"\"Is this file writable?\"\"\"\n\n return self.mode in ('w', 'a', 'r+')\n\n _isWritable = previous_api(_iswritable)\n\n def _check_writable(self):\n \"\"\"Check whether the file is writable.\n\n If the file is not writable, a `FileModeError` is raised.\n\n \"\"\"\n\n if not self._iswritable():\n raise FileModeError(\"the file is not writable\")\n\n _checkWritable = previous_api(_check_writable)\n\n def _check_group(self, node):\n # `node` must already be a node.\n if not isinstance(node, Group):\n raise TypeError(\"node ``%s`` is not a group\" % (node._v_pathname,))\n\n _checkGroup = previous_api(_check_group)\n\n # <Undo/Redo support>\n def is_undo_enabled(self):\n \"\"\"Is the Undo/Redo mechanism enabled?\n\n Returns True if the Undo/Redo mechanism has been enabled for\n this file, False otherwise. Please note that this mechanism is\n persistent, so a newly opened PyTables file may already have\n Undo/Redo support enabled.\n\n \"\"\"\n\n self._check_open()\n return self._undoEnabled\n\n isUndoEnabled = previous_api(is_undo_enabled)\n\n def _check_undo_enabled(self):\n if not self._undoEnabled:\n raise UndoRedoError(\"Undo/Redo feature is currently disabled!\")\n\n _checkUndoEnabled = previous_api(_check_undo_enabled)\n\n def _create_transaction_group(self):\n tgroup = TransactionGroupG(\n self.root, _trans_group_name,\n \"Transaction information container\", new=True)\n # The format of the transaction container.\n tgroup._v_attrs._g__setattr('FORMATVERSION', _trans_version)\n return tgroup\n\n _createTransactionGroup = previous_api(_create_transaction_group)\n\n def _create_transaction(self, troot, tid):\n return TransactionG(\n troot, _trans_name % tid,\n \"Transaction number %d\" % tid, new=True)\n\n _createTransaction = previous_api(_create_transaction)\n\n def _create_mark(self, trans, mid):\n return MarkG(\n trans, _markName % mid,\n \"Mark number %d\" % mid, new=True)\n\n _createMark = previous_api(_create_mark)\n\n def enable_undo(self, filters=Filters(complevel=1)):\n \"\"\"Enable the Undo/Redo mechanism.\n\n This operation prepares the database for undoing and redoing\n modifications in the node hierarchy. This\n allows :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and\n other methods to be called.\n\n The filters argument, when specified,\n must be an instance of class Filters (see :ref:`FiltersClassDescr`) and\n is meant for setting the compression values for the action log. The\n default is having compression enabled, as the gains in terms of\n space can be considerable. 
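(The default, as\n        the method signature shows, is ``Filters(complevel=1)``.) 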
You may want to disable compression if\n you want maximum speed for Undo/Redo operations.\n\n Calling this method when the Undo/Redo mechanism is already\n enabled raises an UndoRedoError.\n\n \"\"\"\n\n maxundo = self.params['MAX_UNDO_PATH_LENGTH']\n\n class ActionLog(NotLoggedMixin, Table):\n pass\n\n class ActionLogDesc(IsDescription):\n opcode = UInt8Col(pos=0)\n arg1 = StringCol(maxundo, pos=1, dflt=b\"\")\n arg2 = StringCol(maxundo, pos=2, dflt=b\"\")\n\n self._check_open()\n\n # Enabling several times is not allowed to avoid the user having\n # the illusion that a new implicit mark has been created\n # when calling enable_undo for the second time.\n\n if self.is_undo_enabled():\n raise UndoRedoError(\"Undo/Redo feature is already enabled!\")\n\n self._markers = {}\n self._seqmarkers = []\n self._nmarks = 0\n self._curtransaction = 0\n self._curmark = -1 # No marks yet\n\n # Get the Group for keeping user actions\n try:\n tgroup = self.get_node(_trans_group_path)\n except NodeError:\n # The file is going to be changed.\n self._check_writable()\n\n # A transaction log group does not exist. Create it\n tgroup = self._create_transaction_group()\n\n # Create a transaction.\n self._trans = self._create_transaction(\n tgroup, self._curtransaction)\n\n # Create an action log\n self._actionlog = ActionLog(\n tgroup, _action_log_name, ActionLogDesc, \"Action log\",\n filters=filters)\n\n # Create an implicit mark\n self._actionlog.append([(_op_to_code[\"MARK\"], str(0), '')])\n self._nmarks += 1\n self._seqmarkers.append(0) # current action is 0\n\n # Create a group for mark 0\n self._create_mark(self._trans, 0)\n # Initialize the marker pointer\n self._curmark = int(self._nmarks - 1)\n # Initialize the action pointer\n self._curaction = self._actionlog.nrows - 1\n else:\n # The group seems to exist already\n # Get the default transaction\n self._trans = tgroup._f_get_child(\n _trans_name % self._curtransaction)\n # Open the action log and go to the end of it\n self._actionlog = tgroup.actionlog\n for row in self._actionlog:\n if row[\"opcode\"] == _op_to_code[\"MARK\"]:\n name = row[\"arg2\"].decode('utf-8')\n self._markers[name] = self._nmarks\n self._seqmarkers.append(row.nrow)\n self._nmarks += 1\n # Get the current mark and current action\n self._curmark = int(self._actionlog.attrs.CURMARK)\n self._curaction = self._actionlog.attrs.CURACTION\n\n # The Undo/Redo mechanism has been enabled.\n self._undoEnabled = True\n\n enableUndo = previous_api(enable_undo)\n\n def disable_undo(self):\n \"\"\"Disable the Undo/Redo mechanism.\n\n Disabling the Undo/Redo mechanism leaves the database in the\n current state and forgets past and future database states. 
This\n makes :meth:`File.mark`, :meth:`File.undo`, :meth:`File.redo` and other\n methods fail with an UndoRedoError.\n\n Calling this method when the Undo/Redo mechanism is already\n disabled raises an UndoRedoError.\n\n \"\"\"\n\n self._check_open()\n\n if not self.is_undo_enabled():\n raise UndoRedoError(\"Undo/Redo feature is already disabled!\")\n\n # The file is going to be changed.\n self._check_writable()\n\n del self._markers\n del self._seqmarkers\n del self._curmark\n del self._curaction\n del self._curtransaction\n del self._nmarks\n del self._actionlog\n # Recursively delete the transaction group\n tnode = self.get_node(_trans_group_path)\n tnode._g_remove(recursive=1)\n\n # The Undo/Redo mechanism has been disabled.\n self._undoEnabled = False\n\n disableUndo = previous_api(disable_undo)\n\n def mark(self, name=None):\n \"\"\"Mark the state of the database.\n\n Creates a mark for the current state of the database. A unique (and\n immutable) identifier for the mark is returned. An optional name (a\n string) can be assigned to the mark. Both the identifier of a mark and\n its name can be used in :meth:`File.undo` and :meth:`File.redo`\n operations. When the name has already been used for another mark,\n an UndoRedoError is raised.\n\n This method can only be called when the Undo/Redo mechanism has been\n enabled. Otherwise, an UndoRedoError is raised.\n\n \"\"\"\n\n self._check_open()\n self._check_undo_enabled()\n\n if name is None:\n name = ''\n else:\n if not isinstance(name, str):\n raise TypeError(\"Only strings are allowed as mark names. \"\n \"You passed object: '%s'\" % name)\n if name in self._markers:\n raise UndoRedoError(\"Name '%s' is already used as a marker \"\n \"name. Try another one.\" % name)\n\n # The file is going to be changed.\n self._check_writable()\n\n self._markers[name] = self._curmark + 1\n\n # Create an explicit mark\n # Insert the mark in the action log\n self._log(\"MARK\", str(self._curmark + 1), name)\n self._curmark += 1\n self._nmarks = self._curmark + 1\n self._seqmarkers.append(self._curaction)\n # Create a group for the current mark\n self._create_mark(self._trans, self._curmark)\n return self._curmark\n\n def _log(self, action, *args):\n \"\"\"Log an action.\n\n The `action` must be an all-uppercase string identifying it.\n Arguments must also be strings.\n\n This method should be called once the action has been completed.\n\n This method can only be called when the Undo/Redo mechanism has\n been enabled. 
Otherwise, an `UndoRedoError` is raised.\n\n        \"\"\"\n\n        assert self.is_undo_enabled()\n\n        maxundo = self.params['MAX_UNDO_PATH_LENGTH']\n        # Check whether we are at the end of the action log or not\n        if self._curaction != self._actionlog.nrows - 1:\n            # We are not, so delete the trailing actions\n            self._actionlog.remove_rows(self._curaction + 1,\n                                        self._actionlog.nrows)\n            # Reset the current marker group\n            mnode = self.get_node(_markPath % (self._curtransaction,\n                                               self._curmark))\n            mnode._g_reset()\n            # Delete the marker groups with backup objects\n            for mark in xrange(self._curmark + 1, self._nmarks):\n                mnode = self.get_node(_markPath % (self._curtransaction, mark))\n                mnode._g_remove(recursive=1)\n            # Update the new number of marks\n            self._nmarks = self._curmark + 1\n            self._seqmarkers = self._seqmarkers[:self._nmarks]\n\n        if action not in _op_to_code:  # INTERNAL\n            raise UndoRedoError(\"Action ``%s`` not in ``_op_to_code`` \"\n                                \"dictionary: %r\" % (action, _op_to_code))\n\n        arg1 = \"\"\n        arg2 = \"\"\n        if len(args) <= 1:\n            arg1 = args[0]\n        elif len(args) <= 2:\n            arg1 = args[0]\n            arg2 = args[1]\n        else:  # INTERNAL\n            raise UndoRedoError(\"Too many parameters for action log: \"\n                                \"%r\" % (args,))\n        if (len(arg1) > maxundo\n                or len(arg2) > maxundo):  # INTERNAL\n            raise UndoRedoError(\"Parameter arg1 or arg2 is too long: \"\n                                \"(%r, %r)\" % (arg1, arg2))\n        # print(\"Logging-->\", (action, arg1, arg2))\n        self._actionlog.append([(_op_to_code[action],\n                                 arg1.encode('utf-8'),\n                                 arg2.encode('utf-8'))])\n        self._curaction += 1\n\n    def _get_mark_id(self, mark):\n        \"\"\"Get an integer markid from a mark sequence number or name.\"\"\"\n\n        if isinstance(mark, int):\n            markid = mark\n        elif isinstance(mark, str):\n            if mark not in self._markers:\n                lmarkers = sorted(self._markers.iterkeys())\n                raise UndoRedoError(\"The mark that you have specified has not \"\n                                    \"been found in the internal marker list: \"\n                                    \"%r\" % lmarkers)\n            markid = self._markers[mark]\n        else:\n            raise TypeError(\"Parameter mark can only be an integer or a \"\n                            \"string, and you passed a type <%s>\" % type(mark))\n        # print(\"markid, self._nmarks:\", markid, self._nmarks)\n        return markid\n\n    _getMarkID = previous_api(_get_mark_id)\n\n    def _get_final_action(self, markid):\n        \"\"\"Get the action to go.\n\n        It does not touch the self private attributes\n\n        \"\"\"\n\n        if markid > self._nmarks - 1:\n            # The required mark is beyond the end of the action log\n            # The final action is the last row\n            return self._actionlog.nrows\n        elif markid <= 0:\n            # The required mark is the first one\n            # return the first row\n            return 0\n\n        return self._seqmarkers[markid]\n\n    _getFinalAction = previous_api(_get_final_action)\n\n    def _doundo(self, finalaction, direction):\n        \"\"\"Undo/Redo actions up to final action in the specified direction.\"\"\"\n\n        if direction < 0:\n            actionlog = \\\n                self._actionlog[finalaction + 1:self._curaction + 1][::-1]\n        else:\n            actionlog = self._actionlog[self._curaction:finalaction]\n\n        # Uncomment this for debugging\n#         print(\"curaction, finalaction, direction\", \\\n#               self._curaction, finalaction, direction)\n        for i in xrange(len(actionlog)):\n            if actionlog['opcode'][i] != _op_to_code[\"MARK\"]:\n                # undo/redo the action\n                if direction > 0:\n                    # Uncomment this for debugging\n#                     print(\"redo-->\", \\\n#                           _code_to_op[actionlog['opcode'][i]],\\\n#                           actionlog['arg1'][i],\\\n#                           actionlog['arg2'][i])\n                    undoredo.redo(self,\n                                  # _code_to_op[actionlog['opcode'][i]],\n                                  # The next is a workaround for python < 2.5\n                                  
_code_to_op[int(actionlog['opcode'][i])],\n actionlog['arg1'][i].decode('utf8'),\n actionlog['arg2'][i].decode('utf8'))\n else:\n # Uncomment this for debugging\n # print(\"undo-->\", \\\n # _code_to_op[actionlog['opcode'][i]],\\\n # actionlog['arg1'][i].decode('utf8'),\\\n # actionlog['arg2'][i].decode('utf8'))\n undoredo.undo(self,\n # _code_to_op[actionlog['opcode'][i]],\n # The next is a workaround for python < 2.5\n _code_to_op[int(actionlog['opcode'][i])],\n actionlog['arg1'][i].decode('utf8'),\n actionlog['arg2'][i].decode('utf8'))\n else:\n if direction > 0:\n self._curmark = int(actionlog['arg1'][i])\n else:\n self._curmark = int(actionlog['arg1'][i]) - 1\n # Protection against negative marks\n if self._curmark < 0:\n self._curmark = 0\n self._curaction += direction\n\n def undo(self, mark=None):\n \"\"\"Go to a past state of the database.\n\n Returns the database to the state associated with the specified mark.\n Both the identifier of a mark and its name can be used. If the mark is\n omitted, the last created mark is used. If there are no past\n marks, or the specified mark is not older than the current one, an\n UndoRedoError is raised.\n\n This method can only be called when the Undo/Redo mechanism\n has been enabled. Otherwise, an UndoRedoError\n is raised.\n\n \"\"\"\n\n self._check_open()\n self._check_undo_enabled()\n\n# print(\"(pre)UNDO: (curaction, curmark) = (%s,%s)\" % \\\n# (self._curaction, self._curmark))\n if mark is None:\n markid = self._curmark\n # Correction if we are settled on top of a mark\n opcode = self._actionlog.cols.opcode\n if opcode[self._curaction] == _op_to_code[\"MARK\"]:\n markid -= 1\n else:\n # Get the mark ID number\n markid = self._get_mark_id(mark)\n # Get the final action ID to go\n finalaction = self._get_final_action(markid)\n if finalaction > self._curaction:\n raise UndoRedoError(\"Mark ``%s`` is newer than the current mark. \"\n \"Use `redo()` or `goto()` instead.\" % (mark,))\n\n # The file is going to be changed.\n self._check_writable()\n\n # Try to reach this mark by unwinding actions in the log\n self._doundo(finalaction - 1, -1)\n if self._curaction < self._actionlog.nrows - 1:\n self._curaction += 1\n self._curmark = int(self._actionlog.cols.arg1[self._curaction])\n# print(\"(post)UNDO: (curaction, curmark) = (%s,%s)\" % \\\n# (self._curaction, self._curmark))\n\n def redo(self, mark=None):\n \"\"\"Go to a future state of the database.\n\n Returns the database to the state associated with the specified\n mark. Both the identifier of a mark and its name can be used.\n If the `mark` is omitted, the next created mark is used. If\n there are no future marks, or the specified mark is not newer\n than the current one, an UndoRedoError is raised.\n\n This method can only be called when the Undo/Redo mechanism has\n been enabled. Otherwise, an UndoRedoError is raised.\n\n \"\"\"\n\n self._check_open()\n self._check_undo_enabled()\n\n# print(\"(pre)REDO: (curaction, curmark) = (%s, %s)\" % \\\n# (self._curaction, self._curmark))\n if self._curaction >= self._actionlog.nrows - 1:\n # We are at the end of log, so no action\n return\n\n if mark is None:\n mark = self._curmark + 1\n elif mark == -1:\n mark = int(self._nmarks) # Go beyond the mark bounds up to the end\n # Get the mark ID number\n markid = self._get_mark_id(mark)\n finalaction = self._get_final_action(markid)\n if finalaction < self._curaction + 1:\n raise UndoRedoError(\"Mark ``%s`` is older than the current mark. 
\"\n \"Use `redo()` or `goto()` instead.\" % (mark,))\n\n # The file is going to be changed.\n self._check_writable()\n\n # Get the final action ID to go\n self._curaction += 1\n\n # Try to reach this mark by redoing the actions in the log\n self._doundo(finalaction, 1)\n # Increment the current mark only if we are not at the end of marks\n if self._curmark < self._nmarks - 1:\n self._curmark += 1\n if self._curaction > self._actionlog.nrows - 1:\n self._curaction = self._actionlog.nrows - 1\n# print(\"(post)REDO: (curaction, curmark) = (%s,%s)\" % \\\n# (self._curaction, self._curmark))\n\n def goto(self, mark):\n \"\"\"Go to a specific mark of the database.\n\n Returns the database to the state associated with the specified mark.\n Both the identifier of a mark and its name can be used.\n\n This method can only be called when the Undo/Redo mechanism has been\n enabled. Otherwise, an UndoRedoError is raised.\n\n \"\"\"\n\n self._check_open()\n self._check_undo_enabled()\n\n if mark == -1: # Special case\n mark = self._nmarks # Go beyond the mark bounds up to the end\n # Get the mark ID number\n markid = self._get_mark_id(mark)\n finalaction = self._get_final_action(markid)\n if finalaction < self._curaction:\n self.undo(mark)\n else:\n self.redo(mark)\n\n def get_current_mark(self):\n \"\"\"Get the identifier of the current mark.\n\n Returns the identifier of the current mark. This can be used\n to know the state of a database after an application crash, or to\n get the identifier of the initial implicit mark after a call\n to :meth:`File.enable_undo`.\n\n This method can only be called when the Undo/Redo mechanism\n has been enabled. Otherwise, an UndoRedoError\n is raised.\n\n \"\"\"\n\n self._check_open()\n self._check_undo_enabled()\n return self._curmark\n\n getCurrentMark = previous_api(get_current_mark)\n\n def _shadow_name(self):\n \"\"\"Compute and return a shadow name.\n\n Computes the current shadow name according to the current\n transaction, mark and action. 
It returns a tuple with the\n        shadow parent node and the name of the shadow in it.\n\n        \"\"\"\n\n        parent = self.get_node(\n            _shadow_parent % (self._curtransaction, self._curmark))\n        name = _shadow_name % (self._curaction,)\n\n        return (parent, name)\n\n    _shadowName = previous_api(_shadow_name)\n\n    # </Undo/Redo support>\n\n    def flush(self):\n        \"\"\"Flush all the alive leaves in the object tree.\"\"\"\n\n        self._check_open()\n\n        # Flush the cache to disk\n        self._node_manager.flush_nodes()\n        self._flush_file(0)  # 0 means local scope, 1 global (virtual) scope\n\n    def close(self):\n        \"\"\"Flush all the alive leaves in object tree and close the file.\"\"\"\n\n        # If the file is already closed, return immediately\n        if not self.isopen:\n            return\n\n        # If this file has been opened more than once, decrease the\n        # counter and return\n        if self._open_count > 1:\n            self._open_count -= 1\n            return\n\n        filename = self.filename\n\n        if self._undoEnabled and self._iswritable():\n            # Save the current mark and current action\n            self._actionlog.attrs._g__setattr(\"CURMARK\", self._curmark)\n            self._actionlog.attrs._g__setattr(\"CURACTION\", self._curaction)\n\n        # Close all loaded nodes.\n        self.root._f_close()\n\n        self._node_manager.shutdown()\n\n        # Post-conditions\n        assert len(self._node_manager.cache) == 0, \\\n            (\"cached nodes remain after closing: %s\"\n             % list(self._node_manager.cache))\n\n        # No other nodes should have been revived.\n        assert len(self._node_manager.registry) == 0, \\\n            (\"alive nodes remain after closing: %s\"\n             % list(self._node_manager.registry))\n\n        # Close the file\n        self._close_file()\n\n        # After the objects are disconnected, destroy the\n        # object dictionary using the brute force ;-)\n        # This should help the garbage collector\n        self.__dict__.clear()\n\n        # Set the flag to indicate that the file is closed\n        self.isopen = 0\n\n        # Restore the filename attribute that is used by _FileRegistry\n        self.filename = filename\n\n        # Delete the entry from the registry of opened files\n        _open_files.remove(self)\n\n    def __enter__(self):\n        \"\"\"Enter a context and return the same file.\"\"\"\n\n        return self\n\n    def __exit__(self, *exc_info):\n        \"\"\"Exit a context and close the file.\"\"\"\n\n        self.close()\n        return False  # do not hide exceptions\n\n    def __str__(self):\n        \"\"\"Return a short string representation of the object tree.\n\n        Examples\n        --------\n\n        ::\n\n            >>> f = tables.open_file('data/test.h5')\n            >>> print(f)\n            data/test.h5 (File) 'Table Benchmark'\n            Last modif.: 'Mon Sep 20 12:40:47 2004'\n            Object Tree:\n            / (Group) 'Table Benchmark'\n            /tuple0 (Table(100,)) 'This is the table title'\n            /group0 (Group) ''\n            /group0/tuple1 (Table(100,)) 'This is the table title'\n            /group0/group1 (Group) ''\n            /group0/group1/tuple2 (Table(100,)) 'This is the table title'\n            /group0/group1/group2 (Group) ''\n\n        \"\"\"\n\n        if not self.isopen:\n            return \"<closed File>\"\n\n        # Print all the nodes (Group and Leaf objects) on object tree\n        try:\n            date = time.asctime(time.localtime(os.stat(self.filename)[8]))\n        except OSError:\n            # in-memory file\n            date = \"\"\n        astring = self.filename + ' (File) ' + repr(self.title) + '\\n'\n#         astring += 'root_uep :=' + repr(self.root_uep) + '; '\n#         astring += 'format_version := ' + self.format_version + '\\n'\n#         astring += 'filters :=' + repr(self.filters) + '\\n'\n        astring += 'Last modif.: ' + repr(date) + '\\n'\n        astring += 'Object Tree: \\n'\n\n        for group in self.walk_groups(\"/\"):\n            astring += str(group) + '\\n'\n            for kind in self._node_kinds[1:]:\n                for node in self.list_nodes(group, 
kind):\n astring += str(node) + '\\n'\n return astring\n\n def __repr__(self):\n \"\"\"Return a detailed string representation of the object tree.\"\"\"\n\n if not self.isopen:\n return \"<closed File>\"\n\n # Print all the nodes (Group and Leaf objects) on object tree\n astring = 'File(filename=' + str(self.filename) + \\\n ', title=' + repr(self.title) + \\\n ', mode=' + repr(self.mode) + \\\n ', root_uep=' + repr(self.root_uep) + \\\n ', filters=' + repr(self.filters) + \\\n ')\\n'\n for group in self.walk_groups(\"/\"):\n astring += str(group) + '\\n'\n for kind in self._node_kinds[1:]:\n for node in self.list_nodes(group, kind):\n astring += repr(node) + '\\n'\n return astring\n\n def _update_node_locations(self, oldpath, newpath):\n \"\"\"Update location information of nodes under `oldpath`.\n\n This only affects *already loaded* nodes.\n\n \"\"\"\n\n oldprefix = oldpath + '/' # root node can not be renamed, anyway\n oldprefix_len = len(oldprefix)\n\n # Update alive and dead descendents.\n for cache in [self._node_manager.cache, self._node_manager.registry]:\n for nodepath in cache:\n if nodepath.startswith(oldprefix) and nodepath != oldprefix:\n nodesuffix = nodepath[oldprefix_len:]\n newnodepath = join_path(newpath, nodesuffix)\n newnodeppath = split_path(newnodepath)[0]\n descendent_node = self._get_node(nodepath)\n descendent_node._g_update_location(newnodeppath)\n\n _updateNodeLocations = previous_api(_update_node_locations)\n\n\n# If a user hits ^C during a run, it is wise to gracefully close the\n# opened files.\nimport atexit\natexit.register(_open_files.close_all)\n\n\n## Local Variables:\n## mode: python\n## py-indent-offset: 4\n## tab-width: 4\n## fill-column: 72\n## End:\n",
"\"\"\"Beware! you need PyTables >= 2.3 to run this script!\"\"\"\n\nfrom __future__ import print_function\nfrom time import time # use clock for Win\nimport numpy as np\nimport tables\n\n# NEVENTS = 10000\nNEVENTS = 20000\nMAX_PARTICLES_PER_EVENT = 100\n\n# Particle description\n\n\nclass Particle(tables.IsDescription):\n # event_id = tables.Int32Col(pos=1, indexed=True) # event id (indexed)\n event_id = tables.Int32Col(pos=1) # event id (not indexed)\n particle_id = tables.Int32Col(pos=2) # particle id in the event\n parent_id = tables.Int32Col(pos=3) # the id of the parent\n # particle (negative\n # values means no parent)\n momentum = tables.Float64Col(shape=3, pos=4) # momentum of the particle\n mass = tables.Float64Col(pos=5) # mass of the particle\n\n# Create a new table for events\nt1 = time()\nprint(\"Creating a table with %s entries aprox.. Wait please...\" %\n (int(NEVENTS * (MAX_PARTICLES_PER_EVENT / 2.))))\nfileh = tables.open_file(\"particles-pro.h5\", mode=\"w\")\ngroup = fileh.create_group(fileh.root, \"events\")\ntable = fileh.create_table(group, 'table', Particle, \"A table\",\n tables.Filters(0))\n# Choose this line if you want data compression\n# table = fileh.create_table(group, 'table', Particle, \"A table\", Filters(1))\n\n# Fill the table with events\nnp.random.seed(1) # In order to have reproducible results\nparticle = table.row\nfor i in range(NEVENTS):\n for j in range(np.random.randint(0, MAX_PARTICLES_PER_EVENT)):\n particle['event_id'] = i\n particle['particle_id'] = j\n particle['parent_id'] = j - 10 # 10 root particles (max)\n particle['momentum'] = np.random.normal(5.0, 2.0, size=3)\n particle['mass'] = np.random.normal(500.0, 10.0)\n # This injects the row values.\n particle.append()\ntable.flush()\nprint(\"Added %s entries --- Time: %s sec\" %\n (table.nrows, round((time() - t1), 3)))\n\nt1 = time()\nprint(\"Creating index...\")\ntable.cols.event_id.create_index(optlevel=0, _verbose=True)\nprint(\"Index created --- Time: %s sec\" % (round((time() - t1), 3)))\n# Add the number of events as an attribute\ntable.attrs.nevents = NEVENTS\n\nfileh.close()\n\n# Open the file en read only mode and start selections\nprint(\"Selecting events...\")\nfileh = tables.open_file(\"particles-pro.h5\", mode=\"r\")\ntable = fileh.root.events.table\n\nprint(\"Particles in event 34:\", end=' ')\nnrows = 0\nt1 = time()\nfor row in table.where(\"event_id == 34\"):\n nrows += 1\nprint(nrows)\nprint(\"Done --- Time:\", round((time() - t1), 3), \"sec\")\n\nprint(\"Root particles in event 34:\", end=' ')\nnrows = 0\nt1 = time()\nfor row in table.where(\"event_id == 34\"):\n if row['parent_id'] < 0:\n nrows += 1\nprint(nrows)\nprint(\"Done --- Time:\", round((time() - t1), 3), \"sec\")\n\nprint(\"Sum of masses of root particles in event 34:\", end=' ')\nsmass = 0.0\nt1 = time()\nfor row in table.where(\"event_id == 34\"):\n if row['parent_id'] < 0:\n smass += row['mass']\nprint(smass)\nprint(\"Done --- Time:\", round((time() - t1), 3), \"sec\")\n\nprint(\n \"Sum of masses of daughter particles for particle 3 in event 34:\", end=' ')\nsmass = 0.0\nt1 = time()\nfor row in table.where(\"event_id == 34\"):\n if row['parent_id'] == 3:\n smass += row['mass']\nprint(smass)\nprint(\"Done --- Time:\", round((time() - t1), 3), \"sec\")\n\nprint(\"Sum of module of momentum for particle 3 in event 34:\", end=' ')\nsmomentum = 0.0\nt1 = time()\n# for row in table.where(\"(event_id == 34) & ((parent_id) == 3)\"):\nfor row in table.where(\"event_id == 34\"):\n if row['parent_id'] == 3:\n smomentum 
+= np.sqrt(np.add.reduce(row['momentum'] ** 2))\nprint(smomentum)\nprint(\"Done --- Time:\", round((time() - t1), 3), \"sec\")\n\n# This is the same than above, but using generator expressions\n# Python >= 2.4 needed here!\nprint(\"Sum of module of momentum for particle 3 in event 34 (2):\", end=' ')\nt1 = time()\nprint(sum(np.sqrt(np.add.reduce(row['momentum'] ** 2))\n for row in table.where(\"event_id == 34\")\n if row['parent_id'] == 3))\nprint(\"Done --- Time:\", round((time() - t1), 3), \"sec\")\n\n\nfileh.close()\n",
"#######################################################################\n# This script compares the speed of the computation of a polynomial\n# for different (numpy.memmap and tables.Expr) out-of-memory paradigms.\n#\n# Author: Francesc Alted\n# Date: 2010-02-24\n#######################################################################\n\nfrom __future__ import print_function\nimport os\nfrom time import time\nimport numpy as np\nimport tables as tb\nimport numexpr as ne\n\nexpr = \".25*x**3 + .75*x**2 - 1.5*x - 2\" # the polynomial to compute\nN = 10 * 1000 * 1000 # the number of points to compute expression (80 MB)\nstep = 100 * 1000 # perform calculation in slices of `step` elements\ndtype = np.dtype('f8') # the datatype\n#CHUNKSHAPE = (2**17,)\nCHUNKSHAPE = None\n\n# Global variable for the x values for pure numpy & numexpr\nx = None\n\n# *** The next variables do not need to be changed ***\n\n# Filenames for numpy.memmap\nfprefix = \"numpy.memmap\" # the I/O file prefix\nmpfnames = [fprefix + \"-x.bin\", fprefix + \"-r.bin\"]\n\n# Filename for tables.Expr\nh5fname = \"tablesExpr.h5\" # the I/O file\n\nMB = 1024 * 1024. # a MegaByte\n\n\ndef print_filesize(filename, clib=None, clevel=0):\n \"\"\"Print some statistics about file sizes.\"\"\"\n\n # os.system(\"sync\") # make sure that all data has been flushed to disk\n if isinstance(filename, list):\n filesize_bytes = 0\n for fname in filename:\n filesize_bytes += os.stat(fname)[6]\n else:\n filesize_bytes = os.stat(filename)[6]\n filesize_MB = round(filesize_bytes / MB, 1)\n print(\"\\t\\tTotal file sizes: %d -- (%s MB)\" % (\n filesize_bytes, filesize_MB), end=' ')\n if clevel > 0:\n print(\"(using %s lvl%s)\" % (clib, clevel))\n else:\n print()\n\n\ndef populate_x_numpy():\n \"\"\"Populate the values in x axis for numpy.\"\"\"\n global x\n # Populate x in range [-1, 1]\n x = np.linspace(-1, 1, N)\n\n\ndef populate_x_memmap():\n \"\"\"Populate the values in x axis for numpy.memmap.\"\"\"\n # Create container for input\n x = np.memmap(mpfnames[0], dtype=dtype, mode=\"w+\", shape=(N,))\n\n # Populate x in range [-1, 1]\n for i in range(0, N, step):\n chunk = np.linspace((2 * i - N) / float(N),\n (2 * (i + step) - N) / float(N), step)\n x[i:i + step] = chunk\n del x # close x memmap\n\n\ndef populate_x_tables(clib, clevel):\n \"\"\"Populate the values in x axis for pytables.\"\"\"\n f = tb.open_file(h5fname, \"w\")\n\n # Create container for input\n atom = tb.Atom.from_dtype(dtype)\n filters = tb.Filters(complib=clib, complevel=clevel)\n x = f.create_carray(f.root, \"x\", atom=atom, shape=(N,),\n filters=filters,\n chunkshape=CHUNKSHAPE,\n )\n\n # Populate x in range [-1, 1]\n for i in range(0, N, step):\n chunk = np.linspace((2 * i - N) / float(N),\n (2 * (i + step) - N) / float(N), step)\n x[i:i + step] = chunk\n f.close()\n\n\ndef compute_numpy():\n \"\"\"Compute the polynomial with pure numpy.\"\"\"\n y = eval(expr)\n\n\ndef compute_numexpr():\n \"\"\"Compute the polynomial with pure numexpr.\"\"\"\n y = ne.evaluate(expr)\n\n\ndef compute_memmap():\n \"\"\"Compute the polynomial with numpy.memmap.\"\"\"\n # Reopen inputs in read-only mode\n x = np.memmap(mpfnames[0], dtype=dtype, mode='r', shape=(N,))\n # Create the array output\n r = np.memmap(mpfnames[1], dtype=dtype, mode=\"w+\", shape=(N,))\n\n # Do the computation by chunks and store in output\n r[:] = eval(expr) # where is stored the result?\n # r = eval(expr) # result is stored in-memory\n\n del x, r # close x and r memmap arrays\n print_filesize(mpfnames)\n\n\ndef 
compute_tables(clib, clevel):\n \"\"\"Compute the polynomial with tables.Expr.\"\"\"\n f = tb.open_file(h5fname, \"a\")\n x = f.root.x # get the x input\n # Create container for output\n atom = tb.Atom.from_dtype(dtype)\n filters = tb.Filters(complib=clib, complevel=clevel)\n r = f.create_carray(f.root, \"r\", atom=atom, shape=(N,),\n filters=filters,\n chunkshape=CHUNKSHAPE,\n )\n\n # Do the actual computation and store in output\n ex = tb.Expr(expr) # parse the expression\n ex.set_output(r) # where is stored the result?\n # when commented out, the result goes in-memory\n ex.eval() # evaluate!\n\n f.close()\n print_filesize(h5fname, clib, clevel)\n\n\nif __name__ == '__main__':\n\n tb.print_versions()\n\n print(\"Total size for datasets:\",\n round(2 * N * dtype.itemsize / MB, 1), \"MB\")\n\n # Get the compression libraries supported\n # supported_clibs = [clib for clib in (\"zlib\", \"lzo\", \"bzip2\", \"blosc\")\n # supported_clibs = [clib for clib in (\"zlib\", \"lzo\", \"blosc\")\n supported_clibs = [clib for clib in (\"blosc\",)\n if tb.which_lib_version(clib)]\n\n # Initialization code\n # for what in [\"numpy\", \"numpy.memmap\", \"numexpr\"]:\n for what in [\"numpy\", \"numexpr\"]:\n # break\n print(\"Populating x using %s with %d points...\" % (what, N))\n t0 = time()\n if what == \"numpy\":\n populate_x_numpy()\n compute = compute_numpy\n elif what == \"numexpr\":\n populate_x_numpy()\n compute = compute_numexpr\n elif what == \"numpy.memmap\":\n populate_x_memmap()\n compute = compute_memmap\n print(\"*** Time elapsed populating:\", round(time() - t0, 3))\n print(\"Computing: '%s' using %s\" % (expr, what))\n t0 = time()\n compute()\n print(\"**************** Time elapsed computing:\",\n round(time() - t0, 3))\n\n for what in [\"tables.Expr\"]:\n t0 = time()\n first = True # Sentinel\n for clib in supported_clibs:\n # for clevel in (0, 1, 3, 6, 9):\n for clevel in range(10):\n # for clevel in (1,):\n if not first and clevel == 0:\n continue\n print(\"Populating x using %s with %d points...\" % (what, N))\n populate_x_tables(clib, clevel)\n print(\"*** Time elapsed populating:\", round(time() - t0, 3))\n print(\"Computing: '%s' using %s\" % (expr, what))\n t0 = time()\n compute_tables(clib, clevel)\n print(\"**************** Time elapsed computing:\",\n round(time() - t0, 3))\n first = False\n"
] | [
[
"numpy.array"
],
[
"numpy.all"
],
[
"numpy.arange",
"numpy.dtype"
],
[
"numpy.zeros"
],
[
"numpy.random.normal",
"numpy.add.reduce",
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.memmap",
"numpy.dtype",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
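The docstrings in the record above define the public Undo/Redo contract: enable_undo() logs an implicit mark 0, mark() returns immutable identifiers (optionally named), and undo()/redo() move the database between logged states, raising UndoRedoError on misuse. A minimal usage sketch, assuming only that PyTables is installed and that demo.h5 is a scratch path:

import tables

with tables.open_file("demo.h5", mode="w") as f:
    f.enable_undo()                     # creates the implicit mark 0
    f.create_array("/", "a", [1, 2, 3])
    f.mark("after-a")                   # explicit, named mark
    f.create_array("/", "b", [4, 5, 6])
    f.undo("after-a")                   # /b disappears from the tree
    assert "/b" not in f
    f.redo()                            # /b is back
    assert "/b" in f
    f.disable_undo()                    # forgets past and future states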
aljanabim/svea | [
"37d27089237af3777456d7664473ffb811dabf33"
] | [
"src/teleop_tools/mouse_teleop/scripts/mouse_teleop.py"
] | [
"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015 Enrique Fernandez\n# Released under the BSD License.\n#\n# Authors:\n# * Enrique Fernandez\n\nimport Tkinter\n\nimport rospy\nfrom geometry_msgs.msg import Twist, Vector3\n\nimport numpy\n\n\nclass MouseTeleop():\n def __init__(self):\n # Retrieve params:\n self._frequency = rospy.get_param('~frequency', 0.0)\n self._scale = rospy.get_param('~scale', 1.0)\n self._holonomic = rospy.get_param('~holonomic', False)\n\n # Create twist publisher:\n self._pub_cmd = rospy.Publisher('mouse_vel', Twist, queue_size=100)\n\n # Initialize twist components to zero:\n self._v_x = 0.0\n self._v_y = 0.0\n self._w = 0.0\n\n # Initialize mouse position (x, y) to None (unknown); it's initialized\n # when the mouse button is pressed on the _start callback that handles\n # that event:\n self._x = None\n self._y = None\n\n # Create window:\n self._root = Tkinter.Tk()\n self._root.title('Mouse Teleop')\n\n # Make window non-resizable:\n self._root.resizable(0, 0)\n\n # Create canvas:\n self._canvas = Tkinter.Canvas(self._root, bg='white')\n\n # Create canvas objects:\n self._canvas.create_arc(0, 0, 0, 0, fill='red', outline='red',\n width=1, style=Tkinter.PIESLICE, start=90.0, tag='w')\n self._canvas.create_line(0, 0, 0, 0, fill='blue', width=4, tag='v_x')\n\n if self._holonomic:\n self._canvas.create_line(0, 0, 0, 0,\n fill='blue', width=4, tag='v_y')\n\n # Create canvas text objects:\n self._text_v_x = Tkinter.StringVar()\n if self._holonomic:\n self._text_v_y = Tkinter.StringVar()\n self._text_w = Tkinter.StringVar()\n\n self._label_v_x = Tkinter.Label(self._root,\n anchor=Tkinter.W, textvariable=self._text_v_x)\n if self._holonomic:\n self._label_v_y = Tkinter.Label(self._root,\n anchor=Tkinter.W, textvariable=self._text_v_y)\n self._label_w = Tkinter.Label(self._root,\n anchor=Tkinter.W, textvariable=self._text_w)\n\n if self._holonomic:\n self._text_v_x.set('v_x = %0.2f m/s' % self._v_x)\n self._text_v_y.set('v_y = %0.2f m/s' % self._v_y)\n self._text_w.set( 'w = %0.2f deg/s' % self._w)\n else:\n self._text_v_x.set('v = %0.2f m/s' % self._v_x)\n self._text_w.set( 'w = %0.2f deg/s' % self._w)\n\n self._label_v_x.pack()\n if self._holonomic:\n self._label_v_y.pack()\n self._label_w.pack()\n\n # Bind event handlers:\n self._canvas.bind('<Button-1>', self._start)\n self._canvas.bind('<ButtonRelease-1>', self._release)\n\n self._canvas.bind('<Configure>', self._configure)\n\n if self._holonomic:\n self._canvas.bind('<B1-Motion>', self._mouse_motion_linear)\n self._canvas.bind('<Shift-B1-Motion>', self._mouse_motion_angular)\n\n self._root.bind('<Shift_L>', self._change_to_motion_angular)\n self._root.bind('<KeyRelease-Shift_L>',\n self._change_to_motion_linear)\n else:\n self._canvas.bind('<B1-Motion>', self._mouse_motion_angular)\n\n self._canvas.pack()\n\n # If frequency is positive, use synchronous publishing mode:\n if self._frequency > 0.0:\n # Create timer for the given frequency to publish the twist:\n period = rospy.Duration(1.0 / self._frequency)\n\n self._timer = rospy.Timer(period, self._publish_twist)\n\n # Start window event manager main loop:\n self._root.mainloop()\n\n def __del__(self):\n if self._frequency > 0.0:\n self._timer.shutdown()\n\n self._root.quit()\n\n def _start(self, event):\n self._x, self._y = event.y, event.x\n\n self._y_linear = self._y_angular = 0\n\n self._v_x = self._v_y = self._w = 0.0\n\n def _release(self, event):\n self._v_x = self._v_y = self._w = 0.0\n\n self._send_motion()\n\n def 
_configure(self, event):\n self._width, self._height = event.height, event.width\n\n self._c_x = self._height / 2.0\n self._c_y = self._width / 2.0\n\n self._r = min(self._height, self._width) * 0.25\n\n def _mouse_motion_linear(self, event):\n self._v_x, self._v_y = self._relative_motion(event.y, event.x)\n\n self._send_motion()\n\n def _mouse_motion_angular(self, event):\n self._v_x, self._w = self._relative_motion(event.y, event.x)\n\n self._send_motion()\n\n def _update_coords(self, tag, x0, y0, x1, y1):\n x0 += self._c_x\n y0 += self._c_y\n\n x1 += self._c_x\n y1 += self._c_y\n\n self._canvas.coords(tag, (x0, y0, x1, y1))\n\n def _draw_v_x(self, v):\n x = -v * float(self._width)\n\n self._update_coords('v_x', 0, 0, 0, x)\n\n def _draw_v_y(self, v):\n y = -v * float(self._height)\n\n self._update_coords('v_y', 0, 0, y, 0)\n\n def _draw_w(self, w):\n x0 = y0 = -self._r\n x1 = y1 = self._r\n\n self._update_coords('w', x0, y0, x1, y1)\n\n yaw = w * numpy.rad2deg(self._scale)\n\n self._canvas.itemconfig('w', extent=yaw)\n\n def _send_motion(self):\n v_x = self._v_x * self._scale\n v_y = self._v_y * self._scale\n w = self._w * self._scale\n\n linear = Vector3(v_x, v_y, 0.0)\n angular = Vector3(0.0, 0.0, w)\n\n self._draw_v_x(self._v_x)\n if self._holonomic:\n self._draw_v_y(self._v_y)\n self._draw_w(self._w)\n\n if self._holonomic:\n self._text_v_x.set('v_x = %0.2f m/s' % self._v_x)\n self._text_v_y.set('v_y = %0.2f m/s' % self._v_y)\n self._text_w.set( 'w = %0.2f deg/s' % numpy.rad2deg(self._w))\n else:\n self._text_v_x.set('v = %0.2f m/s' % self._v_x)\n self._text_w.set( 'w = %0.2f deg/s' % numpy.rad2deg(self._w))\n\n twist = Twist(linear, angular)\n self._pub_cmd.publish(twist)\n\n def _publish_twist(self, event):\n self._send_motion()\n\n def _relative_motion(self, x, y):\n dx = self._x - x\n dy = self._y - y\n\n dx /= float(self._width)\n dy /= float(self._height)\n\n dx = max(-1.0, min(dx, 1.0))\n dy = max(-1.0, min(dy, 1.0))\n\n return dx, dy\n\n def _change_to_motion_linear(self, event):\n if self._y is not None:\n y = event.x\n\n self._y_angular = self._y - y\n self._y = self._y_linear + y\n\n def _change_to_motion_angular(self, event):\n if self._y is not None:\n y = event.x\n\n self._y_linear = self._y - y\n self._y = self._y_angular + y\n\n\ndef main():\n rospy.init_node('mouse_teleop')\n\n MouseTeleop()\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n"
] | [
[
"numpy.rad2deg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
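The _relative_motion method in the record above turns the mouse drag into unitless velocities: the displacement from the press point is divided by the canvas size and clamped to [-1, 1] before the ~scale parameter is applied in _send_motion. A standalone sketch of that arithmetic (no ROS or Tkinter required; the function name is illustrative):

def relative_motion(x0, y0, x, y, width, height):
    # Displacement from the press point, normalised by the canvas size...
    dx = (x0 - x) / float(width)
    dy = (y0 - y) / float(height)
    # ...and clamped so the published velocity never exceeds +/- scale.
    clamp = lambda v: max(-1.0, min(v, 1.0))
    return clamp(dx), clamp(dy)

print(relative_motion(100, 100, 40, 160, 300, 200))  # (0.2, -0.3)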
dnjst/squidpy | [
"ca765d04b9621debb8752d3d4693dd68f6909513"
] | [
"tests/image/test_segmentation.py"
] | [
"from typing import Tuple, Union, Callable, Optional, Sequence\nfrom pytest_mock import MockerFixture\nimport pytest\n\nimport numpy as np\nimport dask.array as da\n\nfrom squidpy.im import (\n segment,\n ImageContainer,\n SegmentationCustom,\n SegmentationWatershed,\n)\nfrom squidpy.im._segment import _SEG_DTYPE\nfrom squidpy._constants._constants import SegmentationBackend\nfrom squidpy._constants._pkg_constants import Key\n\n\ndef dummy_segment(arr: np.ndarray) -> np.ndarray:\n assert isinstance(arr, np.ndarray)\n assert arr.ndim == 3\n return arr[..., 0].astype(np.uint32)\n\n\nclass TestGeneral:\n @pytest.mark.parametrize(\"ndim\", [2, 3])\n def test_input_ndim(self, ndim: int):\n img = np.zeros(shape=(10, 10))\n if ndim == 3:\n img = img[..., np.newaxis]\n sc = SegmentationCustom(dummy_segment)\n\n res = sc.segment(img)\n\n assert isinstance(res, np.ndarray)\n assert res.ndim == 3\n if ndim == 2:\n assert res.shape == img.shape + (1,)\n else:\n assert res.shape == img.shape\n\n def test_segment_invalid_shape(self):\n img = np.zeros(shape=(1, 10, 10, 2))\n sc = SegmentationCustom(dummy_segment)\n\n with pytest.raises(ValueError, match=r\"Expected `2` or `3` dimensions\"):\n sc.segment(img)\n\n def test_segment_container(self):\n img = ImageContainer(np.zeros(shape=(10, 10, 1)), layer=\"image\")\n sc = SegmentationCustom(dummy_segment)\n\n res = sc.segment(img, layer=\"image\", library_id=img[\"image\"].z.values[0])\n\n assert isinstance(res, ImageContainer)\n assert res.shape == img.shape\n assert \"image\" in res\n assert res[\"image\"].dims == img[\"image\"].dims\n\n\nclass TestWatershed:\n @pytest.mark.parametrize(\"thresh\", [None, 0.1, 0.5, 1.0])\n def test_threshold(self, thresh: Optional[float], mocker: MockerFixture):\n img = np.zeros((100, 200), dtype=np.float64)\n img[2:10, 2:10] = 1.0\n img[30:34, 10:16] = 1.0\n img = ImageContainer(img, layer=\"image\")\n\n sw = SegmentationWatershed()\n spy = mocker.spy(sw, \"_segment\")\n\n res = sw.segment(img, layer=\"image\", library_id=img[\"image\"].z.values[0], fn_kwargs={\"thresh\": thresh})\n\n assert isinstance(res, ImageContainer)\n spy.assert_called_once()\n call = spy.call_args_list[0]\n\n assert call[1][\"thresh\"] == thresh\n\n\nclass TestHighLevel:\n def test_invalid_layer(self, small_cont: ImageContainer):\n with pytest.raises(KeyError, match=r\"Image layer `foobar` not found in\"):\n segment(small_cont, layer=\"foobar\")\n\n @pytest.mark.parametrize(\"method\", [\"watershed\", dummy_segment])\n def test_method(self, small_cont: ImageContainer, method: Union[str, Callable]):\n res = segment(small_cont, method=method, copy=True)\n\n assert isinstance(res, ImageContainer)\n assert res.shape == small_cont.shape\n\n if callable(method):\n method = SegmentationBackend.CUSTOM.s\n\n assert Key.img.segment(method) in res\n\n if method in (\"log\", \"dog\", \"dog\"):\n assert res[Key.img.segment(method)].values.max() <= 1\n\n @pytest.mark.parametrize(\"dy\", [11, 0.5, None])\n @pytest.mark.parametrize(\"dx\", [15, 0.1, None])\n def test_size(self, small_cont: ImageContainer, dy: Optional[Union[int, float]], dx: Optional[Union[int, float]]):\n res = segment(small_cont, size=(dy, dx), copy=True)\n\n assert isinstance(res, ImageContainer)\n assert res.shape == small_cont.shape\n\n @pytest.mark.parametrize(\"channel\", [0, 1, 2])\n def test_channel(self, small_cont: ImageContainer, channel: int):\n segment(small_cont, copy=False, layer=\"image\", channel=channel)\n\n assert Key.img.segment(\"watershed\") in small_cont\n 
np.testing.assert_array_equal(\n list(small_cont[Key.img.segment(\"watershed\")].dims),\n [\"y\", \"x\", \"z\", f\"{small_cont['image'].dims[-1]}:{channel}\"],\n )\n\n def test_all_channels(self, small_cont: ImageContainer):\n def func(arr: np.ndarray):\n assert arr.shape == (small_cont.shape + (n_channels,))\n return np.zeros(arr.shape[:2], dtype=np.uint8)\n\n n_channels = small_cont[\"image\"].sizes[\"channels\"]\n segment(small_cont, copy=False, layer=\"image\", channel=None, method=func, layer_added=\"seg\")\n\n np.testing.assert_array_equal(small_cont[\"seg\"], np.zeros(small_cont.shape + (1, 1)))\n assert small_cont[\"seg\"].dtype == _SEG_DTYPE\n\n @pytest.mark.parametrize(\"key_added\", [None, \"foo\"])\n def test_key_added(self, small_cont: ImageContainer, key_added: Optional[str]):\n res = segment(small_cont, copy=False, layer=\"image\", layer_added=key_added)\n\n assert res is None\n assert Key.img.segment(\"watershed\", layer_added=key_added) in small_cont\n\n def test_passing_kwargs(self, small_cont: ImageContainer):\n def func(chunk: np.ndarray, sentinel: bool = False):\n assert sentinel, \"Sentinel not set.\"\n return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE)\n\n segment(\n small_cont, method=func, layer=\"image\", layer_added=\"bar\", chunks=25, lazy=False, depth=None, sentinel=True\n )\n assert small_cont[\"bar\"].values.dtype == _SEG_DTYPE\n np.testing.assert_array_equal(small_cont[\"bar\"].values, 0)\n\n @pytest.mark.parametrize(\"dask_input\", [False, True])\n @pytest.mark.parametrize(\"chunks\", [25, (50, 50, 1), \"auto\"])\n @pytest.mark.parametrize(\"lazy\", [False, True])\n def test_dask_segment(\n self, small_cont: ImageContainer, dask_input: bool, chunks: Union[int, Tuple[int, ...], str], lazy: bool\n ):\n def func(chunk: np.ndarray):\n if isinstance(chunks, tuple):\n np.testing.assert_array_equal(chunk.shape, [chunks[0] + 2 * d, chunks[1] + 2 * d, 1])\n elif isinstance(chunks, int):\n np.testing.assert_array_equal(chunk.shape, [chunks + 2 * d, chunks + 2 * d, 1])\n\n return np.zeros(chunk[..., 0].shape, dtype=_SEG_DTYPE)\n\n small_cont[\"foo\"] = da.asarray(small_cont[\"image\"].data) if dask_input else small_cont[\"image\"].values\n d = 10 # overlap depth\n assert isinstance(small_cont[\"foo\"].data, da.Array if dask_input else np.ndarray)\n\n segment(small_cont, method=func, layer=\"foo\", layer_added=\"bar\", chunks=chunks, lazy=lazy, depth={0: d, 1: d})\n\n if lazy:\n assert isinstance(small_cont[\"bar\"].data, da.Array)\n small_cont.compute()\n assert isinstance(small_cont[\"foo\"].data, np.ndarray)\n else:\n # make sure we didn't accidentally trigger foo's computation\n assert isinstance(small_cont[\"foo\"].data, da.Array if dask_input else np.ndarray)\n\n assert isinstance(small_cont[\"bar\"].data, np.ndarray)\n assert small_cont[\"bar\"].values.dtype == _SEG_DTYPE\n np.testing.assert_array_equal(small_cont[\"bar\"].values, 0)\n\n def test_copy(self, small_cont: ImageContainer):\n prev_keys = set(small_cont)\n res = segment(small_cont, copy=True, layer=\"image\")\n\n assert isinstance(res, ImageContainer)\n assert set(small_cont) == prev_keys\n assert Key.img.segment(\"watershed\") in res\n\n def test_parallelize(self, small_cont: ImageContainer):\n res1 = segment(small_cont, layer=\"image\", n_jobs=1, copy=True)\n res2 = segment(small_cont, layer=\"image\", n_jobs=2, copy=True)\n\n np.testing.assert_array_equal(\n res1[Key.img.segment(\"watershed\")].values, res2[Key.img.segment(\"watershed\")].values\n )\n\n @pytest.mark.parametrize(\"chunks\", [25, 
50])\n def test_blocking(self, small_cont: ImageContainer, chunks: int):\n def func(chunk: np.ndarray):\n labels = np.zeros(chunk[..., 0].shape, dtype=np.uint32)\n labels[0, 0] = 1\n return labels\n\n segment(small_cont, method=func, layer=\"image\", layer_added=\"bar\", chunks=chunks, lazy=False, depth=None)\n # blocks are label from top-left to bottom-right in an ascending order [0, num_blocks - 1]\n # lowest n bits are allocated for block, rest is for the label (i.e. for blocksize=25, we need 16 blocks ids\n # from [0, 15], which can be stored in 4 bits, then we just prepend 1 bit (see the above `func`, resulting\n # in unique 16 labels [10000, 11111]\n\n expected = np.zeros_like(small_cont[\"bar\"].values)\n start = 16 if chunks == 25 else 4\n for i in range(0, 100, chunks):\n for j in range(0, 100, chunks):\n expected[i, j] = start\n start += 1\n\n assert small_cont[\"bar\"].values.dtype == _SEG_DTYPE\n np.testing.assert_array_equal(small_cont[\"bar\"].values, expected)\n\n @pytest.mark.parametrize(\"size\", [None, 11])\n def test_watershed_works(self, size: Optional[int]):\n img_orig = np.zeros((100, 200, 30), dtype=np.float64)\n img_orig[2:10, 2:10] = 1.0\n img_orig[30:34, 10:16] = 1.0\n\n cont = ImageContainer(img_orig, layer=\"image_0\")\n segment(\n img=cont,\n method=\"watershed\",\n layer=\"image_0\",\n layer_added=\"segment\",\n size=size,\n channel=0,\n thresh=0.5,\n )\n # check that blobs are in segments\n assert np.mean(cont.data[\"segment\"].values[img_orig[:, :, 0] > 0] > 0) > 0.5\n\n # for size=10, \"fails with `size=10` due to border effects\"\n # the reason why there is no test for it that inside tox, it \"works\" (i.e. the assertion passes)\n # but outside, the assertion fails, as it should\n\n @pytest.mark.parametrize(\"library_id\", [None, \"3\", [\"1\", \"2\"]])\n def test_library_id(self, cont_4d: ImageContainer, library_id: Optional[Union[str, Sequence[str]]]):\n def func(arr: np.ndarray):\n assert arr.shape == cont_4d.shape + (1,)\n return np.ones(arr[..., 0].shape, dtype=_SEG_DTYPE)\n\n segment(cont_4d, method=func, layer=\"image\", layer_added=\"image_seg\", library_id=library_id, copy=False)\n\n np.testing.assert_array_equal(cont_4d[\"image\"].coords, cont_4d[\"image_seg\"].coords)\n if library_id is None:\n np.testing.assert_array_equal(1, cont_4d[\"image_seg\"])\n else:\n if isinstance(library_id, str):\n library_id = [library_id]\n for lid in library_id:\n np.testing.assert_array_equal(1, cont_4d[\"image_seg\"].sel(z=lid))\n for lid in set(cont_4d.library_ids) - set(library_id):\n # channels have been changed, apply sets to 0\n np.testing.assert_array_equal(0, cont_4d[\"image_seg\"].sel(z=lid))\n"
] | [
[
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.mean",
"numpy.zeros_like",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
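The long comment in test_blocking above explains how per-chunk segmentation labels are made globally unique: blocks are numbered top-left to bottom-right, the lowest n bits hold the block id, and the chunk-local label is shifted above them (16 blocks need 4 bits, so local label 1 becomes the globally unique labels 16..31). A small NumPy sketch of that packing, with the shift width inferred from the comment:

import numpy as np

num_blocks = 16                               # 100x100 image in 25x25 chunks
bits = int(np.ceil(np.log2(num_blocks)))      # 4 bits for block ids 0..15
# chunk-local label 1 in every block -> global labels 16..31,
# matching start = 16 in the test's expected array
packed = [(1 << bits) | block_id for block_id in range(num_blocks)]
print(packed)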
schibsen/MLops_exercises_organized | [
"2c9b386fed7b1e400524905cb68f220caf9d015b"
] | [
"src/models/model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MyAwesomeModel(nn.Module):\n def __init__(self, n_classes):\n super(MyAwesomeModel, self).__init__()\n\n self.feature_extractor = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=6, kernel_size=4, stride=1),\n nn.Tanh(),\n nn.AvgPool2d(kernel_size=2),\n nn.Conv2d(in_channels=6, out_channels=16, kernel_size=4, stride=1),\n nn.Tanh(),\n nn.AvgPool2d(kernel_size=2),\n nn.Conv2d(in_channels=16, out_channels=120, kernel_size=4, stride=1),\n nn.Tanh(),\n )\n\n self.classifier = nn.Sequential(\n nn.Linear(in_features=120, out_features=84),\n nn.Tanh(),\n nn.Linear(in_features=84, out_features=n_classes),\n )\n\n def forward(self, x, return_features=False):\n x = self.feature_extractor(x)\n x = torch.flatten(x, 1)\n logits = self.classifier(x)\n probs = F.log_softmax(logits, dim=1)\n if return_features:\n return x\n else:\n return probs\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
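MyAwesomeModel above is a LeNet-5-style stack: three 4x4 convolutions interleaved with 2x2 average pools reduce a 28x28 single-channel input to one 120-channel pixel, which the two linear layers map to class log-probabilities. A quick shape check, assuming the class is importable from the file path listed in this record:

import torch
from src.models.model import MyAwesomeModel  # import path assumed from file_path above

model = MyAwesomeModel(n_classes=10)
x = torch.randn(8, 1, 28, 28)     # spatial sizes: 28 -> 25 -> 12 -> 9 -> 4 -> 1
log_probs = model(x)              # forward() applies log_softmax over the classes
print(log_probs.shape)            # torch.Size([8, 10])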
hugerepo-tianhang/low_dim_update_stable | [
"565f6cbf886d266d0633bc112ccae28f1d116ee1",
"565f6cbf886d266d0633bc112ccae28f1d116ee1",
"565f6cbf886d266d0633bc112ccae28f1d116ee1"
] | [
"stable_baselines/cmaes/cma_redo.py",
"neuron_vis/load_pickles.py",
"new_neuron_analysis/util.py"
] | [
"from stable_baselines.ppo2.run_mujoco import eval_return\nimport cma\n\nimport numpy as np\nfrom stable_baselines.low_dim_analysis.eval_util import *\nfrom stable_baselines.low_dim_analysis.common import do_pca, plot_2d, \\\n dump_rows_write_csv, generate_run_dir, do_proj_on_first_n_IPCA, get_allinone_concat_df\nfrom sklearn.decomposition import IncrementalPCA\n\n\nfrom stable_baselines import logger\n\nimport pandas as pd\nfrom sklearn.decomposition import PCA\n\nfrom joblib import Parallel, delayed\nfrom matplotlib import pyplot as plt\nimport time\nimport os\nfrom stable_baselines.common.cmd_util import mujoco_arg_parser\nfrom stable_baselines.low_dim_analysis.common_parser import get_common_parser\nfrom numpy import linalg as LA\n\n\n\ndef plot_cma_returns(plot_dir_alg, name, mean_rets, min_rets, max_rets, show):\n\n X = np.arange(len(mean_rets))\n fig, ax = plt.subplots()\n plt.xlabel('num of eval')\n plt.ylabel('mean returns with min and max filled')\n\n ax.plot(X, mean_rets)\n ax.fill_between(X, min_rets, max_rets, alpha=0.5)\n file_path = f\"{plot_dir_alg}/{name}.pdf\"\n if os.path.isfile(file_path):\n os.remove(file_path)\n\n logger.log(f\"saving cma plot to {file_path}\")\n fig.savefig(file_path, dpi=300,\n bbox_inches='tight', format='pdf')\n if show: plt.show()\n\n\n\n\ndef do_cma(cma_args, first_n_pcs, orgin_param, save_dir, starting_coord, var):\n\n tic = time.time()\n\n #TODO better starting locations, record how many samples,\n\n logger.log(f\"CMAES STARTING :{starting_coord}\")\n es = cma.CMAEvolutionStrategy(starting_coord, var)\n total_num_of_evals = 0\n total_num_timesteps = 0\n\n\n mean_rets = []\n min_rets = []\n max_rets = []\n eval_returns = None\n\n optimization_path = []\n while total_num_timesteps < cma_args.cma_num_timesteps and not es.stop():\n solutions = es.ask()\n optimization_path.extend(solutions)\n thetas = [np.matmul(coord, first_n_pcs) + orgin_param for coord in solutions]\n logger.log(f\"current time steps num: {total_num_timesteps} total time steps: {cma_args.cma_num_timesteps}\")\n eval_returns = Parallel(n_jobs=cma_args.cores_to_use) \\\n (delayed(eval_return)(cma_args, save_dir, theta, cma_args.eval_num_timesteps, i) for\n (i, theta) in enumerate(thetas))\n\n\n mean_rets.append(np.mean(eval_returns))\n min_rets.append(np.min(eval_returns))\n max_rets.append(np.max(eval_returns))\n\n\n total_num_of_evals += len(eval_returns)\n total_num_timesteps += cma_args.eval_num_timesteps * len(eval_returns)\n\n logger.log(f\"current eval returns: {str(eval_returns)}\")\n logger.log(f\"total timesteps so far: {total_num_timesteps}\")\n negative_eval_returns = [-r for r in eval_returns]\n\n es.tell(solutions, negative_eval_returns)\n es.logger.add() # write data to disc to be plotted\n es.disp()\n\n toc = time.time()\n logger.log(f\"####################################CMA took {toc-tic} seconds\")\n\n es_logger = es.logger\n\n if not hasattr(es_logger, 'xmean'):\n es_logger.load()\n\n\n n_comp_used = first_n_pcs.shape[0]\n optimization_path_mean = np.vstack((starting_coord, es_logger.xmean[:,5:5+n_comp_used]))\n\n return mean_rets, min_rets, max_rets, np.array(optimization_path), np.array(optimization_path_mean)\n\n\ndef main():\n\n\n import sys\n logger.log(sys.argv)\n common_arg_parser = get_common_parser()\n cma_args, cma_unknown_args = common_arg_parser.parse_known_args()\n\n origin = \"mean_param\"\n\n\n this_run_dir = get_dir_path_for_this_run(cma_args)\n\n traj_params_dir_name = get_full_params_dir(this_run_dir)\n intermediate_data_dir = 
get_intermediate_data_dir(this_run_dir)\n save_dir = get_save_dir( this_run_dir)\n\n\n if not os.path.exists(intermediate_data_dir):\n os.makedirs(intermediate_data_dir)\n\n cma_run_num, cma_intermediate_data_dir = generate_run_dir(get_cma_returns_dirname, intermediate_dir=intermediate_data_dir, n_comp=cma_args.n_comp_to_use)\n '''\n ==========================================================================================\n get the pc vectors\n ==========================================================================================\n '''\n\n logger.log(\"grab final params\")\n final_file = get_full_param_traj_file_path(traj_params_dir_name, \"final\")\n final_param = pd.read_csv(final_file, header=None).values[0]\n\n\n final_pca = IncrementalPCA(n_components=2) # for sparse PCA to speed up\n\n theta_file = get_full_param_traj_file_path(traj_params_dir_name, 0)\n concat_df = pd.read_csv(theta_file, header=None, chunksize=10000)\n\n\n tic = time.time()\n for chunk in concat_df:\n logger.log(f\"currnet at : {concat_df._currow}\")\n\n if chunk.shape[0] < 2:\n logger.log(f\"last column too few: {chunk.shape[0]}\")\n continue\n final_pca.partial_fit(chunk.values)\n\n toc = time.time()\n logger.log('\\nElapsed time computing the chunked PCA {:.2f} s\\n'\n .format(toc - tic))\n\n logger.log(final_pca.explained_variance_ratio_)\n\n pcs_components = final_pca.components_\n\n first_2_pcs = pcs_components[:2]\n mean_param = final_pca.mean_\n\n origin_param = mean_param\n\n\n theta_file = get_full_param_traj_file_path(traj_params_dir_name, 0)\n concat_df = pd.read_csv(theta_file, header=None, chunksize=10000)\n\n proj_coords = do_proj_on_first_n_IPCA(concat_df, first_2_pcs, origin_param)\n\n\n '''\n ==========================================================================================\n eval all xy coords\n ==========================================================================================\n '''\n\n\n from stable_baselines.low_dim_analysis.common import plot_contour_trajectory, gen_subspace_coords,do_eval_returns, \\\n get_allinone_concat_df, do_proj_on_first_n\n\n from stable_baselines.ppo2.run_mujoco import eval_return\n\n last_proj_coord = do_proj_on_first_n(final_param, first_2_pcs, origin_param)\n starting_coord = last_proj_coord\n\n tic = time.time()\n\n #TODO better starting locations, record how many samples,\n\n logger.log(f\"CMAES STARTING :{starting_coord}\")\n es = cma.CMAEvolutionStrategy(starting_coord, 5)\n total_num_of_evals = 0\n total_num_timesteps = 0\n\n\n mean_rets = []\n min_rets = []\n max_rets = []\n eval_returns = None\n\n optimization_path = []\n while total_num_timesteps < cma_args.cma_num_timesteps and not es.stop():\n solutions = es.ask()\n optimization_path.extend(solutions)\n thetas = [np.matmul(coord, first_2_pcs) + origin_param for coord in solutions]\n logger.log(f\"current time steps num: {total_num_timesteps} total time steps: {cma_args.cma_num_timesteps}\")\n eval_returns = Parallel(n_jobs=cma_args.cores_to_use) \\\n (delayed(eval_return)(cma_args, save_dir, theta, cma_args.eval_num_timesteps, i) for\n (i, theta) in enumerate(thetas))\n\n\n mean_rets.append(np.mean(eval_returns))\n min_rets.append(np.min(eval_returns))\n max_rets.append(np.max(eval_returns))\n\n\n total_num_of_evals += len(eval_returns)\n total_num_timesteps += cma_args.eval_num_timesteps * len(eval_returns)\n\n logger.log(f\"current eval returns: {str(eval_returns)}\")\n logger.log(f\"total timesteps so far: {total_num_timesteps}\")\n negative_eval_returns = [-r for r in 
eval_returns]\n\n es.tell(solutions, negative_eval_returns)\n es.logger.add() # write data to disc to be plotted\n es.disp()\n\n toc = time.time()\n logger.log(f\"####################################CMA took {toc-tic} seconds\")\n\n es_logger = es.logger\n\n if not hasattr(es_logger, 'xmean'):\n es_logger.load()\n\n\n n_comp_used = first_2_pcs.shape[0]\n optimization_path_mean = np.vstack((starting_coord, es_logger.xmean[:,5:5+n_comp_used]))\n\n\n dump_rows_write_csv(cma_intermediate_data_dir, optimization_path_mean, \"opt_mean_path\")\n\n\n\n plot_dir = get_plot_dir(cma_args)\n cma_plot_dir = get_cma_plot_dir(plot_dir, cma_args.n_comp_to_use, cma_run_num, origin=origin)\n if not os.path.exists(cma_plot_dir):\n os.makedirs(cma_plot_dir)\n\n ret_plot_name = f\"cma return on {cma_args.n_comp_to_use} dim space of real pca plane, \" \\\n f\"explained {np.sum(final_pca.explained_variance_ratio_[:2])}\"\n plot_cma_returns(cma_plot_dir, ret_plot_name, mean_rets, min_rets, max_rets, show=False)\n\n\n\n\n assert proj_coords.shape[1] == 2\n\n xcoordinates_to_eval, ycoordinates_to_eval = gen_subspace_coords(cma_args, np.vstack((proj_coords, optimization_path_mean)).T)\n\n from stable_baselines.ppo2.run_mujoco import eval_return\n thetas_to_eval = [origin_param + x * first_2_pcs[0] + y * first_2_pcs[1] for y in ycoordinates_to_eval for x in\n xcoordinates_to_eval]\n\n tic = time.time()\n\n eval_returns = Parallel(n_jobs=-1, max_nbytes='100M') \\\n (delayed(eval_return)(cma_args, save_dir, theta, cma_args.eval_num_timesteps, i) for (i, theta) in\n enumerate(thetas_to_eval))\n toc = time.time()\n logger.log(f\"####################################1st version took {toc-tic} seconds\")\n\n plot_contour_trajectory(cma_plot_dir, f\"cma redo___{origin}_origin_eval_return_contour_plot\", xcoordinates_to_eval,\n ycoordinates_to_eval, eval_returns, proj_coords[:, 0], proj_coords[:, 1],\n final_pca.explained_variance_ratio_,\n num_levels=25, show=False, sub_alg_path=optimization_path_mean.T)\n\n\n\n opt_mean_path_in_old_basis = [mean_projected_param.dot(first_2_pcs) + mean_param for mean_projected_param in optimization_path_mean]\n distance_to_final = [LA.norm(opt_mean - final_param, ord=2) for opt_mean in opt_mean_path_in_old_basis]\n distance_to_final_plot_name = f\"cma redo distance_to_final over generations \"\n plot_2d(cma_plot_dir, distance_to_final_plot_name, np.arange(len(distance_to_final)), distance_to_final, \"num generation\", \"distance_to_final\", False)\n\n # plot_3d_trajectory(cma_plot_dir, \"end_point_origin_eval_return_3d_plot\", xcoordinates_to_eval, ycoordinates_to_eval,\n # eval_returns, proj_xcoord, proj_ycoord,\n # result[\"explained_variance_ratio\"][:2],\n # num_levels=15, show=False)\n\n\n\nif __name__ == '__main__':\n\n main()\n\n#TODO Give filenames more info to identify which hyperparameter is the data for\n\n",
"import numpy as np\n\n\nlin_reg_1_M = np.load(f\"lin_reg_1_M.txt\")\nlin_reg_2_M = np.load(f\"lin_reg_2_M.txt\")\n\nbest_lin_reg_1_M = np.load(f\"best_lin_reg_1_M.txt\")\nbest_lin_reg_2_M = np.load(f\"best_lin_reg_2_M.txt\")\nlin_reg_1_COM = np.load(f\"lin_reg_1_COM.txt\")\nlin_reg_2_COM = np.load(f\"lin_reg_2_COM.txt\")\n\nbest_lin_reg_1_COM = np.load(f\"best_lin_reg_1_COM.txt\")\nbest_lin_reg_2_COM = np.load(f\"best_lin_reg_2_COM.txt\")\npass",
"import numpy as np\ndef comp_dict(a,b):\n if a is None and b is None:\n return True\n if a.keys() != b.keys():\n return False\n keys = a.keys()\n\n result = []\n for key in keys:\n re = (np.array(a[key]) == np.array(b[key])).all()\n\n result.append(re)\n return all(result)\n"
] | [
[
"pandas.read_csv",
"sklearn.decomposition.IncrementalPCA",
"numpy.min",
"numpy.matmul",
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"numpy.max",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.vstack",
"matplotlib.pyplot.ylabel"
],
[
"numpy.load"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
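do_cma above runs CMA-ES in the low-dimensional PCA coordinates and feeds back negated returns because CMA-ES minimises. A minimal ask/tell loop with the same shape, assuming only the cma package and substituting a toy sphere objective for the policy rollouts:

import cma
import numpy as np

es = cma.CMAEvolutionStrategy([0.5, -0.3], 0.5)  # starting coord, initial step size
for _ in range(50):                              # iteration cap instead of a timestep budget
    if es.stop():
        break
    solutions = es.ask()                         # candidate low-dimensional coords
    costs = [float(np.sum(np.square(s))) for s in solutions]
    es.tell(solutions, costs)                    # CMA-ES minimises, hence costs not rewards
print(es.result.xbest)                           # best coordinate found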
larsbarring/icclim | [
"f3685c77a1a3aaff58b0d05609380c9387e9aa99"
] | [
"icclim/user_indices/stat.py"
] | [
"from typing import Sequence\n\nimport numpy as np\nimport xarray\nfrom xarray import DataArray\nfrom xclim.indices.run_length import rle_1d\n\n\ndef get_longest_run_start_index(\n arr: DataArray,\n window: int = 1,\n dim: str = \"time\",\n) -> DataArray:\n return xarray.apply_ufunc(\n get_index_of_longest_run,\n arr,\n input_core_dims=[[dim]],\n kwargs={\"window\": window},\n vectorize=True,\n dask=\"parallelized\",\n output_dtypes=[float],\n )\n\n\ndef get_index_of_longest_run(arr: Sequence[bool], window: int = 1) -> int:\n values, rl, pos = rle_1d(arr)\n if not np.any(values) or np.all(values * rl < window): # type:ignore\n return 0\n index_of_max = np.nanargmax(\n np.where(values * rl >= window, rl, np.NaN) # type:ignore\n )\n return pos[index_of_max] # type:ignore\n\n\ndef get_first_occurrence_index(da: DataArray) -> DataArray:\n \"\"\"\n Return the index of the first True value in the 3D booleans array along\n time dimension.\n \"\"\"\n stacked = da.stack(latlon=(\"lat\", \"lon\"))\n res = stacked.argmax(\"time\")\n return res.unstack()\n"
] | [
[
"numpy.all",
"numpy.where",
"numpy.any"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
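get_index_of_longest_run above leans on xclim's rle_1d, which turns a boolean series into (run values, run lengths, run start positions). A pure-NumPy sketch of the same indexing, to make the logic concrete (this rle_1d is a stand-in, not xclim's implementation):

import numpy as np

def rle_1d(arr):
    # Run-length encode: the value of each run, its length, and its start index.
    arr = np.asarray(arr)
    change = np.flatnonzero(arr[1:] != arr[:-1]) + 1
    pos = np.concatenate(([0], change))
    lengths = np.diff(np.concatenate((pos, [arr.size])))
    return arr[pos], lengths, pos

def longest_run_start(arr, window=1):
    values, rl, pos = rle_1d(arr)
    if not np.any(values) or np.all(values * rl < window):
        return 0                     # no True run of at least `window` steps
    return int(pos[np.nanargmax(np.where(values * rl >= window, rl, np.nan))])

print(longest_run_start([False, True, True, False, True, True, True], window=3))  # 4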
Ikerlz/dcd | [
"056e5c4060f9d655ce4f6234b86481ae4b3f7106"
] | [
"DC_method/util.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nimport itertools\nimport findspark\nimport pyspark\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\nfrom pyspark.sql.types import *\nimport time\n\n\ndef simulate_sbm_dc_data(sbm_matrix, sample_size=1000, partition_num=10, cluster_num=3):\n \"\"\"\n :param sbm_matrix:\n :param sample_size:\n :param partition_num:\n :param cluster_num:\n :return:\n \"\"\"\n if (sbm_matrix.shape[0] != cluster_num) | \\\n (sbm_matrix.shape[1] != cluster_num) | \\\n (sbm_matrix.shape[0] != sbm_matrix.shape[1]):\n raise Exception(\"sbm_matrix shape Error or the Shape is not equal to Cluster_num\")\n else:\n data_index = [x for x in range(sample_size)]\n data_cluster = np.random.randint(0, cluster_num, sample_size).tolist()\n index_cluster = dict(zip(data_index, data_cluster))\n X = np.empty(shape=[0, 3], dtype=int)\n X = np.append(X, [[0, -1, np.random.randint(0, partition_num, 1)[0]]], axis=0)\n for i in range(1, sample_size):\n p_num = np.random.randint(0, partition_num, 1)[0]\n X = np.append(X, [[i, -1, p_num]], axis=0) # to avoid node lost\n for j in range(i):\n if np.random.binomial(1, sbm_matrix[index_cluster[i], index_cluster[j]], 1):\n X = np.append(X, [[i, j, p_num]], axis=0)\n data_pdf = pd.DataFrame(X, columns=[\"IndexNum1\"] + [\"IndexNum2\"] + [\"PartitionID\"])\n return data_pdf, index_cluster\n\n\ndef get_laplace_matrix(adjacency_matrix, position=\"master\", regularization=False):\n \"\"\"\n :param adjacency_matrix: 邻接矩阵(方阵或长矩阵)\n :param position: master或worker\n :param regularization: 是否进行正则化\n :return: 拉普拉斯矩阵\n \"\"\"\n if regularization:\n if position == \"master\":\n degree = np.sum(adjacency_matrix, axis=1)\n d = np.diag((degree + np.mean(degree)) ** (-0.5)) # 得到度矩阵\n return np.dot(np.dot(d, adjacency_matrix), d)\n\n elif position == \"worker\":\n\n # 2020.7.18 for test\n out_degree = np.sum(adjacency_matrix, axis=1)\n out_degree_matrix = np.diag((out_degree + np.mean(out_degree)) ** (-0.5))\n for i in range(out_degree_matrix.shape[0]):\n if out_degree_matrix[i, i] == np.infty:\n out_degree_matrix[i, i] = 1000\n in_degree = np.sum(adjacency_matrix, axis=0)\n in_degree_matrix = np.diag((in_degree + np.mean(in_degree)) ** (-0.5))\n ###\n laplace_matrix = np.dot(np.dot(out_degree_matrix, adjacency_matrix), in_degree_matrix)\n\n return laplace_matrix\n\n # D = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5))\n # F = np.diag(np.sum(adjacency_matrix, axis=0) ** (-0.5))\n # return np.dot(np.dot(D, adjacency_matrix), F) # 得到度矩阵\n\n else:\n raise Exception(\"Input Error: worker or master is expected but {} are given\".format(position))\n else:\n if position == \"master\":\n d = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5)) # 得到度矩阵\n return np.dot(np.dot(d, adjacency_matrix), d)\n\n elif position == \"worker\":\n out_degree_matrix = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5))\n for i in range(out_degree_matrix.shape[0]):\n if out_degree_matrix[i, i] == np.infty:\n out_degree_matrix[i, i] = 10000\n in_degree_matrix = np.diag(np.sum(adjacency_matrix, axis=0) ** (-0.5))\n laplace_matrix = np.dot(np.dot(out_degree_matrix, adjacency_matrix), in_degree_matrix)\n\n return laplace_matrix\n\n # D = np.diag(np.sum(adjacency_matrix, axis=1) ** (-0.5))\n # F = np.diag(np.sum(adjacency_matrix, axis=0) ** (-0.5))\n # return np.dot(np.dot(D, adjacency_matrix), F) # 得到度矩阵\n\n else:\n raise Exception(\"Input Error: worker or master is expected but {} are given\".format(position))\n\n\ndef 
get_spectral(laplace_matrix, k, normalization=False, method='svd'):\n \"\"\"\n :param laplace_matrix: the Laplacian matrix\n :param k: number of leading vectors to keep after the decomposition\n :param normalization: whether to row-normalize the spectral embedding\n :param method: use singular value decomposition ('svd') or eigenvalue decomposition ('evd')\n :return: the spectral embedding\n \"\"\"\n if method == 'svd':\n u, _, _ = np.linalg.svd(laplace_matrix)\n spectral = u[:, list(range(k))]\n if normalization:\n row_len = len(u) # number of rows\n for i in range(row_len):\n norm2 = np.linalg.norm(spectral[i])\n if norm2:\n spectral[i] = spectral[i] / np.linalg.norm(spectral[i])\n elif method == 'evd':\n e_vals, e_vecs = np.linalg.eig(laplace_matrix)\n sorted_indices = np.argsort(e_vals)\n spectral = e_vecs[:, sorted_indices[:-k-1:-1]]\n if normalization:\n row_len = len(e_vecs) # number of rows\n for i in range(row_len):\n norm2 = np.linalg.norm(spectral[i])\n if norm2:\n spectral[i] = spectral[i] / np.linalg.norm(spectral[i])\n else:\n raise ValueError(\"method must be 'svd' or 'evd' but {} is given\".format(method))\n\n return spectral\n\n\ndef worker_clustering(worker_df, cluster_num):\n \"\"\"\n :param worker_df: edge-list DataFrame held by one worker\n :param cluster_num: number of clusters\n :return: DataFrame with PartitionID, IndexNum and ClusterExp columns\n \"\"\"\n node_list = list(set(worker_df[\"IndexNum1\"].tolist()))\n node_num = len(node_list)\n index_list = [x for x in range(node_num)]\n node2index = dict(zip(node_list, index_list))\n adj_matrix = np.zeros((node_num, node_num), dtype=int)\n for i in range(node_num):\n adj_matrix[i][i] = 10\n for row in worker_df.itertuples(index=False, name='Pandas'):\n item1 = getattr(row, \"IndexNum1\")\n item2 = getattr(row, \"IndexNum2\")\n if (item2 in node_list) & (item2 != -1):\n adj_matrix[node2index[item1]][node2index[item2]] = 1\n adj_matrix[node2index[item2]][node2index[item1]] = 1\n\n # first, get the laplace matrix\n laplace_matrix = get_laplace_matrix(adj_matrix,\n position='master',\n regularization=False)\n\n # second, get the spectral\n spectral = get_spectral(laplace_matrix, cluster_num, normalization=False, method='svd')\n\n # third, do k-means in spectral\n model = KMeans(n_clusters=cluster_num)\n model_fit = model.fit(spectral) # run k-means on the spectral embedding\n # cluster_center = model_fit.cluster_centers_ # center points\n cluster_label = list(model_fit.labels_) # labels (cluster information)\n # return\n worker_num = worker_df[\"PartitionID\"].tolist()[0]\n out_df = pd.DataFrame({\"PartitionID\": [worker_num for _ in range(len(node_list))],\n \"IndexNum\": node_list,\n \"ClusterExp\": cluster_label})\n return out_df\n\n\ndef get_accurate(clustering_res_df, cluster_number, error=False):\n \"\"\"\n :param clustering_res_df: a pandas DataFrame with the clustering result\n (the first column is the index,\n the second column is the ground-truth cluster,\n the third column is the predicted cluster)\n :param cluster_number: the number of clusters\n :param error: if error=True, return the error rate; otherwise return the accuracy rate\n :return: the clustering accuracy\n \"\"\"\n if clustering_res_df.shape[1] != 3:\n raise Exception(\"Shape Error: the input DataFrame's column number is not 3\")\n real_dict = {}\n clustering_dict = {}\n for i in range(cluster_number):\n real_df = clustering_res_df.loc[clustering_res_df['ClusterInfo'] == i]\n clustering_df = clustering_res_df.loc[clustering_res_df['ClusterExp'] == i]\n real_dict[i] = real_df['IndexNum'].tolist()\n clustering_dict[i] = clustering_df['IndexNum'].tolist()\n\n accuracy_matrix = np.zeros((cluster_number, cluster_number))\n for i in range(cluster_number):\n for j in range(cluster_number):\n accuracy_matrix[i][j] = 
len(set(real_dict[i]).intersection(set(clustering_dict[j])))\n # for test\n # print(\"The accuracy matrix is: \\n\", accuracy_matrix)\n case_iterator = itertools.permutations(range(cluster_number), cluster_number)\n\n accurate = 0\n\n for item in case_iterator:\n acc = sum([accuracy_matrix[i][item[i]] for i in range(cluster_number)])\n if acc > accurate:\n accurate = acc\n if not error:\n return accurate / clustering_res_df.shape[0]\n else:\n return 1 - accurate / clustering_res_df.shape[0]\n\n\n\n\n# TODO some SBM matrix\n\n\nsbm_matrix1 = np.array([[0.7, 0.45, 0.45],\n [0.45, 0.7, 0.45],\n [0.45, 0.45, 0.7]])\nsbm_matrix2 = np.array([[0.8, 0.4, 0.4],\n [0.4, 0.8, 0.4],\n [0.4, 0.4, 0.8]])\nsbm_matrix3 = np.array([[0.6, 0.45, 0.45],\n [0.45, 0.6, 0.45],\n [0.45, 0.45, 0.6]])\nsbm_matrix4 = np.array([[0.2, 0.1, 0.1],\n [0.1, 0.2, 0.1],\n [0.1, 0.1, 0.2]])\n\n\n\nif __name__ == '__main__':\n # Model Settings\n sbm_matrix = sbm_matrix4\n sample_size = 1000\n master_num = 100\n worker_per_sub = 20\n partition_num = 50\n cluster_num = 3\n a, b = simulate_sbm_dc_data(sbm_matrix)\n c = worker_clustering(a, 3)\n real_label = []\n for row in c.itertuples(index=False, name='Pandas'):\n item = getattr(row, \"IndexNum\")\n real_label.append(b[item])\n c[\"ClusterInfo\"] = real_label\n print(get_accurate(c, 3))\n print(c)\n # print(a)\n"
] | [
[
"numpy.dot",
"numpy.linalg.svd",
"sklearn.cluster.KMeans",
"numpy.linalg.eig",
"numpy.linalg.norm",
"pandas.DataFrame",
"numpy.append",
"numpy.mean",
"numpy.random.binomial",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
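The DC_method/util.py record above is a complete pipeline: simulate stochastic block model (SBM) edges, build a Laplacian, embed via SVD, run k-means on the embedding, and score with permutation-matched accuracy. A minimal end-to-end sketch, assuming the record is saved locally as util.py (the module name and the column pruning before get_accurate are assumptions; get_accurate requires exactly three columns):

# Minimal usage sketch for the DC_method/util.py record above (module name assumed).
import numpy as np
import util

sbm = np.array([[0.7, 0.45, 0.45],
                [0.45, 0.7, 0.45],
                [0.45, 0.45, 0.7]])  # within-block probability > between-block probability

edges, true_labels = util.simulate_sbm_dc_data(sbm, sample_size=300,
                                               partition_num=5, cluster_num=3)
clustered = util.worker_clustering(edges, cluster_num=3)
clustered["ClusterInfo"] = [true_labels[i] for i in clustered["IndexNum"]]
# get_accurate checks for exactly three columns, so drop PartitionID first
clustered = clustered[["IndexNum", "ClusterInfo", "ClusterExp"]]
print(util.get_accurate(clustered, 3))  # fraction of correctly clustered nodes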
denix56/fcdd | [
"d110aa8b141dc13f47156da913a6b4f9d64ddc74"
] | [
"python/fcdd/datasets/outlier_exposure/emnist.py"
] | [
"import os.path as pt\n\nimport numpy as np\nimport torchvision.transforms as transforms\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import EMNIST\n\n\ndef ceil(x: float):\n return int(np.ceil(x))\n\n\nclass MyEMNIST(EMNIST):\n \"\"\" Reimplements get_item to transform tensor input to pil image before applying transformation. \"\"\"\n def __getitem__(self, index):\n img, target = self.data[index], self.targets[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = transforms.ToPILImage()(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n\nclass OEEMNIST(EMNIST):\n def __init__(self, size: torch.Size, root: str = None, split='letters', limit_var=20): # split = Train\n \"\"\"\n Outlier Exposure dataset for EMNIST.\n :param size: size of the samples in n x c x h x w, samples will be resized to h x w. If n is larger than the\n number of samples available in EMNIST, dataset will be enlarged by repetitions to fit n.\n This is important as exactly n images are extracted per iteration of the data_loader.\n For online supervision n should be set to 1 because only one sample is extracted at a time.\n :param root: root directory where data is found or is to be downloaded to.\n :param split: The dataset has 6 different splits: ``byclass``, ``bymerge``,\n ``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies\n which one to use.\n :param limit_var: limits the number of different samples, i.e. randomly chooses limit_var many samples\n from all available ones to be the training data.\n \"\"\"\n assert len(size) == 3 and size[1] == size[2]\n root = pt.join(root, 'emnist', )\n transform = transforms.Compose([\n transforms.Resize((size[1], size[2])),\n transforms.ToTensor()\n ])\n super().__init__(root, split, transform=transform, download=True)\n self.size = size\n self.data = self.data.transpose(1, 2)\n self.idx_to_class = {v: k for k, v in self.class_to_idx.items()}\n if limit_var is not None and limit_var < len(self):\n picks = np.random.choice(np.arange(self.data.size(0)), size=limit_var, replace=False)\n self.data = self.data[picks]\n self.targets = self.targets[picks]\n if limit_var is not None and limit_var > len(self):\n print(\n 'OEEMNIST shall be limited to {} samples, but Cifar100 contains only {} samples, thus using all.'\n .format(limit_var, len(self))\n )\n if len(self) < size[0]:\n rep = ceil(size[0] / len(self))\n old = len(self)\n self.data = self.data.repeat(rep, 1, 1)\n self.targets = self.targets.repeat(rep)\n if rep != size[0] / old:\n import warnings\n warnings.warn(\n 'OEEMNIST has been limited to {} samples. '\n 'Due to the requested size of {}, the dataset will be enlarged. ' \n 'But {} repetitions will make some samples appear more often than others in the dataset, '\n 'because the final size after repetitions is {}, which is cut to {}'\n .format(limit_var, size[0], rep, len(self), size[0])\n )\n\n def data_loader(self):\n return DataLoader(dataset=self, batch_size=self.size[0], shuffle=True, num_workers=0)\n\n def __getitem__(self, index):\n sample, target = super().__getitem__(index)\n sample = sample.squeeze().mul(255).byte()\n\n return sample\n\n"
] | [
[
"numpy.ceil",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
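A hedged usage sketch for the OEEMNIST record above: despite the docstring's "n x c x h x w", the assert requires a length-3 size of (n, h, w). The sketch assumes the record is saved as emnist.py and that torchvision can download EMNIST into the given root (both assumptions):

# Draw one outlier-exposure batch from the OEEMNIST record above.
import torch
from emnist import OEEMNIST  # assumed local module name

oe = OEEMNIST(size=torch.Size([128, 28, 28]), root='data', limit_var=20)
loader = oe.data_loader()   # batch_size equals size[0] by construction
batch = next(iter(loader))  # uint8 samples rescaled back to 0..255
print(batch.shape)          # expected: torch.Size([128, 28, 28])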
ava6969/rgb_stacking_extend | [
"a36f1e35aa796e77201321161056e174966e7707",
"a36f1e35aa796e77201321161056e174966e7707"
] | [
"rgb_stacking/contrib/common.py",
"rgb_stacking/utils/pose_estimator/models/detr.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom rgb_stacking.utils.utils import init\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass Sum(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n return torch.sum(x, self.dim)\n\n\nclass Mean(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n return torch.mean(x, self.dim)\n\n\ndef init_rec(rec):\n for name, param in rec.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0)\n elif 'weight' in name:\n nn.init.orthogonal_(param)\n return rec\n\n\ndef init_(m):\n return init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n\n\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nDETR model and criterion classes.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom rgb_stacking.utils.pose_estimator.util.misc import (NestedTensor, nested_tensor_from_tensor_list)\n\n\nclass DETR(nn.Module):\n \"\"\" This is the DETR module that performs object detection \"\"\"\n def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):\n \"\"\" Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n DETR can detect in a single image. For COCO, we recommend 100 queries.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n \"\"\"\n super().__init__()\n self.num_queries = num_queries\n self.transformer = transformer\n hidden_dim = transformer.d_model\n # self.class_embed = nn.Linear(hidden_dim, num_classes + 1)\n # self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n self.pose_embed = nn.Linear(hidden_dim, num_classes)\n self.query_embed = nn.Embedding(num_queries, hidden_dim)\n self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)\n self.backbone = backbone\n self.aux_loss = aux_loss\n\n def forward(self, samples: NestedTensor):\n \"\"\" The forward expects a NestedTensor, which consists of:\n - samples.tensor: batched images, of shape [batch_size x 3 x H x W]\n - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels\n\n It returns a dict with the following elements:\n - \"pred_logits\": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - \"pred_boxes\": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - \"aux_outputs\": Optional, only returned when auxilary losses are activated. 
It is a list of\n dictionaries containing the two above keys for each decoder layer.\n \"\"\"\n\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.backbone(samples)\n\n src, mask = features[-1].decompose()\n assert mask is not None\n hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]\n\n return self.pose_embed(hs)\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{'pred_logits': a, 'pred_boxes': b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]\n\nclass MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x"
] | [
[
"torch.mean",
"numpy.sqrt",
"torch.nn.init.constant_",
"torch.sum",
"torch.nn.init.orthogonal_"
],
[
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.Embedding"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
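The second file in the record above keeps DETR's small MLP head. Since it depends only on torch, it can be exercised standalone; the hidden sizes below are illustrative assumptions:

# Standalone copy of the MLP ("FFN") head from the detr.py record above.
import torch
import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    """ReLU MLP whose final layer stays linear, as in the record."""
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k)
                                    for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x

head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
print(head(torch.randn(2, 100, 256)).shape)  # torch.Size([2, 100, 4])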
SunsetWolf/qlib | [
"89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5"
] | [
"qlib/contrib/data/highfreq_processor.py"
] | [
"import os\n\nimport numpy as np\nimport pandas as pd\nfrom qlib.data.dataset.processor import Processor\nfrom qlib.data.dataset.utils import fetch_df_by_index\nfrom typing import Dict\n\n\nclass HighFreqTrans(Processor):\n def __init__(self, dtype: str = \"bool\"):\n self.dtype = dtype\n\n def fit(self, df_features):\n pass\n\n def __call__(self, df_features):\n if self.dtype == \"bool\":\n return df_features.astype(np.int8)\n else:\n return df_features.astype(np.float32)\n\n\nclass HighFreqNorm(Processor):\n def __init__(\n self,\n fit_start_time: pd.Timestamp,\n fit_end_time: pd.Timestamp,\n feature_save_dir: str,\n norm_groups: Dict[str, int],\n ):\n\n self.fit_start_time = fit_start_time\n self.fit_end_time = fit_end_time\n self.feature_save_dir = feature_save_dir\n self.norm_groups = norm_groups\n\n def fit(self, df_features) -> None:\n if os.path.exists(self.feature_save_dir) and len(os.listdir(self.feature_save_dir)) != 0:\n return\n os.makedirs(self.feature_save_dir)\n fetch_df = fetch_df_by_index(df_features, slice(self.fit_start_time, self.fit_end_time), level=\"datetime\")\n del df_features\n index = 0\n names = {}\n for name, dim in self.norm_groups.items():\n names[name] = slice(index, index + dim)\n index += dim\n for name, name_val in names.items():\n df_values = fetch_df.iloc(axis=1)[name_val].values\n if name.endswith(\"volume\"):\n df_values = np.log1p(df_values)\n self.feature_mean = np.nanmean(df_values)\n np.save(self.feature_save_dir + name + \"_mean.npy\", self.feature_mean)\n df_values = df_values - self.feature_mean\n self.feature_std = np.nanstd(np.absolute(df_values))\n np.save(self.feature_save_dir + name + \"_std.npy\", self.feature_std)\n df_values = df_values / self.feature_std\n np.save(self.feature_save_dir + name + \"_vmax.npy\", np.nanmax(df_values))\n np.save(self.feature_save_dir + name + \"_vmin.npy\", np.nanmin(df_values))\n return\n\n def __call__(self, df_features):\n if \"date\" in df_features:\n df_features.droplevel(\"date\", inplace=True)\n df_values = df_features.values\n index = 0\n names = {}\n for name, dim in self.norm_groups.items():\n names[name] = slice(index, index + dim)\n index += dim\n for name, name_val in names.items():\n feature_mean = np.load(self.feature_save_dir + name + \"_mean.npy\")\n feature_std = np.load(self.feature_save_dir + name + \"_std.npy\")\n\n if name.endswith(\"volume\"):\n df_values[:, name_val] = np.log1p(df_values[:, name_val])\n df_values[:, name_val] -= feature_mean\n df_values[:, name_val] /= feature_std\n df_features = pd.DataFrame(data=df_values, index=df_features.index, columns=df_features.columns)\n return df_features.fillna(0)\n"
] | [
[
"numpy.nanmax",
"numpy.absolute",
"numpy.nanmin",
"numpy.save",
"pandas.DataFrame",
"numpy.nanmean",
"numpy.load",
"numpy.log1p"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
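The fit/__call__ pair in HighFreqNorm above reduces to one rule per feature group: log1p volume-like columns, center by the nan-mean, scale by the nan-std of absolute deviations. A toy numpy illustration of that rule (the sample values are made up):

# The normalization rule HighFreqNorm saves to *_mean.npy / *_std.npy, on toy data.
import numpy as np

values = np.array([100.0, 250.0, 0.0, np.nan, 900.0])  # e.g. a "volume" column
values = np.log1p(values)                              # volume groups are log1p'ed
mean = np.nanmean(values)
std = np.nanstd(np.absolute(values - mean))            # matches the record's std definition
normalized = (values - mean) / std
print(mean, std, normalized)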
hgKwak/SeriesSleepNet- | [
"1e90c3a0ed6244c2b876979194d7cd94056f5c8a"
] | [
"network/cnn.py"
] | [
"import torch\nimport torch.nn as nn\n\nuse_cuda = torch.cuda.is_available()\nclass CNNClassifier(nn.Module):\n def __init__(self, channel, SHHS=False):\n super(CNNClassifier, self).__init__()\n conv1 = nn.Conv2d(1, 10, (1, 200))\n pool1 = nn.MaxPool2d((1, 2))\n if channel == 1:\n conv2 = nn.Conv2d(10, 20, (1, 32))\n conv3 = nn.Conv2d(20, 30, (1, 128))\n conv4 = nn.Conv2d(30, 40, (1, 512))\n freq = 1\n else:\n conv2 = nn.Conv2d(10, 20, (2, 32))\n conv3 = nn.Conv2d(20, 30, (2, 128))\n conv4 = nn.Conv2d(30, 40, (2, 512))\n freq=channel-3\n pool2 = nn.MaxPool2d((1, 2))\n self.conv_module = nn.Sequential(conv1, nn.ReLU(), pool1, conv2, nn.ReLU(), conv3, nn.ReLU(), conv4, nn.ReLU(), pool2)\n\n if SHHS:\n fc1 = nn.Linear(freq * 40 * 553, 100)\n else:\n fc1 = nn.Linear(freq*40*365, 100)\n fc2 = nn.Linear(100, 5)\n\n self.fc_module = nn.Sequential(fc1, nn.ReLU(), fc2)\n\n if use_cuda:\n self.conv_module = self.conv_module.cuda()\n self.fc_module = self.fc_module.cuda()\n\n def forward(self, x, isfc):\n out = self.conv_module(x)\n dim = 1\n for d in out.size()[1:]:\n dim *= d\n if isfc:\n out = out.view(-1, dim)\n out = self.fc_module(out)\n else:\n out = out.permute(0, 3, 2, 1).reshape([-1, 200, 73])\n return out"
] | [
[
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.cuda.is_available",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
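A quick shape check for the CNNClassifier record above. Working backwards from fc1 = Linear(freq*40*365, 100), the single-channel input length must be 3000 samples (e.g. a 30 s epoch at 100 Hz); that length, and saving the record as cnn.py, are assumptions:

# Forward a dummy single-channel epoch through the CNNClassifier record above.
import torch
from cnn import CNNClassifier  # assumed local module name

model = CNNClassifier(channel=1)
x = torch.randn(4, 1, 1, 3000)    # (batch, conv input channels, EEG channels, time)
if torch.cuda.is_available():     # the model moves itself to CUDA when available
    x = x.cuda()
print(model(x, isfc=True).shape)  # torch.Size([4, 5]) -> five sleep stages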
WildflowerSchools/wf-cv-utils | [
"647a2a46e3d6e6e14a1f813d17064cb33a3ced92"
] | [
"cv_utils/core.py"
] | [
"import cv_datetime_utils\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\nimport json\nimport os\n\ndef compose_transformations(\n rotation_vector_1,\n translation_vector_1,\n rotation_vector_2,\n translation_vector_2):\n rotation_vector_1 = np.asarray(rotation_vector_1).reshape(3)\n translation_vector_1 = np.asarray(translation_vector_1).reshape(3)\n rotation_vector_2 = np.asarray(rotation_vector_2).reshape(3)\n translation_vector_2 = np.asarray(translation_vector_2).reshape(3)\n rotation_vector_composed, translation_vector_composed = cv.composeRT(\n rotation_vector_1,\n translation_vector_1,\n rotation_vector_2,\n translation_vector_2)[:2]\n rotation_vector_composed = np.squeeze(rotation_vector_composed)\n translation_vector_composed = np.squeeze(translation_vector_composed)\n return rotation_vector_composed, translation_vector_composed\n\n\ndef invert_transformation(\n rotation_vector,\n translation_vector):\n rotation_vector = np.asarray(rotation_vector).reshape(3)\n translation_vector = np.asarray(translation_vector).reshape(3)\n new_rotation_vector, new_translation_vector = compose_transformations(\n np.array([0.0, 0.0, 0.0]),\n -translation_vector,\n -rotation_vector,\n np.array([0.0, 0.0, 0.0]))\n new_rotation_vector = np.squeeze(new_rotation_vector)\n new_translation_vector = np.squeeze(new_translation_vector)\n return new_rotation_vector, new_translation_vector\n\ndef quaternion_vector_to_rotation_vector(quaternion_vector):\n quaternion_vector = np.asarray(quaternion_vector).reshape(4)\n spatial_vector = quaternion_vector[1:]\n qw = quaternion_vector[0]\n spatial_vector_length = np.linalg.norm(spatial_vector)\n unit_vector = spatial_vector/spatial_vector_length\n theta = 2*np.arctan2(spatial_vector_length, qw)\n rotation_vector = theta*unit_vector\n return rotation_vector\n\ndef quaternion_vector_to_rotation_matrix(quaternion_vector):\n quaternion_tuple = tuple(np.asarray(quaternion_vector).reshape(4))\n qw, qx, qy, qz = quaternion_tuple\n R = np.array([\n [qw**2 + qx**2 - qy**2 - qz**2, 2*(qx*qy - qw*qz), 2*(qw*qy + qx*qz)],\n [2*(qx*qy + qw*qz), qw**2 - qx**2 + qy**2 - qz**2, 2*(qy*qz - qw*qx)],\n [2*(qx*qz - qw*qy), 2*(qw*qx + qy*qz), qw**2 - qx**2 - qy**2 + qz**2]\n ])\n return R\n\ndef rotation_vector_to_rotation_matrix(rotation_vector):\n rotation_vector = np.asarray(rotation_vector).reshape(3)\n rotation_matrix = cv.Rodrigues(rotation_vector)[0]\n return rotation_matrix\n\ndef transform_object_points(\n object_points,\n rotation_vector=np.array([0.0, 0.0, 0.0]),\n translation_vector=np.array([0.0, 0.0, 0.0])):\n object_points = np.asarray(object_points)\n rotation_vector = np.asarray(rotation_vector)\n translation_vector = np.asarray(translation_vector)\n if object_points.size == 0:\n return object_points\n object_points = object_points.reshape((-1, 3))\n rotation_vector = rotation_vector.reshape(3)\n translation_vector = translation_vector.reshape(3)\n transformed_points = np.add(\n np.matmul(\n cv.Rodrigues(rotation_vector)[0],\n object_points.T).T,\n translation_vector.reshape((1, 3)))\n transformed_points = np.squeeze(transformed_points)\n return transformed_points\n\n\ndef generate_camera_pose(\n camera_position=np.array([0.0, 0.0, 0.0]),\n yaw=0.0,\n pitch=0.0,\n roll=0.0):\n # yaw: 0.0 points north (along the positive y-axis), positive angles rotate counter-clockwise\n # pitch: 0.0 is level with the ground, positive angles rotate upward\n # roll: 0.0 is level with the ground, positive angles rotate clockwise\n # All 
angles in radians\n camera_position = np.asarray(camera_position).reshape(3)\n # First: Move the camera to the specified position\n rotation_vector_1 = np.array([0.0, 0.0, 0.0])\n translation_vector_1 = -camera_position\n # Second: Rotate the camera so when we lower to the specified inclination, it will point in the specified compass direction\n rotation_vector_2 = np.array([0.0, 0.0, -(yaw - np.pi / 2)])\n translation_vector_2 = np.array([0.0, 0.0, 0.0])\n # Third: Lower to the specified inclination\n rotation_vector_2_3 = np.array([(np.pi / 2 - pitch), 0.0, 0.0])\n translation_vector_2_3 = np.array([0.0, 0.0, 0.0])\n # Fourth: Roll the camera by the specified angle\n rotation_vector_2_3_4 = np.array([0.0, 0.0, -roll])\n translation_vector_2_3_4 = np.array([0.0, 0.0, 0.0])\n # Combine these four moves\n rotation_vector_1_2, translation_vector_1_2 = compose_transformations(\n rotation_vector_1,\n translation_vector_1,\n rotation_vector_2,\n translation_vector_2)\n rotation_vector_1_2_3, translation_vector_1_2_3 = compose_transformations(\n rotation_vector_1_2,\n translation_vector_1_2,\n rotation_vector_2_3,\n translation_vector_2_3)\n rotation_vector, translation_vector = compose_transformations(\n rotation_vector_1_2_3,\n translation_vector_1_2_3,\n rotation_vector_2_3_4,\n translation_vector_2_3_4)\n rotation_vector = np.squeeze(rotation_vector)\n translation_vector = np.squeeze(translation_vector)\n return rotation_vector, translation_vector\n\n\ndef extract_camera_position(\n rotation_vector,\n translation_vector):\n rotation_vector = np.asarray(rotation_vector).reshape(3)\n translation_vector = np.asarray(translation_vector).reshape(3)\n new_rotation_vector, new_translation_vector = compose_transformations(\n rotation_vector,\n translation_vector,\n -rotation_vector,\n np.array([0.0, 0.0, 0.0]))\n camera_position = -np.squeeze(new_translation_vector)\n return camera_position\n\ndef extract_camera_position_rotation_matrix(rotation_matrix, translation_vector):\n rotation_matrix = np.asarray(rotation_matrix).reshape((3,3))\n translation_vector = np.asarray(translation_vector).reshape(3)\n position = np.matmul(rotation_matrix.T, -translation_vector.T)\n return position\n\ndef extract_camera_direction(\n rotation_vector,\n translation_vector):\n rotation_vector = np.asarray(rotation_vector).reshape(3)\n translation_vector = np.asarray(translation_vector).reshape(3)\n camera_direction = np.matmul(\n cv.Rodrigues(-rotation_vector)[0],\n np.array([[0.0], [0.0], [1.0]]))\n camera_direction = np.squeeze(camera_direction)\n return camera_direction\n\n\ndef reconstruct_z_rotation(x, y):\n if x >= 0.0 and y >= 0.0:\n return np.arctan(y / x)\n if x >= 0.0 and y < 0.0:\n return np.arctan(y / x) + 2 * np.pi\n return np.arctan(y / x) + np.pi\n\n\n# Currently unused; needs to be fixed up for cases in which x and/or y are close\n# to zero\ndef extract_yaw_from_camera_direction(\n camera_direction):\n camera_direction = np.asarray(camera_direction).reshape(3)\n yaw = reconstruct_z_rotation(\n camera_direction[0],\n camera_direction[1])\n return yaw\n\n\ndef generate_camera_matrix(\n focal_length,\n principal_point):\n focal_length = np.asarray(focal_length).reshape(2)\n principal_point = np.asarray(principal_point).reshape(2)\n camera_matrix = np.array([\n [focal_length[0], 0, principal_point[0]],\n [0, focal_length[1], principal_point[1]],\n [0, 0, 1.0]])\n return camera_matrix\n\n\ndef generate_projection_matrix(\n camera_matrix,\n rotation_vector,\n translation_vector):\n camera_matrix = 
np.asarray(camera_matrix).reshape((3, 3))\n rotation_vector = np.asarray(rotation_vector).reshape(3)\n translation_vector = np.asarray(translation_vector).reshape(3)\n projection_matrix = np.matmul(\n camera_matrix,\n np.concatenate((\n cv.Rodrigues(rotation_vector)[0],\n translation_vector.reshape((3, 1))),\n axis=1))\n return(projection_matrix)\n\ndef ground_grid_camera_view(\n image_width,\n image_height,\n rotation_vector,\n translation_vector,\n camera_matrix,\n distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]),\n fill_image=False,\n step=0.1\n):\n grid_corners = ground_rectangle_camera_view(\n image_width=image_width,\n image_height=image_height,\n rotation_vector=rotation_vector,\n translation_vector=translation_vector,\n camera_matrix=camera_matrix,\n distortion_coefficients=distortion_coefficients,\n fill_image=fill_image\n )\n grid_points = generate_ground_grid(\n grid_corners=grid_corners,\n step=step\n )\n return grid_points\n\ndef ground_rectangle_camera_view(\n image_width,\n image_height,\n rotation_vector,\n translation_vector,\n camera_matrix,\n distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]),\n fill_image=False\n):\n image_points = np.array([\n [0.0, 0.0],\n [image_width, 0.0],\n [image_width, image_height],\n [0.0, image_height]\n ])\n ground_points=np.empty((4, 3))\n for i in range(4):\n ground_points[i] = ground_point(\n image_point=image_points[i],\n rotation_vector=rotation_vector,\n translation_vector=translation_vector,\n camera_matrix=camera_matrix,\n distortion_coefficients=distortion_coefficients\n )\n x_values_sorted = np.sort(ground_points[:, 0])\n y_values_sorted = np.sort(ground_points[:, 1])\n if fill_image:\n x_min = x_values_sorted[0]\n x_max = x_values_sorted[3]\n y_min = y_values_sorted[0]\n y_max = y_values_sorted[3]\n else:\n x_min = x_values_sorted[1]\n x_max = x_values_sorted[2]\n y_min = y_values_sorted[1]\n y_max = y_values_sorted[2]\n return np.array([\n [x_min, y_min],\n [x_max, y_max]\n ])\n\ndef ground_point(\n image_point,\n rotation_vector,\n translation_vector,\n camera_matrix,\n distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0])\n):\n image_point = np.asarray(image_point)\n rotation_vector = np.asarray(rotation_vector)\n translation_vector = np.asarray(translation_vector)\n camera_matrix = np.asarray(camera_matrix)\n distortion_coefficients = np.asarray(distortion_coefficients)\n image_point = image_point.reshape((2))\n rotation_vector = rotation_vector.reshape(3)\n translation_vector = translation_vector.reshape(3)\n camera_matrix = camera_matrix.reshape((3, 3))\n image_point_undistorted = cv.undistortPoints(\n image_point,\n camera_matrix,\n distortion_coefficients,\n P=camera_matrix\n )\n image_point_undistorted = np.squeeze(image_point_undistorted)\n camera_position = np.matmul(\n cv.Rodrigues(-rotation_vector)[0],\n -translation_vector.T\n ).T\n camera_point_homogeneous = np.matmul(\n np.linalg.inv(camera_matrix),\n np.array([image_point_undistorted[0], image_point_undistorted[1], 1.0]).T\n ).T\n camera_direction = np.matmul(\n cv.Rodrigues(-rotation_vector)[0],\n camera_point_homogeneous.T\n ).T\n theta = -camera_position[2]/camera_direction[2]\n ground_point = camera_position + theta*camera_direction\n return ground_point\n\ndef generate_ground_grid(\n grid_corners,\n step=0.1\n):\n x_grid, y_grid = np.meshgrid(\n np.arange(grid_corners[0, 0], grid_corners[1, 0], step=step),\n np.arange(grid_corners[0, 1], grid_corners[1, 1], step=step)\n )\n grid = np.stack((x_grid, y_grid, np.full_like(x_grid, 0.0)), axis=-1)\n 
points = grid.reshape((-1, 3))\n return points\n\ndef project_points(\n object_points,\n rotation_vector,\n translation_vector,\n camera_matrix,\n distortion_coefficients,\n remove_behind_camera=False,\n remove_outside_frame=False,\n image_corners=None\n):\n object_points = np.asarray(object_points).reshape((-1, 3))\n rotation_vector = np.asarray(rotation_vector).reshape(3)\n translation_vector = np.asarray(translation_vector).reshape(3)\n camera_matrix = np.asarray(camera_matrix).reshape((3, 3))\n distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients))\n if object_points.size == 0:\n return np.zeros((0, 2))\n image_points = cv.projectPoints(\n object_points,\n rotation_vector,\n translation_vector,\n camera_matrix,\n distortion_coefficients\n )[0]\n if remove_behind_camera:\n behind_camera_boolean = behind_camera(\n object_points,\n rotation_vector,\n translation_vector\n )\n image_points[behind_camera_boolean] = np.array([np.nan, np.nan])\n if remove_outside_frame:\n outside_frame_boolean = outside_frame(\n object_points,\n rotation_vector,\n translation_vector,\n camera_matrix,\n distortion_coefficients,\n image_corners\n )\n image_points[outside_frame_boolean] = np.array([np.nan, np.nan])\n image_points = np.squeeze(image_points)\n return image_points\n\ndef behind_camera(\n object_points,\n rotation_vector,\n translation_vector):\n object_points = np.asarray(object_points)\n rotation_vector = np.asarray(rotation_vector)\n translation_vector = np.asarray(translation_vector)\n if object_points.size == 0:\n return np.zeros((0, 2))\n object_points = object_points.reshape((-1, 3))\n rotation_vector = rotation_vector.reshape(3)\n translation_vector = translation_vector.reshape(3)\n object_points_transformed = transform_object_points(\n object_points,\n rotation_vector,\n translation_vector\n )\n behind_camera_boolean = (object_points_transformed <= 0)[..., 2]\n return behind_camera_boolean\n\ndef outside_frame(\n object_points,\n rotation_vector,\n translation_vector,\n camera_matrix,\n distortion_coefficients,\n image_corners\n):\n object_points = np.asarray(object_points).reshape((-1, 3))\n rotation_vector = np.asarray(rotation_vector)\n translation_vector = np.asarray(translation_vector).reshape(3)\n camera_matrix = np.asarray(camera_matrix).reshape((3,3))\n distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients))\n image_corners = np.asarray(image_corners).reshape((2,2))\n if object_points.size == 0:\n return np.zeros((0, 2))\n image_points = cv.projectPoints(\n object_points,\n rotation_vector,\n translation_vector,\n camera_matrix,\n np.array([0.0, 0.0, 0.0, 0.0])\n )[0]\n image_points = image_points.reshape((-1, 2))\n outside_frame_boolean = (\n (image_points[:, 0] < image_corners[0, 0]) |\n (image_points[:, 0] > image_corners[1, 0]) |\n (image_points[:, 1] < image_corners[0, 1]) |\n (image_points[:, 1] > image_corners[1, 1])\n )\n return outside_frame_boolean\n\n\ndef undistort_points(\n image_points,\n camera_matrix,\n distortion_coefficients):\n image_points = np.asarray(image_points)\n camera_matrix = np.asarray(camera_matrix)\n distortion_coefficients = np.asarray(distortion_coefficients)\n if image_points.size == 0:\n return image_points\n image_points = image_points.reshape((-1, 1, 2))\n camera_matrix = camera_matrix.reshape((3, 3))\n undistorted_points = cv.undistortPoints(\n image_points,\n camera_matrix,\n distortion_coefficients,\n P=camera_matrix)\n undistorted_points = np.squeeze(undistorted_points)\n return undistorted_points\n\n\ndef 
estimate_camera_pose_from_image_points(\n image_points_1,\n image_points_2,\n camera_matrix,\n rotation_vector_1=np.array([0.0, 0.0, 0.0]),\n translation_vector_1=np.array([0.0, 0.0, 0.0]),\n distance_between_cameras=1.0):\n image_points_1 = np.asarray(image_points_1)\n image_points_2 = np.asarray(image_points_2)\n camera_matrix = np.asarray(camera_matrix)\n rotation_vector_1 = np.asarray(rotation_vector_1)\n translation_vector_1 = np.asarray(translation_vector_1)\n if image_points_1.size == 0 or image_points_2.size == 0:\n raise ValueError('One or both sets of image points appear to be empty')\n image_points_1 = image_points_1.reshape((-1, 2))\n image_points_2 = image_points_2.reshape((-1, 2))\n if image_points_1.shape != image_points_2.shape:\n raise ValueError('Sets of image points do not appear to be the same shape')\n camera_matrix = camera_matrix.reshape((3, 3))\n rotation_vector_1 = rotation_vector_1.reshape(3)\n translation_vector_1 = translation_vector_1.reshape(3)\n essential_matrix, mask = cv.findEssentialMat(\n image_points_1,\n image_points_2,\n camera_matrix)\n relative_rotation_matrix, relative_translation_vector = cv.recoverPose(\n essential_matrix,\n image_points_1,\n image_points_2,\n camera_matrix,\n mask=mask)[1:3]\n relative_rotation_vector = cv.Rodrigues(relative_rotation_matrix)[0]\n relative_translation_vector = relative_translation_vector * distance_between_cameras\n rotation_vector_2, translation_vector_2 = compose_transformations(\n rotation_vector_1,\n translation_vector_1,\n relative_rotation_vector,\n relative_translation_vector)\n rotation_vector_2 = np.squeeze(rotation_vector_2)\n translation_vector_2 = np.squeeze(translation_vector_2)\n return rotation_vector_2, translation_vector_2\n\n\ndef reconstruct_object_points_from_camera_poses(\n image_points_1,\n image_points_2,\n camera_matrix,\n rotation_vector_1,\n translation_vector_1,\n rotation_vector_2,\n translation_vector_2):\n image_points_1 = np.asarray(image_points_1)\n image_points_2 = np.asarray(image_points_2)\n camera_matrix = np.asarray(camera_matrix)\n rotation_vector_1 = np.asarray(rotation_vector_1)\n translation_vector_1 = np.asarray(translation_vector_1)\n rotation_vector_2 = np.asarray(rotation_vector_2)\n translation_vector_2 = np.asarray(translation_vector_2)\n if image_points_1.size == 0 or image_points_2.size == 0:\n return np.zeros((0, 3))\n image_points_1 = image_points_1.reshape((-1, 2))\n image_points_2 = image_points_2.reshape((-1, 2))\n if image_points_1.shape != image_points_2.shape:\n raise ValueError('Sets of image points do not appear to be the same shape')\n camera_matrix = camera_matrix.reshape((3, 3))\n rotation_vector_1 = rotation_vector_1.reshape(3)\n translation_vector_1 = translation_vector_1.reshape(3)\n rotation_vector_2 = rotation_vector_2.reshape(3)\n translation_vector_2 = translation_vector_2.reshape(3)\n projection_matrix_1 = generate_projection_matrix(\n camera_matrix,\n rotation_vector_1,\n translation_vector_1)\n projection_matrix_2 = generate_projection_matrix(\n camera_matrix,\n rotation_vector_2,\n translation_vector_2)\n object_points_homogeneous = cv.triangulatePoints(\n projection_matrix_1,\n projection_matrix_2,\n image_points_1.T,\n image_points_2.T)\n object_points = cv.convertPointsFromHomogeneous(\n object_points_homogeneous.T)\n object_points = np.squeeze(object_points)\n return object_points\n\n\ndef reconstruct_object_points_from_relative_camera_pose(\n image_points_1,\n image_points_2,\n camera_matrix,\n relative_rotation_vector,\n 
relative_translation_vector,\n rotation_vector_1=np.array([[0.0], [0.0], [0.0]]),\n translation_vector_1=np.array([[0.0], [0.0], [0.0]]),\n distance_between_cameras=1.0):\n image_points_1 = np.asarray(image_points_1)\n image_points_2 = np.asarray(image_points_2)\n camera_matrix = np.asarray(camera_matrix)\n relative_rotation_vector = np.asarray(relative_rotation_vector)\n relative_translation_vector = np.asarray(relative_translation_vector)\n rotation_vector_1 = np.asarray(rotation_vector_1)\n translation_vector_1 = np.asarray(translation_vector_1)\n if image_points_1.size == 0 or image_points_2.size == 0:\n return np.zeros((0, 3))\n image_points_1 = image_points_1.reshape((-1, 2))\n image_points_2 = image_points_2.reshape((-1, 2))\n if image_points_1.shape != image_points_2.shape:\n raise ValueError('Sets of image points do not appear to be the same shape')\n camera_matrix = camera_matrix.reshape((3, 3))\n relative_rotation_vector = relative_rotation_vector.reshape(3)\n relative_translation_vector = relative_translation_vector.reshape(3)\n rotation_vector_1 = rotation_vector_1.reshape(3)\n translation_vector_1 = translation_vector_1.reshape(3)\n rotation_vector_2, translation_vector_2 = cv.composeRT(\n rotation_vector_1,\n translation_vector_1,\n relative_rotation_vector,\n relative_translation_vector * distance_between_cameras)[:2]\n object_points = reconstruct_object_points_from_camera_poses(\n image_points_1,\n image_points_2,\n camera_matrix,\n rotation_vector_1,\n translation_vector_1,\n rotation_vector_2,\n translation_vector_2)\n return object_points\n\n\ndef reconstruct_object_points_from_image_points(\n image_points_1,\n image_points_2,\n camera_matrix,\n rotation_vector_1=np.array([[0.0], [0.0], [0.0]]),\n translation_vector_1=np.array([[0.0], [0.0], [0.0]]),\n distance_between_cameras=1.0):\n image_points_1 = np.asarray(image_points_1)\n image_points_2 = np.asarray(image_points_2)\n camera_matrix = np.asarray(camera_matrix)\n rotation_vector_1 = np.asarray(rotation_vector_1)\n translation_vector_1 = np.asarray(translation_vector_1)\n if image_points_1.size == 0 or image_points_2.size == 0:\n return np.zeros((0, 3))\n image_points_1 = image_points_1.reshape((-1, 2))\n image_points_2 = image_points_2.reshape((-1, 2))\n if image_points_1.shape != image_points_2.shape:\n raise ValueError('Sets of image points do not appear to be the same shape')\n camera_matrix = camera_matrix.reshape((3, 3))\n rotation_vector_1 = rotation_vector_1.reshape(3)\n translation_vector_1 = translation_vector_1.reshape(3)\n rotation_vector_2, translation_vector_2 = estimate_camera_pose_from_image_points(\n image_points_1,\n image_points_2,\n camera_matrix,\n rotation_vector_1,\n translation_vector_1,\n distance_between_cameras)\n object_points = reconstruct_object_points_from_camera_poses(\n image_points_1,\n image_points_2,\n camera_matrix,\n rotation_vector_1,\n translation_vector_1,\n rotation_vector_2,\n translation_vector_2)\n return object_points\n\n\ndef estimate_camera_pose_from_plane_object_points(\n input_object_points,\n height,\n origin_index,\n x_axis_index,\n y_reference_point,\n y_reference_point_sign,\n distance_calibration_indices,\n calibration_distance):\n input_object_points = np.asarray(input_object_points)\n if input_object_points.size == 0:\n raise ValueError('Object point array appears to be empty')\n input_object_points = input_object_points.reshape((-1, 3))\n\n scale_factor = np.divide(\n calibration_distance,\n np.linalg.norm(\n np.subtract(\n
input_object_points[distance_calibration_indices[0]],\n input_object_points[distance_calibration_indices[1]])))\n\n object_points_1 = np.multiply(\n input_object_points,\n scale_factor)\n\n def objective_function(parameters):\n rotation_x = parameters[0]\n rotation_y = parameters[1]\n translation_z = parameters[2]\n object_points_transformed = transform_object_points(\n object_points_1,\n np.array([rotation_x, rotation_y, 0.0]),\n np.array([0.0, 0.0, translation_z]))\n return np.sum(np.square(object_points_transformed[:, 2] - height))\n\n optimization_solution = scipy.optimize.minimize(\n objective_function,\n np.array([0.0, 0.0, 0.0]))\n\n rotation_x_a = optimization_solution['x'][0]\n rotation_y_a = optimization_solution['x'][1]\n translation_z_a = optimization_solution['x'][2]\n\n rotation_x_rotation_y_a_norm = np.linalg.norm([rotation_x_a, rotation_y_a])\n\n rotation_x_b = rotation_x_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm)\n rotation_y_b = rotation_y_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm)\n translation_z_b = - translation_z_a\n\n rotation_vector_2_a = np.array([rotation_x_a, rotation_y_a, 0.0])\n translation_vector_2_a = np.array([0.0, 0.0, translation_z_a])\n object_points_2_a = transform_object_points(\n object_points_1,\n rotation_vector_2_a,\n translation_vector_2_a)\n\n rotation_vector_2_b = np.array([rotation_x_b, rotation_y_b, 0.0])\n translation_vector_2_b = np.array([0.0, 0.0, translation_z_b])\n object_points_2_b = transform_object_points(\n object_points_1,\n rotation_vector_2_b,\n translation_vector_2_b)\n\n sign_a = np.sign(\n np.cross(\n np.subtract(\n object_points_2_a[x_axis_index],\n object_points_2_a[origin_index]),\n np.subtract(\n object_points_2_a[y_reference_point],\n object_points_2_a[origin_index]))[2])\n\n sign_b = np.sign(\n np.cross(\n np.subtract(\n object_points_2_b[x_axis_index],\n object_points_2_b[origin_index]),\n np.subtract(\n object_points_2_b[y_reference_point],\n object_points_2_b[origin_index]))[2])\n\n if sign_a == y_reference_point_sign:\n rotation_vector_2 = rotation_vector_2_a\n translation_vector_2 = translation_vector_2_a\n object_points_2 = object_points_2_a\n else:\n rotation_vector_2 = rotation_vector_2_b\n translation_vector_2 = translation_vector_2_b\n object_points_2 = object_points_2_b\n\n xy_shift = - object_points_2[origin_index, :2]\n\n rotation_vector_3 = np.array([0.0, 0.0, 0.0])\n translation_vector_3 = np.array([xy_shift[0], xy_shift[1], 0.0])\n object_points_3 = transform_object_points(\n object_points_2,\n rotation_vector_3,\n translation_vector_3)\n\n final_z_rotation = - reconstruct_z_rotation(\n object_points_3[x_axis_index, 0],\n object_points_3[x_axis_index, 1])\n\n rotation_vector_4 = np.array([0.0, 0.0, final_z_rotation])\n translation_vector_4 = np.array([0.0, 0.0, 0.0])\n object_points_4 = transform_object_points(\n object_points_3,\n rotation_vector_4,\n translation_vector_4)\n\n rotation_vector_2_3, translation_vector_2_3 = compose_transformations(\n rotation_vector_2,\n translation_vector_2,\n rotation_vector_3,\n translation_vector_3)\n\n rotation_vector_2_3_4, translation_vector_2_3_4 = compose_transformations(\n rotation_vector_2_3,\n translation_vector_2_3,\n rotation_vector_4,\n translation_vector_4)\n\n camera_rotation_vector, camera_translation_vector = invert_transformation(\n rotation_vector_2_3_4,\n translation_vector_2_3_4)\n\n return camera_rotation_vector, camera_translation_vector, scale_factor, object_points_4\n\n\ndef 
estimate_camera_poses_from_plane_image_points(\n image_points_1,\n image_points_2,\n camera_matrix,\n height,\n origin_index,\n x_axis_index,\n y_reference_point,\n y_reference_point_sign,\n distance_calibration_indices,\n calibration_distance):\n image_points_1 = np.asarray(image_points_1)\n image_points_2 = np.asarray(image_points_2)\n camera_matrix = np.asarray(camera_matrix)\n if image_points_1.size == 0 or image_points_2.size == 0:\n raise ValueError('One or both sets of image points appear to be empty')\n image_points_1 = image_points_1.reshape((-1, 2))\n image_points_2 = image_points_2.reshape((-1, 2))\n if image_points_1.shape != image_points_2.shape:\n raise ValueError('Sets of image points do not appear to be the same shape')\n camera_matrix = camera_matrix.reshape((3, 3))\n relative_rotation_vector, relative_translation_vector = estimate_camera_pose_from_image_points(\n image_points_1,\n image_points_2,\n camera_matrix)\n input_object_points = reconstruct_object_points_from_image_points(\n image_points_1,\n image_points_2,\n camera_matrix)\n rotation_vector_1, translation_vector_1, scale_factor = estimate_camera_pose_from_plane_object_points(\n input_object_points,\n height,\n origin_index,\n x_axis_index,\n y_reference_point,\n y_reference_point_sign,\n distance_calibration_indices,\n calibration_distance)[:3]\n rotation_vector_2, translation_vector_2 = compose_transformations(\n rotation_vector_1,\n translation_vector_1,\n relative_rotation_vector,\n relative_translation_vector * scale_factor)\n return rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2\n"
] | [
[
"numpy.square",
"numpy.multiply",
"numpy.arctan",
"numpy.asarray",
"numpy.arange",
"numpy.squeeze",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.sort",
"numpy.subtract",
"numpy.arctan2",
"numpy.full_like",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
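A short sketch tying together three functions from the cv_utils/core.py record above: build a pose with generate_camera_pose, intrinsics with generate_camera_matrix, and project a world point into the image. The numeric intrinsics and pose are illustrative assumptions, and the sketch assumes the package is importable as cv_utils:

# Project a world point with the pose/intrinsics helpers from the record above.
import numpy as np
from cv_utils.core import (generate_camera_pose, generate_camera_matrix,
                           project_points)

# Camera 2.5 m up, facing north (+y) and pitched 45 degrees downward.
rvec, tvec = generate_camera_pose(camera_position=[0.0, 0.0, 2.5],
                                  yaw=0.0, pitch=-np.pi / 4, roll=0.0)
camera_matrix = generate_camera_matrix(focal_length=[800.0, 800.0],
                                       principal_point=[640.0, 360.0])
image_point = project_points(object_points=[[0.0, 2.0, 0.0]],
                             rotation_vector=rvec,
                             translation_vector=tvec,
                             camera_matrix=camera_matrix,
                             distortion_coefficients=np.zeros(4))
print(image_point)  # pixel coordinates of the ground point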
teomores/kafka-twitter | [
"29d7c48fd1d225e33ec06be9bfed1826fa4d6b60"
] | [
"data_preprocessing/tweet_api.py"
] | [
"# Import the Twython class\nfrom twython import Twython\nimport json\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\n\ntry:\n os.remove('twitter_dataset.csv')\nexcept OSError:\n pass\n\ndef main():\n old_df = pd.read_csv('data/twitter_dataset_2.csv', lineterminator='\\n')\n #first load the dictonary with the top used english words\n with open('improved_dict.txt') as d:\n word_list = d.read()\n\n words = word_list.split('\\n')\n\n\n # Dictonary structure with the fields that we are interested in acquire from the tweets\n dict_ = {'user': [],\n 'text': [],\n 'hashtags': [],\n 'mentions': []\n }\n\n\n # Instantiate an object\n python_tweets = Twython('9Tz9FnZ1PR9AcEvudwC7hqOod', #API Key\n 'Z7upFmGJZE3oAfcb2ZUmRdEeBJJkkYTQ86PuB3iKgWqXFdMFNo') #API Secret\n\n\n #each query has a target word\n queries = []\n for w in words:\n query = {'q': w, #the query word\n 'result_type': 'recent',\n 'count': 100, #100 tweets, which is the maximum limit admitted by Twitter\n 'lang': 'en', #we are interested only in english tweets\n }\n queries.append(query)\n\n #perform the queries to get the tweet and map the JSON in our dictonary\n for q in tqdm(queries[:50]):\n for status in python_tweets.search(**q)['statuses']:\n dict_['user'].append(status['user']['screen_name']) #username\n dict_['text'].append(status['text']) #content of the tweet\n\n #this is necessary cuz the hashtags may be null or there can be more than one\n #this can easily be done with this magical regular expression\n ht = [d['text'] for d in status['entities']['hashtags'] if 'text' in d] #list of hashtags\n dict_['hashtags'].append(ht)\n\n #same thing for the mentions\n ment = [d['screen_name'] for d in status['entities']['user_mentions'] if 'screen_name' in d] #list of mentions\n dict_['mentions'].append(ment)\n\n # Structure data in a pandas DataFrame for easier manipulation\n df = pd.DataFrame(dict_)\n\n df = df.append(old_df)\n df.to_csv('data/twitter_dataset_2.csv', index=False, encoding='utf-8')\n\nif __name__ == '__main__':\n main()\n from time import sleep\n while True:\n sleep(1200)\n main()\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
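The inner loop of the record above flattens each tweet's nested JSON into four parallel lists. That step can be checked offline with a stubbed status dict (the values below are made up), so no Twitter credentials are needed:

# Flattening one stubbed tweet the way tweet_api.py does inside its search loop.
status = {
    "user": {"screen_name": "alice"},
    "text": "hello @bob #demo",
    "entities": {
        "hashtags": [{"text": "demo"}],
        "user_mentions": [{"screen_name": "bob"}],
    },
}
dict_ = {"user": [], "text": [], "hashtags": [], "mentions": []}
dict_["user"].append(status["user"]["screen_name"])
dict_["text"].append(status["text"])
dict_["hashtags"].append([d["text"] for d in status["entities"]["hashtags"] if "text" in d])
dict_["mentions"].append([d["screen_name"] for d in status["entities"]["user_mentions"]
                          if "screen_name" in d])
print(dict_)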
RaulAstudillo/bocf | [
"cd84eab2d1b4ea5a4bdeeb452df92296afbafb87",
"cd84eab2d1b4ea5a4bdeeb452df92296afbafb87",
"cd84eab2d1b4ea5a4bdeeb452df92296afbafb87",
"cd84eab2d1b4ea5a4bdeeb452df92296afbafb87"
] | [
"GPy/kern/src/static.py",
"GPyOpt/optimization/optimizer.py",
"test_5b.py",
"test_1b.py"
] | [
"# Copyright (c) 2012, GPy authors (see AUTHORS.txt).\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\n\nfrom .kern import Kern\nimport numpy as np\nfrom ...core.parameterization import Param\nfrom paramz.transformations import Logexp\nfrom paramz.caching import Cache_this\n\nclass Static(Kern):\n def __init__(self, input_dim, variance, active_dims, name):\n super(Static, self).__init__(input_dim, active_dims, name)\n self.variance = Param('variance', variance, Logexp())\n self.link_parameters(self.variance)\n\n def _to_dict(self):\n input_dict = super(Static, self)._to_dict()\n input_dict[\"variance\"] = self.variance.values.tolist()\n return input_dict\n\n def Kdiag(self, X):\n ret = np.empty((X.shape[0],), dtype=np.float64)\n ret[:] = self.variance\n return ret\n\n def gradients_X(self, dL_dK, X, X2=None):\n return np.zeros(X.shape)\n\n def gradients_X_diag(self, dL_dKdiag, X):\n return np.zeros(X.shape)\n\n def gradients_XX(self, dL_dK, X, X2=None):\n if X2 is None:\n X2 = X\n return np.zeros((X.shape[0], X2.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)\n\n def gradients_XX_diag(self, dL_dKdiag, X, cov=False):\n return np.zeros((X.shape[0], X.shape[1], X.shape[1]), dtype=np.float64)\n\n def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):\n return np.zeros(Z.shape)\n\n def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):\n return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape)\n\n def psi0(self, Z, variational_posterior):\n return self.Kdiag(variational_posterior.mean)\n\n def psi1(self, Z, variational_posterior):\n return self.K(variational_posterior.mean, Z)\n\n def psi2(self, Z, variational_posterior):\n K = self.K(variational_posterior.mean, Z)\n return np.einsum('ij,ik->jk',K,K) #K[:,:,None]*K[:,None,:] # NB. 
more efficient implementations on inherriting classes\n\n def input_sensitivity(self, summarize=True):\n if summarize:\n return super(Static, self).input_sensitivity(summarize=summarize)\n else:\n return np.ones(self.input_dim) * self.variance\n\nclass White(Static):\n def __init__(self, input_dim, variance=1., active_dims=None, name='white'):\n super(White, self).__init__(input_dim, variance, active_dims, name)\n\n def K(self, X, X2=None):\n if X2 is None:\n return np.eye(X.shape[0])*self.variance\n else:\n return np.zeros((X.shape[0], X2.shape[0]))\n\n def psi2(self, Z, variational_posterior):\n return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)\n\n def psi2n(self, Z, variational_posterior):\n return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)\n\n def update_gradients_full(self, dL_dK, X, X2=None):\n if X2 is None:\n self.variance.gradient = np.trace(dL_dK)\n else:\n self.variance.gradient = 0.\n\n def update_gradients_diag(self, dL_dKdiag, X):\n self.variance.gradient = dL_dKdiag.sum()\n\n def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):\n self.variance.gradient = dL_dpsi0.sum()\n\nclass WhiteHeteroscedastic(Static):\n def __init__(self, input_dim, num_data, variance=1., active_dims=None, name='white_hetero'):\n \"\"\"\n A heteroscedastic White kernel (nugget/noise).\n It defines one variance (nugget) per input sample.\n\n Prediction excludes any noise learnt by this Kernel, so be careful using this kernel.\n\n You can plot the errors learnt by this kernel by something similar as:\n plt.errorbar(m.X, m.Y, yerr=2*np.sqrt(m.kern.white.variance))\n \"\"\"\n super(Static, self).__init__(input_dim, active_dims, name)\n self.variance = Param('variance', np.ones(num_data) * variance, Logexp())\n self.link_parameters(self.variance)\n\n def Kdiag(self, X):\n if X.shape[0] == self.variance.shape[0]:\n # If the input has the same number of samples as\n # the number of variances, we return the variances\n return self.variance\n return 0.\n\n def K(self, X, X2=None):\n if X2 is None and X.shape[0] == self.variance.shape[0]:\n return np.eye(X.shape[0]) * self.variance\n else:\n return 0.\n\n def psi2(self, Z, variational_posterior):\n return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)\n\n def psi2n(self, Z, variational_posterior):\n return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)\n\n def update_gradients_full(self, dL_dK, X, X2=None):\n if X2 is None:\n self.variance.gradient = np.diagonal(dL_dK)\n else:\n self.variance.gradient = 0.\n\n def update_gradients_diag(self, dL_dKdiag, X):\n self.variance.gradient = dL_dKdiag\n\n def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):\n self.variance.gradient = dL_dpsi0\n\nclass Bias(Static):\n def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):\n super(Bias, self).__init__(input_dim, variance, active_dims, name)\n\n def to_dict(self):\n input_dict = super(Bias, self)._to_dict()\n input_dict[\"class\"] = \"GPy.kern.Bias\"\n return input_dict\n\n @staticmethod\n def _from_dict(kernel_class, input_dict):\n useGPU = input_dict.pop('useGPU', None)\n return Bias(**input_dict)\n\n def K(self, X, X2=None):\n shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])\n return np.full(shape, self.variance, dtype=np.float64)\n\n def update_gradients_full(self, dL_dK, X, X2=None):\n self.variance.gradient = dL_dK.sum()\n\n def update_gradients_diag(self, dL_dKdiag, X):\n self.variance.gradient = 
dL_dKdiag.sum()\n\n def psi2(self, Z, variational_posterior):\n return np.full((Z.shape[0], Z.shape[0]), self.variance*self.variance*variational_posterior.shape[0], dtype=np.float64)\n\n def psi2n(self, Z, variational_posterior):\n ret = np.empty((variational_posterior.mean.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)\n ret[:] = self.variance*self.variance\n return ret\n\n def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):\n if dL_dpsi2.ndim == 2:\n self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()\n + 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0])\n else:\n self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()\n + 2.*self.variance*dL_dpsi2.sum())\n\nclass Fixed(Static):\n def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):\n \"\"\"\n :param input_dim: the number of input dimensions\n :type input_dim: int\n :param variance: the variance of the kernel\n :type variance: float\n \"\"\"\n super(Fixed, self).__init__(input_dim, variance, active_dims, name)\n self.fixed_K = covariance_matrix\n def K(self, X, X2):\n if X2 is None:\n return self.variance * self.fixed_K\n else:\n return np.zeros((X.shape[0], X2.shape[0]))\n\n def Kdiag(self, X):\n return self.variance * self.fixed_K.diagonal()\n\n def update_gradients_full(self, dL_dK, X, X2=None):\n if X2 is None:\n self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K)\n else:\n self.variance.gradient = 0\n\n def update_gradients_diag(self, dL_dKdiag, X):\n self.variance.gradient = np.einsum('i,i', dL_dKdiag, np.diagonal(self.fixed_K))\n\n def psi2(self, Z, variational_posterior):\n return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)\n\n def psi2n(self, Z, variational_posterior):\n return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)\n\n def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):\n self.variance.gradient = dL_dpsi0.sum()\n\nclass Precomputed(Fixed):\n def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='precomputed'):\n \"\"\"\n Class for precomputed kernels, indexed by columns in X\n\n Usage example:\n\n import numpy as np\n from GPy.models import GPClassification\n from GPy.kern import Precomputed\n from sklearn.cross_validation import LeaveOneOut\n\n n = 10\n d = 100\n X = np.arange(n).reshape((n,1)) # column vector of indices\n y = 2*np.random.binomial(1,0.5,(n,1))-1\n X0 = np.random.randn(n,d)\n k = np.dot(X0,X0.T)\n kern = Precomputed(1,k) # k is a n x n covariance matrix\n\n cv = LeaveOneOut(n)\n ypred = y.copy()\n for train, test in cv:\n m = GPClassification(X[train], y[train], kernel=kern)\n m.optimize()\n ypred[test] = 2*(m.predict(X[test])[0]>0.5)-1\n\n :param input_dim: the number of input dimensions\n :type input_dim: int\n :param variance: the variance of the kernel\n :type variance: float\n \"\"\"\n assert input_dim==1, \"Precomputed only implemented in one dimension. 
Use multiple Precomputed kernels to have more dimensions by making use of active_dims\"\n super(Precomputed, self).__init__(input_dim, covariance_matrix, variance, active_dims, name)\n\n @Cache_this(limit=2)\n def _index(self, X, X2):\n if X2 is None:\n i1 = i2 = X.astype('int').flat\n else:\n i1, i2 = X.astype('int').flat, X2.astype('int').flat\n return self.fixed_K[i1,:][:,i2]\n\n def K(self, X, X2=None):\n return self.variance * self._index(X, X2)\n\n def Kdiag(self, X):\n return self.variance * self._index(X,None).diagonal()\n\n def update_gradients_full(self, dL_dK, X, X2=None):\n self.variance.gradient = np.einsum('ij,ij', dL_dK, self._index(X, X2))\n\n def update_gradients_diag(self, dL_dKdiag, X):\n self.variance.gradient = np.einsum('i,ii', dL_dKdiag, self._index(X, None))\n",
"# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nimport numpy as np\nimport cma\n\n\nclass Optimizer(object):\n \"\"\"\n Class for a general acquisition optimizer.\n\n :param bounds: list of tuple with bounds of the optimizer\n \"\"\"\n\n def __init__(self, bounds):\n self.bounds = bounds\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n raise NotImplementedError(\"The optimize method is not implemented in the parent class.\")\n\n\nclass OptSGD(Optimizer):\n '''\n (Stochastic) gradient descent algorithm.\n '''\n def __init__(self, bounds, maxiter=160):\n super(OptSGD, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n x = np.copy(x0)\n x_opt = np.copy(x0)\n #print('initial point')\n #print(x0)\n #print('initial value')\n f_opt = f(x_opt)\n #print(f_opt)\n for t in range(1, self.maxiter + 1):\n if t % 30 == 0:\n val = f(x)\n if val < f_opt:\n x_opt = np.copy(x)\n f_opt = np.atleast_2d(np.copy(val))\n #print('better value was found at iteration {}'.format(t))\n #print('point')\n #print(x_opt)\n #print('value')\n #print(f_opt)\n \n grad = f_df(x)[1]\n if not np.isnan(grad).any():\n if t < self.maxiter - 49:\n x = x - 0.05 * grad\n else:\n x = x - 0.05 * np.power(t + 50 - self.maxiter, -0.7) * grad\n else:\n print('nan found')\n for k in range(x.shape[1]):\n if x[0,k] < self.bounds[k][0]:\n x[0,k] = self.bounds[k][0]\n elif x[0,k] > self.bounds[k][1]:\n x[0,k] = self.bounds[k][1]\n if True:\n print('test begin')\n print(f_df(x)[1])\n fx = f(x)\n h = 1e-6\n x[0,0] +=h\n f_aux = f(x)\n print((f_aux-fx)/h)\n x[0,0] -=h\n x[0,2] +=h\n f_aux = f(x)\n print((f_aux-fx)/h)\n x[0,2] -=h\n print('test end')\n #if np.absolute(fx - f_previous) < 1e-5:\n #break \n \n x = np.atleast_2d(x)\n fx = np.atleast_2d(f(x))\n if fx < f_opt:\n x_opt = x\n f_opt = fx\n #print('final point')\n #print(x_opt)\n #print('final value')\n #print(f_opt)\n #print('initial point again')\n #print(x0)\n #print('initial value again')\n #print(f(x0))\n return x_opt, f_opt\n \n \nclass OptADAM(Optimizer):\n '''\n ADAM algorithm.\n '''\n def __init__(self, bounds, maxiter=100):\n super(OptADAM, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n x = x0\n alpha = 0.001\n beta1 = 0.9\n beta2 = 0.999\n eps = 1e-8\n m = 0*x0\n v = 0*x0\n beta1_power = 1.\n beta2_power = 1.\n\n for t in range(1, self.maxiter + 1):\n #print(t)\n f_x, g_x = f_df(x)\n m = beta1*m +(1-beta1)*g_x\n v = beta2*v +(1-beta2)*np.square(g_x)\n beta1_power = beta1_power*beta1\n m_hat = m/(1-beta1_power)\n beta2_power = beta2_power*beta2\n v_hat = v/(1-beta2_power)\n tmp = alpha*np.divide(m_hat,np.sqrt(v_hat)+eps)\n if np.any(np.isnan(tmp)):\n print('nan found')\n x = np.atleast_2d(x)\n f_x = f(x)\n return x, f_x\n \n x = x - tmp\n for k 
in range(x.shape[1]):\n if x[0,k] < self.bounds[k][0]:\n x[0,k] = self.bounds[k][0]\n elif x[0,k] > self.bounds[k][1]:\n x[0,k] = self.bounds[k][1]\n\n x = np.atleast_2d(x) \n f_x = f(x)\n return x, f_x\n\n\nclass OptAMSGrad(Optimizer):\n '''\n AMSGrad algorithm.\n '''\n\n def __init__(self, bounds, maxiter=120):\n super(OptAMSGrad, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n\n x_opt = np.copy(x0)\n x_ref = np.copy(x0)\n #print('initial point')\n #print(x_opt)\n #print('initial value')\n f_opt = f(x_opt)\n #print(f_opt)\n x = np.copy(x0)\n alpha = 0.001\n beta1 = 0.9\n beta2 = 0.999\n eps = 1e-8\n m = 0 * x0\n v = 0 * x0\n v_hat = 0 * x0\n\n for t in range(1, self.maxiter + 1):\n if t % 30 == 0 or np.max(x-x_ref) > 0.1:\n x_ref = np.copy(x)\n val = f(x)\n if val < f_opt:\n x_opt = np.copy(x)\n f_opt = np.atleast_2d(np.copy(val))\n #print('better value was found at iteration {}'.format(t))\n #print('point')\n #print(x_opt)\n #print('value')\n #print(f_opt)\n g_x = f_df(x)[1]\n\n if not np.isnan(g_x).any():\n m = beta1 * m + (1 - beta1) * g_x\n v = beta2 * v + (1 - beta2) * np.square(g_x)\n v_hat = np.maximum(v_hat, v)\n tmp = alpha * np.divide(m, np.sqrt(v_hat) + eps)\n if t > self.maxiter - 50:\n tmp /= np.sqrt(t - self.maxiter + 50)\n if not np.isnan(tmp).any():\n x = x - tmp\n else:\n return x_opt, f_opt\n\n for k in range(x.shape[1]):\n if x[0, k] < self.bounds[k][0]:\n x[0, k] = self.bounds[k][0]\n elif x[0, k] > self.bounds[k][1]:\n x[0, k] = self.bounds[k][1]\n\n x = np.atleast_2d(x)\n fx = np.atleast_2d(f(x))\n if fx < f_opt:\n x_opt = x\n f_opt = fx\n #print('final point')\n #print(x_opt)\n #print('final value')\n #print(f_opt)\n #print('initial point again')\n #print(x0)\n #print('initial value again')\n #print(f(x0))\n return x_opt, f_opt\n\n\nclass OptAGD(Optimizer):\n '''\n Accelerated gradient descent (AGD) algorithm.\n '''\n def __init__(self, bounds, maxiter=250):\n super(OptAGD, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n x = x0\n y = np.copy(x0)\n lambd = 0\n gamma = 1\n beta_inverse = 100\n\n for t in range(1, self.maxiter + 1):\n f_x, g_x = f_df(x)\n tmp_y = x - beta_inverse*g_x\n x = (1 - gamma)*tmp_y + gamma*y\n y = np.copy(tmp_y)\n tmp_lamb = (1 + np.sqrt(1 + 4*lambd))/2\n gamma = (1 - lambd)/tmp_lamb\n lambd = np.copy(tmp_lamb) # advance the momentum sequence\n\n for k in range(x.shape[1]):\n if x[0,k] < self.bounds[k][0]:\n x[0,k] = self.bounds[k][0]\n elif x[0,k] > self.bounds[k][1]:\n x[0,k] = self.bounds[k][1]\n \n x = np.atleast_2d(x) \n f_x = f_df(x)[0]\n return x, f_x\n \n\nclass OptLbfgs(Optimizer):\n '''\n Wrapper for l-bfgs-b to use the true or the approximate gradients.\n '''\n def __init__(self, bounds, maxiter=500):\n super(OptLbfgs, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n 
\"\"\"\n import scipy.optimize\n if f_df is None and df is not None:\n f_df = lambda x: float(f(x)), df(x)\n \n if f_df is None and df is None:\n res = scipy.optimize.fmin_l_bfgs_b(f, x0=x0, bounds=self.bounds, approx_grad=True, maxiter=self.maxiter, factr=1e3, pgtol=1e-20)\n else:\n res = scipy.optimize.fmin_l_bfgs_b(f_df, x0=x0, bounds=self.bounds, maxiter=self.maxiter, factr=1e6)\n\n ### --- We check here if the the optimizer moved. It it didn't we report x0 and f(x0) as scipy can return NaNs\n if res[2]['task'] == b'ABNORMAL_TERMINATION_IN_LNSRCH':\n result_x = np.atleast_2d(x0)\n result_fx = np.atleast_2d(f(x0))\n else:\n result_x = np.atleast_2d(res[0])\n result_fx = np.atleast_2d(res[1])\n \n #print(res)\n return result_x, result_fx\n\n\nclass OptLbfgs2(Optimizer):\n '''\n Wrapper for l-bfgs-b to use the true or the approximate gradients.\n '''\n\n def __init__(self, bounds, maxiter=50):\n super(OptLbfgs2, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n import scipy.optimize\n if f_df is None and df is not None:\n f_df = lambda x: float(f(x)), df(x)\n\n if f_df is None and df is None:\n res = scipy.optimize.fmin_l_bfgs_b(f, x0=x0, bounds=self.bounds, approx_grad=True, maxiter=self.maxiter,\n factr=1e6)\n else:\n res = scipy.optimize.fmin_l_bfgs_b(f_df, x0=x0, bounds=self.bounds, maxiter=self.maxiter, factr=1e5, pgtol=1e-15)\n\n ### --- We check here if the the optimizer moved. It it didn't we report x0 and f(x0) as scipy can return NaNs\n if res[2]['task'] == b'ABNORMAL_TERMINATION_IN_LNSRCH':\n result_x = np.atleast_2d(x0)\n result_fx = np.atleast_2d(f(x0))\n else:\n result_x = np.atleast_2d(res[0])\n result_fx = np.atleast_2d(res[1])\n\n #print(res)\n return result_x, result_fx\n\n\nclass OptDirect(Optimizer):\n '''\n Wrapper for DIRECT optimization method. It works partitioning iteratively the domain\n of the function. Only requires f and the box constraints to work.\n\n '''\n def __init__(self, bounds, maxiter=50):\n super(OptDirect, self).__init__(bounds)\n self.maxiter = maxiter\n #assert self.space.has_types['continuous']\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n # Based on the documentation of DIRECT, it does not seem we can pass through an initial point x0\n try:\n from DIRECT import solve\n def DIRECT_f_wrapper(f):\n def g(x, user_data):\n return f(np.array([x])), 0\n return g\n lB = np.asarray(self.bounds)[:,0]\n uB = np.asarray(self.bounds)[:,1]\n x,_,_ = solve(DIRECT_f_wrapper(f),lB,uB, maxT=self.maxiter)\n return np.atleast_2d(x), f(np.atleast_2d(x))\n except ImportError:\n print(\"Cannot find DIRECT library, please install it to use this option.\")\n\n\nclass OptCma(Optimizer):\n '''\n Wrapper the Covariance Matrix Adaptation Evolutionary strategy (CMA-ES) optimization method. It works generating\n an stochastic search based on multivariate Gaussian samples. 
Only requires f and the box constraints to work.\n\n '''\n def __init__(self, bounds, maxiter=50):\n super(OptCma, self).__init__(bounds)\n self.maxiter = maxiter\n\n def optimize(self, x0, f=None, df=None, f_df=None):\n \"\"\"\n :param x0: initial point for a local optimizer.\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n \"\"\"\n try:\n import cma\n def CMA_f_wrapper(f):\n def g(x):\n return f(np.array([x]))[0][0]\n return g\n lB = np.asarray(self.bounds)[:,0]\n uB = np.asarray(self.bounds)[:,1]\n x = cma.fmin(CMA_f_wrapper(f), x0, 0.6, options={\"bounds\":[lB, uB], \"verbose\":-1, \"maxfevals\":100})[0]\n return np.atleast_2d(x), f(np.atleast_2d(x))\n except ImportError:\n print(\"Cannot find cma library, please install it to use this option.\")\n except:\n print(\"CMA does not work in problems of dimension 1.\")\n\n\ndef apply_optimizer(optimizer, x0, f=None, df=None, f_df=None, duplicate_manager=None, context_manager=None, space=None):\n \"\"\"\n :param x0: initial point for a local optimizer (x0 can be defined with or without the context included).\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n :param duplicate_manager: logic to check for duplicate (always operates in the full space, context included)\n :param context_manager: If provided, x0 (and the optimizer) operates in the space without the context\n :param space: GPyOpt class design space.\n \"\"\"\n\n x0 = np.atleast_2d(x0)\n\n ## --- Compute a new objective that inputs non context variables but that takes into account the values of the context ones.\n ## --- It does nothing if no context is passed\n #problem = OptimizationWithContext(x0=x0, f=f, df=df, f_df=f_df, context_manager=context_manager)\n\n #if context_manager:\n #print('context manager')\n #add_context = lambda x : context_manager._expand_vector(x)\n #else:\n #add_context = lambda x : x\n\n #if duplicate_manager and duplicate_manager.is_unzipped_x_duplicate(x0):\n #raise ValueError(\"The starting point of the optimizer cannot be a duplicate.\")\n\n ## --- Optimize point\n #optimized_x, suggested_fx = optimizer.optimize(problem.x0_nocontext, problem.f_nocontext, problem.df_nocontext, problem.f_df_nocontext)\n \n ## --- Add context and round according to the type of variables of the design space\n #suggested_x_with_context = add_context(optimized_x)\n #suggested_x_with_context_rounded = space.round_optimum(suggested_x_with_context)\n\n ## --- Run duplicate_manager\n #if duplicate_manager and duplicate_manager.is_unzipped_x_duplicate(suggested_x_with_context_rounded):\n #suggested_x, suggested_fx = x0, np.atleast_2d(f(x0))\n #else:\n #suggested_x, suggested_fx = suggested_x_with_context_rounded, f(suggested_x_with_context_rounded)\n suggested_x, suggested_fx = optimizer.optimize(x0, f, df, f_df)\n #suggested_fx = f(suggested_x)\n\n return suggested_x, suggested_fx\n\n\ndef apply_optimizer_inner(optimizer, x0, f=None, df=None, f_df=None, duplicate_manager=None, context_manager=None, space=None):\n \"\"\"\n :param x0: initial point for a local optimizer (x0 can be defined with or without the context included).\n :param f: function to optimize.\n :param df: gradient of the function to optimize.\n :param f_df: returns both the function to optimize and its gradient.\n :param duplicate_manager: logic to check for duplicate (always operates in the full 
space, context included)\n :param context_manager: If provided, x0 (and the optimizer) operates in the space without the context\n :param space: GPyOpt class design space.\n \"\"\"\n\n x0 = np.atleast_2d(x0)\n #print('apply inner opt')\n\n ## --- Compute a new objective that inputs non context variables but that takes into account the values of the context ones.\n ## --- It does nothing if no context is passed\n #problem = OptimizationWithContext(x0=x0, f=f, df=df, f_df=f_df, context_manager=context_manager)\n\n #if context_manager:\n #print('context manager')\n #add_context = lambda x : context_manager._expand_vector(x)\n #else:\n #add_context = lambda x : x\n\n #if duplicate_manager and duplicate_manager.is_unzipped_x_duplicate(x0):\n #raise ValueError(\"The starting point of the optimizer cannot be a duplicate.\")\n\n ## --- Optimize point\n #optimized_x, suggested_fx = optimizer.optimize(problem.x0_nocontext, problem.f_nocontext, problem.df_nocontext, problem.f_df_nocontext)\n \n ## --- Add context and round according to the type of variables of the design space\n #suggested_x_with_context = add_context(optimized_x)\n #suggested_x_with_context_rounded = space.round_optimum(suggested_x_with_context)\n\n ## --- Run duplicate_manager\n #if duplicate_manager and duplicate_manager.is_unzipped_x_duplicate(suggested_x_with_context_rounded):\n #suggested_x, suggested_fx = x0, np.atleast_2d(f(x0))\n #else:\n #suggested_x, suggested_fx = suggested_x_with_context_rounded, f(suggested_x_with_context_rounded)\n suggested_x, suggested_fx = optimizer.optimize(x0, f, df, f_df)\n #suggested_fx = f(suggested_x)\n\n return suggested_x, suggested_fx\n\n\ndef optimize_anchor_points(id, optimizer, anchor_points, f=None, df=None, f_df=None, duplicate_manager=None, context_manager=None, space=None):\n return [apply_optimizer(optimizer, a, f, df, f_df, duplicate_manager, context_manager, space) for a in anchor_points]\n\n\nclass OptimizationWithContext(object):\n\n def __init__(self, x0, f, df=None, f_df=None, context_manager=None):\n '''\n Constructor of an objective function that takes as input a vector x of the non-context variables\n and returns a value in which the context variables have been fixed.\n '''\n self.x0 = np.atleast_2d(x0)\n self.f = f\n self.df = df\n self.f_df = f_df\n self.context_manager = context_manager\n\n if not context_manager:\n self.x0_nocontext = x0\n self.f_nocontext = self.f\n self.df_nocontext = self.df\n self.f_df_nocontext = self.f_df\n\n else:\n #print('context')\n self.x0_nocontext = self.x0[:,self.context_manager.noncontext_index]\n self.f_nocontext = self.f_nc\n if self.f_df is None:\n self.df_nocontext = None\n self.f_df_nocontext = None\n else:\n self.df_nocontext = self.df\n self.f_df_nocontext = self.f_df#self.f_df_nc\n\n def f_nc(self,x):\n '''\n Wrapper of *f*: takes an input x with size of the noncontext dimensions,\n expands it and evaluates the entire function.\n '''\n x = np.atleast_2d(x)\n xx = self.context_manager._expand_vector(x)\n if x.shape[0] == 1:\n return self.f(xx)[0]\n else:\n return self.f(xx)\n\n def df_nc(self,x):\n '''\n Wrapper of the derivative of *f*: takes an input x with size of the non-fixed\n dimensions, expands it and evaluates the gradient of the entire function.\n '''\n x = np.atleast_2d(x)\n xx = self.context_manager._expand_vector(x)\n _, df_nocontext_xx = self.f_df(xx)\n df_nocontext_xx = df_nocontext_xx[:,np.array(self.context_manager.noncontext_index)]\n return df_nocontext_xx\n\n def f_df_nc(self,x):\n '''\n Wrapper of *f* and its derivative: takes an input x with size of the non-fixed\n dimensions, expands it and evaluates both the function and its gradient on the entire input.\n '''\n x = np.atleast_2d(x)\n xx = self.context_manager._expand_vector(x)\n f_nocontext_xx , df_nocontext_xx = self.f_df(xx)\n df_nocontext_xx = df_nocontext_xx[:,np.array(self.context_manager.noncontext_index)]\n return f_nocontext_xx, df_nocontext_xx\n\n\ndef choose_optimizer(optimizer_name, bounds):\n \"\"\"\n Selects the type of local optimizer\n \"\"\" \n if optimizer_name == 'lbfgs':\n optimizer = OptLbfgs(bounds)\n\n elif optimizer_name == 'lbfgs2':\n optimizer = OptLbfgs2(bounds)\n\n elif optimizer_name == 'sgd':\n optimizer = OptSGD(bounds)\n \n elif optimizer_name == 'adam':\n optimizer = OptADAM(bounds)\n\n elif optimizer_name == 'amsgrad':\n optimizer = OptAMSGrad(bounds)\n\n elif optimizer_name == 'DIRECT':\n optimizer = OptDirect(bounds)\n\n elif optimizer_name == 'CMA':\n optimizer = OptCma(bounds)\n\n elif optimizer_name == 'agd':\n optimizer = OptAGD(bounds)\n else:\n raise ValueError('Invalid optimizer selected.')\n\n return optimizer\n",
"import numpy as np\nimport scipy\nimport GPyOpt\nimport GPy\nfrom multi_objective import MultiObjective\nfrom multi_outputGP import multi_outputGP\nfrom maEI import maEI\nfrom parameter_distribution import ParameterDistribution\nfrom utility import Utility\nimport cbo\nfrom scipy.stats import norm\n\n# --- Function to optimize\nd = 3 # Input dimension\nm = 4 # Number of attributes\naux_model = []\nI = np.linspace(0., 1., 8)\naux_grid = np.meshgrid(I, I, I)\ngrid = np.array([a.flatten() for a in aux_grid]).T\nkernel = GPy.kern.SE(input_dim=d, variance=2., lengthscale=0.3)\ncov = kernel.K(grid)\nmean = np.zeros((8 ** d,))\nfor j in range(m):\n r = np.random.RandomState(j+7)\n Y = r.multivariate_normal(mean, cov)\n Y = np.reshape(Y, (8 ** d, 1))\n print(Y[:5, 0])\n aux_model.append(GPy.models.GPRegression(grid, Y, kernel, noise_var=1e-10))\n\n\ndef h(X):\n X = np.atleast_2d(X)\n hX = np.empty((m, X.shape[0]))\n for j in range(m):\n hX[j, :] = aux_model[j].posterior_mean(X)[:, 0]\n return hX\n\nparameter = np.atleast_1d([1.25, 1.50, 1.25, 1.50])\n\ndef g(y):\n return np.squeeze(np.sum(-np.exp(y), axis=0))\n\ndef f(X):\n return g(h(X))\n# --- Objective\nobjective = MultiObjective([f], as_list=True, output_dim=1)\n\n# --- Space\nspace = GPyOpt.Design_space(space=[{'name': 'var', 'type': 'continuous', 'domain': (0, 1), 'dimensionality': d}])\n\n# --- Model\nmodel = multi_outputGP(output_dim=1, exact_feval=[True], fixed_hyps=False)\n\n# --- Initial design\ninitial_design = GPyOpt.experiment_design.initial_design('random', space, 2*(d+1))\n\n# --- Parameter distribution\nparameter_support = np.ones((1,1))\nparameter_dist = np.ones((1,))\nparameter_distribution = ParameterDistribution(continuous=False, support=parameter_support, prob_dist=parameter_dist)\n\n# --- Utility function\ndef U_func(parameter,y):\n return np.dot(parameter,y)\n\ndef dU_func(parameter,y):\n return parameter\n\nU = Utility(func=U_func,dfunc=dU_func,parameter_dist=parameter_distribution,linear=True)\n\n# --- Acquisition optimizer\nacq_opt = GPyOpt.optimization.AcquisitionOptimizer(optimizer='lbfgs2', inner_optimizer='lbfgs2', space=space)\n\n# --- Acquisition function\nacquisition = maEI(model, space, optimizer=acq_opt,utility=U)\n\n# --- Evaluator\nevaluator = GPyOpt.core.evaluators.Sequential(acquisition)\n\n# --- Compute real optimum value\nbounds = [(0, 1)] * d\nstarting_points = np.random.rand(100, d)\nopt_val = 0\n#parameter = parameter_support[0,:]\n\ndef func(x):\n x_copy = np.atleast_2d(x)\n val = f(x_copy)\n return -val\n\nbest_val_found = np.inf\n\nfor x0 in starting_points:\n res = scipy.optimize.fmin_l_bfgs_b(func, x0, approx_grad=True, bounds=bounds)\n if best_val_found > res[1]:\n best_val_found = res[1]\n x_opt = res[0]\nprint('optimum')\nprint(x_opt)\nprint('optimal value')\nprint(-best_val_found)\n\nmax_iter = 50\nfor i in range(1):\n filename = './experiments/test5_EIf_noiseless_' + str(i) + '.txt'\n bo_model = cbo.CBO(model, space, objective, acquisition, evaluator, initial_design)\n bo_model.run_optimization(max_iter=max_iter, parallel=False, plot=False, results_file=filename)",
"import numpy as np\nimport scipy\nimport GPyOpt\nimport GPy\nfrom multi_objective import MultiObjective\nfrom multi_outputGP import multi_outputGP\nfrom maEI import maEI\nfrom parameter_distribution import ParameterDistribution\nfrom utility import Utility\nimport cbo\n\n\n# --- Function to optimize\nm = 5 # Number of attributes\naux_model = []\nI = np.linspace(0., 1., 6)\naux_grid = np.meshgrid(I, I, I, I)\ngrid = np.array([a.flatten() for a in aux_grid]).T\nkernel = GPy.kern.SE(input_dim=4, variance=2., lengthscale=0.3)\ncov = kernel.K(grid)\nmean = np.zeros((6 ** 4,))\nfor j in range(m):\n r = np.random.RandomState(j+7)\n Y = r.multivariate_normal(mean, cov)\n Y = np.reshape(Y, (6 ** 4, 1))\n print(Y[:5, 0])\n aux_model.append(GPy.models.GPRegression(grid, Y, kernel, noise_var=1e-10))\n\n\ndef f(X):\n X = np.atleast_2d(X)\n fX = np.empty((m, X.shape[0]))\n for j in range(m):\n fX[j, :] = aux_model[j].posterior_mean(X)[:, 0]\n return fX\n\nbounds = [(0, 1)] * 4\nstarting_points = np.random.rand(100, 4)\ny_opt = np.empty((1,m))\nfor j in range(1):\n def marginal_func(x):\n x_copy = np.atleast_2d(x)\n val = aux_model[j].posterior_mean(x_copy)[:, 0]\n return -val\n\n best_val_found = np.inf\n for x0 in starting_points:\n res = scipy.optimize.fmin_l_bfgs_b(marginal_func, x0, approx_grad=True, bounds=bounds)\n if best_val_found > res[1]:\n best_val_found = res[1]\n marginal_opt = res[0]\n y_opt = f(marginal_opt).transpose()\n\ndef g(y):\n #y_aux = np.squeeze(y)\n aux = (y.transpose() - y_opt).transpose()\n return -np.sum(np.square(aux), axis=0)\n\ndef objective(X):\n return g(f(X))\n\nobjective = MultiObjective([objective], as_list=True, output_dim=1)\n# objective = MultiObjective(f, noise_var=noise_var)\n\n# --- Space\nspace = GPyOpt.Design_space(space=[{'name': 'var', 'type': 'continuous', 'domain': (0, 1), 'dimensionality': 4}])\n\n# --- Model (Multi-output GP)\nmodel = multi_outputGP(output_dim=1, exact_feval=[True], fixed_hyps=False)\n# model = multi_outputGP(output_dim=n_attributes, noise_var=noise_var, fixed_hyps=True)\n\n# --- Aquisition optimizer\nacq_opt = GPyOpt.optimization.AcquisitionOptimizer(optimizer='lbfgs2', inner_optimizer='lbfgs2', space=space)\n\n# --- Initial design\ninitial_design = GPyOpt.experiment_design.initial_design('random', space, 10)\n\n\n# --- Parameter distribution\nparameter_support = np.ones((1,1))\nparameter_dist = np.ones((1,)) / 1\nparameter_distribution = ParameterDistribution(continuous=False, support=parameter_support, prob_dist=parameter_dist)\n\n# --- Utility function\ndef U_func(parameter,y):\n return np.dot(parameter,y)\n\ndef dU_func(parameter,y):\n return parameter\n\nU = Utility(func=U_func,dfunc=dU_func,parameter_dist=parameter_distribution,linear=True)\n# --- Aquisition function\nacquisition = maEI(model, space, optimizer=acq_opt,utility=U)\n# --- Evaluator\nevaluator = GPyOpt.core.evaluators.Sequential(acquisition)\n# standard BO\n\nif True:\n bounds = [(0, 1)] * 4\n starting_points = np.random.rand(100, 4)\n def marginal_func(x):\n x_copy = np.atleast_2d(x)\n fx = f(x_copy)\n # print('test begin')\n # print(parameter)\n # print(fx)\n val = g(fx)\n return -val\n\n\n best_val_found = np.inf\n for x0 in starting_points:\n res = scipy.optimize.fmin_l_bfgs_b(marginal_func, x0, approx_grad=True, bounds=bounds)\n if best_val_found > res[1]:\n # print(res)\n best_val_found = res[1]\n opt = res[0]\n print('opt')\n print(opt)\n print(f(opt))\n print(y_opt)\n print('real optimum')\n print(-best_val_found)\n\nmax_iter = 50\nfor i in range(1):\n 
filename = './experiments/test1_EIf_' + str(i) + '.txt'\n bo_model = cbo.CBO(model, space, objective, acquisition, evaluator, initial_design)\n bo_model.run_optimization(max_iter=max_iter, parallel=False, plot=False, results_file=filename)"
] | [
[
"numpy.einsum",
"numpy.eye",
"numpy.diagonal",
"numpy.full",
"numpy.ones",
"numpy.zeros",
"numpy.trace",
"numpy.empty"
],
[
"numpy.square",
"numpy.maximum",
"numpy.sqrt",
"numpy.power",
"numpy.isnan",
"numpy.asarray",
"numpy.atleast_2d",
"numpy.copy",
"numpy.max",
"numpy.array"
],
[
"numpy.dot",
"numpy.linspace",
"scipy.optimize.fmin_l_bfgs_b",
"numpy.reshape",
"numpy.empty",
"numpy.ones",
"numpy.atleast_1d",
"numpy.atleast_2d",
"numpy.random.rand",
"numpy.exp",
"numpy.meshgrid",
"numpy.zeros",
"numpy.random.RandomState"
],
[
"numpy.square",
"numpy.dot",
"numpy.linspace",
"scipy.optimize.fmin_l_bfgs_b",
"numpy.random.RandomState",
"numpy.reshape",
"numpy.ones",
"numpy.atleast_2d",
"numpy.random.rand",
"numpy.meshgrid",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
jacenkow/inside | [
"6f860420644b50b78981158a59ceed8cdbd209bf"
] | [
"inside/pipelines/clevr.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2020 Grzegorz Jacenków.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n\"\"\"Training and evaluation pipeline for the networks.\"\"\"\n\nimport csv\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.keras.metrics import Mean\n\nfrom inside import config\nfrom inside.callbacks import setup_callbacks\nfrom inside.constructor import setup_comet_ml, setup_model\nfrom inside.loaders import CLEVR\nfrom inside.metrics import DiceScore\n\n\ndef _write_results(logs):\n \"\"\"Write final logs to a CSV file.\"\"\"\n w = csv.writer(open(os.path.join(\n config.EXPERIMENT_FOLDER, \"results.csv\"), \"w\"))\n for key, val in logs.items():\n w.writerow([key, val])\n\n\nclass Pipeline:\n def __init__(self):\n # Model.\n self.model = setup_model()\n\n # Comet.ml experiment.\n self.comet_ml = setup_comet_ml()\n\n # Testing metrics.\n self.test_dice = DiceScore(name=\"testing_dice\")\n self.test_loss = Mean(name=\"testing_loss\")\n\n # Training metrics.\n self.training_dice = DiceScore(name=\"training_dice\")\n self.training_loss = Mean(name=\"training_loss\")\n\n # Callbacks.\n self.cl, self.es, self.mc, self.pp = setup_callbacks()\n self.cl.model, self.es.model, self.mc.model = \\\n self.model, self.model, self.model\n\n self.pp.model = self.model\n self.pp.comet_ml = self.comet_ml\n\n def fit(self):\n \"\"\"Train the model.\"\"\"\n # Toy dataset.\n loader = CLEVR()\n train_ds, valid_ds, test_ds = loader.load()\n\n with self.comet_ml.train():\n self.cl.on_train_begin()\n self.es.on_train_begin()\n self.mc.on_train_begin()\n self.pp.on_train_begin()\n\n for epoch in range(config.EXPERIMENT_EPOCHS):\n self.comet_ml.set_epoch(epoch)\n\n for images, labels in train_ds:\n self.train_step(images, labels)\n\n for batch, (images, labels) in enumerate(valid_ds):\n self.test_step(images, labels)\n\n if not batch: # Log only first mini-batch from an epoch.\n self.pp.on_epoch_end(epoch, images, labels)\n\n # Get results.\n logs = {\n \"dice\": self.training_dice.result().numpy(),\n \"loss\": self.training_loss.result().numpy(),\n \"validation_dice\": self.test_dice.result().numpy(),\n \"validation_loss\": self.test_loss.result().numpy(),\n }\n\n template = (\"Epoch {}. Training Loss: {}. Training Dice: {}. \"\n \"Validation Loss: {}. 
Validation Dice: {}.\")\n\n print(template.format(epoch + 1,\n logs['loss'],\n logs['dice'],\n logs['validation_loss'],\n logs['validation_dice']))\n\n # Log metrics.\n self.comet_ml.log_metrics(logs, epoch=epoch)\n self.cl.on_epoch_end(epoch, logs)\n self.es.on_epoch_end(epoch, logs)\n self.mc.on_epoch_end(epoch, logs)\n\n # Reset the metrics for the next epoch.\n self.training_dice.reset_states()\n self.training_loss.reset_states()\n self.test_dice.reset_states()\n self.test_loss.reset_states()\n\n # Early stopping criterion.\n if self.es.model.stop_training:\n self.cl.on_train_end()\n self.es.on_train_end()\n self.mc.on_train_end()\n break\n\n with self.comet_ml.test():\n for batch, (images, labels) in enumerate(test_ds):\n self.test_step(images, labels)\n\n if not batch:\n self.pp.on_test_end(images, labels)\n\n # Get results.\n logs = {\n \"dice\": self.test_dice.result().numpy(),\n \"loss\": self.test_loss.result().numpy(),\n }\n\n print(\"Test Loss: {}. Test Dice: {}.\".format(\n logs['loss'], logs['dice']))\n\n # Log metrics.\n self.comet_ml.log_metrics(logs)\n _write_results(logs)\n\n @tf.function\n def train_step(self, images, labels):\n with tf.GradientTape() as tape:\n predictions = self.model.inference(images)\n loss = self.model.loss(labels, predictions)\n\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.model.optimiser.apply_gradients(\n zip(gradients, self.model.trainable_variables))\n\n self.training_loss(loss)\n self.training_dice(labels, predictions)\n\n @tf.function\n def test_step(self, images, labels):\n predictions = self.model.inference(images)\n t_loss = self.model.loss(labels, predictions)\n\n self.test_loss(t_loss)\n self.test_dice(labels, predictions)\n"
] | [
[
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
BaratiLab/GAMD | [
"7de91526f1c8c06ea005920e6a55c3cf031c26b2"
] | [
"dataset/generate_tip4p_data.py"
] | [
"from openmmtools import testsystems\nfrom simtk.openmm.app import *\nimport simtk.unit as unit\n\nimport logging\n\nimport numpy as np\n\nfrom openmmtools.constants import kB\nfrom openmmtools import respa, utils\n\nlogger = logging.getLogger(__name__)\n\n# Energy unit used by OpenMM unit system\nfrom openmmtools import states, integrators\nimport time\nimport numpy as np\nimport sys\nimport os\n\n\ndef get_rotation_matrix():\n \"\"\" Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n Nx3 array, original point clouds\n Return:\n Nx3 array, rotated point clouds\n \"\"\"\n angles = np.random.uniform(-1.0, 1.0, size=(3,)) * np.pi\n print(f'Using angle: {angles}')\n Rx = np.array([[1., 0, 0],\n [0, np.cos(angles[0]), -np.sin(angles[0])],\n [0, np.sin(angles[0]), np.cos(angles[0])]], dtype=np.float32)\n Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])],\n [0, 1, 0],\n [-np.sin(angles[1]), 0, np.cos(angles[1])]], dtype=np.float32)\n Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],\n [np.sin(angles[2]), np.cos(angles[2]), 0],\n [0, 0, 1]], dtype=np.float32)\n rotation_matrix = np.matmul(Rz, np.matmul(Ry, Rx))\n\n return rotation_matrix\n\ndef center_positions(pos):\n offset = np.mean(pos, axis=0)\n return pos - offset, offset\n\n\nBOX_SCALE = 2\nDT = 2\nfor seed in range(10):\n print(f'Running seed: {seed}')\n\n waterbox = testsystems.WaterBox(\n box_edge=2 * unit.nanometers,\n model='tip4pew')\n [topology, system, positions] = [waterbox.topology, waterbox.system, waterbox.positions]\n\n R = get_rotation_matrix()\n positions = positions.value_in_unit(unit.angstrom)\n positions, off = center_positions(positions)\n positions = np.matmul(positions, R)\n positions += off\n positions += np.random.randn(positions.shape[0], positions.shape[1]) * 0.005\n positions *= unit.angstrom\n\n p_num = positions.shape[0] // 3\n timestep = DT * unit.femtoseconds\n temperature = 300 * unit.kelvin\n chain_length = 10\n friction = 1. / unit.picosecond\n num_mts = 5\n num_yoshidasuzuki = 5\n\n integrator = integrators.NoseHooverChainVelocityVerletIntegrator(system,\n temperature,\n friction,\n timestep, chain_length, num_mts, num_yoshidasuzuki)\n\n simulation = Simulation(topology, system, integrator)\n simulation.context.setPositions(positions)\n simulation.context.setVelocitiesToTemperature(temperature)\n\n simulation.minimizeEnergy(tolerance=1*unit.kilojoule/unit.mole)\n simulation.step(1)\n\n os.makedirs(f'./water_data_tip4p/', exist_ok=True)\n dataReporter_gt = StateDataReporter(f'./log_nvt_tip4p_{seed}.txt', 50, totalSteps=50000,\n step=True, time=True, speed=True, progress=True, elapsedTime=True, remainingTime=True,\n potentialEnergy=True, kineticEnergy=True, totalEnergy=True, temperature=True,\n separator='\\t')\n simulation.reporters.append(dataReporter_gt)\n for t in range(1000):\n if (t+1)%100 == 0:\n print(f'Finished {(t+1)*50} steps')\n state = simulation.context.getState(getPositions=True,\n getVelocities=True,\n getForces=True,\n enforcePeriodicBox=True)\n pos = state.getPositions(asNumpy=True).value_in_unit(unit.angstrom)\n vel = state.getVelocities(asNumpy=True).value_in_unit(unit.meter / unit.second)\n force = state.getForces(asNumpy=True).value_in_unit(unit.kilojoules_per_mole/unit.nanometer)\n\n np.savez(f'./water_data_tip4p/data_{seed}_{t}.npz',\n pos=pos,\n vel=vel,\n forces=force)\n simulation.step(50)\n\n\n"
] | [
[
"numpy.savez",
"numpy.matmul",
"numpy.cos",
"numpy.sin",
"numpy.mean",
"numpy.random.randn",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Intelligent-Systems-Lab/ISL-BCFL | [
"42ceb86708a76e28b31c22b33c15ee9a6a745ec7"
] | [
"script/app/agg.py"
] | [
"import os\n# import torch\nimport argparse\nimport base64\nimport sys\nimport io\n\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\ndef fullmodel2base64(model):\n buffer = io.BytesIO()\n torch.save(model, buffer)\n bg = buffer.getvalue()\n return base64.b64encode(bg).decode()\n\ndef base642fullmodel(modbase64):\n inputrpc = bytes(modbase64.encode())\n inputrpc_ = base64.b64decode(inputrpc)\n loadmodel = torch.load(io.BytesIO(inputrpc_))\n return loadmodel\n\n\nmodel_list = []\n\nf = open(sys.argv[1], \"r\")\n\nmodels = f.read().split(\",\")\n\nf.close()\n\nprint(models)\n\nfor m in models:\n model_list.append(base642fullmodel(m))\n\nnew_model_state = model_list[0].state_dict()\n\n#sum the weight of the model\nfor m in model_list[1:]:\n state_m = m.state_dict()\n for key in state_m:\n new_model_state[key] += state_m[key]\n\n#average the model weight\nfor key in new_model_state:\n new_model_state[key] /= len(model_list)\n\n\nnew_model = model_list[0]\nnew_model.load_state_dict(new_model_state)\n\noutput = fullmodel2base64(new_model)\n\nprint(output)\n"
] | [
[
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lanagarmire/granatumx | [
"3dee3a8fb2ba851c31a9f6338aef1817217769f9"
] | [
"g_packages/deepImpute/docker/deepimpute/deepimpute/multinet.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nimport binascii\nimport warnings\nimport tempfile\nfrom math import ceil\nfrom multiprocessing import cpu_count, sharedctypes\nfrom multiprocessing.pool import Pool\nfrom sklearn.metrics import r2_score\n\nfrom deepimpute.net import Net\nfrom deepimpute.normalizer import Normalizer\nfrom deepimpute.util import get_input_genes,get_target_genes\nfrom deepimpute.util import score_model\n\ndef newCoreInitializer(arr_to_populate):\n global sharedArray\n sharedArray = arr_to_populate\n\ndef trainNet(in_out, NN_param_i, data_i, labels):\n features, targets = in_out\n\n net = Net(**NN_param_i)\n net.fit(data_i, targetGenes=targets, predictorGenes=features, labels=labels)\n\n # retrieve the array\n params = list(NN_param_i.keys()) + ['targetGenes', 'NNid', 'predictorGenes']\n args2return = [(attr, getattr(net, attr)) for attr in params]\n return {k: v if k[0] != '_' else (k[1:], v) for k, v in args2return}\n\ndef predictNet(data_i, NN_param_i, labels):\n net = Net(**NN_param_i)\n data_i_ok = pd.DataFrame(np.reshape(data_i, list(map(len, labels))),\n index=labels[0], columns=labels[1])\n return net.predict(data_i_ok)\n\ndef trainOrPredict(args):\n in_out, NN_param_i, labels, mode = args\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n data_i = np.ctypeslib.as_array(sharedArray)\n if mode == \"predict\":\n return predictNet(data_i, NN_param_i, labels)\n return trainNet(in_out, NN_param_i, data_i, labels)\n\n\nclass MultiNet(object):\n def __init__(self, n_cores=4, predictorLimit=10, preproc='log_or_exp', runDir=os.path.join(tempfile.gettempdir(),'run'), seed=0, **NN_params):\n self._maxcores = n_cores\n self.predictorLimit = predictorLimit\n self.norm = Normalizer.fromName(preproc)\n self.runDir = runDir\n self.seed = seed\n self.NN_params = NN_params\n self.seed = seed\n self.NN_params['seed'] = seed\n\n if 'dims' not in self.NN_params.keys():\n self.NN_params['dims'] = [20,500]\n\n @property\n def maxcores(self):\n if self._maxcores == 'all':\n return cpu_count()\n else:\n return self._maxcores\n\n @maxcores.setter\n def maxcores(self, value):\n self._maxcores = value\n\n def get_params(self, deep=False):\n return self.__dict__\n\n def setIDandRundir(self,data):\n # set runID\n runID = binascii.b2a_hex(os.urandom(5))\n if type(runID) is bytes:\n runID = runID.decode()\n self.NN_params['runDir'] = os.path.join(self.runDir, str(runID))\n\n def getCores(self,NN_genes):\n n_runs = int(ceil(1.*len(NN_genes) / self.NN_params['dims'][1]))\n n_cores = min(self.maxcores, n_runs)\n self.NN_params['n_cores'] = max(1, int(self.maxcores / n_cores))\n return n_runs,n_cores\n\n def fit(self, data, NN_lim='auto', cell_subset=None):\n np.random.seed(seed=self.seed)\n\n df = pd.DataFrame(data)\n\n self.setIDandRundir(df)\n\n # Change the output dimension if the data has too few genes\n if df.shape[1] < self.NN_params['dims'][1]:\n self.NN_params['dims'][1] = df.shape[1]\n\n # Choose genes to impute\n genes_sort = df.quantile(.99).sort_values(ascending=False)\n NN_genes = get_target_genes(genes_sort,NN_lim=NN_lim)\n\n df_to_impute = df[NN_genes]\n\n n_runs,n_cores = self.getCores(NN_genes)\n\n\n # ------------------------# Subnetworks #------------------------#\n\n predictors = np.intersect1d(genes_sort.index[genes_sort>self.predictorLimit], NN_genes)\n print('Using {} genes as potential predictors'.format(len(predictors)))\n\n n_choose = int(len(NN_genes)/self.NN_params['dims'][1])\n\n subGenelists = 
np.random.choice(NN_genes,\n [n_choose, self.NN_params['dims'][1]],\n replace=False).tolist()\n if n_choose < n_runs:\n # Special case: for the last run, the output layer will have less nodes\n selectedGenes = np.reshape(subGenelists, -1)\n subGenelists.append(np.setdiff1d(NN_genes, selectedGenes).tolist())\n\n # ------------------------# Extracting input genes #------------------------#\n\n corrMatrix = 1 - np.abs(pd.DataFrame(np.corrcoef(df_to_impute.T),\n index=NN_genes, columns=NN_genes)[predictors])\n\n in_out_genes = get_input_genes(df_to_impute,self.NN_params['dims'],distanceMatrix=corrMatrix,\n targets=subGenelists,predictorLimit=self.predictorLimit)\n\n # ------------------------# Subsets for fitting #------------------------#\n\n n_cells = df_to_impute.shape[0]\n\n if type(cell_subset) is float or cell_subset == 1:\n n_cells = int(cell_subset * n_cells)\n\n elif type(cell_subset) is int:\n n_cells = cell_subset\n\n self.trainCells = df_to_impute.sample(n_cells,replace=False).index\n\n print('Starting training with {} cells ({:.1%}) on {} threads ({} cores/thread).'.\n format(n_cells, 1.*n_cells/df_to_impute.shape[0], n_cores, self.NN_params['n_cores']))\n\n # -------------------# Preprocessing (if any) #--------------------#\n\n df_to_impute = self.norm.fit(df_to_impute).transform(df_to_impute)\n\n # -------------------# Share matrix between subprocesses #--------------------#\n\n ''' Create memory chunk and put the matrix in it '''\n idx, cols = self.trainCells, df_to_impute.columns\n trainData = df_to_impute.loc[self.trainCells, :].values\n\n ''' Parallelize process with shared array '''\n childJobs = [(in_out, self.NN_params, (idx, cols), 'train')\n for in_out in in_out_genes]\n\n output_dicts = self.runOnMultipleCores(n_cores, trainData.flatten(), childJobs)\n\n self.networks = []\n for dictionnary in output_dicts:\n self.networks.append(Net(**dictionnary))\n\n return self\n\n def runOnMultipleCores(self, cores, data, childJobs):\n sharedArray = sharedctypes.RawArray('d', data)\n\n pool = Pool(processes=cores, initializer=newCoreInitializer, initargs=(sharedArray,))\n output_dicts = pool.map(trainOrPredict, childJobs)\n pool.close()\n pool.join()\n return output_dicts\n\n\n def predict(self, data, imputed_only=False, restore_pos_values=True):\n\n df = pd.DataFrame(data)\n\n ''' Create memory chunk and put the matrix in it '''\n idx, cols = df.index, df.columns\n df_norm = self.norm.fit(df).transform(df).values.flatten()\n\n ''' Parallelize process with shared array '''\n childJobs = [((12, 15), net.__dict__, (idx, cols), 'predict')\n for net in self.networks]\n\n output_dicts = self.runOnMultipleCores(self.maxcores, df_norm, childJobs)\n\n Y_imputed = pd.concat(output_dicts, axis=1)\n Y_not_imputed = df[[gene for gene in df.columns if gene not in Y_imputed.columns]]\n Y_total = self.norm.transform(pd.concat([Y_imputed, Y_not_imputed], axis=1)[df.columns],\n rev=True)\n if restore_pos_values:\n Y_total = Y_total.mask(df>0,df)\n if imputed_only:\n Y_total = Y_total[Y_imputed.columns]\n\n if type(data) == type(pd.DataFrame()):\n return Y_total\n else:\n return Y_total.values\n\n def score(self, data, metric=r2_score):\n imputedGenes = list(zip(*[ net.targetGenes for net in self.networks ]))\n return score_model(self,pd.DataFrame(data),metric=r2_score, cols=imputedGenes)\n"
] | [
[
"pandas.concat",
"numpy.random.seed",
"numpy.random.choice",
"numpy.ctypeslib.as_array",
"numpy.reshape",
"pandas.DataFrame",
"numpy.setdiff1d",
"numpy.intersect1d",
"numpy.corrcoef"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
XinChCh/singa | [
"93fd9da72694e68bfe3fb29d0183a65263d238a1"
] | [
"test/python/test_tensor.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# =============================================================================\nfrom __future__ import division\n\nimport math\nimport unittest\nimport random\nimport numpy as np\n\nfrom singa import tensor\nfrom singa import singa_wrap as singa_api\nfrom singa import autograd\n\nfrom cuda_helper import gpu_dev, cpu_dev\n\n\nclass TestTensorMethods(unittest.TestCase):\n\n def setUp(self):\n self.shape = (2, 3)\n self.t = tensor.Tensor(self.shape)\n self.s = tensor.Tensor(self.shape)\n self.t.set_value(0)\n self.s.set_value(0)\n\n def test_tensor_fields(self):\n t = self.t\n shape = self.shape\n self.assertTupleEqual(t.shape, shape)\n self.assertEqual(t.shape[0], shape[0])\n self.assertEqual(t.shape[1], shape[1])\n self.assertEqual(tensor.product(shape), 2 * 3)\n self.assertEqual(t.ndim(), 2)\n self.assertEqual(t.size(), 2 * 3)\n self.assertEqual(t.memsize(), 2 * 3 * tensor.sizeof(tensor.float32))\n self.assertFalse(t.is_transpose())\n\n def test_unary_operators(self):\n t = self.t\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)\n t += 1.23\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)\n t -= 0.23\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23 - 0.23)\n t *= 2.5\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5)\n t /= 2\n self.assertAlmostEqual(\n tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5 / 2)\n\n def test_binary_operators(self):\n t = self.t\n t += 3.2\n s = self.s\n s += 2.1\n a = t + s\n self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 + 2.1, 5)\n a = t - s\n self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 - 2.1, 5)\n a = t * s\n self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 * 2.1, 5)\n ''' not implemented yet\n a = t / s\n self.assertAlmostEqual(tensor.to_numpy(a)[0,0], 3.2/2.1, 5)\n '''\n\n def test_comparison_operators(self):\n t = self.t\n t += 3.45\n a = t < 3.45\n self.assertEqual(tensor.to_numpy(a)[0, 0], 0)\n a = t <= 3.45\n self.assertEqual(tensor.to_numpy(a)[0, 0], 1)\n a = t > 3.45\n self.assertEqual(tensor.to_numpy(a)[0, 0], 0)\n a = t >= 3.45\n self.assertEqual(tensor.to_numpy(a)[0, 0], 1)\n a = t == 3.45\n self.assertEqual(tensor.to_numpy(a)[0, 0], 1)\n a = tensor.lt(t, 3.45)\n self.assertEqual(tensor.to_numpy(a)[0, 0], 0)\n a = tensor.le(t, 3.45)\n self.assertEqual(tensor.to_numpy(a)[0, 0], 1)\n a = tensor.gt(t, 3.45)\n self.assertEqual(tensor.to_numpy(a)[0, 0], 0)\n a = tensor.ge(t, 3.45)\n self.assertEqual(tensor.to_numpy(a)[0, 0], 1)\n a = tensor.eq(t, 3.45)\n self.assertEqual(tensor.to_numpy(a)[0, 0], 1)\n\n def test_tensor_copy(self):\n t = tensor.Tensor((2, 3))\n t += 1.23\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)\n tc = t.copy()\n tdc = t.deepcopy()\n self.assertAlmostEqual(tensor.to_numpy(tc)[0, 
0], 1.23)\n self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)\n t += 1.23\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)\n self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)\n self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)\n\n def test_copy_data(self):\n t = self.t\n t += 1.23\n s = self.s\n s += 5.43\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)\n tensor.copy_data_to_from(t, s, 2)\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)\n self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)\n\n def test_global_method(self):\n t = self.t\n t += 12.34\n a = tensor.log(t)\n self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], math.log(12.34))\n\n def test_random(self):\n x = tensor.Tensor((1000,))\n x.gaussian(1, 0.01)\n self.assertAlmostEqual(tensor.average(x), 1, 3)\n\n def test_radd(self):\n x = tensor.Tensor((3,))\n x.set_value(1)\n y = 1 + x\n self.assertEqual(tensor.average(y), 2.)\n\n def test_rsub(self):\n x = tensor.Tensor((3,))\n x.set_value(1)\n y = 1 - x\n self.assertEqual(tensor.average(y), 0.)\n\n def test_rmul(self):\n x = tensor.Tensor((3,))\n x.set_value(1)\n y = 2 * x\n self.assertEqual(tensor.average(y), 2.)\n\n def test_rdiv(self):\n x = tensor.Tensor((3,))\n x.set_value(1)\n y = 2 / x\n self.assertEqual(tensor.average(y), 2.)\n\n def matmul_high_dim_helper(self, dev):\n configs = [\n [(1, 12, 7, 64), (1, 12, 64, 7)],\n [(1, 7, 768), (768, 768)],\n ]\n print()\n for config in configs:\n X = np.random.random(config[0]).astype(np.float32)\n x = tensor.from_numpy(X)\n x.to_device(dev)\n\n W = np.random.random(config[1]).astype(np.float32)\n w = tensor.from_numpy(W)\n w.to_device(dev)\n\n y_t = np.matmul(X, W)\n y = autograd.matmul(x, w)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), y_t, 3)\n\n def test_matmul_high_dim_cpu(self):\n self.matmul_high_dim_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_matmul_high_dim_gpu(self):\n self.matmul_high_dim_helper(gpu_dev)\n\n def test_tensor_inplace_api(self):\n \"\"\" tensor inplace methods alter internal state and also return self\n \"\"\"\n x = tensor.Tensor((3,))\n y = x.set_value(1)\n self.assertTrue(y is x)\n\n x = tensor.Tensor((3,))\n y = x.uniform(1, 2)\n self.assertTrue(y is x)\n\n x = tensor.Tensor((3,))\n y = x.bernoulli(1)\n self.assertTrue(y is x)\n\n x = tensor.Tensor((3,))\n y = x.gaussian(1, 2)\n self.assertTrue(y is x)\n\n def test_numpy_convert(self):\n a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)\n t = tensor.from_numpy(a)\n b = tensor.to_numpy(t)\n self.assertEqual(np.sum(a - b), 0)\n\n a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)\n t = tensor.from_numpy(a)\n b = tensor.to_numpy(t)\n self.assertEqual(np.sum(a - b), 0.)\n\n def test_transpose(self):\n a = np.array(\n [1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])\n a = np.reshape(a, (2, 3, 2))\n ta = tensor.from_numpy(a)\n\n A1 = np.transpose(a)\n tA1 = tensor.transpose(ta)\n TA1 = tensor.to_numpy(tA1)\n A2 = np.transpose(a, [0, 2, 1])\n tA2 = tensor.transpose(ta, [0, 2, 1])\n TA2 = tensor.to_numpy(tA2)\n\n np.testing.assert_array_almost_equal(TA1, A1)\n np.testing.assert_array_almost_equal(TA2, A2)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_gpu_6d_transpose(self,dev=gpu_dev):\n s0 = (2,3,4,5,6,7)\n axes1=[5,4,3,2,1,0]\n s1 = (2,7,6,5,4,3)\n s2 = (2,4,3,5,7,6)\n a = np.random.random(s1)\n\n ta = tensor.from_numpy(a)\n 
ta.to_device(dev)\n\n ta = tensor.reshape(ta,s1)\n ta = tensor.transpose(ta,axes1)\n ta = tensor.reshape(ta,s2)\n\n a = np.reshape(a,s1)\n a = np.transpose(a,axes1)\n a = np.reshape(a,s2)\n\n np.testing.assert_array_almost_equal(tensor.to_numpy(ta), a)\n\n def test_einsum(self):\n\n a = np.array(\n [1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])\n a = np.reshape(a, (2, 3, 2))\n ta = tensor.from_numpy(a)\n\n res1 = np.einsum('kij,kij->kij', a, a)\n tres1 = tensor.einsum('kij,kij->kij', ta, ta)\n Tres1 = tensor.to_numpy(tres1)\n res2 = np.einsum('kij,kih->kjh', a, a)\n tres2 = tensor.einsum('kij,kih->kjh', ta, ta)\n Tres2 = tensor.to_numpy(tres2)\n\n self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)\n self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)\n\n def test_repeat(self):\n\n a = np.array(\n [1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])\n a = np.reshape(a, (2, 3, 2))\n ta = tensor.from_numpy(a)\n\n ta_repeat1 = tensor.repeat(ta, 2, axis=None)\n a_repeat1 = np.repeat(a, 2, axis=None)\n Ta_repeat1 = tensor.to_numpy(ta_repeat1)\n ta_repeat2 = tensor.repeat(ta, 4, axis=1)\n a_repeat2 = np.repeat(a, 4, axis=1)\n Ta_repeat2 = tensor.to_numpy(ta_repeat2)\n\n self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)\n self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)\n\n def test_sum(self):\n a = np.array(\n [1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])\n a = np.reshape(a, (2, 3, 2))\n ta = tensor.from_numpy(a)\n\n a_sum0 = np.sum(a)\n ta_sum0 = tensor.sum(ta)\n Ta_sum0 = tensor.to_numpy(ta_sum0)\n a_sum1 = np.sum(a, axis=1)\n ta_sum1 = tensor.sum(ta, axis=1)\n Ta_sum1 = tensor.to_numpy(ta_sum1)\n a_sum2 = np.sum(a, axis=2)\n ta_sum2 = tensor.sum(ta, axis=2)\n Ta_sum2 = tensor.to_numpy(ta_sum2)\n\n self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)\n self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)\n self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)\n\n def test_tensordot(self):\n a = np.array(\n [1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])\n a = np.reshape(a, (2, 3, 2))\n\n ta = tensor.from_numpy(a)\n\n res1 = np.tensordot(a, a, axes=1)\n tres1 = tensor.tensordot(ta, ta, axes=1)\n Tres1 = tensor.to_numpy(tres1)\n self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)\n np.testing.assert_array_almost_equal(Tres1, res1)\n\n res2 = np.tensordot(a, a, axes=([0, 1], [2, 1]))\n tres2 = tensor.tensordot(ta, ta, axes=([0, 1], [2, 1]))\n np.testing.assert_array_almost_equal(tensor.to_numpy(tres2), res2)\n\n def test_reshape(self):\n a = np.array([[[1.1, 1.1, 1.4], [1.1, 1.1, 1.1]],\n [[1.1, 1.1, 1.3], [1.6, 1.1, 1.2]]])\n ta = tensor.from_numpy(a)\n tb = tensor.reshape(ta, [2, 6])\n self.assertAlmostEqual(tb.shape[0], 2., places=3)\n self.assertAlmostEqual(tb.shape[1], 6., places=3)\n np.testing.assert_array_almost_equal(tensor.to_numpy(tb),\n a.reshape((2, 6)))\n\n def test_transpose_then_reshape(self):\n a = np.array([[[1.1, 1.1], [1.1, 1.1], [1.4, 1.3]],\n [[1.1, 1.6], [1.1, 1.1], [1.1, 1.2]]])\n TRANSPOSE_AXES = (2, 0, 1)\n RESHAPE_DIMS = (2, 6)\n\n ta = tensor.from_numpy(a)\n ta = ta.transpose(TRANSPOSE_AXES)\n ta = ta.reshape(RESHAPE_DIMS)\n\n np.testing.assert_array_almost_equal(\n tensor.to_numpy(ta),\n np.reshape(a.transpose(TRANSPOSE_AXES), RESHAPE_DIMS))\n\n def _concatenate_helper(self, dev):\n np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)\n np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)\n np3 = np.concatenate((np1, 
np2), axis=3)\n\n t1 = tensor.Tensor(device=dev, data=np1)\n t2 = tensor.Tensor(device=dev, data=np2)\n\n t3 = tensor.concatenate((t1, t2), 3)\n\n np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)\n\n def test_concatenate_cpu(self):\n self._concatenate_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_concatenate_gpu(self):\n self._concatenate_helper(gpu_dev)\n\n def _subscription_helper(self, dev):\n np1 = np.random.random((5, 5, 5, 5)).astype(np.float32)\n sg_tensor = tensor.Tensor(device=dev, data=np1)\n sg_tensor_ret = sg_tensor[1:3, :, 1:, :-1]\n np.testing.assert_array_almost_equal((tensor.to_numpy(sg_tensor_ret)),\n np1[1:3, :, 1:, :-1])\n\n def test_subscription_cpu(self):\n self._subscription_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_subscription_gpu(self):\n self._subscription_helper(gpu_dev)\n\n def _ceil_helper(self, dev):\n\n np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)\n np1 = np1 * 10\n np2 = np.ceil(np1)\n\n t1 = tensor.Tensor(device=dev, data=np1)\n\n t2 = tensor.ceil(t1)\n\n np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np2)\n\n def test_ceil_cpu(self):\n self._ceil_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_ceil_gpu(self):\n self._ceil_helper(gpu_dev)\n\n def _astype_helper(self, dev):\n shape1 = [2, 3]\n shape2 = [3, 2]\n\n np_flt = np.random.random(shape1).astype(np.float32)\n np_flt = np_flt * 10 - 5\n\n np_int = np_flt.astype(np.int32)\n np_flt2 = np_int.astype(np.float32)\n\n t2 = tensor.Tensor(device=dev, data=np_flt)\n t2 = t2.as_type('int')\n np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np_int)\n\n t1 = t2.reshape(shape2)\n np.testing.assert_array_almost_equal(tensor.to_numpy(t1),\n np_int.reshape(shape2))\n\n t1 = t1.as_type('float')\n np.testing.assert_array_almost_equal(tensor.to_numpy(t1),\n np_flt2.reshape(shape2))\n\n def test_astype_cpu(self):\n self._astype_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_astype_gpu(self):\n self._astype_helper(gpu_dev)\n\n def _3d_matmul_helper(self, dev):\n np_x1 = np.random.randn(2, 3, 4).astype(np.float32)\n np_x2 = np.random.randn(2, 4, 3).astype(np.float32)\n x1 = tensor.from_numpy(np_x1)\n x1.to_device(dev)\n x2 = tensor.from_numpy(np_x2)\n x2.to_device(dev)\n y = autograd.matmul(x1, x2)\n np_y = np.matmul(np_x1, np_x2)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)\n\n np_x1 = np.random.randn(2, 3, 4).astype(np.float32)\n np_x2 = np.random.randn(2, 4, 5).astype(np.float32)\n x1 = tensor.from_numpy(np_x1)\n x1.to_device(dev)\n x2 = tensor.from_numpy(np_x2)\n x2.to_device(dev)\n y = autograd.matmul(x1, x2)\n np_y = np.matmul(np_x1, np_x2)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)\n\n def test_3d_matmul_cpu(self):\n self._3d_matmul_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_3d_matmul_gpu(self):\n self._3d_matmul_helper(gpu_dev)\n\n def _4d_matmul_helper(self, dev):\n np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)\n np_x2 = np.random.randn(2, 12, 64, 256).astype(np.float32)\n x1 = tensor.from_numpy(np_x1)\n x1.to_device(dev)\n x2 = tensor.from_numpy(np_x2)\n x2.to_device(dev)\n y = autograd.matmul(x1, x2)\n np_y = np.matmul(np_x1, np_x2)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)\n\n np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)\n np_x2 = 
np.random.randn(2, 12, 64, 1024).astype(np.float32)\n x1 = tensor.from_numpy(np_x1)\n x1.to_device(dev)\n x2 = tensor.from_numpy(np_x2)\n x2.to_device(dev)\n y = autograd.matmul(x1, x2)\n np_y = np.matmul(np_x1, np_x2)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)\n\n def test_4d_matmul_cpu(self):\n self._4d_matmul_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_4d_matmul_gpu(self):\n self._4d_matmul_helper(gpu_dev)\n\n def _matmul_transpose_helper(self, dev):\n\n X = np.random.random((1, 256, 12, 64)).astype(np.float32)\n x = tensor.from_numpy(X)\n x.to_device(dev)\n\n W = np.random.random((1, 256, 12, 64)).astype(np.float32)\n w = tensor.from_numpy(W)\n w.to_device(dev)\n\n X = np.transpose(X, (0, 2, 1, 3))\n W = np.transpose(W, (0, 2, 1, 3))\n W = np.transpose(W, (0, 1, 3, 2))\n Y = np.matmul(X, W)\n\n x = autograd.transpose(x, (0, 2, 1, 3))\n w = autograd.transpose(w, (0, 2, 1, 3))\n w = autograd.transpose(w, (0, 1, 3, 2))\n y = autograd.matmul(x, w)\n\n np.testing.assert_array_almost_equal(tensor.to_numpy(x), X)\n np.testing.assert_array_almost_equal(tensor.to_numpy(w), W)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), Y)\n\n def test_matmul_transpose_cpu(self):\n self._matmul_transpose_helper(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_matmul_transpose_gpu(self):\n self._matmul_transpose_helper(gpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_gaussian_gpu(self, dev=gpu_dev):\n x = tensor.Tensor((3, 5, 3, 5), device=dev)\n x.gaussian(0, 1)\n x = tensor.Tensor((4, 5, 3, 2), device=dev)\n x.gaussian(0, 1)\n\n def _kfloat32_int(self, dev=gpu_dev):\n np.random.seed(0)\n x_val = np.random.random((2, 3)).astype(np.float32) * 10\n x = tensor.from_numpy(x_val)\n x.to_device(dev)\n scalar = np.random.random((1,))[0] * 100\n y = x + scalar\n self.assertEqual(y.dtype, tensor.float32)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_kfloat32_int_gpu(self):\n self._kfloat32_int(gpu_dev)\n\n def test_kfloat32_int_cpu(self):\n self._kfloat32_int(cpu_dev)\n\n def _kint_float(self, dev=gpu_dev):\n np.random.seed(0)\n x_val = np.random.randint(0, 10, (2, 3))\n x = tensor.from_numpy(x_val)\n x.to_device(dev)\n scalar = random.random() * 100\n y = x + scalar\n self.assertEqual(y.dtype, tensor.float32)\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar, 5)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_kint_float_gpu(self):\n self._kint_float(gpu_dev)\n\n def test_kint_float_cpu(self):\n self._kint_float(cpu_dev)\n\n def _kint_kint(self, dev=gpu_dev):\n a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],\n [1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],\n [[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],\n [1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],\n [[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],\n [-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],\n dtype=np.int32)\n b_np = np.array([[[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],\n [7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],\n [[-11, 9, 4, -15, 14], [18, 11, -1, -10, 10],\n [-4, 12, 2, 9, 3], [7, 0, 17, 1, 4]],\n [[18, -13, -12, 9, -11], [19, -4, -7, 19, 14],\n [18, 9, -8, 19, -2], [8, 9, -1, 6, 9]]],\n dtype=np.int32)\n ta = tensor.from_numpy(a_np)\n tb = tensor.from_numpy(b_np)\n ta.to_device(dev)\n tb.to_device(dev)\n y = ta - tb\n 
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)\n\n def test_kint_kint_cpu(self, dev=cpu_dev):\n self._kint_kint(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_kint_kint_gpu(self, dev=gpu_dev):\n self._kint_kint(gpu_dev)\n\n def _kint_kint_bc(self, dev=gpu_dev):\n a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],\n [1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],\n [[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],\n [1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],\n [[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],\n [-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],\n dtype=np.int32)\n b_np = np.array([[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],\n [7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],\n dtype=np.int32)\n ta = tensor.from_numpy(a_np)\n tb = tensor.from_numpy(b_np)\n ta.to_device(dev)\n tb.to_device(dev)\n y = ta - tb\n np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)\n\n def test_kint_kint_bc_cpu(self, dev=cpu_dev):\n self._kint_kint_bc(cpu_dev)\n\n @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')\n def test_kint_kint_bc_gpu(self, dev=gpu_dev):\n self._kint_kint_bc(gpu_dev)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.random",
"numpy.einsum",
"numpy.random.seed",
"numpy.asarray",
"numpy.reshape",
"numpy.matmul",
"numpy.concatenate",
"numpy.ceil",
"numpy.random.randint",
"numpy.tensordot",
"numpy.random.randn",
"numpy.transpose",
"numpy.repeat",
"numpy.array",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hyperfraise/action-detection | [
"a3ee263ed701ed251cd0a79830ef796889ff366e"
] | [
"ssn_dataset.py"
] | [
"import torch.utils.data as data\n\nimport os\nimport os.path\nfrom numpy.random import randint\nfrom ops.io import load_proposal_file\nfrom transforms import *\nfrom ops.utils import temporal_iou\n\n\nclass SSNInstance:\n def __init__(\n self,\n start_frame,\n end_frame,\n video_frame_count,\n fps=1,\n label=None,\n best_iou=None,\n overlap_self=None,\n ):\n self.start_frame = start_frame\n self.end_frame = min(end_frame, video_frame_count)\n self._label = label\n self.fps = fps\n\n self.coverage = (end_frame - start_frame) / video_frame_count\n\n self.best_iou = best_iou\n self.overlap_self = overlap_self\n\n self.loc_reg = None\n self.size_reg = None\n\n def compute_regression_targets(self, gt_list, fg_thresh):\n if self.best_iou < fg_thresh:\n # background proposals do not need this\n return\n\n # find the groundtruth instance with the highest IOU\n ious = [\n temporal_iou(\n (self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)\n )\n for gt in gt_list\n ]\n best_gt_id = np.argmax(ious)\n\n best_gt = gt_list[best_gt_id]\n\n prop_center = (self.start_frame + self.end_frame) / 2\n gt_center = (best_gt.start_frame + best_gt.end_frame) / 2\n\n prop_size = self.end_frame - self.start_frame + 1\n gt_size = best_gt.end_frame - best_gt.start_frame + 1\n\n # get regression target:\n # (1). center shift propotional to the proposal duration\n # (2). logarithm of the groundtruth duration over proposal duraiton\n\n self.loc_reg = (gt_center - prop_center) / prop_size\n try:\n self.size_reg = math.log(gt_size / prop_size)\n except:\n print((gt_size, prop_size, self.start_frame, self.end_frame))\n raise\n\n @property\n def start_time(self):\n return self.start_frame / self.fps\n\n @property\n def end_time(self):\n return self.end_frame / self.fps\n\n @property\n def label(self):\n return self._label if self._label is not None else -1\n\n @property\n def regression_targets(self):\n return [self.loc_reg, self.size_reg] if self.loc_reg is not None else [0, 0]\n\n\nclass SSNVideoRecord:\n def __init__(self, prop_record):\n self._data = prop_record\n\n frame_count = int(self._data[1])\n\n # build instance record\n self.gt = [\n SSNInstance(\n int(x[1]), int(x[2]), frame_count, label=int(x[0]), best_iou=1.0\n )\n for x in self._data[2]\n if int(x[2]) > int(x[1])\n ]\n\n self.gt = list([x for x in self.gt if x.start_frame < frame_count])\n\n self.proposals = [\n SSNInstance(\n int(x[3]),\n int(x[4]),\n frame_count,\n label=int(x[0]),\n best_iou=float(x[1]),\n overlap_self=float(x[2]),\n )\n for x in self._data[3]\n if int(x[4]) > int(x[3])\n ]\n\n self.proposals = list(\n [x for x in self.proposals if x.start_frame < frame_count]\n )\n\n @property\n def id(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n return int(self._data[1])\n\n def get_fg(self, fg_thresh, with_gt=True):\n fg = [p for p in self.proposals if p.best_iou > fg_thresh]\n if with_gt:\n fg.extend(self.gt)\n\n for x in fg:\n x.compute_regression_targets(self.gt, fg_thresh)\n return fg\n\n def get_negatives(\n self,\n incomplete_iou_thresh,\n bg_iou_thresh,\n bg_coverage_thresh=0.01,\n incomplete_overlap_thresh=0.7,\n ):\n\n tag = [0] * len(self.proposals)\n\n incomplete_props = []\n background_props = []\n\n for i in range(len(tag)):\n if (\n self.proposals[i].best_iou < incomplete_iou_thresh\n and self.proposals[i].overlap_self > incomplete_overlap_thresh\n ):\n tag[i] = 1 # incomplete\n incomplete_props.append(self.proposals[i])\n\n for i in range(len(tag)):\n if (\n tag[i] == 0\n and 
self.proposals[i].best_iou < bg_iou_thresh\n and self.proposals[i].coverage > bg_coverage_thresh\n ):\n background_props.append(self.proposals[i])\n return incomplete_props, background_props\n\n\nclass SSNDataSet(data.Dataset):\n def __init__(\n self,\n root_path,\n prop_file=None,\n body_seg=5,\n aug_seg=2,\n video_centric=True,\n new_length=1,\n modality=\"RGB\",\n image_tmpl=\"img_{:05d}.jpg\",\n transform=None,\n random_shift=True,\n test_mode=False,\n prop_per_video=8,\n fg_ratio=1,\n bg_ratio=1,\n incomplete_ratio=6,\n fg_iou_thresh=0.7,\n bg_iou_thresh=0.01,\n incomplete_iou_thresh=0.3,\n bg_coverage_thresh=0.02,\n incomplete_overlap_thresh=0.7,\n gt_as_fg=True,\n reg_stats=None,\n test_interval=6,\n verbose=True,\n exclude_empty=True,\n epoch_multiplier=1,\n ):\n\n self.root_path = root_path\n self.prop_file = prop_file\n self.verbose = verbose\n\n self.body_seg = body_seg\n self.aug_seg = aug_seg\n self.video_centric = video_centric\n self.exclude_empty = exclude_empty\n self.epoch_multiplier = epoch_multiplier\n\n self.new_length = new_length\n self.modality = modality\n self.image_tmpl = image_tmpl\n self.transform = transform\n self.random_shift = random_shift\n self.test_mode = test_mode\n self.test_interval = test_interval\n\n self.fg_iou_thresh = fg_iou_thresh\n self.incomplete_iou_thresh = incomplete_iou_thresh\n self.bg_iou_thresh = bg_iou_thresh\n\n self.bg_coverage_thresh = bg_coverage_thresh\n self.incomplete_overlap_thresh = incomplete_overlap_thresh\n\n self.starting_ratio = 0.5\n self.ending_ratio = 0.5\n\n self.gt_as_fg = gt_as_fg\n\n denum = fg_ratio + bg_ratio + incomplete_ratio\n\n self.fg_per_video = int(prop_per_video * (fg_ratio / denum))\n self.bg_per_video = int(prop_per_video * (bg_ratio / denum))\n self.incomplete_per_video = (\n prop_per_video - self.fg_per_video - self.bg_per_video\n )\n\n self._parse_prop_file(stats=reg_stats)\n\n def _load_image(self, directory, idx):\n if self.modality == \"RGB\" or self.modality == \"RGBDiff\":\n return [\n Image.open(\n os.path.join(directory, self.image_tmpl.format(idx))\n ).convert(\"RGB\")\n ]\n elif self.modality == \"Flow\":\n x_img = Image.open(\n os.path.join(directory, self.image_tmpl.format(\"x\", idx))\n ).convert(\"L\")\n y_img = Image.open(\n os.path.join(directory, self.image_tmpl.format(\"y\", idx))\n ).convert(\"L\")\n\n return [x_img, y_img]\n\n def _parse_prop_file(self, stats=None):\n prop_info = load_proposal_file(self.prop_file)\n\n self.video_list = [SSNVideoRecord(p) for p in prop_info]\n\n if self.exclude_empty:\n self.video_list = list([x for x in self.video_list if len(x.gt) > 0])\n\n self.video_dict = {v.id: v for v in self.video_list}\n\n # construct three pools:\n # 1. Foreground\n # 2. Background\n # 3. 
Incomplete\n\n self.fg_pool = []\n self.bg_pool = []\n self.incomp_pool = []\n\n for v in self.video_list:\n self.fg_pool.extend(\n [(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)]\n )\n\n incomp, bg = v.get_negatives(\n self.incomplete_iou_thresh,\n self.bg_iou_thresh,\n self.bg_coverage_thresh,\n self.incomplete_overlap_thresh,\n )\n\n self.incomp_pool.extend([(v.id, prop) for prop in incomp])\n self.bg_pool.extend([(v.id, prop) for prop in bg])\n\n if stats is None:\n self._compute_regresssion_stats()\n else:\n self.stats = stats\n\n if self.verbose:\n print(\n (\n \"\"\"\n \n SSNDataset: Proposal file {prop_file} parsed.\n \n There are {pnum} usable proposals from {vnum} videos.\n {fnum} foreground proposals\n {inum} incomplete_proposals\n {bnum} background_proposals\n \n Sampling config:\n FG/BG/INC: {fr}/{br}/{ir}\n Video Centric: {vc}\n \n Epoch size multiplier: {em}\n \n Regression Stats:\n Location: mean {stats[0][0]:.05f} std {stats[1][0]:.05f}\n Duration: mean {stats[0][1]:.05f} std {stats[1][1]:.05f}\n \"\"\".format(\n prop_file=self.prop_file,\n pnum=len(self.fg_pool)\n + len(self.bg_pool)\n + len(self.incomp_pool),\n fnum=len(self.fg_pool),\n inum=len(self.incomp_pool),\n bnum=len(self.bg_pool),\n fr=self.fg_per_video,\n br=self.bg_per_video,\n ir=self.incomplete_per_video,\n vnum=len(self.video_dict),\n vc=self.video_centric,\n stats=self.stats,\n em=self.epoch_multiplier,\n )\n )\n )\n else:\n print(\n (\n \"\"\"\n SSNDataset: Proposal file {prop_file} parsed. \n \"\"\".format(\n prop_file=self.prop_file\n )\n )\n )\n\n def _video_centric_sampling(self, video):\n\n fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)\n incomp, bg = video.get_negatives(\n self.incomplete_iou_thresh,\n self.bg_iou_thresh,\n self.bg_coverage_thresh,\n self.incomplete_overlap_thresh,\n )\n\n def sample_video_proposals(\n proposal_type, video_id, video_pool, requested_num, dataset_pool\n ):\n if len(video_pool) == 0:\n # if there is nothing in the video pool, go fetch from the dataset pool\n return [\n (dataset_pool[x], proposal_type)\n for x in np.random.choice(\n len(dataset_pool), requested_num, replace=False\n )\n ]\n else:\n replicate = len(video_pool) < requested_num\n idx = np.random.choice(\n len(video_pool), requested_num, replace=replicate\n )\n return [((video_id, video_pool[x]), proposal_type) for x in idx]\n\n out_props = []\n out_props.extend(\n sample_video_proposals(0, video.id, fg, self.fg_per_video, self.fg_pool)\n ) # sample foreground\n out_props.extend(\n sample_video_proposals(\n 1, video.id, incomp, self.incomplete_per_video, self.incomp_pool\n )\n ) # sample incomp.\n out_props.extend(\n sample_video_proposals(2, video.id, bg, self.bg_per_video, self.bg_pool)\n ) # sample background\n\n return out_props\n\n def _random_sampling(self):\n out_props = []\n\n out_props.extend(\n [\n (x, 0)\n for x in np.random.choice(\n self.fg_pool, self.fg_per_video, replace=False\n )\n ]\n )\n out_props.extend(\n [\n (x, 1)\n for x in np.random.choice(\n self.incomp_pool, self.incomplete_per_video, replace=False\n )\n ]\n )\n out_props.extend(\n [\n (x, 2)\n for x in np.random.choice(\n self.bg_pool, self.bg_per_video, replace=False\n )\n ]\n )\n\n return out_props\n\n def _sample_indices(self, valid_length, num_seg):\n \"\"\"\n\n :param record: VideoRecord\n :return: list\n \"\"\"\n\n average_duration = (valid_length + 1) // num_seg\n if average_duration > 0:\n # normal cases\n offsets = np.multiply(list(range(num_seg)), average_duration) + randint(\n 
average_duration, size=num_seg\n )\n elif valid_length > num_seg:\n offsets = np.sort(randint(valid_length, size=num_seg))\n else:\n offsets = np.zeros((num_seg,))\n\n return offsets\n\n def _get_val_indices(self, valid_length, num_seg):\n\n if valid_length > num_seg:\n tick = valid_length / float(num_seg)\n offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_seg)])\n else:\n offsets = np.zeros((num_seg,))\n\n return offsets\n\n def _sample_ssn_indices(self, prop, frame_cnt):\n start_frame = prop.start_frame + 1\n end_frame = prop.end_frame\n\n duration = end_frame - start_frame + 1\n assert duration != 0, (prop.start_frame, prop.end_frame, prop.best_iou)\n valid_length = duration - self.new_length\n\n valid_starting = max(1, start_frame - int(duration * self.starting_ratio))\n valid_ending = min(\n frame_cnt - self.new_length + 1,\n end_frame + int(duration * self.ending_ratio),\n )\n\n valid_starting_length = start_frame - valid_starting - self.new_length + 1\n valid_ending_length = valid_ending - end_frame - self.new_length + 1\n\n starting_scale = (valid_starting_length + self.new_length - 1) / (\n duration * self.starting_ratio\n )\n ending_scale = (valid_ending_length + self.new_length - 1) / (\n duration * self.ending_ratio\n )\n\n # get starting\n starting_offsets = (\n self._sample_indices(valid_starting_length, self.aug_seg)\n if self.random_shift\n else self._get_val_indices(valid_starting_length, self.aug_seg)\n ) + valid_starting\n course_offsets = (\n self._sample_indices(valid_length, self.body_seg)\n if self.random_shift\n else self._get_val_indices(valid_length, self.body_seg)\n ) + start_frame\n ending_offsets = (\n self._sample_indices(valid_ending_length, self.aug_seg)\n if self.random_shift\n else self._get_val_indices(valid_ending_length, self.aug_seg)\n ) + end_frame\n\n offsets = np.concatenate((starting_offsets, course_offsets, ending_offsets))\n stage_split = [\n self.aug_seg,\n self.aug_seg + self.body_seg,\n self.aug_seg * 2 + self.body_seg,\n ]\n return offsets, starting_scale, ending_scale, stage_split\n\n def _load_prop_data(self, prop):\n\n # read frame count\n frame_cnt = self.video_dict[prop[0][0]].num_frames\n\n # sample segment indices\n prop_indices, starting_scale, ending_scale, stage_split = self._sample_ssn_indices(\n prop[0][1], frame_cnt\n )\n\n # turn prop into standard format\n\n # get label\n if prop[1] == 0:\n label = prop[0][1].label\n elif prop[1] == 1:\n label = prop[0][1].label # incomplete\n elif prop[1] == 2:\n label = 0 # background\n else:\n raise ValueError()\n frames = []\n for idx, seg_ind in enumerate(prop_indices):\n p = int(seg_ind)\n for x in range(self.new_length):\n frames.extend(self._load_image(prop[0][0], min(frame_cnt, p + x)))\n\n # get regression target\n if prop[1] == 0:\n reg_targets = prop[0][1].regression_targets\n reg_targets = (\n (reg_targets[0] - self.stats[0][0]) / self.stats[1][0],\n (reg_targets[1] - self.stats[0][1]) / self.stats[1][1],\n )\n else:\n reg_targets = (0.0, 0.0)\n\n return (\n frames,\n label,\n reg_targets,\n starting_scale,\n ending_scale,\n stage_split,\n prop[1],\n )\n\n def _compute_regresssion_stats(self):\n if self.verbose:\n print(\"computing regression target normalizing constants\")\n targets = []\n for video in self.video_list:\n fg = video.get_fg(self.fg_iou_thresh, False)\n for p in fg:\n targets.append(list(p.regression_targets))\n\n self.stats = np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))\n\n def get_test_data(self, video, test_interval, 
gen_batchsize=4):\n        props = video.proposals\n        video_id = video.id\n        frame_cnt = video.num_frames\n        frame_ticks = (\n            np.arange(0, frame_cnt - self.new_length, test_interval, dtype=np.int) + 1\n        )\n\n        num_sampled_frames = len(frame_ticks)\n\n        # avoid empty proposal list\n        if len(props) == 0:\n            props.append(SSNInstance(0, frame_cnt - 1, frame_cnt))\n\n        # process proposals to subsampled sequences\n        rel_prop_list = []\n        proposal_tick_list = []\n        scaling_list = []\n        for proposal in props:\n            rel_prop = proposal.start_frame / frame_cnt, proposal.end_frame / frame_cnt\n            rel_duration = rel_prop[1] - rel_prop[0]\n            rel_starting_duration = rel_duration * self.starting_ratio\n            rel_ending_duration = rel_duration * self.ending_ratio\n            rel_starting = rel_prop[0] - rel_starting_duration\n            rel_ending = rel_prop[1] + rel_ending_duration\n\n            real_rel_starting = max(0.0, rel_starting)\n            real_rel_ending = min(1.0, rel_ending)\n\n            starting_scaling = (rel_prop[0] - real_rel_starting) / rel_starting_duration\n            ending_scaling = (real_rel_ending - rel_prop[1]) / rel_ending_duration\n\n            proposal_ticks = (\n                int(real_rel_starting * num_sampled_frames),\n                int(rel_prop[0] * num_sampled_frames),\n                int(rel_prop[1] * num_sampled_frames),\n                int(real_rel_ending * num_sampled_frames),\n            )\n\n            rel_prop_list.append(rel_prop)\n            proposal_tick_list.append(proposal_ticks)\n            scaling_list.append((starting_scaling, ending_scaling))\n\n        # load frames\n        # Since there are many frames for each video during testing, instead of returning the read frames,\n        # we return a generator which gives the frames in small batches; this lowers the memory burden\n        # and runtime overhead. Usually setting batchsize=4 would fit most cases.\n        def frame_gen(batchsize):\n            frames = []\n            cnt = 0\n            for idx, seg_ind in enumerate(frame_ticks):\n                p = int(seg_ind)\n                for x in range(self.new_length):\n                    frames.extend(self._load_image(video_id, min(frame_cnt, p + x)))\n                cnt += 1\n\n                if cnt % batchsize == 0:\n                    frames = self.transform(frames)\n                    yield frames\n                    frames = []\n\n            if len(frames):\n                frames = self.transform(frames)\n                yield frames\n\n        return (\n            frame_gen(gen_batchsize),\n            len(frame_ticks),\n            torch.from_numpy(np.array(rel_prop_list)),\n            torch.from_numpy(np.array(proposal_tick_list)),\n            torch.from_numpy(np.array(scaling_list)),\n        )\n\n    def get_training_data(self, index):\n        if self.video_centric:\n            video = self.video_list[index]\n            props = self._video_centric_sampling(video)\n        else:\n            props = self._random_sampling()\n\n        out_frames = []\n        out_prop_len = []\n        out_prop_scaling = []\n        out_prop_type = []\n        out_prop_labels = []\n        out_prop_reg_targets = []\n        out_stage_split = []\n        for idx, p in enumerate(props):\n            prop_frames, prop_label, reg_targets, starting_scale, ending_scale, stage_split, prop_type = self._load_prop_data(\n                p\n            )\n\n            processed_frames = self.transform(prop_frames)\n            out_frames.append(processed_frames)\n            out_prop_len.append(self.body_seg + 2 * self.aug_seg)\n            out_prop_scaling.append([starting_scale, ending_scale])\n            out_prop_labels.append(prop_label)\n            out_prop_reg_targets.append(reg_targets)\n            out_prop_type.append(prop_type)\n            out_stage_split.append(stage_split)\n\n        out_prop_len = torch.from_numpy(np.array(out_prop_len))\n        out_prop_scaling = torch.from_numpy(\n            np.array(out_prop_scaling, dtype=np.float32)\n        )\n        out_prop_labels = torch.from_numpy(np.array(out_prop_labels))\n        out_prop_reg_targets = torch.from_numpy(\n            np.array(out_prop_reg_targets, dtype=np.float32)\n        )\n        out_prop_type = torch.from_numpy(np.array(out_prop_type))\n        out_stage_split = 
torch.from_numpy(np.array(out_stage_split))\n out_frames = torch.cat(out_frames)\n return (\n out_frames,\n out_prop_len,\n out_prop_scaling,\n out_prop_type,\n out_prop_labels,\n out_prop_reg_targets,\n out_stage_split,\n )\n\n def get_all_gt(self):\n gt_list = []\n for video in self.video_list:\n vid = video.id\n gt_list.extend(\n [\n [\n vid,\n x.label - 1,\n x.start_frame / video.num_frames,\n x.end_frame / video.num_frames,\n ]\n for x in video.gt\n ]\n )\n return gt_list\n\n def __getitem__(self, index):\n real_index = index % len(self.video_list)\n if self.test_mode:\n return self.get_test_data(self.video_list[real_index], self.test_interval)\n else:\n return self.get_training_data(real_index)\n\n def __len__(self):\n return len(self.video_list) * self.epoch_multiplier\n"
] | [
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
delldu/EDSR | [
"98752b57a3091e693c523e710380d369f9913041"
] | [
"src/model/vdsr.py"
] | [
"from model import common\n\nimport torch.nn as nn\nimport torch.nn.init as init\n\nurl = {\n 'r20f64': ''\n}\n\ndef make_model(args, parent=False):\n return VDSR(args)\n\nclass VDSR(nn.Module):\n def __init__(self, args, conv=common.default_conv):\n super(VDSR, self).__init__()\n\n n_resblocks = args.n_resblocks\n n_feats = args.n_feats\n kernel_size = 3\n url_name = 'r{}f{}'.format(n_resblocks, n_feats)\n if url_name in url:\n self.url = url[url_name]\n else:\n self.url = None\n\n self.sub_mean = common.MeanShift(args.rgb_range)\n self.add_mean = common.MeanShift(args.rgb_range, sign=1)\n\n def basic_block(in_channels, out_channels, act):\n return common.BasicBlock(\n conv, in_channels, out_channels, kernel_size,\n bias=True, bn=False, act=act\n )\n\n # define body module\n m_body = []\n m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True)))\n for _ in range(n_resblocks - 2):\n m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True)))\n m_body.append(basic_block(n_feats, args.n_colors, None))\n\n self.body = nn.Sequential(*m_body)\n\n def forward(self, x):\n x = self.sub_mean(x)\n res = self.body(x)\n res += x\n x = self.add_mean(res)\n\n return x \n\n\n# cd ..(src), export PYTHONPATH=`pwd`\n# if __name__ == '__main__':\n# import torch\n# import utility\n# from option import args\n\n# torch.manual_seed(args.seed)\n# checkpoint = utility.checkpoint(args)\n\n# print(args)\n# model = VDSR(args)\n# print(model)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NunoEdgarGFlowHub/PyBaMM | [
"4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190",
"4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190",
"4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190",
"4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190",
"4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190"
] | [
"tests/unit/test_parameters/test_current_functions.py",
"pybamm/expression_tree/binary_operators.py",
"pybamm/expression_tree/scalar.py",
"tests/integration/test_models/test_full_battery_models/test_lithium_ion/test_external/test_external_current_collector.py",
"tests/integration/test_models/test_full_battery_models/test_lead_acid/test_loqs.py"
] | [
"#\n# Tests for current input functions\n#\nimport pybamm\nimport numbers\nimport unittest\nimport numpy as np\n\n\nclass TestCurrentFunctions(unittest.TestCase):\n def test_constant_current(self):\n # test simplify\n current = pybamm.electrical_parameters.current_with_time\n parameter_values = pybamm.ParameterValues(\n {\n \"Typical current [A]\": 2,\n \"Typical timescale [s]\": 1,\n \"Current function [A]\": 2,\n }\n )\n processed_current = parameter_values.process_symbol(current)\n self.assertIsInstance(processed_current.simplify(), pybamm.Scalar)\n\n def test_get_current_data(self):\n # test process parameters\n dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time\n parameter_values = pybamm.ParameterValues(\n {\n \"Typical current [A]\": 2,\n \"Typical timescale [s]\": 1,\n \"Current function [A]\": \"[current data]car_current\",\n }\n )\n dimensional_current_eval = parameter_values.process_symbol(dimensional_current)\n\n def current(t):\n return dimensional_current_eval.evaluate(t=t)\n\n standard_tests = StandardCurrentFunctionTests([current], always_array=True)\n standard_tests.test_all()\n\n def test_user_current(self):\n # create user-defined sin function\n def my_fun(t, A, omega):\n return A * pybamm.sin(2 * np.pi * omega * t)\n\n # choose amplitude and frequency\n A = pybamm.electrical_parameters.I_typ\n omega = pybamm.Parameter(\"omega\")\n\n def current(t):\n return my_fun(t, A, omega)\n\n # set and process parameters\n parameter_values = pybamm.ParameterValues(\n {\n \"Typical current [A]\": 2,\n \"Typical timescale [s]\": 1,\n \"omega\": 3,\n \"Current function [A]\": current,\n }\n )\n dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time\n dimensional_current_eval = parameter_values.process_symbol(dimensional_current)\n\n def user_current(t):\n return dimensional_current_eval.evaluate(t=t)\n\n # check output types\n standard_tests = StandardCurrentFunctionTests([user_current])\n standard_tests.test_all()\n\n # check output correct value\n time = np.linspace(0, 3600, 600)\n np.testing.assert_array_almost_equal(\n user_current(time), 2 * np.sin(2 * np.pi * 3 * time)\n )\n\n\nclass StandardCurrentFunctionTests(object):\n def __init__(self, function_list, always_array=False):\n self.function_list = function_list\n self.always_array = always_array\n\n def test_output_type(self):\n for function in self.function_list:\n if self.always_array is True:\n assert isinstance(function(0), np.ndarray)\n else:\n assert isinstance(function(0), numbers.Number)\n assert isinstance(function(np.zeros(3)), np.ndarray)\n assert isinstance(function(np.zeros([3, 3])), np.ndarray)\n\n def test_all(self):\n self.test_output_type()\n\n\nif __name__ == \"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n pybamm.settings.debug_mode = True\n unittest.main()\n",
"#\n# Binary operator classes\n#\nimport pybamm\n\nimport numpy as np\nimport numbers\nfrom scipy.sparse import issparse, csr_matrix\n\n\ndef is_scalar_zero(expr):\n \"\"\"\n Utility function to test if an expression evaluates to a constant scalar zero\n \"\"\"\n if expr.is_constant():\n result = expr.evaluate_ignoring_errors(t=None)\n return isinstance(result, numbers.Number) and result == 0\n else:\n return False\n\n\ndef is_matrix_zero(expr):\n \"\"\"\n Utility function to test if an expression evaluates to a constant matrix zero\n \"\"\"\n if expr.is_constant():\n result = expr.evaluate_ignoring_errors(t=None)\n return (issparse(result) and result.count_nonzero() == 0) or (\n isinstance(result, np.ndarray) and np.all(result == 0)\n )\n else:\n return False\n\n\ndef is_scalar_one(expr):\n \"\"\"\n Utility function to test if an expression evaluates to a constant scalar one\n \"\"\"\n if expr.is_constant():\n result = expr.evaluate_ignoring_errors(t=None)\n return isinstance(result, numbers.Number) and result == 1\n else:\n return False\n\n\ndef is_matrix_one(expr):\n \"\"\"\n Utility function to test if an expression evaluates to a constant matrix one\n \"\"\"\n if expr.is_constant():\n result = expr.evaluate_ignoring_errors(t=None)\n return (issparse(result) and np.all(result.toarray() == 1)) or (\n isinstance(result, np.ndarray) and np.all(result == 1)\n )\n else:\n return False\n\n\ndef zeros_of_shape(shape):\n \"\"\"\n Utility function to create a scalar zero, or a vector or matrix of zeros of\n the correct shape\n \"\"\"\n if shape == ():\n return pybamm.Scalar(0)\n else:\n if len(shape) == 1 or shape[1] == 1:\n return pybamm.Vector(np.zeros(shape))\n else:\n return pybamm.Matrix(csr_matrix(shape))\n\n\nclass BinaryOperator(pybamm.Symbol):\n \"\"\"A node in the expression tree representing a binary operator (e.g. 
`+`, `*`)\n\n Derived classes will specify the particular operator\n\n **Extends**: :class:`Symbol`\n\n Parameters\n ----------\n\n name : str\n name of the node\n left : :class:`Symbol` or :class:`Number`\n lhs child node (converted to :class:`Scalar` if Number)\n right : :class:`Symbol` or :class:`Number`\n rhs child node (converted to :class:`Scalar` if Number)\n\n \"\"\"\n\n def __init__(self, name, left, right):\n left, right = self.format(left, right)\n\n domain = self.get_children_domains(left.domain, right.domain)\n auxiliary_domains = self.get_children_auxiliary_domains([left, right])\n super().__init__(\n name,\n children=[left, right],\n domain=domain,\n auxiliary_domains=auxiliary_domains,\n )\n self.left = self.children[0]\n self.right = self.children[1]\n\n def format(self, left, right):\n \"Format children left and right into compatible form\"\n # Turn numbers into scalars\n if isinstance(left, numbers.Number):\n left = pybamm.Scalar(left)\n if isinstance(right, numbers.Number):\n right = pybamm.Scalar(right)\n\n # Check both left and right are pybamm Symbols\n if not (isinstance(left, pybamm.Symbol) and isinstance(right, pybamm.Symbol)):\n raise NotImplementedError(\n \"\"\"'{}' not implemented for symbols of type {} and {}\"\"\".format(\n self.__class__.__name__, type(left), type(right)\n )\n )\n\n # Do some broadcasting in special cases, to avoid having to do this manually\n if left.domain != [] and right.domain != []:\n if (\n left.domain != right.domain\n and \"secondary\" in right.auxiliary_domains\n and left.domain == right.auxiliary_domains[\"secondary\"]\n ):\n left = pybamm.PrimaryBroadcast(left, right.domain)\n if (\n right.domain != left.domain\n and \"secondary\" in left.auxiliary_domains\n and right.domain == left.auxiliary_domains[\"secondary\"]\n ):\n right = pybamm.PrimaryBroadcast(right, left.domain)\n\n return left, right\n\n def __str__(self):\n \"\"\" See :meth:`pybamm.Symbol.__str__()`. \"\"\"\n return \"{!s} {} {!s}\".format(self.left, self.name, self.right)\n\n def get_children_domains(self, ldomain, rdomain):\n \"Combine domains from children in appropriate way\"\n if ldomain == rdomain:\n return ldomain\n elif ldomain == []:\n return rdomain\n elif rdomain == []:\n return ldomain\n else:\n raise pybamm.DomainError(\n \"\"\"\n children must have same (or empty) domains, but left.domain is '{}'\n and right.domain is '{}'\n \"\"\".format(\n ldomain, rdomain\n )\n )\n\n def new_copy(self):\n \"\"\" See :meth:`pybamm.Symbol.new_copy()`. \"\"\"\n\n # process children\n new_left = self.left.new_copy()\n new_right = self.right.new_copy()\n\n # make new symbol, ensure domain(s) remain the same\n out = self._binary_new_copy(new_left, new_right)\n out.copy_domains(self)\n\n return out\n\n def _binary_new_copy(self, left, right):\n \"Default behaviour for new_copy\"\n return self.__class__(left, right)\n\n def evaluate(self, t=None, y=None, y_dot=None, inputs=None, known_evals=None):\n \"\"\" See :meth:`pybamm.Symbol.evaluate()`. 
\"\"\"\n if known_evals is not None:\n id = self.id\n try:\n return known_evals[id], known_evals\n except KeyError:\n left, known_evals = self.left.evaluate(t, y, y_dot, inputs, known_evals)\n right, known_evals = self.right.evaluate(\n t, y, y_dot, inputs, known_evals\n )\n value = self._binary_evaluate(left, right)\n known_evals[id] = value\n return value, known_evals\n else:\n left = self.left.evaluate(t, y, y_dot, inputs)\n right = self.right.evaluate(t, y, y_dot, inputs)\n return self._binary_evaluate(left, right)\n\n def _evaluate_for_shape(self):\n \"\"\" See :meth:`pybamm.Symbol.evaluate_for_shape()`. \"\"\"\n left = self.children[0].evaluate_for_shape()\n right = self.children[1].evaluate_for_shape()\n return self._binary_evaluate(left, right)\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" Calculate the jacobian of a binary operator. \"\"\"\n raise NotImplementedError\n\n def _binary_simplify(self, new_left, new_right):\n \"\"\" Simplify a binary operator. Default behaviour: unchanged\"\"\"\n return self._binary_new_copy(new_left, new_right)\n\n def _binary_evaluate(self, left, right):\n \"\"\" Perform binary operation on nodes 'left' and 'right'. \"\"\"\n raise NotImplementedError\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return self.left.evaluates_on_edges(dimension) or self.right.evaluates_on_edges(\n dimension\n )\n\n\nclass Power(BinaryOperator):\n \"\"\"A node in the expression tree representing a `**` power operator\n\n **Extends:** :class:`BinaryOperator`\n \"\"\"\n\n def __init__(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n super().__init__(\"**\", left, right)\n\n def _diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n # apply chain rule and power rule\n base, exponent = self.orphans\n # derivative if variable is in the base\n diff = exponent * (base ** (exponent - 1)) * base.diff(variable)\n # derivative if variable is in the exponent (rare, check separately to avoid\n # unecessarily big tree)\n if any(variable.id == x.id for x in exponent.pre_order()):\n diff += (base ** exponent) * pybamm.log(base) * exponent.diff(variable)\n return diff\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n # apply chain rule and power rule\n left, right = self.orphans\n if left.evaluates_to_number() and right.evaluates_to_number():\n return pybamm.Scalar(0)\n elif right.evaluates_to_number():\n return (right * left ** (right - 1)) * left_jac\n elif left.evaluates_to_number():\n return (left ** right * pybamm.log(left)) * right_jac\n else:\n return (left ** (right - 1)) * (\n right * left_jac + left * pybamm.log(left) * right_jac\n )\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n # don't raise RuntimeWarning for NaNs\n with np.errstate(invalid=\"ignore\"):\n return left ** right\n\n def _binary_simplify(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_simplify()`. 
\"\"\"\n\n # anything to the power of zero is one\n if is_scalar_zero(right):\n return pybamm.Scalar(1)\n\n # zero to the power of anything is zero\n if is_scalar_zero(left):\n return pybamm.Scalar(0)\n\n # anything to the power of one is itself\n if is_scalar_one(right):\n return left\n\n return self.__class__(left, right)\n\n\nclass Addition(BinaryOperator):\n \"\"\"A node in the expression tree representing an addition operator\n\n **Extends:** :class:`BinaryOperator`\n \"\"\"\n\n def __init__(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n super().__init__(\"+\", left, right)\n\n def _diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n return self.left.diff(variable) + self.right.diff(variable)\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n return left_jac + right_jac\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n return left + right\n\n def _binary_simplify(self, left, right):\n \"\"\"\n See :meth:`pybamm.BinaryOperator._binary_simplify()`.\n\n Note\n ----\n We check for scalars first, then matrices. This is because\n (Zero Matrix) + (Zero Scalar)\n should return (Zero Matrix), not (Zero Scalar).\n \"\"\"\n\n # anything added by a scalar zero returns the other child\n if is_scalar_zero(left):\n return right\n if is_scalar_zero(right):\n return left\n # Check matrices after checking scalars\n if is_matrix_zero(left):\n if isinstance(right, pybamm.Scalar):\n return pybamm.Array(right.value * np.ones(left.shape_for_testing))\n else:\n return right\n if is_matrix_zero(right):\n if isinstance(left, pybamm.Scalar):\n return pybamm.Array(left.value * np.ones(right.shape_for_testing))\n else:\n return left\n\n return pybamm.simplify_addition_subtraction(self.__class__, left, right)\n\n\nclass Subtraction(BinaryOperator):\n \"\"\"A node in the expression tree representing a subtraction operator\n\n **Extends:** :class:`BinaryOperator`\n \"\"\"\n\n def __init__(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n\n super().__init__(\"-\", left, right)\n\n def _diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n return self.left.diff(variable) - self.right.diff(variable)\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n return left_jac - right_jac\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n return left - right\n\n def _binary_simplify(self, left, right):\n \"\"\"\n See :meth:`pybamm.BinaryOperator._binary_simplify()`.\n\n Note\n ----\n We check for scalars first, then matrices. 
This is because\n (Zero Matrix) - (Zero Scalar)\n should return (Zero Matrix), not -(Zero Scalar).\n \"\"\"\n\n # anything added by a scalar zero returns the other child\n if is_scalar_zero(left):\n return -right\n if is_scalar_zero(right):\n return left\n # Check matrices after checking scalars\n if is_matrix_zero(left):\n if isinstance(right, pybamm.Scalar):\n return pybamm.Array(-right.value * np.ones(left.shape_for_testing))\n else:\n return -right\n if is_matrix_zero(right):\n if isinstance(left, pybamm.Scalar):\n return pybamm.Array(left.value * np.ones(right.shape_for_testing))\n else:\n return left\n\n return pybamm.simplify_addition_subtraction(self.__class__, left, right)\n\n\nclass Multiplication(BinaryOperator):\n \"\"\"\n A node in the expression tree representing a multiplication operator\n (Hadamard product). Overloads cases where the \"*\" operator would usually return a\n matrix multiplication (e.g. scipy.sparse.coo.coo_matrix)\n\n **Extends:** :class:`BinaryOperator`\n \"\"\"\n\n def __init__(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n\n super().__init__(\"*\", left, right)\n\n def _diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n # apply product rule\n left, right = self.orphans\n return left.diff(variable) * right + left * right.diff(variable)\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n # apply product rule\n left, right = self.orphans\n if left.evaluates_to_number() and right.evaluates_to_number():\n return pybamm.Scalar(0)\n elif left.evaluates_to_number():\n return left * right_jac\n elif right.evaluates_to_number():\n return right * left_jac\n else:\n return right * left_jac + left * right_jac\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n\n if issparse(left):\n return csr_matrix(left.multiply(right))\n elif issparse(right):\n # Hadamard product is commutative, so we can switch right and left\n return csr_matrix(right.multiply(left))\n else:\n return left * right\n\n def _binary_simplify(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_simplify()`. \"\"\"\n\n # simplify multiply by scalar zero, being careful about shape\n if is_scalar_zero(left):\n return zeros_of_shape(right.shape_for_testing)\n if is_scalar_zero(right):\n return zeros_of_shape(left.shape_for_testing)\n\n # if one of the children is a zero matrix, we have to be careful about shapes\n if is_matrix_zero(left) or is_matrix_zero(right):\n shape = (left * right).shape\n return zeros_of_shape(shape)\n\n # anything multiplied by a scalar one returns itself\n if is_scalar_one(left):\n return right\n if is_scalar_one(right):\n return left\n\n return pybamm.simplify_multiplication_division(self.__class__, left, right)\n\n\nclass MatrixMultiplication(BinaryOperator):\n \"\"\"A node in the expression tree representing a matrix multiplication operator\n\n **Extends:** :class:`BinaryOperator`\n \"\"\"\n\n def __init__(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n\n super().__init__(\"@\", left, right)\n\n def diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol.diff()`. \"\"\"\n # We shouldn't need this\n raise NotImplementedError(\n \"diff not implemented for symbol of type 'MatrixMultiplication'\"\n )\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. 
\"\"\"\n # We only need the case where left is an array and right\n # is a (slice of a) state vector, e.g. for discretised spatial\n # operators of the form D @ u (also catch cases of (-D) @ u)\n left, right = self.orphans\n if isinstance(left, pybamm.Array) or (\n isinstance(left, pybamm.Negate) and isinstance(left.child, pybamm.Array)\n ):\n left = pybamm.Matrix(csr_matrix(left.evaluate()))\n return left @ right_jac\n else:\n raise NotImplementedError(\n \"\"\"jac of 'MatrixMultiplication' is only\n implemented for left of type 'pybamm.Array',\n not {}\"\"\".format(\n left.__class__\n )\n )\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n return left @ right\n\n def _binary_simplify(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_simplify()`. \"\"\"\n if is_matrix_zero(left) or is_matrix_zero(right):\n shape = (left @ right).shape\n return zeros_of_shape(shape)\n\n return pybamm.simplify_multiplication_division(self.__class__, left, right)\n\n\nclass Division(BinaryOperator):\n \"\"\"A node in the expression tree representing a division operator\n\n **Extends:** :class:`BinaryOperator`\n \"\"\"\n\n def __init__(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n super().__init__(\"/\", left, right)\n\n def _diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n # apply quotient rule\n top, bottom = self.orphans\n return (top.diff(variable) * bottom - top * bottom.diff(variable)) / bottom ** 2\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n # apply quotient rule\n left, right = self.orphans\n if left.evaluates_to_number() and right.evaluates_to_number():\n return pybamm.Scalar(0)\n elif left.evaluates_to_number():\n return -left / right ** 2 * right_jac\n elif right.evaluates_to_number():\n return left_jac / right\n else:\n return (right * left_jac - left * right_jac) / right ** 2\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n\n if issparse(left):\n return csr_matrix(left.multiply(1 / right))\n else:\n if isinstance(right, numbers.Number) and right == 0:\n # don't raise RuntimeWarning for NaNs\n with np.errstate(invalid=\"ignore\"):\n return left * np.inf\n else:\n return left / right\n\n def _binary_simplify(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_simplify()`. \"\"\"\n\n # zero divided by zero returns nan scalar\n if is_scalar_zero(left) and is_scalar_zero(right):\n return pybamm.Scalar(np.nan)\n\n # zero divided by anything returns zero (being careful about shape)\n if is_scalar_zero(left):\n return zeros_of_shape(right.shape_for_testing)\n\n # matrix zero divided by anything returns matrix zero (i.e. itself)\n if is_matrix_zero(left):\n return left\n\n # anything divided by zero returns inf\n if is_scalar_zero(right):\n if left.shape_for_testing == ():\n return pybamm.Scalar(np.inf)\n else:\n return pybamm.Array(np.inf * np.ones(left.shape_for_testing))\n\n # anything divided by one is itself\n if is_scalar_one(right):\n return left\n\n return pybamm.simplify_multiplication_division(self.__class__, left, right)\n\n\nclass Inner(BinaryOperator):\n \"\"\"\n A node in the expression tree which represents the inner (or dot) product. 
This\n operator should be used to take the inner product of two mathematical vectors\n (as opposed to the computational vectors arrived at post-discretisation) of the\n form v = v_x e_x + v_y e_y + v_z e_z where v_x, v_y, v_z are scalars\n and e_x, e_y, e_z are x-y-z-directional unit vectors. For v and w mathematical\n vectors, inner product returns v_x * w_x + v_y * w_y + v_z * w_z. In addition,\n for some spatial discretisations mathematical vector quantities (such as\n i = grad(phi) ) are evaluated on a different part of the grid to mathematical\n scalars (e.g. for finite volume mathematical scalars are evaluated on the nodes but\n mathematical vectors are evaluated on cell edges). Therefore, inner also transfers\n the inner product of the vector onto the scalar part of the grid if required\n by a particular discretisation.\n\n **Extends:** :class:`BinaryOperator`\n \"\"\"\n\n def __init__(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n super().__init__(\"inner product\", left, right)\n\n def _diff(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n # apply product rule\n left, right = self.orphans\n return left.diff(variable) * right + left * right.diff(variable)\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n # apply product rule\n left, right = self.orphans\n if left.evaluates_to_number() and right.evaluates_to_number():\n return pybamm.Scalar(0)\n elif left.evaluates_to_number():\n return left * right_jac\n elif right.evaluates_to_number():\n return right * left_jac\n else:\n return right * left_jac + left * right_jac\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n\n if issparse(left):\n return left.multiply(right)\n elif issparse(right):\n # Hadamard product is commutative, so we can switch right and left\n return right.multiply(left)\n else:\n return left * right\n\n def _binary_simplify(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_simplify()`. \"\"\"\n\n # simplify multiply by scalar zero, being careful about shape\n if is_scalar_zero(left):\n return zeros_of_shape(right.shape_for_testing)\n if is_scalar_zero(right):\n return zeros_of_shape(left.shape_for_testing)\n\n # if one of the children is a zero matrix, we have to be careful about shapes\n if is_matrix_zero(left) or is_matrix_zero(right):\n shape = (left * right).shape\n return zeros_of_shape(shape)\n\n # anything multiplied by a scalar one returns itself\n if is_scalar_one(left):\n return right\n if is_scalar_one(right):\n return left\n\n return pybamm.simplify_multiplication_division(self.__class__, left, right)\n\n def evaluates_on_edges(self, dimension):\n \"\"\" See :meth:`pybamm.Symbol.evaluates_on_edges()`. \"\"\"\n return False\n\n\ndef inner(left, right):\n \"\"\"\n Return inner product of two symbols.\n \"\"\"\n return pybamm.Inner(left, right)\n\n\nclass Heaviside(BinaryOperator):\n \"\"\"A node in the expression tree representing a heaviside step function.\n\n Adding this operation to the rhs or algebraic equations in a model can often cause a\n discontinuity in the solution. For the specific cases listed below, this will be\n automatically handled by the solver. 
In the general case, you can explicitly tell\n    the solver of discontinuities by adding a :class:`Event` object with\n    :class:`EventType` DISCONTINUITY to the model's list of events.\n\n    In the case where the Heaviside function is of the form `pybamm.t < x`, `pybamm.t <=\n    x`, `x < pybamm.t`, or `x <= pybamm.t`, where `x` is any constant equation, this\n    DISCONTINUITY event will automatically be added by the solver.\n\n    **Extends:** :class:`BinaryOperator`\n    \"\"\"\n\n    def __init__(self, name, left, right):\n        \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n        super().__init__(name, left, right)\n\n    def diff(self, variable):\n        \"\"\" See :meth:`pybamm.Symbol.diff()`. \"\"\"\n        # Heaviside should always be multiplied by something else so hopefully don't\n        # need to worry about shape\n        return pybamm.Scalar(0)\n\n    def _binary_jac(self, left_jac, right_jac):\n        \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n        # Heaviside should always be multiplied by something else so hopefully don't\n        # need to worry about shape\n        return pybamm.Scalar(0)\n\n\nclass EqualHeaviside(Heaviside):\n    \"A heaviside function with equality (return 1 when left = right)\"\n\n    def __init__(self, left, right):\n        \"\"\" See :meth:`pybamm.BinaryOperator.__init__()`. \"\"\"\n        super().__init__(\"<=\", left, right)\n\n    def __str__(self):\n        \"\"\" See :meth:`pybamm.Symbol.__str__()`. \"\"\"\n        return \"{!s} <= {!s}\".format(self.left, self.right)\n\n    def _binary_evaluate(self, left, right):\n        \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n        # don't raise RuntimeWarning for NaNs\n        with np.errstate(invalid=\"ignore\"):\n            return left <= right\n\n\nclass NotEqualHeaviside(Heaviside):\n    \"A heaviside function without equality (return 0 when left = right)\"\n\n    def __init__(self, left, right):\n        super().__init__(\"<\", left, right)\n\n    def __str__(self):\n        \"\"\" See :meth:`pybamm.Symbol.__str__()`. \"\"\"\n        return \"{!s} < {!s}\".format(self.left, self.right)\n\n    def _binary_evaluate(self, left, right):\n        \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n        # don't raise RuntimeWarning for NaNs\n        with np.errstate(invalid=\"ignore\"):\n            return left < right\n\n\nclass Minimum(BinaryOperator):\n    \" Returns the smaller of two objects \"\n\n    def __init__(self, left, right):\n        super().__init__(\"minimum\", left, right)\n\n    def __str__(self):\n        \"\"\" See :meth:`pybamm.Symbol.__str__()`. \"\"\"\n        return \"minimum({!s}, {!s})\".format(self.left, self.right)\n\n    def _diff(self, variable):\n        \"\"\" See :meth:`pybamm.Symbol._diff()`. \"\"\"\n        left, right = self.orphans\n        return (left <= right) * left.diff(variable) + (left > right) * right.diff(\n            variable\n        )\n\n    def _binary_jac(self, left_jac, right_jac):\n        \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n        left, right = self.orphans\n        return (left <= right) * left_jac + (left > right) * right_jac\n\n    def _binary_evaluate(self, left, right):\n        \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n        # don't raise RuntimeWarning for NaNs\n        return np.minimum(left, right)\n\n\nclass Maximum(BinaryOperator):\n    \" Returns the larger of two objects \"\n\n    def __init__(self, left, right):\n        super().__init__(\"maximum\", left, right)\n\n    def __str__(self):\n        \"\"\" See :meth:`pybamm.Symbol.__str__()`. \"\"\"\n        return \"maximum({!s}, {!s})\".format(self.left, self.right)\n\n    def _diff(self, variable):\n        \"\"\" See :meth:`pybamm.Symbol._diff()`. 
\"\"\"\n left, right = self.orphans\n return (left >= right) * left.diff(variable) + (left < right) * right.diff(\n variable\n )\n\n def _binary_jac(self, left_jac, right_jac):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_jac()`. \"\"\"\n left, right = self.orphans\n return (left >= right) * left_jac + (left < right) * right_jac\n\n def _binary_evaluate(self, left, right):\n \"\"\" See :meth:`pybamm.BinaryOperator._binary_evaluate()`. \"\"\"\n # don't raise RuntimeWarning for NaNs\n return np.maximum(left, right)\n\n\ndef minimum(left, right):\n \"\"\"\n Returns the smaller of two objects. Not to be confused with :meth:`pybamm.min`,\n which returns min function of child.\n \"\"\"\n return pybamm.simplify_if_constant(Minimum(left, right), keep_domains=True)\n\n\ndef maximum(left, right):\n \"\"\"\n Returns the larger of two objects. Not to be confused with :meth:`pybamm.max`,\n which returns max function of child.\n \"\"\"\n return pybamm.simplify_if_constant(Maximum(left, right), keep_domains=True)\n\n\ndef source(left, right, boundary=False):\n \"\"\"A convinience function for creating (part of) an expression tree representing\n a source term. This is necessary for spatial methods where the mass matrix\n is not the identity (e.g. finite element formulation with piecwise linear\n basis functions). The left child is the symbol representing the source term\n and the right child is the symbol of the equation variable (currently, the\n finite element formulation in PyBaMM assumes all functions are constructed\n using the same basis, and the matrix here is constructed accoutning for the\n boundary conditions of the right child). The method returns the matrix-vector\n product of the mass matrix (adjusted to account for any Dirichlet boundary\n conditions imposed the the right symbol) and the discretised left symbol.\n\n Parameters\n ----------\n\n left : :class:`Symbol`\n The left child node, which represents the expression for the source term.\n right : :class:`Symbol`\n The right child node. This is the symbol whose boundary conditions are\n accounted for in the construction of the mass matrix.\n boundary : bool, optional\n If True, then the mass matrix should is assembled over the boundary,\n corresponding to a source term which only acts on the boundary of the\n domain. If False (default), the matrix is assembled over the entire domain,\n corresponding to a source term in the bulk.\n\n \"\"\"\n # Broadcast if left is number\n if isinstance(left, numbers.Number):\n left = pybamm.PrimaryBroadcast(left, \"current collector\")\n\n if left.domain != [\"current collector\"] or right.domain != [\"current collector\"]:\n raise pybamm.DomainError(\n \"\"\"'source' only implemented in the 'current collector' domain,\n but symbols have domains {} and {}\"\"\".format(\n left.domain, right.domain\n )\n )\n if boundary:\n return pybamm.BoundaryMass(right) @ left\n else:\n return pybamm.Mass(right) @ left\n",
"#\n# Scalar class\n#\nimport pybamm\nimport numpy as np\n\n\nclass Scalar(pybamm.Symbol):\n \"\"\"A node in the expression tree representing a scalar value\n\n **Extends:** :class:`Symbol`\n\n Parameters\n ----------\n\n value : numeric\n the value returned by the node when evaluated\n name : str, optional\n the name of the node. Defaulted to ``str(value)``\n if not provided\n domain : iterable of str, optional\n list of domains the parameter is valid over, defaults to empty list\n\n \"\"\"\n\n def __init__(self, value, name=None, domain=[]):\n \"\"\"\n\n \"\"\"\n # set default name if not provided\n self.value = value\n if name is None:\n name = str(self.value)\n\n super().__init__(name, domain=domain)\n\n @property\n def value(self):\n \"\"\"the value returned by the node when evaluated\"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = np.float64(value)\n\n def set_id(self):\n \"\"\" See :meth:`pybamm.Symbol.set_id()`. \"\"\"\n # We must include the value in the hash, since different scalars can be\n # indistinguishable by class, name and domain alone\n self._id = hash(\n (self.__class__, self.name) + tuple(self.domain) + tuple(str(self._value))\n )\n\n def _base_evaluate(self, t=None, y=None, y_dot=None, inputs=None):\n \"\"\" See :meth:`pybamm.Symbol._base_evaluate()`. \"\"\"\n return self._value\n\n def _jac(self, variable):\n \"\"\" See :meth:`pybamm.Symbol._jac()`. \"\"\"\n return pybamm.Scalar(0)\n\n def new_copy(self):\n \"\"\" See :meth:`pybamm.Symbol.new_copy()`. \"\"\"\n return Scalar(self.value, self.name, self.domain)\n",
"#\n# Tests for external submodels\n#\nimport pybamm\nimport unittest\nimport numpy as np\n\n\nclass TestExternalCC(unittest.TestCase):\n @unittest.skipIf(not pybamm.have_idaklu(), \"idaklu solver is not installed\")\n def test_2p1d(self):\n model_options = {\n \"current collector\": \"potential pair\",\n \"dimensionality\": 2,\n \"external submodels\": [\"current collector\"],\n }\n model = pybamm.lithium_ion.DFN(model_options)\n yz_pts = 3\n var_pts = {\n pybamm.standard_spatial_vars.x_n: 4,\n pybamm.standard_spatial_vars.x_s: 4,\n pybamm.standard_spatial_vars.x_p: 4,\n pybamm.standard_spatial_vars.r_n: 4,\n pybamm.standard_spatial_vars.r_p: 4,\n pybamm.standard_spatial_vars.y: yz_pts,\n pybamm.standard_spatial_vars.z: yz_pts,\n }\n solver = pybamm.IDAKLUSolver()\n sim = pybamm.Simulation(model, var_pts=var_pts, solver=solver)\n\n # Simulate 100 seconds\n t_eval = np.linspace(0, 100, 3)\n\n for i in np.arange(1, len(t_eval) - 1):\n dt = t_eval[i + 1] - t_eval[i]\n\n # provide phi_s_n and i_cc\n phi_s_n = np.zeros((yz_pts ** 2, 1))\n i_boundary_cc = np.ones((yz_pts ** 2, 1))\n external_variables = {\n \"Negative current collector potential\": phi_s_n,\n \"Current collector current density\": i_boundary_cc,\n }\n\n sim.step(dt, external_variables=external_variables)\n\n # obtain phi_s_n from the pybamm solution at the current time\n phi_s_p = sim.get_variable_array(\"Positive current collector potential\")\n\n self.assertTrue(phi_s_p.shape, (yz_pts ** 2, 1))\n\n\nif __name__ == \"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n unittest.main()\n",
"#\n# Tests for the lead-acid LOQS model\n#\nimport pybamm\nimport tests\n\nimport unittest\nimport numpy as np\n\n\nclass TestLOQS(unittest.TestCase):\n def test_basic_processing(self):\n model = pybamm.lead_acid.LOQS()\n modeltest = tests.StandardModelTest(model)\n modeltest.test_all()\n\n def test_optimisations(self):\n model = pybamm.lead_acid.LOQS()\n optimtest = tests.OptimisationsTest(model)\n\n original = optimtest.evaluate_model()\n simplified = optimtest.evaluate_model(simplify=True)\n using_known_evals = optimtest.evaluate_model(use_known_evals=True)\n simp_and_known = optimtest.evaluate_model(simplify=True, use_known_evals=True)\n simp_and_python = optimtest.evaluate_model(simplify=True, to_python=True)\n np.testing.assert_array_almost_equal(original, simplified)\n np.testing.assert_array_almost_equal(original, using_known_evals)\n np.testing.assert_array_almost_equal(original, simp_and_known)\n np.testing.assert_array_almost_equal(original, simp_and_python)\n\n def test_set_up(self):\n model = pybamm.lead_acid.LOQS()\n optimtest = tests.OptimisationsTest(model)\n optimtest.set_up_model(simplify=False, to_python=True)\n optimtest.set_up_model(simplify=True, to_python=True)\n optimtest.set_up_model(simplify=False, to_python=False)\n optimtest.set_up_model(simplify=True, to_python=False)\n\n def test_charge(self):\n model = pybamm.lead_acid.LOQS()\n parameter_values = model.default_parameter_values\n parameter_values.update({\"Current function [A]\": -1})\n modeltest = tests.StandardModelTest(model, parameter_values=parameter_values)\n modeltest.test_all()\n\n def test_zero_current(self):\n model = pybamm.lead_acid.LOQS()\n parameter_values = model.default_parameter_values\n parameter_values.update({\"Current function [A]\": 0})\n modeltest = tests.StandardModelTest(model, parameter_values=parameter_values)\n modeltest.test_all()\n\n def test_basic_processing_with_convection(self):\n model = pybamm.lead_acid.LOQS({\"convection\": \"uniform transverse\"})\n modeltest = tests.StandardModelTest(model)\n modeltest.test_all()\n\n def test_thermal(self):\n options = {\"thermal\": \"lumped\"}\n model = pybamm.lead_acid.LOQS(options)\n modeltest = tests.StandardModelTest(model)\n modeltest.test_all()\n\n options = {\"thermal\": \"x-full\"}\n model = pybamm.lead_acid.LOQS(options)\n modeltest = tests.StandardModelTest(model)\n modeltest.test_all()\n\n def test_basic_processing_1plus1D(self):\n options = {\"current collector\": \"potential pair\", \"dimensionality\": 1}\n model = pybamm.lead_acid.LOQS(options)\n var = pybamm.standard_spatial_vars\n var_pts = {\n var.x_n: 5,\n var.x_s: 5,\n var.x_p: 5,\n var.y: 5,\n var.z: 5,\n }\n modeltest = tests.StandardModelTest(model, var_pts=var_pts)\n modeltest.test_all(skip_output_tests=True)\n\n options = {\n \"current collector\": \"potential pair\",\n \"dimensionality\": 1,\n \"convection\": \"full transverse\",\n }\n model = pybamm.lead_acid.LOQS(options)\n modeltest = tests.StandardModelTest(model, var_pts=var_pts)\n modeltest.test_all(skip_output_tests=True)\n\n\nif __name__ == \"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n unittest.main()\n"
] | [
[
"numpy.zeros",
"numpy.linspace",
"numpy.sin"
],
[
"numpy.maximum",
"numpy.minimum",
"scipy.sparse.issparse",
"scipy.sparse.csr_matrix",
"numpy.ones",
"numpy.all",
"numpy.errstate",
"numpy.zeros"
],
[
"numpy.float64"
],
[
"numpy.zeros",
"numpy.linspace",
"numpy.ones"
],
[
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YongLiuLab/BrainRadiomicsTools | [
"19b440acd554ee920857c306442b6d2c411dca88",
"19b440acd554ee920857c306442b6d2c411dca88"
] | [
"Core/hippoSeg/LiviaNet/startTraining.py",
"Core/hippoSeg/startTesting.py"
] | [
"\"\"\" \nCopyright (c) 2016, Jose Dolz .All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n OTHER DEALINGS IN THE SOFTWARE.\n\nJose Dolz. Dec, 2016.\nemail: [email protected]\nLIVIA Department, ETS, Montreal.\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom Modules.IO.sampling import getSamplesSubepoch\n\nfrom Modules.General.Utils import dump_model_to_gzip_file\nfrom Modules.General.Utils import getImagesSet\nfrom Modules.General.Utils import load_model_from_gzip_file\nfrom Modules.Parsers.parsersUtils import parserConfigIni\nfrom startTesting import segmentVolume\n\n\ndef startTraining(networkModelName,configIniName):\n print (\" ************************************************ STARTING TRAINING **************************************************\")\n print (\" ********************** Starting training model (Reading parameters) **********************\")\n\n myParserConfigIni = parserConfigIni()\n \n myParserConfigIni.readConfigIniFile(configIniName,1)\n \n # Image type (0: Nifti, 1: Matlab)\n imageType = myParserConfigIni.imageTypesTrain\n\n print (\" --- Do training in {} epochs with {} subEpochs each...\".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))\n print (\"-------- Reading Images names used in training/validation -------------\")\n##-----##\n # from sklearn.model_selection import KFold\n # import numpy as np\n # y1 = myParserConfigIni.indexesForTraining\n # #x1 = myParserConfigIni.indexesForValidation\n # kf = KFold(n_splits= 5)\n #\n # for train_index, test_index in kf.split(y1):\n # print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n # y, x = np.array(y1)[train_index], np.array(y1)[test_index]\n##-----##\n # from sklearn.model_selection import LeavePOut\n # lpo = LeavePOut(p=5)\n # y1 = myParserConfigIni.indexesForTraining\n # for train, test in lpo.split(y1):\n # y, x = np.array(y1)[train], np.array(y1)[test]\n##-----train##\n from sklearn.cross_validation import LeaveOneOut\n loo = LeaveOneOut(4)\n y1 = myParserConfigIni.indexesForTraining\n x1 = myParserConfigIni.indexesForValidation\n for train_index, test_index in loo:\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n y, x = np.array(y1)[train_index], np.array(y1)[test_index]\n##------he\n # from sklearn.model_selection import train_test_split\n # X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)\n\n # -- Get list of images used for training -- #\n\n (imageNames_Train, names_Train) = getImagesSet(myParserConfigIni.imagesFolder,y) # Images\n (groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y) # Ground truth\n 
(roiNames_Train, roi_names_Train) = getImagesSet(myParserConfigIni.ROIFolder,y) # ROI\n\n # -- Get list of images used for validation -- #\n (imageNames_Val, names_Val) = getImagesSet(myParserConfigIni.imagesFolder,x) # Images\n (groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x) # Ground truth\n (roiNames_Val, roi_names_Val) = getImagesSet(myParserConfigIni.ROIFolder,x) # ROI\n\n # Print names\n print (\" ================== Images for training ================\")\n for i in range(0,len(names_Train)):\n if len(roi_names_Train) > 0:\n print(\" Image({}): {} | GT: {} | ROI {} \".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))\n else:\n print(\" Image({}): {} | GT: {} \".format(i,names_Train[i], gt_names_Train[i] ))\n print (\" ================== Images for validation ================\")\n for i in range(0,len(names_Val)):\n if len(roi_names_Train) > 0:\n print(\" Image({}): {} | GT: {} | ROI {} \".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))\n else:\n print(\" Image({}): {} | GT: {} \".format(i,names_Val[i], gt_names_Val[i]))\n print (\" ===============================================================\")\n \n # --------------- Load my LiviaNet3D object --------------- \n print (\" ... Loading model from {}\".format(networkModelName))\n myLiviaNet3D = load_model_from_gzip_file(networkModelName)\n print (\" ... Network architecture successfully loaded....\")\n\n # Asign parameters to loaded Net\n myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs\n myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs\n myLiviaNet3D.numberOfSamplesSupEpoch = myParserConfigIni.numberOfSamplesSupEpoch\n myLiviaNet3D.firstEpochChangeLR = myParserConfigIni.firstEpochChangeLR\n myLiviaNet3D.frequencyChangeLR = myParserConfigIni.frequencyChangeLR\n \n numberOfEpochs = myLiviaNet3D.numberOfEpochs\n numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs\n numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch\n \n # --------------- -------------- --------------- \n # --------------- Start TRAINING --------------- \n # --------------- -------------- --------------- \n # Get sample dimension values\n receptiveField = myLiviaNet3D.receptiveField\n sampleSize_Train = myLiviaNet3D.sampleSize_Train\n\n trainingCost = []\n\n if myParserConfigIni.applyPadding == 1:\n applyPadding = True\n else:\n applyPadding = False\n \n learningRateModifiedEpoch = 0\n \n # Run over all the (remaining) epochs and subepochs\n for e_i in xrange(numberOfEpochs):\n # Recover last trained epoch\n numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained\n \n print(\" ============== EPOCH: {}/{} =================\".format(numberOfEpochsTrained+1,numberOfEpochs))\n\n costsOfEpoch = []\n \n for subE_i in xrange(numberOfSubEpochs): \n epoch_nr = subE_i+1\n print (\" --- SubEPOCH: {}/{}\".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))\n\n # Get all the samples that will be used in this sub-epoch\n [imagesSamplesAll,\n gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,\n imageNames_Train,\n groundTruthNames_Train,\n roiNames_Train,\n imageType,\n sampleSize_Train,\n receptiveField,\n applyPadding\n )\n\n # Variable that will contain weights for the cost function\n # --- In its current implementation, all the classes have the same weight\n weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')\n \n numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size \n \n myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, 
borrow=True)\n myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)\n \n costsOfBatches = []\n evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype=\"int32\")\n \n for b_i in xrange(numberBatches):\n # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)\n costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)\n meanBatchCostError = costErrors[0]\n costsOfBatches.append(meanBatchCostError)\n myLiviaNet3D.updateLayersMatricesBatchNorm() \n\n \n #======== Calculate and Report accuracy over subepoch\n meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)\n print(\" ---------- Cost of this subEpoch: {}\".format(meanCostOfSubepoch))\n \n # Release data\n myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype=\"float32\"))\n myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype=\"float32\"))\n\n # Get mean cost epoch\n costsOfEpoch.append(meanCostOfSubepoch)\n\n meanCostOfEpoch = sum(costsOfEpoch) / float(numberOfSubEpochs)\n \n # Include the epoch cost to the main training cost and update current mean \n trainingCost.append(meanCostOfEpoch)\n currentMeanCost = sum(trainingCost) / float(str( e_i + 1))\n \n print(\" ---------- Training on Epoch #\" + str(e_i) + \" finished ----------\" )\n print(\" ---------- Cost of Epoch: {} / Mean training error {}\".format(meanCostOfEpoch,currentMeanCost))\n print(\" -------------------------------------------------------- \" )\n \n # ------------- Update Learning Rate if required ----------------#\n\n if e_i >= myLiviaNet3D.firstEpochChangeLR :\n if learningRateModifiedEpoch == 0:\n currentLR = myLiviaNet3D.learning_rate.get_value()\n newLR = currentLR / 2.0\n myLiviaNet3D.learning_rate.set_value(newLR)\n print(\" ... Learning rate has been changed from {} to {}\".format(currentLR, newLR))\n learningRateModifiedEpoch = e_i\n else:\n if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):\n currentLR = myLiviaNet3D.learning_rate.get_value()\n newLR = currentLR / 2.0\n myLiviaNet3D.learning_rate.set_value(newLR)\n print(\" ... Learning rate has been changed from {} to {}\".format(currentLR, newLR))\n learningRateModifiedEpoch = e_i\n \n # ---------------------- Start validation ---------------------- #\n \n numberImagesToSegment = len(imageNames_Val)\n print(\" ********************** Starting validation **********************\")\n\n # Run over the images to segment \n for i_d in xrange(numberImagesToSegment) :\n print(\"------------- Segmenting subject: {} ....total: {}/{}... 
-------------\".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))\n strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]\n \n segmentVolume(myLiviaNet3D,\n i_d,\n imageNames_Val, # Full path\n names_Val, # Only image name\n groundTruthNames_Val,\n roiNames_Val,\n imageType,\n applyPadding,\n receptiveField, \n sampleSize_Train,\n strideValues,\n myLiviaNet3D.batch_Size,\n 0 # Validation (0) or testing (1)\n )\n \n \n print(\" ********************** Validation DONE ********************** \")\n\n # ------ In this point the training is done at Epoch n ---------#\n # Increase number of epochs trained\n myLiviaNet3D.numberOfEpochsTrained += 1\n\n # --------------- Save the model --------------- \n BASE_DIR = os.getcwd()\n path_Temp = os.path.join(BASE_DIR,'outputFiles')\n netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)\n netFolderName = os.path.join(netFolderName,'Networks')\n\n modelFileName = netFolderName + \"/\" + myLiviaNet3D.networkName + \"_Epoch\" + str (myLiviaNet3D.numberOfEpochsTrained)\n dump_model_to_gzip_file(myLiviaNet3D, modelFileName)\n \n strFinal = \" Network model saved in \" + netFolderName + \" as \" + myLiviaNet3D.networkName + \"_Epoch\" + str (myLiviaNet3D.numberOfEpochsTrained)\n print (strFinal)\n\n print(\"................ The whole Training is done.....\")\n print(\" ************************************************************************************ \")\n",
"# coding=UTF-8\n\"\"\"\nCopyright (c) 2016, Jose Dolz .All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n OTHER DEALINGS IN THE SOFTWARE.\n\nJose Dolz. Dec, 2016.\nemail: [email protected]\nLIVIA Department, ETS, Montreal.\n\"\"\"\n\nimport numpy as np\nimport os\n\n# from Modules.General.Evaluation import computeDice\nfrom .LiviaNet.Modules.General.Utils import getImagesSet\nfrom .LiviaNet.Modules.General.Utils import load_model_from_gzip_file\nfrom .imgOp import applyUnpadding\nfrom .loadData import load_imagesSinglePatient\nfrom .savedata import saveImageAsNifti\nfrom .savedata import saveImageAsMatlab\nfrom .sampling import *\nfrom .LiviaNet.Modules.Parsers.parsersUtils import parserConfigIni\n\n\n\ndef segmentVolume(myNetworkModel,\n i_d,\n out_path,\n imageNames_Test,\n names_Test,\n groundTruthNames_Test,\n roiNames_Test,\n imageType,\n padInputImagesBool,\n receptiveField,\n sampleSize_Test,\n strideVal,\n batch_Size,\n task # Validation (0) or testing (1)\n ):\n # Get info from the network model\n networkName = myNetworkModel.networkName\n folderName = myNetworkModel.folderName\n n_classes = myNetworkModel.n_classes\n sampleSize_Test = myNetworkModel.sampleSize_Test\n receptiveField = myNetworkModel.receptiveField\n outputShape = myNetworkModel.lastLayer.outputShapeTest[2:]\n batch_Size = myNetworkModel.batch_Size\n padInputImagesBool = True\n\n # Get half sample size\n sampleHalf = []\n for h_i in range(3):\n sampleHalf.append((receptiveField[h_i] - 1) / 2)\n\n # Load the images to segment\n [imgSubject,\n gtLabelsImage,\n roi,\n paddingValues] = load_imagesSinglePatient(i_d,\n imageNames_Test,\n groundTruthNames_Test,\n roiNames_Test,\n padInputImagesBool,\n receptiveField,\n sampleSize_Test,\n imageType,\n )\n\n # Get image dimensions\n imgDims = list(imgSubject.shape)\n\n [sampleCoords] = sampleWholeImage(imgSubject,\n roi,\n sampleSize_Test,\n strideVal,\n batch_Size\n )\n\n numberOfSamples = len(sampleCoords)\n sampleID = 0\n numberOfBatches = numberOfSamples / batch_Size\n\n # The probability-map that will be constructed by the predictions.\n probMaps = np.zeros([n_classes] + imgDims, dtype=\"float32\")\n\n # Run over all the batches\n for b_i in range(int(numberOfBatches)):\n\n # Get samples for batch b_i\n\n sampleCoords_b = sampleCoords[b_i * batch_Size: (b_i + 1) * batch_Size]\n\n [imgSamples] = extractSamples(imgSubject,\n sampleCoords_b,\n sampleSize_Test,\n receptiveField)\n\n # Load the data of the batch on the GPU\n myNetworkModel.testingData_x.set_value(imgSamples, borrow=True)\n\n # Call the testing Theano function\n predictions = 
myNetworkModel.networkModel_Test(0)\n\n predOutput = predictions[-1]\n\n # --- Now we can generate the probability maps from the predictions ----\n # Run over all the regions\n for r_i in range(batch_Size):\n sampleCoords_i = sampleCoords[sampleID]\n coords = [sampleCoords_i[0][0], sampleCoords_i[1][0], sampleCoords_i[2][0]]\n\n # Get the min and max coords\n xMin = coords[0] + sampleHalf[0]\n xMax = coords[0] + sampleHalf[0] + strideVal[0]\n\n yMin = coords[1] + sampleHalf[1]\n yMax = coords[1] + sampleHalf[1] + strideVal[1]\n\n zMin = coords[2] + sampleHalf[2]\n zMax = coords[2] + sampleHalf[2] + strideVal[2]\n\n probMaps[:, int(xMin):int(xMax), int(yMin):int(yMax), int(zMin):int(zMax)] = predOutput[r_i]\n\n sampleID += 1\n\n # Release data\n myNetworkModel.testingData_x.set_value(np.zeros([1, 1, 1, 1, 1], dtype=\"float32\"))\n\n # Segmentation has been done at this point.\n\n # Now: Save the data\n # Get the segmentation from the probability maps ---\n segmentationImage = np.argmax(probMaps, axis=0)\n\n # Save Result:\n npDtypeForPredictedImage = np.dtype(np.int16)\n suffixToAdd = \"_Segm\"\n\n # Apply unpadding if specified\n if padInputImagesBool == True:\n segmentationRes = applyUnpadding(segmentationImage, paddingValues)\n else:\n segmentationRes = segmentationImage\n\n # Generate folders to store the model\n BASE_DIR = os.getcwd() # get the current working directory as a string\n path_Temp = os.path.join(BASE_DIR, 'outputFiles') # returns the path string\n\n\n\n # For the predictions\n predlFolderName = os.path.join(path_Temp, myNetworkModel.folderName)\n predlFolderName = os.path.join(predlFolderName, 'Pred')\n if task == 0:\n predTestFolderName = os.path.join(predlFolderName, 'Validation')\n else:\n predTestFolderName = os.path.join(predlFolderName, 'Testing')\n\n # get outpath\n print (out_path)\n predTestFolderName=out_path\n ###\n nameToSave = predTestFolderName + '/Segmentation_' + names_Test[i_d]\n\n\n # Save Segmentation image\n\n print(\" ... Saving segmentation result...\"),\n if imageType == 0: # nifti\n imageTypeToSave = np.dtype(np.int16)\n saveImageAsNifti(segmentationRes,\n nameToSave,\n imageNames_Test[i_d],\n imageTypeToSave)\n else: # Matlab\n # Cast to int8 for saving purposes\n saveImageAsMatlab(segmentationRes.astype('int8'),\n nameToSave)\n\n # Save the prob maps for each class (except background)\n for c_i in range(1, n_classes):\n\n nameToSave = predTestFolderName + '/ProbMap_class_' + str(c_i) + '_' + names_Test[i_d]\n\n probMapClass = probMaps[c_i, :, :, :]\n\n # Apply unpadding if specified\n if padInputImagesBool == True:\n probMapClassRes = applyUnpadding(probMapClass, paddingValues)\n else:\n probMapClassRes = probMapClass\n\n print(\" ... Saving prob map for class {}...\".format(str(c_i))),\n if imageType == 0: # nifti\n imageTypeToSave = np.dtype(np.float32)\n saveImageAsNifti(probMapClassRes,\n nameToSave,\n imageNames_Test[i_d],\n imageTypeToSave)\n else:\n # Cast to float32 for saving purposes\n saveImageAsMatlab(probMapClassRes.astype('float32'),\n nameToSave)\n\n # If segmentation done during evaluation, get dice\n # if task == 0:\n # print(\" ... 
Computing Dice scores: \")\n # DiceArray = computeDice(segmentationImage, gtLabelsImage)\n # for d_i in xrange(len(DiceArray)):\n # print(\" -------------- DSC (Class {}) : {}\".format(str(d_i + 1), DiceArray[d_i]))\n\n\n\"\"\" Main segmentation function \"\"\"\n\n\ndef startTesting(networkModelName,\n configIniName\n ,out_path):\n padInputImagesBool = True # from config ini\n print (\" ****************************************** STARTING SEGMENTATION ******************************************\")\n\n print (\" ********************** Starting segmentation **********************\")\n myParserConfigIni = parserConfigIni()\n myParserConfigIni.readConfigIniFile(configIniName, 2)\n\n print (\" -------- Images to segment -------------\")\n\n print (\" -------- Reading Images names for segmentation -------------\")\n\n # -- Get list of images used for testing -- #\n (imageNames_Test, names_Test) = getImagesSet(myParserConfigIni.imagesFolder,\n myParserConfigIni.indexesToSegment) # Images\n (groundTruthNames_Test, gt_names_Test) = getImagesSet(myParserConfigIni.GroundTruthFolder,\n myParserConfigIni.indexesToSegment) # Ground truth\n (roiNames_Test, roi_names_Test) = getImagesSet(myParserConfigIni.ROIFolder,\n [0]) # ROI\n\n # --------------- Load my LiviaNet3D object ---------------\n print (\" ... Loading model from {}\".format(networkModelName))\n\n myLiviaNet3D = load_model_from_gzip_file(networkModelName)\n print (\" ... Network architecture successfully loaded....\")\n\n # Get info from the network model\n networkName = myLiviaNet3D.networkName\n folderName = myLiviaNet3D.folderName\n n_classes = myLiviaNet3D.n_classes\n sampleSize_Test = myLiviaNet3D.sampleSize_Test\n receptiveField = myLiviaNet3D.receptiveField\n outputShape = myLiviaNet3D.lastLayer.outputShapeTest[2:]\n batch_Size = myLiviaNet3D.batch_Size\n padInputImagesBool = myParserConfigIni.applyPadding\n imageType = myParserConfigIni.imageTypes\n numberImagesToSegment = len(imageNames_Test)\n\n strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]\n\n # Run over the images to segment\n for i_d in range(numberImagesToSegment):\n print(\"********************** Segmenting subject: {} ....total: {}/{}...**********************\".format(\n names_Test[i_d], str(i_d + 1), str(numberImagesToSegment)))\n\n segmentVolume(myLiviaNet3D,\n i_d,\n out_path,#outputpath by zy\n imageNames_Test, # Full path\n names_Test, # Only image name\n groundTruthNames_Test,\n roiNames_Test,\n imageType,\n padInputImagesBool,\n receptiveField,\n sampleSize_Test,\n strideValues,\n batch_Size,\n 1 # Validation (0) or testing (1)\n )\n\n print(\" **************************************************************************************************** \")\n"
] | [
[
"numpy.array",
"numpy.zeros",
"sklearn.cross_validation.LeaveOneOut",
"numpy.ones"
],
[
"numpy.argmax",
"numpy.zeros",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
T3p/policy-optimization | [
"77006545779823737c4ca3b19e9d80506015c132",
"77006545779823737c4ca3b19e9d80506015c132",
"77006545779823737c4ca3b19e9d80506015c132",
"77006545779823737c4ca3b19e9d80506015c132"
] | [
"potion/envs/minigolf.py",
"potion/envs/lq.py",
"script/spg_gridworld.py",
"script/gpomdp_cheetah.py"
] | [
"from numbers import Number\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport math as m\nfrom scipy.stats import norm\n\n\"\"\"\nMinigolf task.\nReferences\n----------\n - Penner, A. R. \"The physics of putting.\" Canadian Journal of Physics 80.2 (2002): 83-96.\n\"\"\"\n\n\nclass MiniGolf(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.min_pos = 0.0\n self.max_pos = 20.0\n self.min_action = 1e-5\n self.max_action = 10.0\n self.putter_length = 1.0 # [0.7:1.0]\n self.friction = 0.131 # [0.065:0.196]\n self.hole_size = 0.10 # [0.10:0.15]\n self.sigma_noise = 0.3\n self.ball_radius = 0.02135\n self.min_variance = 1e-2 # Minimum variance for computing the densities\n\n # gym attributes\n self.viewer = None\n low = np.array([self.min_pos])\n high = np.array([self.max_pos])\n self.action_space = spaces.Box(low=self.min_action,\n high=self.max_action,\n shape=(1,), dtype=float)\n self.observation_space = spaces.Box(low=low, high=high, dtype=float)\n\n # initialize state\n self.seed()\n self.reset()\n\n def setParams(self, env_param):\n self.putter_length = env_param[0]\n self.friction = env_param[1]\n self.hole_size = env_param[2]\n self.sigma_noise = m.sqrt(env_param[-1])\n\n def step(self, action, render=False):\n action = np.clip(action, self.min_action, self.max_action / 2)\n\n noise = 10\n while abs(noise) > 1:\n noise = self.np_random.randn() * self.sigma_noise\n u = action * self.putter_length * (1 + noise)\n\n deceleration = 5 / 7 * self.friction * 9.81\n\n t = u / deceleration\n xn = self.state - u * t + 0.5 * deceleration * t ** 2\n\n reward = 0\n done = True\n if self.state > 0:\n reward = -1\n done = False\n elif self.state < -4:\n reward = -100\n\n self.state = xn\n\n return self.get_state(), float(reward), done, {'state': self.get_state(), 'action': action, 'danger': float(self.state) < -4}\n\n # Custom param for transfer\n\n def getEnvParam(self):\n return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),\n np.ravel(self.sigma_noise ** 2)])\n\n def reset(self, state=None):\n if state is None:\n self.state = np.array([self.np_random.uniform(low=self.min_pos,\n high=self.max_pos)])\n else:\n self.state = np.array(state)\n\n return self.get_state()\n\n def get_state(self):\n return np.array(self.state)\n\n def get_true_state(self):\n \"\"\"For testing purposes\"\"\"\n return np.array(self.state)\n\n def clip_state(self, state):\n return state\n # return np.clip(state, self.min_pos, self.max_pos)\n\n def clip_action(self, action):\n return action\n # return np.clip(action, self.min_action, self.max_action)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def getDensity_old(self, env_parameters, state, action, next_state):\n\n if state < next_state:\n return 0\n\n action = np.clip(action, self.min_action, self.max_action / 2)\n action = 1e-8 if action == 0 else action\n\n putter_length = env_parameters[0]\n friction = env_parameters[1]\n sigma_noise = env_parameters[-1]\n deceleration = 5 / 7 * friction * 9.81\n\n u = np.sqrt(2 * deceleration * (state - next_state))\n noise = (u / (action * putter_length) - 1) / sigma_noise\n\n return norm.pdf(noise)\n\n def density_old(self, env_parameters, state, action, next_state):\n \"\"\"\n :param env_parameters: list of env_params\n :param state: NxTx1\n :param action: NxT\n :param next_state: NxTx1\n :return: pdf 
NxTx1xn_param\n \"\"\"\n assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4\n\n mask = state < next_state\n action = np.clip(action, self.min_action, self.max_action / 2)\n action[action == 0] = 1e-8\n pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))\n diff = np.abs(state - next_state) # take the abs for the sqrt, but mask negative values later\n\n for i in range(env_parameters.shape[0]):\n deceleration = 5 / 7 * env_parameters[i, 1] * 9.81\n u = np.sqrt(2 * deceleration * diff[:, :, :, i])\n noise = (u / (action[:, :, np.newaxis, i] * env_parameters[i, 0]) - 1) / env_parameters[i, -1]\n pdf[:, :, :, i] = norm.pdf(noise) * (1 - mask[:, :, :, i]) # set to zero impossible transitions\n\n return pdf[:, :, 0, :]\n\n def densityCurrent_old(self, state, action, next_state):\n \"\"\"\n :param state: NxTx1\n :param action: NxT\n :param next_state: NxTx1\n :return: pdf NxTx1xn_param\n \"\"\"\n\n assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3\n\n mask = state < next_state\n action = np.clip(action, self.min_action, self.max_action / 2)\n action[action == 0] = 1e-8\n diff = np.abs(state - next_state) # take the abs for the sqrt, but mask negative values later\n\n deceleration = 5 / 7 * self.friction * 9.81\n u = np.sqrt(2 * deceleration * diff)\n noise = (u / (action[:, :, np.newaxis] * self.putter_length) - 1) / self.sigma_noise\n pdf = norm.pdf(noise) * (1 - mask) # set to zero impossible transitions\n\n return pdf[:, :, 0]\n\n def stepDenoisedCurrent_old(self, state, action):\n \"\"\"\n Computes steps without noise.\n \"\"\"\n\n assert state.ndim == 3 and action.ndim == 2\n\n action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]\n u = action * self.putter_length\n deceleration = 5 / 7 * self.friction * 9.81\n t = u / deceleration\n return state - u * t + 0.5 * deceleration * t ** 2\n\n def stepDenoisedCurrent(self, state, action):\n \"\"\"\n Computes the mean transitions.\n \"\"\"\n\n assert state.ndim == 3 and action.ndim == 2\n\n action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]\n u = action * self.putter_length\n deceleration = 5 / 7 * self.friction * 9.81\n return state - 0.5 * u ** 2 * (1 + self.sigma_noise ** 2) / deceleration\n\n def variance(self, action):\n \"\"\"\n Next-state variance given the action\n \"\"\"\n\n assert action.ndim == 2\n\n deceleration = 5 / 7 * self.friction * 9.81\n action = np.clip(action, self.min_action, self.max_action / 2)\n k = action ** 2 * self.putter_length ** 2 / (2 * deceleration)\n return 2 * k ** 2 * self.sigma_noise ** 2 * (self.sigma_noise ** 2 + 2) + self.min_variance\n\n def densityCurrent(self, state, action, next_state):\n \"\"\"\n :param state: NxTx1\n :param action: NxT\n :param next_state: NxTx1\n :return: pdf NxTx1xn_param\n \"\"\"\n\n assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3\n\n mean_ns = self.stepDenoisedCurrent(state, action)\n var_ns = self.variance(action)\n return norm.pdf((next_state - mean_ns)[:, :, 0] / np.sqrt(var_ns))\n\n def density(self, env_parameters, state, action, next_state):\n \"\"\"\n :param env_parameters: list of env_params\n :param state: NxTx1\n :param action: NxT\n :param next_state: NxTx1\n :return: pdf NxTx1xn_param\n \"\"\"\n assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4\n\n action = np.clip(action, self.min_action, self.max_action / 2)\n pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))\n\n for i in 
range(env_parameters.shape[0]):\n deceleration = 5 / 7 * env_parameters[i, 1] * 9.81\n k = action ** 2 * env_parameters[i, 0] ** 2 / (2 * deceleration)\n\n # Compute mean next-state\n mean_ns = state[:, :, :, i] - k[:, :, np.newaxis, i] * (1 + env_parameters[i, -1])\n # Compute variance next-state\n var_ns = 2 * k[:, :, np.newaxis, i] ** 2 * env_parameters[i, -1] * (\n env_parameters[i, -1] + 2) + self.min_variance\n\n pdf[:, :, :, i] = norm.pdf((next_state[:, :, :, i] - mean_ns) / np.sqrt(var_ns))\n\n return pdf[:, :, 0, :]\n\n\nclass ComplexMiniGolf(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.horizon = 20\n self.gamma = 0.99\n\n self.min_pos = 0.0\n self.max_pos = 20.0\n self.min_action = 1e-5\n self.max_action = 10.0\n self.putter_length = 1.0 # [0.7:1.0]\n # self.friction = 0.131 # [0.065:0.196]\n self.friction_low = 0.131\n self.friction_high = 0.19 # 0.190\n self.hole_size = 0.10 # [0.10:0.15]\n self.sigma_noise = 0.3\n self.ball_radius = 0.02135\n self.min_variance = 1e-2 # Minimum variance for computing the densities\n\n # gym attributes\n self.viewer = None\n low = np.array([self.min_pos])\n high = np.array([self.max_pos])\n self.action_space = spaces.Box(low=self.min_action,\n high=self.max_action,\n shape=(1,))\n self.observation_space = spaces.Box(low=low, high=high)\n\n # initialize state\n self.seed()\n self.reset()\n\n def setParams(self, env_param):\n self.putter_length = env_param[0]\n self.friction = env_param[1]\n self.hole_size = env_param[2]\n self.sigma_noise = m.sqrt(env_param[-1])\n\n def computeFriction(self, state):\n # if state < (self.max_pos - self.min_pos) / 3:\n # friction = self.friction_low\n # elif state < (self.max_pos - self.min_pos) * 2 / 3:\n # friction = self.friction_low\n # else:\n # friction = self.friction_high\n # return friction\n delta_f = self.friction_high - self.friction_low\n delta_p = self.max_pos - self.min_pos\n return self.friction_low + (delta_f / delta_p) * state\n\n def step(self, action, render=False):\n action = np.clip(action, self.min_action, self.max_action / 2)\n\n noise = 10\n while abs(noise) > 1:\n noise = self.np_random.randn() * self.sigma_noise\n u = action * self.putter_length * (1 + noise)\n\n friction = self.computeFriction(self.state)\n\n deceleration = 5 / 7 * friction * 9.81\n\n t = u / deceleration\n xn = self.state - u * t + 0.5 * deceleration * t ** 2\n\n # reward = 0\n # done = True\n # if u < v_min:\n # reward = -1\n # done = False\n # elif u > v_max:\n # reward = -100\n\n reward = 0\n done = True\n if self.state > 0:\n reward = -1\n done = False\n elif self.state < -4:\n reward = -100\n\n state = self.state\n self.state = xn\n\n # TODO the last three values should not be used\n return self.get_state(), float(reward), done, {\"state\": state, \"next_state\": self.state, \"action\": action}\n\n # Custom param for transfer\n\n def getEnvParam(self):\n return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),\n np.ravel(self.sigma_noise ** 2)])\n\n def reset(self, state=None):\n # TODO change reset\n if state is None:\n self.state = np.array([self.np_random.uniform(low=self.min_pos,\n high=self.max_pos)])\n else:\n self.state = np.array(state)\n\n return self.get_state()\n\n def get_state(self):\n return np.array(self.state)\n\n def get_true_state(self):\n \"\"\"For testing purposes\"\"\"\n return np.array(self.state)\n\n def clip_state(self, state):\n return state\n # return 
np.clip(state, self.min_pos, self.max_pos)\n\n def clip_action(self, action):\n return action\n # return np.clip(action, self.min_action, self.max_action)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def reward(self, state, action, next_state):\n # FIXME: two problems. (1,probably fixed) When the next_state is less than state. (2) reward of -100 is never returned\n friction = self.computeFriction(state)\n deceleration = 5 / 7 * friction * 9.81\n\n u = np.sqrt(2 * deceleration * max((state - next_state), 0))\n\n v_min = np.sqrt(10 / 7 * friction * 9.81 * state)\n v_max = np.sqrt((2 * self.hole_size - self.ball_radius) ** 2 * (9.81 / (2 * self.ball_radius)) + v_min ** 2)\n\n reward = 0\n done = True\n if u < v_min:\n reward = -1\n done = False\n elif u > v_max:\n reward = -100\n\n return reward, done",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport math\nfrom numbers import Number\n\nclass LQ(gym.Env):\n \"\"\"\n Gym environment implementing an LQR problem\n\n s_{t+1} = A s_t + B a_t + noise\n r_{t+1} = - s_t^T Q s_t - a_t^T R a_t\n\n Run script to compute optimal policy parameters\n \"\"\" \n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.ds = 1 #state dimension\n self.da = 1 #action dimension\n self.horizon = 10 #task horizon (reset is not automatic!)\n self.gamma = 0.9 #discount factor\n self.max_pos = np.inf * np.ones(self.ds) #max state for clipping\n self.max_action = np.inf * np.ones(self.da) #max action for clipping \n self.sigma_noise = 0 * np.eye(self.ds) #std dev of environment noise\n self.A = np.eye(self.ds)\n self.B = np.eye(self.ds, self.da)\n self.Q = 1 * np.eye(self.ds)\n self.R = 1 * np.eye(self.da)\n\n #Gym attributes\n self.viewer = None\n self.action_space = spaces.Box(low=-self.max_action,\n high=self.max_action, \n dtype=np.float32)\n self.observation_space = spaces.Box(low=-self.max_pos, \n high=self.max_pos,\n dtype=np.float32)\n \n #Initialize state\n self.seed()\n self.reset()\n\n def step(self, action, render=False):\n u = np.clip(np.ravel(np.atleast_1d(action)), -self.max_action, self.max_action)\n noise = np.dot(self.sigma_noise, self.np_random.randn(self.ds))\n xn = np.clip(np.dot(self.A, self.state.T) + np.dot(self.B, u) + noise, -self.max_pos, self.max_pos)\n cost = np.dot(self.state,\n np.dot(self.Q, self.state)) + \\\n np.dot(u, np.dot(self.R, u))\n\n self.state = xn.ravel()\n self.timestep += 1\n \n return self.get_state(), -np.asscalar(cost), self.timestep >= self.horizon, {'danger':0} #done after fixed horizon (manual reset)\n\n def reset(self, state=None):\n \"\"\"\n By default, uniform initialization \n \"\"\"\n self.timestep = 0\n if state is None:\n self.state = np.array(self.np_random.uniform(low=-1.,#self.max_pos,\n high=1.))#self.max_pos))\n else:\n self.state = np.array(state)\n\n return self.get_state()\n\n def get_state(self):\n return np.array(self.state)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def render(self, mode='human', close=False):\n if self.ds not in [1, 2]:\n return\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n screen_width = 600\n world_width = math.ceil((self.max_pos[0] * 2) * 1.5)\n xscale = screen_width / world_width\n ballradius = 3\n \n if self.ds == 1: \n screen_height = 400\n else:\n world_height = math.ceil((self.max_pos[1] * 2) * 1.5)\n screen_height = math.ceil(xscale * world_height)\n yscale = screen_height / world_height\n\n if self.viewer is None:\n clearance = 0 # y-offset\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n mass = rendering.make_circle(ballradius * 2)\n mass.set_color(.8, .3, .3)\n mass.add_attr(rendering.Transform(translation=(0, clearance)))\n self.masstrans = rendering.Transform()\n mass.add_attr(self.masstrans)\n self.viewer.add_geom(mass)\n if self.ds == 1:\n self.track = rendering.Line((0, 100), (screen_width, 100))\n else:\n self.track = rendering.Line((0, screen_height / 2), (screen_width, screen_height / 2))\n self.track.set_color(0.5, 0.5, 0.5)\n self.viewer.add_geom(self.track)\n zero_line = rendering.Line((screen_width / 2, 0),\n (screen_width / 
2, screen_height))\n zero_line.set_color(0.5, 0.5, 0.5)\n self.viewer.add_geom(zero_line)\n\n x = self.state[0]\n ballx = x * xscale + screen_width / 2.0\n if self.ds == 1:\n bally = 100\n else:\n y = self.state[1]\n bally = y * yscale + screen_height / 2.0\n self.masstrans.set_translation(ballx, bally)\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n def _computeP2(self, K):\n \"\"\"\n This function computes the Riccati equation associated to the LQG\n problem.\n Args:\n K (matrix): the matrix associated to the linear controller a = K s\n\n Returns:\n P (matrix): the Riccati Matrix\n\n \"\"\"\n I = np.eye(self.Q.shape[0], self.Q.shape[1])\n if np.array_equal(self.A, I) and np.array_equal(self.B, I):\n P = (self.Q + np.dot(K.T, np.dot(self.R, K))) / (I - self.gamma *\n (I + 2 * K + K **\n 2))\n else:\n tolerance = 0.0001\n converged = False\n P = np.eye(self.Q.shape[0], self.Q.shape[1])\n while not converged:\n Pnew = self.Q + self.gamma * np.dot(self.A.T,\n np.dot(P, self.A)) + \\\n self.gamma * np.dot(K.T, np.dot(self.B.T,\n np.dot(P, self.A))) + \\\n self.gamma * np.dot(self.A.T,\n np.dot(P, np.dot(self.B, K))) + \\\n self.gamma * np.dot(K.T,\n np.dot(self.B.T,\n np.dot(P, np.dot(self.B,\n K)))) + \\\n np.dot(K.T, np.dot(self.R, K))\n converged = np.max(np.abs(P - Pnew)) < tolerance\n P = Pnew\n return P\n\n def computeOptimalK(self):\n \"\"\"\n This function computes the optimal linear controller associated to the\n LQG problem (a = K * s).\n\n Returns:\n K (matrix): the optimal controller\n\n \"\"\"\n P = np.eye(self.Q.shape[0], self.Q.shape[1])\n for i in range(100):\n K = -self.gamma * np.dot(np.linalg.inv(\n self.R + self.gamma * (np.dot(self.B.T, np.dot(P, self.B)))),\n np.dot(self.B.T, np.dot(P, self.A)))\n P = self._computeP2(K)\n K = -self.gamma * np.dot(np.linalg.inv(self.R + self.gamma *\n (np.dot(self.B.T,\n np.dot(P, self.B)))),\n np.dot(self.B.T, np.dot(P, self.A)))\n return K\n\n def computeJ(self, K, Sigma=1., n_random_x0=10000):\n \"\"\"\n This function computes the discounted reward associated to the provided\n linear controller (a = K s + \\epsilon, \\epsilon \\sim N(0,\\Sigma)).\n Args:\n K (matrix): the controller matrix\n Sigma (matrix): covariance matrix of the zero-mean noise added to\n the controller action\n n_random_x0: the number of samples to draw in order to average over\n the initial state\n\n Returns:\n J (float): The discounted reward\n\n \"\"\"\n if isinstance(K, Number):\n K = np.array([K]).reshape(1, 1)\n if isinstance(Sigma, Number):\n Sigma = np.array([Sigma]).reshape(1, 1)\n\n P = self._computeP2(K)\n temp = np.dot(\n Sigma, (self.R + self.gamma * np.dot(self.B.T,\n np.dot(P, self.B))))\n temp = np.trace(temp) if np.ndim(temp) > 1 else temp\n W = (1 / (1 - self.gamma)) * temp\n\n #Closed-form expectation in the scalar case:\n if np.size(K)==1:\n return min(0,np.asscalar(-self.max_pos**2*P/3 - W))\n\n #Monte Carlo estimation for higher dimensions\n J = 0.0\n for i in range(n_random_x0):\n self.reset()\n x0 = self.get_state()\n J -= np.dot(x0.T, np.dot(P, x0)) \\\n + W\n J /= n_random_x0\n return min(0,J)\n\n def grad_K(self, K, Sigma):\n \"\"\"\n Policy gradient (wrt K) of Gaussian linear policy with mean K s\n and covariance Sigma.\n Scalar case only\n \"\"\"\n I = np.eye(self.Q.shape[0], self.Q.shape[1])\n if not np.array_equal(self.A, I) or not np.array_equal(self.B, I):\n raise NotImplementedError\n if not isinstance(K,Number) or not isinstance(Sigma, Number):\n raise NotImplementedError\n theta = 
np.asscalar(np.array(K))\n sigma = np.asscalar(np.array(Sigma))\n\n den = 1 - self.gamma*(1 + 2*theta + theta**2)\n dePdeK = 2*(theta*self.R/den + self.gamma*(self.Q + theta**2*self.R)*(1+theta)/den**2)\n return np.asscalar(- dePdeK*(self.max_pos**2/3 + self.gamma*sigma/(1 - self.gamma)))\n\n def grad_Sigma(self, K, Sigma=None):\n \"\"\"\n Policy gradient wrt (adaptive) covariance Sigma\n \"\"\"\n I = np.eye(self.Q.shape[0], self.Q.shape[1])\n if not np.array_equal(self.A, I) or not np.array_equal(self.B, I):\n raise NotImplementedError\n if not isinstance(K,Number) or not isinstance(Sigma, Number):\n raise NotImplementedError\n\n K = np.array(K)\n P = self._computeP2(K)\n return np.asscalar(-(self.R + self.gamma*P)/(1 - self.gamma))\n\n def grad_mixed(self, K, Sigma=None):\n \"\"\"\n Mixed-derivative policy gradient for K and Sigma\n \"\"\"\n I = np.eye(self.Q.shape[0], self.Q.shape[1])\n if not np.array_equal(self.A, I) or not np.array_equal(self.B, I):\n raise NotImplementedError\n if not isinstance(K,Number) or not isinstance(Sigma, Number):\n raise NotImplementedError\n theta = np.asscalar(np.array(K))\n\n den = 1 - self.gamma*(1 + 2*theta + theta**2)\n dePdeK = 2*(theta*self.R/den + self.gamma*(self.Q + theta**2*self.R)*(1+theta)/den**2)\n\n return np.asscalar(-dePdeK*self.gamma/(1 - self.gamma))\n\n def computeQFunction(self, x, u, K, Sigma, n_random_xn=100):\n \"\"\"\n This function computes the Q-value of a pair (x,u) given the linear\n controller Kx + epsilon where epsilon \\sim N(0, Sigma).\n Args:\n x (int, array): the state\n u (int, array): the action\n K (matrix): the controller matrix\n Sigma (matrix): covariance matrix of the zero-mean noise added to\n the controller action\n n_random_xn: the number of samples to draw in order to average over\n the next state\n\n Returns:\n Qfun (float): The Q-value in the given pair (x,u) under the given\n controller\n\n \"\"\"\n if isinstance(x, Number):\n x = np.array([x])\n if isinstance(u, Number):\n u = np.array([u])\n if isinstance(K, Number):\n K = np.array([K]).reshape(1, 1)\n if isinstance(Sigma, Number):\n Sigma = np.array([Sigma]).reshape(1, 1)\n\n P = self._computeP2(K)\n Qfun = 0\n for i in range(n_random_xn):\n noise = self.np_random.randn() * self.sigma_noise\n action_noise = self.np_random.multivariate_normal(\n np.zeros(Sigma.shape[0]), Sigma, 1)\n nextstate = np.dot(self.A, x) + np.dot(self.B,\n u + action_noise) + noise\n Qfun -= np.dot(x.T, np.dot(self.Q, x)) + \\\n np.dot(u.T, np.dot(self.R, u)) + \\\n self.gamma * np.dot(nextstate.T, np.dot(P, nextstate)) + \\\n (self.gamma / (1 - self.gamma)) * \\\n np.trace(np.dot(Sigma,\n self.R + self.gamma *\n np.dot(self.B.T, np.dot(P, self.B))))\n Qfun = np.asscalar(Qfun) / n_random_xn\n return Qfun\n\n\nif __name__ == '__main__':\n \"\"\"\n Compute optimal parameters K for Gaussian policy with mean Ks\n and covariance matrix sigma_controller (1 by default)\n \"\"\"\n env = LQ()\n sigma_controller = 1 * np.ones(env.da)\n theta_star = env.computeOptimalK()\n print('theta^* = ', theta_star)\n print('J^* = ', env.computeJ(theta_star,sigma_controller))\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 16 14:47:33 2019\n\n@author: Matteo Papini\n\"\"\"\nimport torch\nimport gym\nimport potion.envs\nfrom potion.actors.discrete_policies import ShallowGibbsPolicy\nfrom potion.common.logger import Logger\nfrom potion.algorithms.safe import spg\nimport argparse\nimport re\nfrom potion.common.rllab_utils import rllab_env_from_name, Rllab2GymWrapper\nfrom potion.meta.smoothing_constants import gibbs_lip_const\nfrom potion.meta.error_bounds import hoeffding_bounded_score\n\n\n# Command line arguments\nparser = argparse.ArgumentParser(formatter_class\n =argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--name', help='Experiment name', type=str, default='SPG')\nparser.add_argument('--storage', help='root of log directories', type=str, default='..')\nparser.add_argument('--estimator', help='PG estimator (reinforce/gpomdp)', \n type=str, default='gpomdp')\nparser.add_argument('--baseline', help='control variate (avg/peters/zero)', \n type=str, default='peters')\nparser.add_argument('--seed', help='RNG seed', type=int, default=0)\nparser.add_argument('--env', help='Gym environment id', type=str, \n default='GridWorld-v0')\nparser.add_argument('--horizon', help='Task horizon', type=int, default=10)\nparser.add_argument('--max_samples', help='Maximum total samples', type=int, \n default=1e7)\nparser.add_argument('--mini_batchsize', help='(Minimum) batch size', type=int, \n default=100)\nparser.add_argument('--max_batchsize', help='Maximum batch size', type=int, \n default=100000)\nparser.add_argument('--disc', help='Discount factor', type=float, default=0.9)\nparser.add_argument('--conf', help='Confidence', type=float, default=0.8)\nparser.add_argument('--std_init', help='Initial policy std', type=float, \n default=1.)\nparser.add_argument('--max_feat', help='Maximum state feature', type=float, \n default=1.)\nparser.add_argument('--max_rew', help='Maximum reward', type=float, \n default=1.)\nparser.add_argument(\"--fast\", help=\"speed up\",\n action=\"store_true\")\nparser.add_argument(\"--no-fast\", help=\"Do not speed up\",\n action=\"store_false\")\nparser.add_argument(\"--render\", help=\"Render an episode\",\n action=\"store_true\")\nparser.add_argument(\"--no-render\", help=\"Do not render any episode\",\n action=\"store_false\")\nparser.add_argument(\"--temp\", help=\"Save logs in temp folder\",\n action=\"store_true\")\nparser.add_argument(\"--no-temp\", help=\"Save logs in logs folder\",\n action=\"store_false\")\nparser.set_defaults(fast=True, render=False, temp=False) \n\nargs = parser.parse_args()\n\n# Prepare\nif args.env.startswith('rllab'):\n env_rllab_class = rllab_env_from_name(args.env)\n env_rllab = env_rllab_class()\n env = Rllab2GymWrapper(env_rllab)\nelse:\n env = gym.make(args.env)\nenv.seed(args.seed)\n\nm = sum(env.observation_space.shape)\nd = sum(env.action_space.shape)\nmu_init = torch.zeros(m)\nlogstd_init = torch.log(torch.zeros(1) + args.std_init)\npolicy = ShallowGibbsPolicy(env, \n temp=1.)\n\nenvname = re.sub(r'[^a-zA-Z]', \"\", args.env)[:-1]\nenvname = re.sub(r'[^a-zA-Z]', \"\", args.env)[:-1].lower()\nlogname = envname + '_' + args.name + '_' + str(args.seed)\n\nif args.temp:\n logger = Logger(directory= args.storage + '/temp', name = logname, modes=['human', 'csv'])\nelse:\n logger = Logger(directory=args.storage + '/logs', name = logname, modes=['human', 'csv'])\n\n#Constants\nlip_const = gibbs_lip_const(args.max_feat, args.max_rew, args.disc, \n 
1.)\nprint(lip_const)\nscore_bound = 2 * args.max_feat\nerr_bound = hoeffding_bounded_score(args.max_rew, score_bound, args.disc, args.horizon, \n dim=16, estimator=args.estimator)\n\n\n# Run\nspg(env, policy, args.horizon, lip_const, err_bound,\n fail_prob = 1. - args.conf,\n mini_batchsize = args.mini_batchsize,\n max_batchsize = args.max_batchsize,\n max_samples = args.max_samples,\n disc = args.disc,\n fast = args.fast,\n seed = args.seed,\n logger = logger,\n render = args.render,\n shallow = True,\n estimator = args.estimator,\n baseline = args.baseline,\n log_params=False,\n save_params=False)\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 16 14:47:33 2019\n\n@author: Matteo Papini\n\"\"\"\nimport torch\nimport gym\nimport potion.envs\nfrom potion.actors.continuous_policies import ShallowGaussianPolicy\nfrom potion.actors.discrete_policies import ShallowGibbsPolicy\nfrom potion.common.logger import Logger\nfrom potion.algorithms.reinforce import reinforce\nimport argparse\nimport re\nfrom potion.meta.steppers import ConstantStepper, RMSprop, Adam\nfrom gym.spaces.discrete import Discrete\nfrom potion.meta.smoothing_constants import gibbs_lip_const\n\n# Command line arguments\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--name', help='Experiment name', type=str, default='GPOMDP')\nparser.add_argument('--storage', help='root of log directories', type=str, default='..')\nparser.add_argument('--estimator', help='Policy gradient estimator (reinforce/gpomdp)', type=str, default='gpomdp')\nparser.add_argument('--baseline', help='baseline for policy gradient estimator (avg/peters/zero)', type=str, default='peters')\nparser.add_argument('--seed', help='RNG seed', type=int, default=0)\nparser.add_argument('--env', help='Gym environment id', type=str, default='HalfCheetah-v2')\nparser.add_argument('--horizon', help='Task horizon', type=int, default=100)\nparser.add_argument('--batchsize', help='Initial batch size', type=int, default=100)\nparser.add_argument('--iterations', help='Iterations', type=int, default=10000)\nparser.add_argument('--disc', help='Discount factor', type=float, default=0.9)\nparser.add_argument('--std', help='(Initial) policy std', type=float, default=1.)\nparser.add_argument('--stepper', help='Step size rule', type=str, default='constant')\nparser.add_argument('--step', help='Step size', type=float, default=1.)\nparser.add_argument('--ent', help='Entropy bonus coefficient', type=float, default=0.)\nparser.add_argument(\"--render\", help=\"Render an episode\",\n action=\"store_true\")\nparser.add_argument(\"--no-render\", help=\"Do not render any episode\",\n action=\"store_false\")\nparser.add_argument(\"--temp\", help=\"Save logs in temp folder\",\n action=\"store_true\")\nparser.add_argument(\"--no-temp\", help=\"Save logs in logs folder\",\n action=\"store_false\")\nparser.add_argument(\"--test\", help=\"Test on deterministic policy\",\n action=\"store_true\")\nparser.add_argument(\"--no-test\", help=\"Online learning only\",\n action=\"store_false\")\nparser.add_argument(\"--learnstd\", help=\"Learn std\",\n action=\"store_true\")\nparser.add_argument(\"--no-learnstd\", help=\"Don't learn std\",\n action=\"store_false\")\nparser.set_defaults(render=False, temp=False, learnstd=False, test=False) \n\nargs = parser.parse_args()\n\n# Prepare\n\nenv = gym.make(args.env)\nenv.seed(args.seed)\n\nif type(env.action_space) is Discrete:\n policy = ShallowGibbsPolicy(env, \n temp=1.)\nelse:\n m = sum(env.observation_space.shape)\n d = sum(env.action_space.shape)\n mu_init = torch.zeros(m*d)\n logstd_init = torch.log(torch.zeros(d) + args.std)\n policy = ShallowGaussianPolicy(m, d, \n mu_init=mu_init, \n logstd_init=logstd_init, \n learn_std=args.learnstd)\n\ntest_batchsize = args.batchsize if args.test else 0\n\nenvname = re.sub(r'[^a-zA-Z]', \"\", args.env)[:-1]\nenvname = re.sub(r'[^a-zA-Z]', \"\", args.env)[:-1].lower()\nlogname = envname + '_' + args.name + '_' + str(args.seed)\n\nif args.temp:\n logger = Logger(directory= args.storage + '/temp', name = logname, 
modes=['human', 'csv'])\nelse:\n logger = Logger(directory=args.storage + '/logs', name = logname, modes=['human', 'csv'])\n\nstep = args.step\nstep = 1. / gibbs_lip_const(1., 1., args.disc, 1.)\n\nif args.stepper == 'rmsprop':\n stepper = RMSprop()\nelif args.stepper == 'adam':\n stepper = Adam(alpha=step)\nelse:\n stepper = ConstantStepper(step)\n\n\n# Run\nreinforce(env, policy,\n horizon = args.horizon,\n stepper = stepper,\n batchsize = args.batchsize,\n iterations = args.iterations,\n disc = args.disc,\n entropy_coeff = args.ent,\n seed = args.seed,\n logger = logger,\n render = args.render,\n shallow = True,\n estimator = args.estimator,\n baseline = args.baseline,\n test_batchsize=test_batchsize,\n log_params=False,\n save_params=False)\n"
] | [
[
"numpy.sqrt",
"scipy.stats.norm.pdf",
"numpy.clip",
"numpy.abs",
"numpy.ravel",
"numpy.array",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.asscalar",
"numpy.abs",
"numpy.array_equal",
"numpy.eye",
"numpy.ones",
"numpy.atleast_1d",
"numpy.ndim",
"numpy.size",
"numpy.array",
"numpy.zeros",
"numpy.trace"
],
[
"torch.zeros"
],
[
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jenildesai25/WebScrapping | [
"41937094a7963d53ab09e3ceff055dca4a95f13f"
] | [
"WebScraping2.py"
] | [
"\n# Online References used :\n# https://github.com/imadmali/movie-scraper/blob/master/MojoLinkExtract.py\n# https://www.crummy.com/software/BeautifulSoup/bs4/doc/\n# https://nycdatascience.com/blog/student-works/scraping-box-office-mojo/\n# https://www.youtube.com/watch?v=XQgXKtPSzUI\n# https://www.youtube.com/watch?v=aIPqt-OdmS0\n# https://www.youtube.com/watch?v=XQgXKtPSzUI\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport os\nimport requests\nimport glob\nimport re\n\n\n\ndef scrape_data_for_actors():\n file_path = os.path.join(os.path.join(os.environ['USERPROFILE']),\n 'Desktop') # This is written in order to save the txt file in the user's specified location on the machine\n file_path = os.path.join(file_path,\n 'BoxOfficeMojo2_virti_bipin') # Folder name to be created where the file will be stored\n if not os.path.exists(str(file_path)):\n os.mkdir(str(file_path)) # If path does not exist create the path\n os.chdir(file_path) # Change the directory of the file path\n\n if len(glob.glob(\n \"*\")) != 0: # The glob module finds all the pathnames matching a specified pattern according to the rules used by the Unix shell\n file_list = glob.glob(\"*\")\n for file in file_list:\n os.remove(file)\n\n # The url of the BoxOffice Mojo to be scraped\n url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum=1&sort=sumgross&order=DESC&&p=.htm'\n pages_data = [] # List to store the pages data\n total_pages = []\n response = requests.get(url) # Get the response of the url after passing the user input\n soup = BeautifulSoup(response.content,\n 'html.parser') # Using the beautiful soup library to parse the html content and format it\n for page in soup.find_all('a', href=lambda href: href and \"page\" in href): # find the href in a tags\n pages_data.append(page['href']) # append the data in the pages_data list\n for page in pages_data:\n if 'page' in page: # If \"page\" found in href\n index = page.find('page') # Take the index of that page if found\n\n # print(\"Index\", index)\n if page[index:index + 10] not in total_pages:\n # For extracting the total number of pages\n total_pages.append(page[\n index:index + 10]) # for example : page=2 so in order to get the total number of pages and iterate through it it goes from 1 till end of pages for pagination\n # print(\"Total Pages\", total_pages)\n average_gross_list = []\n for num in range(1, len(total_pages) + 1, 1):\n try:\n url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum={}&sort=sumgross&order=DESC&&p=.htm'.format(num) # This one works well\n # Get the Response\n print(\"Page number {}\".format(num))\n response_from_url = requests.get(url)\n html = response_from_url.text\n soup = BeautifulSoup(html,\n 'lxml') # lxml is a pretty extensive library written for parsing XML and HTML documents very quickly\n table = soup.find('table', {\"cellspacing\": \"1\"})\n # Using dataframes\n df = pd.read_html(str(table),skiprows=1)\n df = df[0]\n\n df = df.iloc[:, :6] # This is used to slice the dataframe to cut off the date sections.\n df.columns = ['rank', 'person', 'total gross', 'number of movies', 'Average', 'number 1 picture']\n df['id'] = ''\n\n id_list = []\n title_list = df['rank'].tolist()\n new_index = [i for i in range(1,len(title_list)+1)]\n df.index = new_index\n for link in soup.findAll('a', {'href': re.compile(\"\\?id=\")}):\n id_list.append(link.get('href'))\n\n id_list = [x.split('=')[1] for x in id_list]\n id_list = [x.split('.')[0] for x in id_list]\n id_list = id_list[1:]\n id_dict = dict(zip(title_list, 
id_list))\n\n for index in df.index:\n df.loc[index, 'id'] = id_dict[df.loc[index, 'rank']]\n\n df.to_csv(\"actors.csv\", index=False, mode='a')\n\n except Exception as e:\n print(e)\n continue\n\n\n file_list = glob.glob(\"*.csv\")\n df_container = []\n\n for file in file_list:\n df = pd.read_csv(file)\n df_container.append(df)\n\n df_combined = pd.concat(df_container)\n df_combined.to_csv(\"actors.txt\", index=False, sep=\"\\t\")\n\n df = pd.read_csv(\"actors.txt\", sep=\"\\t\")\n\n # Data Cleaning\n df['Average'] = df['Average'].apply(lambda x: x.replace('$', '')) # replace dollar signs\n df['Average'] = df['Average'].apply(lambda x: x.replace(',', '')) # replace commas\n\n df['Average'] = pd.to_numeric(df['Average'], errors='coerce')\n\n df = df.sort_values(by='Average', ascending=False)\n\n actor_with_highest_average_earning = df.iloc[0]['person']\n\n print(\"actor(s) with the highest average earnings per movie is {}\".format(actor_with_highest_average_earning))\n new_df = pd.read_csv(\"actors.txt\", sep=\"\\t\")\n\n new_df['number of movies'] = pd.to_numeric(new_df['number of movies'], errors='coerce')\n\n actor_most_movies = new_df.loc[new_df['number of movies'].idxmax()].person\n print(\"actor(s) with the maximum number of movies is {}\".format(actor_most_movies))\n\nif __name__ == '__main__':\n scrape_data_for_actors()\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.to_numeric"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
IKupriyanov-HORIS/lets-plot-docs | [
"30fd31cb03dc649a03518b0c9348639ebfe09d53"
] | [
"docs/_downloads/e0051c6e37b730111a06abd85529e288/plot__2d_distributions.py"
] | [
"\"\"\"\r\n2D Distributions\r\r\n================\r\r\n\r\r\nSome plots visualize a transformation of the original data set. Use a\r\r\nstat parameter to choose a common transformation to visualize.\r\r\n\r\r\nEach stat creates additional variables to map aesthetics to. These\r\r\nvariables use a common ..name.. syntax.\r\r\n\r\r\nLook at the examples of 2D distributions below.\r\r\n\r\n\"\"\"\r\n\r\n# sphinx_gallery_thumbnail_path = \"gallery_py\\_stats\\_2d_distributions.png\"\r\n\r\nimport pandas as pd\r\n\r\nfrom lets_plot import *\r\nLetsPlot.setup_html()\r\n\r\n# %%\r\n\r\ndf = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')\r\n\r\n# %%\r\n\r\nw, h = 400, 300\r\np = ggplot(df, aes('cty', 'hwy')) + ggsize(w, h)\r\np11 = p + geom_bin2d() + ggtitle('geom=\"bin2d\" + default stat')\r\np12 = p + geom_point(aes(color='..count..'), stat='bin2d', size=3, shape=15) + \\\r\n ggtitle('geom=\"point\" + stat=\"bin2d\"')\r\np21 = p + geom_density2d() + ggtitle('geom=\"density2d\" + default stat')\r\np22 = p + geom_point(stat='density2d', size=.5) + ggtitle('geom=\"point\" + stat=\"density2d\"')\r\n\r\nbunch = GGBunch()\r\nbunch.add_plot(p11, 0, 0)\r\nbunch.add_plot(p12, w, 0)\r\nbunch.add_plot(p21, 0, h)\r\nbunch.add_plot(p22, w, h)\r\nbunch"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |