code
stringlengths 31
1.05M
| apis
list | extract_api
stringlengths 97
1.91M
|
---|---|---|
#Amtrak Recursive ROute Writer (ARROW)
#cont- does not write initial .npz file, relies on existing partials
def main(newdata=False,
         cont=False,
         newredund=False,
         arrive=True):
    """Amtrak Recursive ROute Writer (ARROW).

    Builds candidate rail routes between Amtrak city endpoints by
    recursively walking FRA track-segment GeoJSON data, caching the
    segment endpoint arrays in ``endpts.npz`` between runs.

    :param newdata: If True, rebuild endpoint arrays from the raw GeoJSON
        even if a cached ``endpts.npz`` exists
    :param cont: If True, resume from existing partial-route files instead
        of writing a fresh initial partial (does not write initial .npz
        file, relies on existing partials)
    :param newredund: If True, recompute the redundant-segment list
    :param arrive: Passed through to ``route_builder.main``
    """
    import json
    import numpy as np
    import os
    import route_builder
    import glob
    import find_redundancy

    local = 'F:/Python34/America_By_Train/'
    rb = local + 'route_builder/'
    direc = 'C:/Users/Owner/Documents/GitHub/capecchi.github.io/posts/AmericaByTrain/'

    if newdata or not os.path.isfile(local + 'endpts.npz'):
        with open(direc + 'amtrak.geojson') as f:
            data = json.load(f)
        feats = data['features']
        index = np.arange(len(feats))
        strt = []
        end = []
        for i in index:
            cc = feats[i]['geometry']['coordinates']
            strt.append(cc[0])
            end.append(cc[-1])

        # FRAARCID values of segments touching each route's terminal city,
        # plus segments blocked to steer the search the right way.
        stpaulid = 182592            # keep east pt
        stpaul_iarr_cid = 182614     # mark eastern segment as redundant so we only search west
        portland_cid = 266301        # block southern route to Portland
        seattleid = 241310           # keep south pt
        laid = 211793                # keep south pt
        palmspringsid = 263261       # keep west pt
        neworleansid_end = 178659    # keep east pt NOTE does not connect to neworleans_start
        neworleansid_start = 243859  # keep south or east pt
        phillyid = 204870            # keep north pt
        dcid = 164103                # keep south pt
        chicagoid = 253079           # keep north pt

        # Per-route arrays of feature indices to treat as blocked/redundant
        eb_block = np.array([], dtype=int)  # Empire Builder
        cs_block = np.array([], dtype=int)  # Coast Starlight
        sl_block = np.array([], dtype=int)  # Sunset Limited
        cr_block = np.array([], dtype=int)  # Crescent
        cl_block = np.array([], dtype=int)  # Capitol Limited

        for i in index:
            cid = feats[i]['properties']['FRAARCID']
            coords = feats[i]['geometry']['coordinates']
            c1 = coords[0]
            c2 = coords[-1]
            # For each city segment keep the endpoint on the correct side
            # (east/west via lon index 0, north/south via lat index 1).
            if cid == stpaulid:
                stpaul = c1 if c1[0] > c2[0] else c2
            if cid == stpaul_iarr_cid or cid == portland_cid:
                eb_block = np.append(eb_block, i)
            if cid == seattleid:
                seattle = c1 if c1[1] < c2[1] else c2
            if cid == laid:
                la = c1 if c1[1] < c2[1] else c2
            if cid in (seattleid, portland_cid, 189128, 244148, 254149):
                cs_block = np.append(cs_block, i)
            if cid == palmspringsid:
                palmsprings = c1 if c1[0] < c2[0] else c2
            if cid == neworleansid_end:
                neworleans_end = c1 if c1[0] > c2[0] else c2
            if cid in (263258, 266284, 178673):
                sl_block = np.append(sl_block, i)
            if cid == neworleansid_start:
                neworleans_start = c1 if c1[0] > c2[0] else c2
            if cid == phillyid:
                philly = c1 if c1[1] > c2[1] else c2
            if cid in (243812, 204623, 169919, 169921,
                       125491, 164053, 275062, 261822):
                cr_block = np.append(cr_block, i)
            if cid == dcid:
                dc = c1 if c1[1] < c2[1] else c2
            if cid == chicagoid:
                chicago = c1 if c1[1] > c2[1] else c2
            if cid in (252822, 164114, 252939, 152297, 197933, 197961,
                       192650, 192649, 253070, 256677, 193489, 266257, 266676):
                cl_block = np.append(cl_block, i)

        cid = [feats[i]['properties']['FRAARCID'] for i in index]

        if newredund:
            # Identify redundant track segments; reset the saved list first
            # (find_redundancy.main persists its results via 'local')
            fraarcid = [feats[i]['properties']['FRAARCID'] for i in index]
            iredund = np.array([], dtype=int)
            np.save(local + 'redundant', iredund)
            find_redundancy.main(index, strt, end, fraarcid, local)

        # Cache everything so later runs can skip the GeoJSON parse
        np.savez(local + 'endpts', index=index, strt=strt, end=end, cid=cid,
                 stpaul=stpaul, seattle=seattle, la=la, palmsprings=palmsprings,
                 neworleans_end=neworleans_end, neworleans_start=neworleans_start,
                 philly=philly, dc=dc, chicago=chicago, eb_block=eb_block,
                 cs_block=cs_block, sl_block=sl_block, cr_block=cr_block,
                 cl_block=cl_block)
        print('saved endpts arrays and city GPS coords')
    else:
        f = np.load(local + 'endpts.npz')
        index = f['index']
        strt = f['strt']
        end = f['end']
        cid = f['cid']
        stpaul = f['stpaul']
        eb_block = f['eb_block']
        seattle = f['seattle']
        la = f['la']
        cs_block = f['cs_block']
        palmsprings = f['palmsprings']
        neworleans_end = f['neworleans_end']
        sl_block = f['sl_block']
        neworleans_start = f['neworleans_start']
        philly = f['philly']
        cr_block = f['cr_block']
        dc = f['dc']
        chicago = f['chicago']
        cl_block = f['cl_block']

    def run_route(start_pt, dest_pt, outname, block_ids=None):
        """Exhaustively expand partial routes from start_pt toward dest_pt.

        Resumes from any *.npz partials already in the builder directory;
        each partial is consumed (deleted) and handed to route_builder.main,
        which writes new partials until the search space is exhausted.
        """
        ptA = [start_pt]
        iredund = np.load(local + 'redundant.npy')
        if block_ids is not None:
            for i in block_ids:
                iredund = np.append(iredund, i)
        iarr = np.array([], dtype=int)
        if not cont:
            np.savez(rb + 'partial', ptA=ptA, iarr=iarr)
        partials = glob.glob(rb + '*.npz')
        while len(partials) > 0:
            level = 0
            with np.load(partials[0]) as f:
                ptA = f['ptA']
                iarr = f['iarr']
            os.remove(partials[0])
            route_builder.main(ptA, iarr, dest_pt, rb + outname, level,
                               iredund, arrive=arrive)
            partials = glob.glob(rb + '*.npz')

    # EMPIRE BUILDER -- the only route search currently enabled
    if 1:
        print('finding EMPIRE BUILDER routes')
        # NOTE: eb_block is deliberately not applied here (the append loop
        # was commented out in the original)
        run_route(stpaul, seattle, 'empire_builder')
    # COAST STARLIGHT (disabled)
    if 0:
        print('finding COAST STARLIGHT routes')
        run_route(seattle, la, 'coast_starlight', cs_block)
    # SUNSET LIMITED (disabled)
    if 0:
        print('finding SUNSET LIMITED routes')
        run_route(palmsprings, neworleans_end, 'sunset_limited', sl_block)
    # CRESCENT (disabled)
    if 0:
        print('finding CRESCENT routes')
        run_route(neworleans_start, philly, 'crescent', cr_block)
    # CAPITOL LIMITED (disabled)
    if 0:
        print('finding CAPITOL LIMITED routes')
        run_route(dc, chicago, 'capitol_limited', cl_block)
|
[
"numpy.load",
"json.load",
"numpy.save",
"os.remove",
"numpy.append",
"os.path.isfile",
"numpy.array",
"glob.glob",
"find_redundancy.main",
"numpy.savez",
"route_builder.main"
] |
[((1579, 1602), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1587, 1602), True, 'import numpy as np\n'), ((1621, 1644), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1629, 1644), True, 'import numpy as np\n'), ((1663, 1686), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1671, 1686), True, 'import numpy as np\n'), ((1705, 1728), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1713, 1728), True, 'import numpy as np\n'), ((1747, 1770), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1755, 1770), True, 'import numpy as np\n'), ((4384, 4734), 'numpy.savez', 'np.savez', (["(local + 'endpts')"], {'index': 'index', 'strt': 'strt', 'end': 'end', 'cid': 'cid', 'stpaul': 'stpaul', 'seattle': 'seattle', 'la': 'la', 'palmsprings': 'palmsprings', 'neworleans_end': 'neworleans_end', 'neworleans_start': 'neworleans_start', 'philly': 'philly', 'dc': 'dc', 'chicago': 'chicago', 'eb_block': 'eb_block', 'cs_block': 'cs_block', 'sl_block': 'sl_block', 'cr_block': 'cr_block', 'cl_block': 'cl_block'}), "(local + 'endpts', index=index, strt=strt, end=end, cid=cid, stpaul\n =stpaul, seattle=seattle, la=la, palmsprings=palmsprings,\n neworleans_end=neworleans_end, neworleans_start=neworleans_start,\n philly=philly, dc=dc, chicago=chicago, eb_block=eb_block, cs_block=\n cs_block, sl_block=sl_block, cr_block=cr_block, cl_block=cl_block)\n", (4392, 4734), True, 'import numpy as np\n'), ((4847, 4876), 'numpy.load', 'np.load', (["(local + 'endpts.npz')"], {}), "(local + 'endpts.npz')\n", (4854, 4876), True, 'import numpy as np\n'), ((5552, 5584), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (5559, 5584), True, 'import numpy as np\n'), ((5657, 5680), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (5665, 5680), True, 'import numpy as np\n'), ((5761, 5784), 'glob.glob', 
'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (5770, 5784), False, 'import glob\n'), ((6295, 6327), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (6302, 6327), True, 'import numpy as np\n'), ((6411, 6434), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (6419, 6434), True, 'import numpy as np\n'), ((6515, 6538), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (6524, 6538), False, 'import glob\n'), ((7060, 7092), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (7067, 7092), True, 'import numpy as np\n'), ((7176, 7199), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (7184, 7199), True, 'import numpy as np\n'), ((7280, 7303), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (7289, 7303), False, 'import glob\n'), ((7809, 7841), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (7816, 7841), True, 'import numpy as np\n'), ((7925, 7948), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (7933, 7948), True, 'import numpy as np\n'), ((8029, 8052), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (8038, 8052), False, 'import glob\n'), ((8520, 8552), 'numpy.load', 'np.load', (["(local + 'redundant.npy')"], {}), "(local + 'redundant.npy')\n", (8527, 8552), True, 'import numpy as np\n'), ((8636, 8659), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (8644, 8659), True, 'import numpy as np\n'), ((8740, 8763), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (8749, 8763), False, 'import glob\n'), ((511, 547), 'os.path.isfile', 'os.path.isfile', (["(local + 'endpts.npz')"], {}), "(local + 'endpts.npz')\n", (525, 547), False, 'import os\n'), ((615, 627), 'json.load', 'json.load', (['f'], {}), '(f)\n', (624, 627), False, 'import 
json\n'), ((4201, 4224), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4209, 4224), True, 'import numpy as np\n'), ((4236, 4273), 'numpy.save', 'np.save', (["(local + 'redundant')", 'iredund'], {}), "(local + 'redundant', iredund)\n", (4243, 4273), True, 'import numpy as np\n'), ((4295, 4350), 'find_redundancy.main', 'find_redundancy.main', (['index', 'strt', 'end', 'fraarcid', 'local'], {}), '(index, strt, end, fraarcid, local)\n', (4315, 4350), False, 'import find_redundancy\n'), ((5701, 5745), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (5709, 5745), True, 'import numpy as np\n'), ((5958, 5980), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (5967, 5980), False, 'import os\n'), ((5993, 6089), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'seattle', "(rb + 'empire_builder')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, seattle, rb + 'empire_builder', level,\n iredund, arrive=arrive)\n", (6011, 6089), False, 'import route_builder\n'), ((6134, 6157), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (6143, 6157), False, 'import glob\n'), ((6375, 6396), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (6384, 6396), True, 'import numpy as np\n'), ((6455, 6499), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (6463, 6499), True, 'import numpy as np\n'), ((6712, 6734), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (6721, 6734), False, 'import os\n'), ((6747, 6840), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'coast_starlight')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'coast_starlight', level, iredund,\n arrive=arrive)\n", (6765, 6840), False, 'import route_builder\n'), ((6885, 6908), 'glob.glob', 'glob.glob', 
(["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (6894, 6908), False, 'import glob\n'), ((7140, 7161), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (7149, 7161), True, 'import numpy as np\n'), ((7220, 7264), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (7228, 7264), True, 'import numpy as np\n'), ((7477, 7499), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (7486, 7499), False, 'import os\n'), ((7512, 7604), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'sunset_limited')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'sunset_limited', level, iredund,\n arrive=arrive)\n", (7530, 7604), False, 'import route_builder\n'), ((7649, 7672), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (7658, 7672), False, 'import glob\n'), ((7889, 7910), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (7898, 7910), True, 'import numpy as np\n'), ((7969, 8013), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 'partial', ptA=ptA, iarr=iarr)\n", (7977, 8013), True, 'import numpy as np\n'), ((8226, 8248), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (8235, 8248), False, 'import os\n'), ((8261, 8348), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'crescent')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'crescent', level, iredund, arrive=\n arrive)\n", (8279, 8348), False, 'import route_builder\n'), ((8359, 8382), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (8368, 8382), False, 'import glob\n'), ((8600, 8621), 'numpy.append', 'np.append', (['iredund', 'i'], {}), '(iredund, i)\n', (8609, 8621), True, 'import numpy as np\n'), ((8680, 8724), 'numpy.savez', 'np.savez', (["(rb + 'partial')"], {'ptA': 'ptA', 'iarr': 'iarr'}), "(rb + 
'partial', ptA=ptA, iarr=iarr)\n", (8688, 8724), True, 'import numpy as np\n'), ((8937, 8959), 'os.remove', 'os.remove', (['partials[0]'], {}), '(partials[0])\n', (8946, 8959), False, 'import os\n'), ((8972, 9065), 'route_builder.main', 'route_builder.main', (['ptA', 'iarr', 'ptB', "(rb + 'capitol_limited')", 'level', 'iredund'], {'arrive': 'arrive'}), "(ptA, iarr, ptB, rb + 'capitol_limited', level, iredund,\n arrive=arrive)\n", (8990, 9065), False, 'import route_builder\n'), ((9110, 9133), 'glob.glob', 'glob.glob', (["(rb + '*.npz')"], {}), "(rb + '*.npz')\n", (9119, 9133), False, 'import glob\n'), ((2169, 2191), 'numpy.append', 'np.append', (['eb_block', 'i'], {}), '(eb_block, i)\n', (2178, 2191), True, 'import numpy as np\n'), ((2556, 2578), 'numpy.append', 'np.append', (['cs_block', 'i'], {}), '(cs_block, i)\n', (2565, 2578), True, 'import numpy as np\n'), ((2932, 2954), 'numpy.append', 'np.append', (['sl_block', 'i'], {}), '(sl_block, i)\n', (2941, 2954), True, 'import numpy as np\n'), ((3400, 3422), 'numpy.append', 'np.append', (['cr_block', 'i'], {}), '(cr_block, i)\n', (3409, 3422), True, 'import numpy as np\n'), ((3946, 3968), 'numpy.append', 'np.append', (['cl_block', 'i'], {}), '(cl_block, i)\n', (3955, 3968), True, 'import numpy as np\n'), ((5855, 5875), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (5862, 5875), True, 'import numpy as np\n'), ((6609, 6629), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (6616, 6629), True, 'import numpy as np\n'), ((7374, 7394), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (7381, 7394), True, 'import numpy as np\n'), ((8123, 8143), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (8130, 8143), True, 'import numpy as np\n'), ((8834, 8854), 'numpy.load', 'np.load', (['partials[0]'], {}), '(partials[0])\n', (8841, 8854), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
OXASL - Bayesian model fitting for ASL
The BASIL module is a little more complex than the other Workspace based
modules because of the number of options available and the need for flexibility
in how the modelling steps are run.
The main function is ``basil`` which performs model fitting on ASL data
in the Workspace ``asldata`` attribute.
wsp = Workspace()
wsp.asldata = AslImage("asldata.nii.gz", tis=[1.6,])
wsp.infertiss = True
basil(wsp.sub("basil"))
basil.finalstep.mean_ftiss.save("mean_ftiss.nii.gz")
Because of the number of options possible for the modelling process, the
workspace attribute ``basil_options`` can be set as a dictionary of extra
options relevant only to Basil:
wsp = Workspace()
wsp.asldata = AslImage("asldata.nii.gz", tis=[1.6,])
wsp.basil_options = {"infertiss" : True, "spatial" : True}
basil(wsp.sub("basil"))
basil.finalstep.mean_ftiss.save("mean_ftiss.nii.gz")
All options specified in basil_options are either consumed by Basil or,
if not, passed directly to the model.
Copyright (c) 2008-2020 University of Oxford
"""
import sys
import math
import numpy as np
import scipy.ndimage
from fsl.wrappers import LOAD
from fsl.data.image import Image
from oxasl import __version__, __timestamp__, AslImage, Workspace, image, reg
from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions
def basil(wsp, prefit=True, **kwargs):
    """
    Run BASIL modelling on ASL data in a workspace.

    Thin alias of :func:`run`, retained so oxasl_deblur can keep calling
    ``basil(...)``.
    """
    run(wsp, prefit=prefit, **kwargs)
def run(wsp, prefit=True, **kwargs):
    """
    Run BASIL modelling on ASL data in a workspace

    :param wsp: Workspace object
    :param prefit: If True, run a pre-fitting step using the mean over repeats of the ASL data

    Required workspace attributes
    -----------------------------
    - ``asldata`` : AslImage object

    Optional workspace attributes
    -----------------------------
    - ``mask`` : Brain mask (fsl.Image)
    - ``wp`` : If True, use 'white paper' mode (Alsop et al) - modifies some defaults and infers tissue component only
    - ``infertiss`` : If True, infer tissue component (default: True)
    - ``inferbat`` : If True, infer bolus arrival time (default: False)
    - ``infertau`` : If True, infer bolus duration (default: False)
    - ``inferart`` : If True, infer arterial component (default: False)
    - ``infert1`` : If True, infer T1 (default: False)
    - ``inferpc`` : If True, infer PC (default: False)
    - ``t1``: Assumed/initial estimate for tissue T1 (default: 1.65 in white paper mode, 1.3 otherwise)
    - ``t1b``: Assumed/initial estimate for blood T1 (default: 1.65)
    - ``bat``: Assumed/initial estimate for bolus arrival time (s) (default 0 in white paper mode, 1.3 for CASL, 0.7 otherwise)
    - ``t1im`` : T1 map as Image
    - ``pgm`` : Grey matter partial volume map as Image
    - ``pwm`` : White matter partial volume map as Image
    - ``initmvn`` : MVN structure to use as initialization as Image
    - ``spatial`` : If True, include final spatial VB step (default: False)
    - ``onestep`` : If True, do all inference in a single step (default: False)
    - ``basil_options`` : Optional dictionary of additional options for underlying model
    """
    wsp.log.write("\nRunning BASIL Bayesian modelling on ASL data in '%s' data space\n" % wsp.ifnone("image_space", "native"))

    # Single or Multi TI setup
    if wsp.asldata.ntis == 1:
        # Single TI data - don't try to infer arterial component of bolus duration, we don't have enough info
        wsp.log.write(" - Operating in Single TI mode - no arterial component, fixed bolus duration\n")
        wsp.inferart = False
        wsp.infertau = False
        batsd_default = 0.1
    else:
        # For multi TI/PLD data, set a more liberal prior for tissue ATT since we should be able to
        # determine this from the data. NB this leaves the arterial BAT alone.
        batsd_default = 1

    if wsp.wp:
        # White paper mode - this overrides defaults, but can be overwritten by command line
        # specification of individual parameters
        wsp.log.write(" - Analysis in white paper mode: T1 default=1.65, BAT default=0, voxelwise calibration\n")
        t1_default = 1.65
        bat_default = 0.0
    else:
        t1_default = 1.3
        if wsp.asldata.casl:
            bat_default = 1.3
        else:
            bat_default = 0.7

    if wsp.t1 is None:
        wsp.t1 = t1_default
    if wsp.t1b is None:
        wsp.t1b = 1.65
    if wsp.bat is None:
        wsp.bat = bat_default
    if wsp.batsd is None:
        wsp.batsd = batsd_default
    if wsp.infertiss is None:
        wsp.infertiss = True

    # if we are doing CASL then fix the bolus duration, unless explicitly told us otherwise
    if wsp.infertau is None:
        wsp.infertau = not wsp.asldata.casl

    # Pick up extra BASIL options
    wsp.basil_options = dict(wsp.ifnone("basil_options", {}))

    mask_policy = wsp.ifnone("basil_mask", "default")
    if mask_policy in ("default", "dilated"):
        wsp.log.write(" - Using pipeline analysis mask\n")
        # Two possible locations for compatibility
        if wsp.rois is not None and wsp.rois.mask is not None:
            mask = wsp.rois.mask
        else:
            mask = wsp.mask
        if mask_policy == "dilated":
            # Use 3x3x3 kernel for compatibility with fslmaths default
            wsp.log.write(" - Dilating mask for Basil analysis\n")
            struct = scipy.ndimage.generate_binary_structure(3, 3)
            # FIX: np.int was removed in NumPy 1.24 - use the builtin int instead
            mask = Image(scipy.ndimage.binary_dilation(mask.data, structure=struct).astype(int), header=mask.header)
    elif mask_policy == "none":
        wsp.log.write(" - Not using mask for Basil - will fit every voxel\n")
        mask = Image(np.ones(wsp.asldata.data.shape[:3]), header=wsp.asldata.header)
    else:
        raise ValueError("Unrecognized mask policy: %s" % mask_policy)

    # If we only have one volume, set a nominal noise prior as it is not possible to
    # estimate from the data
    if wsp.asldata.nvols / wsp.asldata.ntc == 1:
        wsp.log.write(" - Restricting noise prior as only one ASL volume\n")
        wsp.basil_options["prior-noise-stddev"] = 1.0

    if prefit and max(wsp.asldata.rpts) > 1:
        # Initial BASIL run on mean data
        wsp.log.write(" - Doing initial fit on mean at each TI\n\n")
        init_wsp = wsp.sub("init")
        main_wsp = wsp.sub("main")
        basil_fit(init_wsp, wsp.asldata.mean_across_repeats(), mask=mask)
        # Continue the full fit from the MVN produced by the mean fit
        wsp.basil_options["continue-from-mvn"] = wsp.init.finalstep.finalMVN
        main_wsp.initmvn = wsp.basil_options["continue-from-mvn"]
    else:
        main_wsp = wsp

    # Main run on full ASL data
    wsp.log.write("\n - Doing fit on full ASL data\n\n")
    basil_fit(main_wsp, wsp.asldata, mask=mask)
    wsp.finalstep = main_wsp.finalstep
def basil_fit(wsp, asldata, mask=None):
    """
    Run Bayesian model fitting on ASL data

    See ``basil`` for details of workspace attributes used

    :param wsp: Workspace object
    :param asldata: AslImage object to use as input data
    :param mask: Optional brain mask image restricting the fit
    """
    # Multi-TE data uses a different model/step sequence
    if len(asldata.tes) > 1:
        steps = basil_steps_multite(wsp, asldata, mask)
    else:
        steps = basil_steps(wsp, asldata, mask)

    prev_result = None
    # Differenced data in repeat/TI order - used to derive model fit images below
    wsp.asldata_diff = asldata.diff().reorder("rt")
    wsp.basil_mask = mask
    for idx, step in enumerate(steps):
        # Each step gets its own sub-workspace (step1, step2, ...)
        step_wsp = wsp.sub("step%i" % (idx+1))
        desc = "Step %i of %i: %s" % (idx+1, len(steps), step.desc)
        if prev_result is not None:
            desc += " - Initialise with step %i" % idx
        step_wsp.log.write(desc + " ")
        result = step.run(prev_result, log=wsp.log, fsllog=wsp.fsllog,
                          fabber_corelib=wsp.fabber_corelib, fabber_libs=wsp.fabber_libs,
                          fabber_coreexe=wsp.fabber_coreexe, fabber_exes=wsp.fabber_exes)
        for key, value in result.items():
            if key == "modelfit":
                # Treat model fit specially - make it an AslImage and also output a mean
                # across repeats version for comparison
                value = wsp.asldata_diff.derived(value.data, header=value.header)
                modelfit_mean = value.mean_across_repeats()
                setattr(step_wsp, "modelfit_mean", modelfit_mean)
            setattr(step_wsp, key, value)
        # Preserve the per-step Fabber logfile alongside the outputs
        if step_wsp.logfile is not None and step_wsp.savedir is not None:
            step_wsp.set_item("logfile", step_wsp.logfile, save_fn=str)

        # Each step is initialised from the previous step's posterior
        prev_result = result
    wsp.finalstep = step_wsp
    wsp.log.write("\nEnd\n")
def _calc_slicedt(wsp, options):
    """
    Calculate the slicedt for basil given that we may be quantifying in
    a space other than the usual ASL space

    We do this by generating a slice time offset image and transforming it
    to quantification space. Since this could be rotated wrt to the asl data
    we may need to warn if the resulting image has significant slice time variation
    across X or Y axes

    :param wsp: Workspace object (``tiimg`` attribute is set as a side effect)
    :param options: Fabber options dict - mutated in place: ``slicedt`` and
        ``ti<n>`` entries are replaced by a single per-voxel ``tiimg`` when
        quantifying outside native space
    """
    img_space = wsp.ifnone("image_space", "native")
    if img_space != "native":
        asldata = options["data"]
        # Slice-time offset varies only along z; broadcast the TI list across
        # the volume so each voxel gets its slice-corrected TI
        _x, _y, z, _t = np.indices(list(asldata.data.shape[:3]) + [asldata.ntis,])
        tis_arr = np.array(asldata.tis) + (z.astype(np.float32) * options["slicedt"])
        tis_img = Image(tis_arr, header=options["data"].header)
        wsp.tiimg = reg.change_space(wsp, tis_img, wsp.ifnone("image_space", "native"))

        # Replace the scalar slicedt/ti options with the per-voxel TI image
        # (leftover debug print() statements removed)
        del options["slicedt"]
        ti_idx = 1
        while "ti%i" % ti_idx in options:
            del options["ti%i" % ti_idx]
            ti_idx += 1
        options["tiimg"] = wsp.tiimg
def basil_steps(wsp, asldata, mask=None):
    """
    Get the steps required for a BASIL run

    This is separated for the case where an alternative process wants to run
    the actual modelling, or so that the steps can be checked prior to doing
    an actual run.

    Arguments are the same as the ``basil`` function. No workspace is required.

    :param wsp: Workspace object
    :param asldata: AslImage object to use as input data
    :param mask: Optional brain mask image
    :return: List of step objects (FabberStep / PvcInitStep) to execute in order
    :raise ValueError: if input data is None, PV maps are inconsistent, or no
        parameters were set to be inferred
    """
    if asldata is None:
        raise ValueError("Input ASL data is None")

    wsp.log.write("BASIL v%s\n" % __version__)
    asldata.summary(log=wsp.log)
    asldata = asldata.diff().reorder("rt")

    # Default Fabber options for VB runs and spatial steps. Note that attributes
    # which are None (e.g. sliceband) are not passed to Fabber
    options = {
        "data" : asldata,
        "model" : "aslrest",
        "disp" : "none",
        "exch" : "mix",
        "method" : "vb",
        "noise" : "white",
        "allow-bad-voxels" : True,
        "max-iterations" : 20,
        "convergence" : "trialmode",
        "max-trials" : 10,
        "save-mean" : True,
        "save-mvn" : True,
        "save-std" : True,
        "save-model-fit" : True,
        "save-residuals" : wsp.ifnone("output_residuals", False),
    }

    if mask is not None:
        options["mask"] = mask

    # We choose to pass TIs (not PLDs). The asldata object ensures that
    # TIs are correctly derived from PLDs, when these are specified, by adding
    # the bolus duration.
    for idx, ti in enumerate(asldata.tis):
        options["ti%i" % (idx+1)] = ti
        options["rpt%i" % (idx+1)] = asldata.rpts[idx]

    # Bolus duration - use a single value where possible as cannot infer otherwise
    taus = getattr(asldata, "taus", [1.8,])
    if min(taus) == max(taus):
        options["tau"] = taus[0]
    else:
        for idx, tau in enumerate(taus):
            options["tau%i" % (idx+1)] = tau

    # Other asl data parameters
    for attr in ("casl", "slicedt", "sliceband"):
        if getattr(asldata, attr, None) is not None:
            options[attr] = getattr(asldata, attr)

    # May replace scalar slicedt/ti options with a per-voxel TI image
    _calc_slicedt(wsp, options)

    if wsp.noiseprior:
        # Use an informative noise prior
        if wsp.noisesd is None:
            snr = wsp.ifnone("snr", 10)
            wsp.log.write(" - Using SNR of %f to set noise std dev\n" % snr)

            # Estimate signal magnitude FIXME diffdata_mean is always 3D?
            if wsp.diffdata_mean.ndim > 3:
                datamax = np.amax(wsp.diffdata_mean.data, 3)
            else:
                datamax = wsp.diffdata_mean.data
            brain_mag = np.mean(datamax.data[mask.data != 0])
            # this will correspond to whole brain CBF (roughly) - about 0.5 of GM
            noisesd = math.sqrt(brain_mag * 2 / snr)
        else:
            noisesd = wsp.noisesd
        wsp.log.write(" - Using a prior noise sd of: %f\n" % noisesd)
        options["prior-noise-stddev"] = noisesd

    # Add Basil-specific options defined on the workspace
    options.update(wsp.ifnone("basil_options", {}))

    # Additional optional workspace arguments
    for attr in ("t1", "t1b", "bat", "FA", "pwm", "pgm", "batsd"):
        value = getattr(wsp, attr)
        if value is not None:
            options[attr] = value

    # Options for final spatial step
    prior_type_spatial = "M"
    prior_type_mvs = "A"
    options_svb = {
        "method" : "spatialvb",
        "param-spatial-priors" : "N+",
        "convergence" : "maxits",
        "max-iterations": 20,
    }

    wsp.log.write("Model (in fabber) is : %s\n" % options["model"])
    wsp.log.write("Dispersion model option is %s\n" % options["disp"])
    wsp.log.write("Compartment exchange model option is %s\n" % options["exch"])
    inferdisp = options["disp"] != "none"
    inferexch = options["exch"] != "mix"

    # Partial volume correction
    pvcorr = "pgm" in options or "pwm" in options
    if pvcorr:
        if not wsp.infertiss:
            raise ValueError("ERROR: PV correction is not compatible with --artonly option (there is no tissue component)")

        options["incpve"] = True
        if "pgm" not in options or "pwm" not in options:
            raise ValueError("Only one partial volume map (GM / WM) was supplied for PV correctioN")

        # Need a spatial step with more iterations for the PV correction
        wsp.spatial = True
        options_svb["max-iterations"] = 200

        # Ignore partial volumes below 0.1; clamp to [0, 1]
        pgm_img = options.pop("pgm")
        pwm_img = options.pop("pwm")
        pgm = np.copy(pgm_img.data)
        pwm = np.copy(pwm_img.data)
        pgm[pgm < 0.1] = 0
        pgm[pgm > 1] = 1
        pwm[pwm < 0.1] = 0
        pwm[pwm > 1] = 1
        pgm = Image(pgm, header=pgm_img.header)
        pwm = Image(pwm, header=pwm_img.header)

    # Set general parameter inference and inclusion
    if wsp.infertiss:
        options["inctiss"] = True
    if wsp.inferbat:
        options["incbat"] = True
        options["inferbat"] = True # Infer in first step
    if wsp.inferart:
        options["incart"] = True
    if wsp.inferpc:
        options["incpc"] = True
    if wsp.infertau:
        options["inctau"] = True
    if wsp.infert1:
        options["inct1"] = True

    # Keep track of the number of spatial priors specified by name
    spriors = 1

    if wsp.initmvn:
        # we are being supplied with an initial MVN
        wsp.log.write("Initial MVN being loaded %s\n" % wsp.initmvn.name)
        options["continue-from-mvn"] = wsp.initmvn

    # T1 image prior
    if wsp.t1im is not None:
        spriors = _add_prior(options, spriors, "T_1", type="I", image=wsp.t1im)

    # BAT image prior
    if wsp.batim is not None:
        # With a BAT image prior we must include BAT even if we are not inferring it
        # (in this case the image prior will be treated as ground truth)
        spriors = _add_prior(options, spriors, "delttiss", type="I", image=wsp.batim)
        options["incbat"] = True

    # Each module below appends one VB step (unless onestep) and accumulates
    # its inferred parameters into the running ``components`` description
    steps = []
    components = ""

    ### --- TISSUE MODULE ---
    if wsp.infertiss:
        components += " Tissue "
        options["infertiss"] = True
        step_desc = "VB - %s" % components
        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

        # setup spatial priors ready
        spriors = _add_prior(options_svb, spriors, "ftiss", type=prior_type_spatial)

    ### --- ARTERIAL MODULE ---
    if wsp.inferart:
        components += " Arterial "
        options["inferart"] = True
        step_desc = "VB - %s" % components
        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

        # setup spatial priors ready
        spriors = _add_prior(options_svb, spriors, "fblood", type=prior_type_mvs)

    ### --- BOLUS DURATION MODULE ---
    if wsp.infertau:
        components += " Bolus duration "
        options["infertau"] = True
        step_desc = "VB - %s" % components
        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

    ### --- MODEL EXTENSIONS MODULE ---
    # Add variable dispersion and/or exchange parameters and/or pre-capiliary
    if inferdisp or inferexch or wsp.inferpc:
        if inferdisp:
            components += " dispersion"
            options["inferdisp"] = True
        if inferexch:
            components += " exchange"
            options["inferexch"] = True
        if wsp.inferpc:
            components += " pre-capiliary"
            options["inferpc"] = True

        step_desc = "VB - %s" % components
        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

    ### --- T1 MODULE ---
    if wsp.infert1:
        components += " T1 "
        options["infert1"] = True
        step_desc = "VB - %s" % components
        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

    ### --- PV CORRECTION MODULE ---
    if pvcorr:
        # setup ready for PV correction, which has to be done with spatial priors
        components += " PVE"
        options["pvcorr"] = True

        # set the image priors for the PV maps
        spriors = _add_prior(options, spriors, "pvgm", type="I", image=pgm)
        spriors = _add_prior(options, spriors, "pvwm", type="I", image=pwm)
        spriors = _add_prior(options, spriors, "fwm", type="M")

        if steps:
            # Add initialisaiton step for PV correction - ONLY if we have something to init from
            steps.append(PvcInitStep(wsp, {"data" : asldata, "mask" : mask, "pgm" : pgm, "pwm" : pwm}, "PVC initialisation"))

    ### --- SPATIAL MODULE ---
    if wsp.spatial:
        step_desc = "Spatial VB - %s" % components
        options.update(options_svb)
        del options["max-trials"]

        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

    ### --- SINGLE-STEP OPTION ---
    if wsp.onestep:
        steps.append(FabberStep(wsp, options, step_desc))

    if not steps:
        raise ValueError("No steps were generated - no parameters were set to be inferred")

    return steps
def basil_steps_multite(wsp, asldata, mask=None, **kwargs):
    """
    Get the steps required for a BASIL run on multi-TE data

    This is separated for the case where an alternative process wants to run
    the actual modelling, or so that the steps can be checked prior to doing
    an actual run.

    Arguments are the same as the ``basil`` function.

    :param wsp: Workspace providing user options (infertau, infert1, infert2,
                infertexch, spatial, onestep, initmvn, t1im, batim, ...)
    :param asldata: ASL data object; must provide tis, tes, taus, rpts
    :param mask: Optional analysis mask passed to Fabber
    :param kwargs: Extra Fabber options, overriding the defaults built here
    :return: List of FabberStep objects to be run in sequence
    :raise ValueError: if asldata is None, if bolus durations or repeats vary,
                       or if no steps were generated
    """
    if asldata is None:
        raise ValueError("Input ASL data is None")

    wsp.log.write("BASIL v%s\n" % __version__)
    asldata.summary(log=wsp.log)
    # Tag-control subtraction, then reorder to repeats-within-TIs ("rt")
    asldata = asldata.diff().reorder("rt")

    # Default Fabber options for VB runs and spatial steps. Note that attributes
    # which are None (e.g. sliceband) are not passed to Fabber
    options = {
        "data" : asldata,
        "model" : "asl_multite",
        "method" : "vb",
        "noise" : "white",
        "allow-bad-voxels" : True,
        "max-iterations" : 20,
        "convergence" : "trialmode",
        "max-trials" : 10,
        "save-mean" : True,
        "save-mvn" : True,
        "save-std" : True,
        "save-model-fit" : True,
    }

    if mask is not None:
        options["mask"] = mask

    # We choose to pass TIs (not PLDs). The asldata object ensures that
    # TIs are correctly derived from PLDs, when these are specified, by adding
    # the bolus duration.
    _list_option(options, asldata.tis, "ti")

    # Pass multiple TEs
    _list_option(options, asldata.tes, "te")

    # Bolus duration must be constant for multi-TE model
    if min(asldata.taus) != max(asldata.taus):
        raise ValueError("Multi-TE model does not support variable bolus durations")
    else:
        options["tau"] = asldata.taus[0]

    # Repeats must be constant for multi-TE model
    if min(asldata.rpts) != max(asldata.rpts):
        raise ValueError("Multi-TE model does not support variable repeats")
    else:
        options["repeats"] = asldata.rpts[0]

    # Other asl data parameters
    for attr in ("casl", "slicedt", "sliceband"):
        if getattr(asldata, attr, None) is not None:
            options[attr] = getattr(asldata, attr)

    # Keyword arguments override options
    options.update(kwargs)

    # Additional optional workspace arguments
    for attr in ("t1", "t1b", "t2", "t2b"):
        value = getattr(wsp, attr)
        if value is not None:
            if attr.startswith("t2"):
                # Model expects T2 in seconds not ms
                options[attr] = float(value) / 1000
            else:
                options[attr] = value

    # Options for final spatial step
    # NOTE(review): prior_type_mvs is unused in the multi-TE path (no arterial
    # component); presumably kept for parity with basil_steps - verify
    prior_type_spatial = "M"
    prior_type_mvs = "A"
    options_svb = {
        "method" : "spatialvb",
        "param-spatial-priors" : "N+",
        "convergence" : "maxits",
        "max-iterations": 20,
    }

    wsp.log.write("Model (in fabber) is : %s\n" % options["model"])

    # Set general parameter inference and inclusion.
    # ftiss and BAT are always inferred by the multi-TE model, so the usual
    # infertiss/inferbat flags only produce warnings here.
    if not wsp.infertiss:
        wsp.log.write("WARNING: infertiss=False but ftiss is always inferred in multi-TE model\n")
    if not wsp.inferbat:
        wsp.log.write("WARNING: inferbat=False but BAT is always inferred in multi-TE model\n")
    if wsp.inferart:
        wsp.log.write("WARNING: inferart=True but multi-TE model does not support arterial component\n")
    if wsp.infertau:
        options["infertau"] = True
    if wsp.infert1:
        options["infert1"] = True
    if wsp.infert2:
        options["infert2"] = True

    # Keep track of the number of spatial priors specified by name
    spriors = 1

    if wsp.initmvn:
        # we are being supplied with an initial MVN
        wsp.log.write("Initial MVN being loaded %s\n" % wsp.initmvn.name)
        options["continue-from-mvn"] = wsp.initmvn

    # T1 image prior
    if wsp.t1im:
        spriors = _add_prior(options, spriors, "T_1", type="I", image=wsp.t1im)

    # BAT image prior
    if wsp.batim is not None:
        # With a BAT image prior we must include BAT even if we are not inferring it
        # (in this case the image prior will be treated as ground truth)
        spriors = _add_prior(options, spriors, "delttiss", type="I", image=wsp.batim)
        options["incbat"] = True

    steps = []
    components = ""

    ### --- TISSUE MODULE ---
    #if wsp.infertiss:
    if True:
        components += " Tissue"
        ### Inference options
        if wsp.infertau:
            components += " Bolus duration"
            options["infertau"] = True
        if wsp.infert1:
            components += " T1"
            options["infert1"] = True
        if wsp.infertexch:
            components += " Exchange time"
            options["infertexch"] = True
        step_desc = "VB - %s" % components
        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

        # Setup spatial priors ready
        spriors = _add_prior(options_svb, spriors, "ftiss", type=prior_type_spatial)

    ### --- SPATIAL MODULE ---
    if wsp.spatial:
        step_desc = "Spatial VB - %s" % components
        options.update(options_svb)
        # trialmode convergence is replaced by maxits in the spatial step
        del options["max-trials"]

        if not wsp.onestep:
            steps.append(FabberStep(wsp, options, step_desc))

    ### --- SINGLE-STEP OPTION ---
    if wsp.onestep:
        steps.append(FabberStep(wsp, options, step_desc))

    if not steps:
        raise ValueError("No steps were generated - no parameters were set to be inferred")

    return steps
def _list_option(options, values, name):
for idx, value in enumerate(values):
options["%s%i" % (name, idx+1)] = value
def _add_prior(options, prior_idx, param, **kwargs):
options["PSP_byname%i" % prior_idx] = param
for key, value in kwargs.items():
options["PSP_byname%i_%s" % (prior_idx, key)] = value
return prior_idx + 1
class Step(object):
    """
    A step in the Basil modelling process
    """
    def __init__(self, wsp, options, desc):
        # Copy so the caller's option dict is never mutated
        self.options = dict(options)
        self.desc = desc

        # Resample every Image-valued option into the target image space
        for key, poss_img in list(self.options.items()):
            if not isinstance(poss_img, Image):
                continue
            image_space = wsp.ifnone("image_space", "native")
            self.options[key] = reg.change_space(wsp, poss_img, image_space, mask=(key == 'mask'))
class FabberStep(Step):
    """
    A Basil step which involves running Fabber
    """
    def run(self, prev_output, log=sys.stdout, fsllog=None, **kwargs):
        """
        Run Fabber, initialising it from the output of a previous step

        :param prev_output: Result dict from the previous step, or None.
                            If given, its "finalMVN" entry initialises this
                            run via Fabber's continue-from-mvn option.
        :param log: Stream receiving Fabber progress output
        :param fsllog: Log destination for the FSL wrapper
        :param kwargs: Extra keyword arguments passed through to fabber()
        :return: Fabber result dict (images kept in memory via output=LOAD)
        """
        if prev_output is not None:
            self.options["continue-from-mvn"] = prev_output["finalMVN"]
        # Local import - presumably to avoid a circular import at module
        # load time; verify before moving to the top of the file
        from .wrappers import fabber
        ret = fabber(self.options, output=LOAD, progress_log=log, log=fsllog, **kwargs)
        log.write("\n")
        return ret
class PvcInitStep(Step):
    """
    A Basil step which initialises partial volume correction
    """
    def run(self, prev_output, log=sys.stdout, fsllog=None, **kwargs):
        """
        Update the MVN from a previous step to include initial estimates
        for PVC parameters

        :param prev_output: Result dict from the previous step; must contain
                            "mean_ftiss", "finalMVN" and "paramnames"
        :param log: Stream for progress messages
        :param fsllog: Log destination for the FSL wrappers
        :return: Dict with the updated "finalMVN" plus the "gmcbf_init" and
                 "wmcbf_init" images written into it
        """
        log.write("Initialising partial volume correction...\n")
        # set the initial GM and WM values using a simple PV correction
        # (WM CBF is assumed to be a fixed fraction of GM CBF)
        wm_cbf_ratio = 0.4

        # Modified pvgm map - clamped below at 0.2 so the division for
        # gmcbf_init cannot blow up in voxels with little grey matter
        temp_pgm = np.copy(self.options["pgm"].data)
        temp_pgm[temp_pgm < 0.2] = 0.2

        # First part of correction: pseudo WM CBF term
        prev_ftiss = prev_output["mean_ftiss"].data
        wm_cbf_term = (prev_ftiss * wm_cbf_ratio) * self.options["pwm"].data

        # Remove the WM contribution, then scale by (clamped) GM fraction
        gmcbf_init = (prev_ftiss - wm_cbf_term) / temp_pgm
        wmcbf_init = gmcbf_init * wm_cbf_ratio

        mvn = prev_output["finalMVN"]
        gmcbf_init = Image(gmcbf_init, header=mvn.header)
        wmcbf_init = Image(wmcbf_init, header=mvn.header)

        # HACK: This seems to be required to get the fslpy decorators to write
        # the temporary file correctly
        mask = Image(self.options["mask"].data, header=self.options["mask"].header)

        # load these into the MVN (mvntool parameter indices are 1-based)
        mvn = prev_output["finalMVN"]
        from .wrappers import mvntool
        params = prev_output["paramnames"]
        mvn = mvntool(mvn, params.index("ftiss")+1, output=LOAD, mask=mask, write=True, valim=gmcbf_init, var=0.1, log=fsllog)["output"]
        mvn = mvntool(mvn, params.index("fwm")+1, output=LOAD, mask=mask, write=True, valim=wmcbf_init, var=0.1, log=fsllog)["output"]
        log.write("DONE\n")
        return {"finalMVN" : mvn, "gmcbf_init" : gmcbf_init, "wmcbf_init" : wmcbf_init}
class BasilOptions(OptionCategory):
    """
    BASIL option category
    """

    def __init__(self):
        OptionCategory.__init__(self, "basil")

    def groups(self, parser):
        """Build the BASIL command-line option groups.

        :param parser: Parser the OptionGroups are attached to
        :return: List of four OptionGroups: inference, model, PVC, special
        """
        groups = []

        # Core inference switches and fitting behaviour
        group = OptionGroup(parser, "BASIL options")
        group.add_option("--infertau", help="Infer bolus duration", action="store_true", default=False)
        group.add_option("--inferart", help="Infer macro vascular (arterial) signal component (not supported for multi-TE data)", action="store_true", default=False)
        group.add_option("--inferpc", help="Infer pre-capillary signal component (not supported for multi-TE data)", action="store_true", default=False)
        group.add_option("--infert1", help="Include uncertainty in T1 values", action="store_true", default=False)
        group.add_option("--infertexch", help="Infer exchange time (multi-TE data only)", action="store_true", default=False)
        group.add_option("--artonly", help="Remove tissue component and infer only arterial component (not supported for multi-TE data)", action="store_true", default=False)
        # store_false: default True means BAT *is* inferred; --fixbat disables it
        group.add_option("--fixbat", help="Fix bolus arrival time", action="store_false", default=True)
        group.add_option("--batsd", help="Bolus arrival time standard deviation (s) - default 1.0 for multi-PLD, 0.1 otherwise", type=float)
        group.add_option("--spatial", help="Add step that implements adaptive spatial smoothing on CBF", action="store_true", default=False)
        # NOTE(review): '--fast' help text has an unbalanced paren and
        # '--noisesd' help says 'nosie' - runtime strings left unchanged here
        group.add_option("--fast", help="Faster analysis (1=faster, 2=single step", type=int, default=0)
        group.add_option("--noiseprior", help="Use an informative prior for the noise estimation", action="store_true", default=False)
        group.add_option("--noisesd", help="Set a custom noise std. dev. for the nosie prior", type=float)
        group.add_option("--basil-mask", help="Masking policy to use for Basil model fitting. Does not affect analysis mask used in rest of pipeline. 'dilate' means dilate the default analysis mask. 'none' means use no masking",
                         type="choice", choices=["default", "dilated", "none"])
        group.add_option("--basil-options", "--fit-options", help="File containing additional options for model fitting step", type="optfile")
        groups.append(group)

        # Kinetic model variants
        group = OptionGroup(parser, "Model options")
        group.add_option("--disp", help="Model for label dispersion", default="none")
        group.add_option("--exch", help="Model for tissue exchange (residue function)", default="mix")
        groups.append(group)

        # Partial volume correction inputs
        group = OptionGroup(parser, "Partial volume correction / CBF estimation (enforces --spatial)")
        group.add_option("--pgm", help="Gray matter PV map", type="image")
        group.add_option("--pwm", help="White matter PV map", type="image")
        groups.append(group)

        # Voxelwise image priors
        group = OptionGroup(parser, "Special options")
        group.add_option("--t1im", help="Voxelwise T1 tissue estimates", type="image")
        group.add_option("--batim", "--attim", help="Voxelwise BAT (ATT) estimates in seconds", type="image")
        groups.append(group)

        return groups
def main():
    """
    Entry point for BASIL command line application

    Parses command-line options, builds an AslImage and Workspace from them,
    applies the --artonly and --fast shortcuts, and runs basil(). ValueErrors
    are reported to stderr and exit with status 1.
    """
    try:
        # NOTE(review): sys.argv (including argv[0]) is passed whole -
        # presumably AslOptionParser handles the program name; verify
        parser = AslOptionParser(usage="basil -i <ASL input file> [options...]", version=__version__)
        parser.add_category(image.AslImageOptions())
        parser.add_category(BasilOptions())
        parser.add_category(GenericOptions())

        options, _ = parser.parse_args(sys.argv)
        if not options.output:
            options.output = "basil"

        if not options.asldata:
            sys.stderr.write("Input file not specified\n")
            parser.print_help()
            sys.exit(1)

        asldata = AslImage(options.asldata, **parser.filter(options, "image"))
        wsp = Workspace(savedir=options.output, **vars(options))
        wsp.asldata = asldata

        # Deal with --artonly: drop tissue component, keep arterial
        if wsp.artonly:
            wsp.infertiss = False
            wsp.inferart = True

        # Adjust number of iterations based on fast option
        # 0: full (20 iters, 10 trials), 1: reduced, 2: reduced + single step
        if not wsp.fast:
            num_iter, num_trials, onestep = 20, 10, False
        elif wsp.fast == 1:
            num_iter, num_trials, onestep = 5, 2, False
        elif wsp.fast == 2:
            num_iter, num_trials, onestep = 10, 5, True
        else:
            raise ValueError("Not a valid option for fast: %s" % str(wsp.fast))
        wsp.max_iterations = num_iter
        wsp.max_trials = num_trials
        wsp.onestep = onestep

        # Run BASIL processing, passing options as keyword arguments using **
        basil(wsp)

    except ValueError as exc:
        sys.stderr.write("\nERROR: " + str(exc) + "\n")
        sys.stderr.write("Use --help for usage information\n")
        sys.exit(1)

if __name__ == "__main__":
    main()
|
[
"oxasl.image.AslImageOptions",
"numpy.copy",
"math.sqrt",
"oxasl.options.OptionGroup",
"numpy.ones",
"fsl.data.image.Image",
"numpy.amax",
"sys.stderr.write",
"numpy.mean",
"numpy.array",
"oxasl.reg.change_space",
"oxasl.options.AslOptionParser",
"oxasl.options.GenericOptions",
"sys.exit",
"oxasl.options.OptionCategory.__init__"
] |
[((9452, 9497), 'fsl.data.image.Image', 'Image', (['tis_arr'], {'header': "options['data'].header"}), "(tis_arr, header=options['data'].header)\n", (9457, 9497), False, 'from fsl.data.image import Image\n'), ((14347, 14368), 'numpy.copy', 'np.copy', (['pgm_img.data'], {}), '(pgm_img.data)\n', (14354, 14368), True, 'import numpy as np\n'), ((14383, 14404), 'numpy.copy', 'np.copy', (['pwm_img.data'], {}), '(pwm_img.data)\n', (14390, 14404), True, 'import numpy as np\n'), ((14523, 14556), 'fsl.data.image.Image', 'Image', (['pgm'], {'header': 'pgm_img.header'}), '(pgm, header=pgm_img.header)\n', (14528, 14556), False, 'from fsl.data.image import Image\n'), ((14571, 14604), 'fsl.data.image.Image', 'Image', (['pwm'], {'header': 'pwm_img.header'}), '(pwm, header=pwm_img.header)\n', (14576, 14604), False, 'from fsl.data.image import Image\n'), ((26244, 26277), 'numpy.copy', 'np.copy', (["self.options['pgm'].data"], {}), "(self.options['pgm'].data)\n", (26251, 26277), True, 'import numpy as np\n'), ((26668, 26704), 'fsl.data.image.Image', 'Image', (['gmcbf_init'], {'header': 'mvn.header'}), '(gmcbf_init, header=mvn.header)\n', (26673, 26704), False, 'from fsl.data.image import Image\n'), ((26726, 26762), 'fsl.data.image.Image', 'Image', (['wmcbf_init'], {'header': 'mvn.header'}), '(wmcbf_init, header=mvn.header)\n', (26731, 26762), False, 'from fsl.data.image import Image\n'), ((26897, 26965), 'fsl.data.image.Image', 'Image', (["self.options['mask'].data"], {'header': "self.options['mask'].header"}), "(self.options['mask'].data, header=self.options['mask'].header)\n", (26902, 26965), False, 'from fsl.data.image import Image\n'), ((27620, 27658), 'oxasl.options.OptionCategory.__init__', 'OptionCategory.__init__', (['self', '"""basil"""'], {}), "(self, 'basil')\n", (27643, 27658), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((27727, 27763), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""BASIL options"""'], 
{}), "(parser, 'BASIL options')\n", (27738, 27763), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((29833, 29869), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""Model options"""'], {}), "(parser, 'Model options')\n", (29844, 29869), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((30105, 30195), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""Partial volume correction / CBF estimation (enforces --spatial)"""'], {}), "(parser,\n 'Partial volume correction / CBF estimation (enforces --spatial)')\n", (30116, 30195), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((30389, 30427), 'oxasl.options.OptionGroup', 'OptionGroup', (['parser', '"""Special options"""'], {}), "(parser, 'Special options')\n", (30400, 30427), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((30783, 30872), 'oxasl.options.AslOptionParser', 'AslOptionParser', ([], {'usage': '"""basil -i <ASL input file> [options...]"""', 'version': '__version__'}), "(usage='basil -i <ASL input file> [options...]', version=\n __version__)\n", (30798, 30872), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((9336, 9357), 'numpy.array', 'np.array', (['asldata.tis'], {}), '(asldata.tis)\n', (9344, 9357), True, 'import numpy as np\n'), ((12408, 12445), 'numpy.mean', 'np.mean', (['datamax.data[mask.data != 0]'], {}), '(datamax.data[mask.data != 0])\n', (12415, 12445), True, 'import numpy as np\n'), ((12550, 12580), 'math.sqrt', 'math.sqrt', (['(brain_mag * 2 / snr)'], {}), '(brain_mag * 2 / snr)\n', (12559, 12580), False, 'import math\n'), ((30896, 30919), 'oxasl.image.AslImageOptions', 'image.AslImageOptions', ([], {}), '()\n', (30917, 30919), False, 'from oxasl import __version__, __timestamp__, AslImage, Workspace, image, reg\n'), 
((30993, 31009), 'oxasl.options.GenericOptions', 'GenericOptions', ([], {}), '()\n', (31007, 31009), False, 'from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions\n'), ((31174, 31220), 'sys.stderr.write', 'sys.stderr.write', (['"""Input file not specified\n"""'], {}), "('Input file not specified\\n')\n", (31190, 31220), False, 'import sys\n'), ((31265, 31276), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (31273, 31276), False, 'import sys\n'), ((32275, 32329), 'sys.stderr.write', 'sys.stderr.write', (['"""Use --help for usage information\n"""'], {}), "('Use --help for usage information\\n')\n", (32291, 32329), False, 'import sys\n'), ((32338, 32349), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (32346, 32349), False, 'import sys\n'), ((5836, 5871), 'numpy.ones', 'np.ones', (['wsp.asldata.data.shape[:3]'], {}), '(wsp.asldata.data.shape[:3])\n', (5843, 5871), True, 'import numpy as np\n'), ((12282, 12316), 'numpy.amax', 'np.amax', (['wsp.diffdata_mean.data', '(3)'], {}), '(wsp.diffdata_mean.data, 3)\n', (12289, 12316), True, 'import numpy as np\n'), ((25138, 25202), 'oxasl.reg.change_space', 'reg.change_space', (['wsp', 'poss_img', 'image_space'], {'mask': "(key == 'mask')"}), "(wsp, poss_img, image_space, mask=key == 'mask')\n", (25154, 25202), False, 'from oxasl import __version__, __timestamp__, AslImage, Workspace, image, reg\n')]
|
import binascii
import numpy as np
import copy
from scapy.all import TCP, UDP, IP, IPv6, ARP, raw
def get_packet_matrix(packet):
    """
    Transform a packet content into 1D array of bytes

    Parameters
    ----------
    packet : an IP packet

    Returns
    -------
    1D ndarray of packet bytes (dtype uint8)
    """
    # Decode the raw bytes directly rather than round-tripping through a hex
    # string (binascii.hexlify + per-byte int(..., 16)): np.frombuffer reads
    # each byte as one uint8 in a single C-level pass, producing the same
    # values. .copy() keeps the result writable, as the original was
    # (frombuffer over immutable bytes yields a read-only view).
    return np.frombuffer(raw(packet), dtype=np.uint8).copy()
def santize_packet_zeros(packet):
    """
    This method sanitize a packet by anonymizing IP and MAC adresses

    A deep copy is modified, so the input packet is left untouched. IPv6/IPv4
    source and destination addresses are zeroed, TCP/UDP ports are zeroed for
    IP packets, and ARP hardware/protocol addresses are zeroed. Note only the
    ARP branch rewrites MAC addresses; Ethernet-layer MACs are not touched.

    Parameters
    ----------
    packet : a packet

    Returns
    -------
    sanitized packet
    """
    pkt = copy.deepcopy(packet)
    # Neutral replacement values
    ipv4='0.0.0.0'
    ipv6='0000:00::00'
    mac='00:00:00:00:00:00'
    if pkt.haslayer(IPv6):
        pkt[IPv6].src = ipv6
        pkt[IPv6].dst = ipv6
        # Ports only zeroed when a TCP or UDP layer is present
        if pkt.haslayer(TCP):
            pkt[TCP].sport = 0
            pkt[TCP].dport = 0
        elif pkt.haslayer(UDP):
            pkt[UDP].sport = 0
            pkt[UDP].dport = 0
    elif pkt.haslayer(IP) :
        pkt[IP].src = ipv4
        pkt[IP].dst = ipv4
        if pkt.haslayer(TCP):
            pkt[TCP].sport = 0
            pkt[TCP].dport = 0
        elif pkt.haslayer(UDP):
            pkt[UDP].sport = 0
            pkt[UDP].dport = 0
    elif pkt.haslayer(ARP):
        # ARP carries both hardware (MAC) and protocol (IP) addresses
        pkt[ARP].hwsrc = mac
        pkt[ARP].hwdst = mac
        pkt[ARP].psrc = ipv4
        pkt[ARP].pdst = ipv4
    else:
        # Other layer types (e.g. plain Ethernet) pass through unchanged
        pass
    return pkt
|
[
"copy.deepcopy",
"numpy.uint8",
"scapy.all.raw"
] |
[((444, 456), 'numpy.uint8', 'np.uint8', (['fh'], {}), '(fh)\n', (452, 456), True, 'import numpy as np\n'), ((718, 739), 'copy.deepcopy', 'copy.deepcopy', (['packet'], {}), '(packet)\n', (731, 739), False, 'import copy\n'), ((342, 353), 'scapy.all.raw', 'raw', (['packet'], {}), '(packet)\n', (345, 353), False, 'from scapy.all import TCP, UDP, IP, IPv6, ARP, raw\n')]
|
######################################################
#
# PyRAI2MD 2 module for thermostat in NVT ensemble
#
# Author <NAME>
# Sep 7 2021
#
######################################################
import numpy as np
def NoseHoover(traj):
    """ Velocity scaling function in NVT ensemble (Nose Hoover thermostat)

        Parameters:          Type:
            traj             class       trajectory class

        Attribute:           Type:
            natom            int         number of atoms
            temp             float       temperature
            kinetic          float       kinetic energy
            Vs               list        additional velocity information
            kb               float       Boltzmann's constant
            fs_to_au         float       unit conversion fs to au of time

        Returns the trajectory with rescaled velocities/kinetic energy and
        updated thermostat state traj.Vs = [Q1, Q2, V1, V2].
    """
    natom = traj.natom
    kinetic = traj.kinetic
    temp = traj.temp
    size = traj.size
    Vs = traj.Vs
    kb = 3.16881 * 10**-6                # Boltzmann constant in Hartree/K
    fs_to_au = 2.4188843265857 * 10**-2  # fs per atomic time unit

    if len(Vs) == 0:
        # First call: initialise the two thermostat "masses" Q1, Q2 for a
        # chain of length 2, with zero initial thermostat velocities
        freq = 1 / (22 / fs_to_au) ## 22 fs to au Hz
        Q1 = 3 * natom * temp * kb / freq**2
        Q2 = temp * kb / freq**2
        traj.Vs = [Q1, Q2, 0, 0]
    else:
        # Half-step update of the thermostat chain, velocity scaling by s,
        # then the mirrored half-step update
        Q1, Q2, V1, V2 = Vs
        G2 = (Q1 * V1**2 - temp * kb) / Q2
        V2 += G2 * size / 4
        V1 *= np.exp(-V2 * size / 8)
        G1 = (2 * kinetic - 3 * natom * temp * kb) / Q1
        V1 += G1 * size / 4
        V1 *= np.exp(-V2 * size / 8)
        s = np.exp(-V1 * size / 2)
        # Rescale the system: KE scales as s**2, velocities as s
        traj.kinetic *= s**2
        traj.velo *= s
        V1 *= np.exp(-V2 * size / 8)
        # NOTE(review): this second force term reuses the pre-scaling
        # `kinetic`; standard Nose-Hoover chain integration uses the rescaled
        # kinetic energy (traj.kinetic) here - verify against the reference
        G1 = (2 * kinetic - 3 * natom * temp * kb) / Q1
        V1 += G1 * size / 4
        V1 *= np.exp(-V2 * size / 8)
        G2 = (Q1 * V1**2 - temp * kb) / Q2
        V2 += G2 * size / 4
        traj.Vs = [Q1, Q2, V1, V2]

    return traj
|
[
"numpy.exp"
] |
[((1333, 1355), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1339, 1355), True, 'import numpy as np\n'), ((1454, 1476), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1460, 1476), True, 'import numpy as np\n'), ((1489, 1511), 'numpy.exp', 'np.exp', (['(-V1 * size / 2)'], {}), '(-V1 * size / 2)\n', (1495, 1511), True, 'import numpy as np\n'), ((1583, 1605), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1589, 1605), True, 'import numpy as np\n'), ((1704, 1726), 'numpy.exp', 'np.exp', (['(-V2 * size / 8)'], {}), '(-V2 * size / 8)\n', (1710, 1726), True, 'import numpy as np\n')]
|
"""TrackML scoring metric"""
__authors__ = ['<NAME>', '<NAME>', '<NAME>',
'<NAME>']
import numpy
import pandas
def _analyze_tracks(truth, submission):
"""Compute the majority particle, hit counts, and weight for each track.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
Returns
-------
pandas.DataFrame
Contains track_id, nhits, major_particle_id, major_particle_nhits,
major_nhits, and major_weight columns.
"""
# true number of hits for each particle_id
particles_nhits = truth['particle_id'].value_counts(sort=False)
total_weight = truth['weight'].sum()
# combined event with minimal reconstructed and truth information
event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']],
submission[['hit_id', 'track_id']],
on=['hit_id'], how='left', validate='one_to_one')
event.drop('hit_id', axis=1, inplace=True)
event.sort_values(by=['track_id', 'particle_id'], inplace=True)
# ASSUMPTIONs: 0 <= track_id, 0 <= particle_id
tracks = []
# running sum for the reconstructed track we are currently in
rec_track_id = -1
rec_nhits = 0
# running sum for the particle we are currently in (in this track_id)
cur_particle_id = -1
cur_nhits = 0
cur_weight = 0
# majority particle with most hits up to now (in this track_id)
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
for hit in event.itertuples(index=False):
# we reached the next track so we need to finish the current one
if (rec_track_id != -1) and (rec_track_id != hit.track_id):
# could be that the current particle is the majority one
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for this track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits,
maj_weight / total_weight))
# setup running values for next track (or first)
if rec_track_id != hit.track_id:
rec_track_id = hit.track_id
rec_nhits = 1
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
maj_particle_id = -1
maj_nhits = 0
maj_weights = 0
continue
# hit is part of the current reconstructed track
rec_nhits += 1
# reached new particle within the same reconstructed track
if cur_particle_id != hit.particle_id:
# check if last particle has more hits than the majority one
# if yes, set the last particle as the new majority particle
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# reset runnig values for current particle
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
# hit belongs to the same particle within the same reconstructed track
else:
cur_nhits += 1
cur_weight += hit.weight
# last track is not handled inside the loop
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for the last track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight))
cols = ['track_id', 'nhits',
'major_particle_id', 'major_particle_nhits',
'major_nhits', 'major_weight']
return pandas.DataFrame.from_records(tracks, columns=cols)
def score_event(truth, submission):
    """Compute the TrackML event score for a single event.

    A track contributes its majority weight only when more than half of its
    hits belong to its majority particle AND more than half of that particle's
    true hits are contained in the track.

    Parameters
    ----------
    truth : pandas.DataFrame
        Truth information. Must have hit_id, particle_id, and weight columns.
    submission : pandas.DataFrame
        Proposed hit/track association. Must have hit_id and track_id columns.
    """
    tracks = _analyze_tracks(truth, submission)
    major_hits = tracks['major_nhits']
    # purity of the reconstructed track / of the matched true particle
    pure_rec = numpy.true_divide(major_hits, tracks['nhits']) > 0.5
    pure_maj = numpy.true_divide(major_hits, tracks['major_particle_nhits']) > 0.5
    return tracks['major_weight'][pure_rec & pure_maj].sum()
|
[
"pandas.merge",
"numpy.true_divide",
"pandas.DataFrame.from_records"
] |
[((911, 1058), 'pandas.merge', 'pandas.merge', (["truth[['hit_id', 'particle_id', 'weight']]", "submission[['hit_id', 'track_id']]"], {'on': "['hit_id']", 'how': '"""left"""', 'validate': '"""one_to_one"""'}), "(truth[['hit_id', 'particle_id', 'weight']], submission[[\n 'hit_id', 'track_id']], on=['hit_id'], how='left', validate='one_to_one')\n", (923, 1058), False, 'import pandas\n'), ((4016, 4067), 'pandas.DataFrame.from_records', 'pandas.DataFrame.from_records', (['tracks'], {'columns': 'cols'}), '(tracks, columns=cols)\n', (4045, 4067), False, 'import pandas\n'), ((4488, 4545), 'numpy.true_divide', 'numpy.true_divide', (["tracks['major_nhits']", "tracks['nhits']"], {}), "(tracks['major_nhits'], tracks['nhits'])\n", (4505, 4545), False, 'import numpy\n'), ((4563, 4635), 'numpy.true_divide', 'numpy.true_divide', (["tracks['major_nhits']", "tracks['major_particle_nhits']"], {}), "(tracks['major_nhits'], tracks['major_particle_nhits'])\n", (4580, 4635), False, 'import numpy\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from pandas.api.types import is_scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core.index import _ensure_index_from_sequences
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas.compat import lzip
from pandas.core.dtypes.common import (
is_bool_dtype,
is_numeric_dtype,
is_timedelta64_dtype)
import warnings
import numpy as np
import ray
import itertools
class DataFrame(object):
def __init__(self, df, columns, index=None):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
df ([ObjectID]): The list of ObjectIDs that contain the dataframe
partitions.
columns (pandas.Index): The column names for this dataframe, in
pandas Index object.
index (pandas.Index or list): The row index for this dataframe.
"""
assert(len(df) > 0)
self._df = df
self.columns = columns
# this _index object is a pd.DataFrame
# and we use that DataFrame's Index to index the rows.
self._lengths, self._index = _compute_length_and_index.remote(self._df)
if index is not None:
self.index = index
def __str__(self):
return repr(self)
def __repr__(self):
if sum(self._lengths) < 40:
result = repr(to_pandas(self))
return result
head = repr(to_pandas(self.head(20)))
tail = repr(to_pandas(self.tail(20)))
result = head + "\n...\n" + tail
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._index.index
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._index.index = new_index
index = property(_get_index, _set_index)
def _get__index(self):
"""Get the _index for this DataFrame.
Returns:
The default index.
"""
if isinstance(self._index_cache, ray.local_scheduler.ObjectID):
self._index_cache = ray.get(self._index_cache)
return self._index_cache
def _set__index(self, new__index):
"""Set the _index for this DataFrame.
Args:
new__index: The new default index to set.
"""
self._index_cache = new__index
_index = property(_get__index, _set__index)
def _compute_lengths(self):
"""Updates the stored lengths of DataFrame partions
"""
self._lengths = [_deploy_func.remote(_get_lengths, d)
for d in self._df]
def _get_lengths(self):
"""Gets the lengths for each partition and caches it if it wasn't.
Returns:
A list of integers representing the length of each partition.
"""
if isinstance(self._length_cache, ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
elif isinstance(self._length_cache, list) and \
isinstance(self._length_cache[0],
ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
return self._length_cache
def _set_lengths(self, lengths):
"""Sets the lengths of each partition for this DataFrame.
We use this because we can compute it when creating the DataFrame.
Args:
lengths ([ObjectID or Int]): A list of lengths for each
partition, in order.
"""
self._length_cache = lengths
_lengths = property(_get_lengths, _set_lengths)
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# The number of dimensions is common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ndim, self._df[0]))
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ftypes, self._df[0]))
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
# The dtypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.dtypes, self._df[0]))
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
all_empty = ray.get(self._map_partitions(lambda df: df.empty)._df)
return False not in all_empty
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return np.concatenate(
ray.get(self._map_partitions(lambda df: df.values)._df))
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return (len(self.index), len(self.columns))
def _map_partitions(self, func, index=None):
"""Apply a function on each partition.
Args:
func (callable): The function to Apply.
Returns:
A new DataFrame containing the result of the function.
"""
assert(callable(func))
new_df = [_deploy_func.remote(func, part) for part in self._df]
if index is None:
index = self.index
return DataFrame(new_df, self.columns, index=index)
def _update_inplace(self, df=None, columns=None, index=None):
"""Updates the current DataFrame inplace
"""
assert(len(df) > 0)
if df:
self._df = df
if columns:
self.columns = columns
if index:
self.index = index
self._lengths, self._index = _compute_length_and_index.remote(self._df)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(prefix) + str(x))
return DataFrame(self._df, new_cols, index=self.index)
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(x) + str(suffix))
return DataFrame(self._df, new_cols, index=self.index)
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
assert(callable(func))
return self._map_partitions(lambda df: df.applymap(lambda x: func(x)))
    def copy(self, deep=True):
        """Creates a shallow copy of the DataFrame.
        Note:
            The ``deep`` argument is accepted for pandas API compatibility
            but is not used here; the copy always shares partitions.
        Returns:
            A new DataFrame pointing to the same partitions as this one.
        """
        return DataFrame(self._df, self.columns, index=self.index)
    def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
                group_keys=True, squeeze=False, **kwargs):
        """Apply a groupby to this DataFrame. See _groupby() remote task.
        Args:
            by: The value to groupby.
            axis: The axis to groupby.
            level: The level of the groupby.
            as_index: Whether or not to store result as index.
            group_keys: Whether or not to group the keys.
            squeeze: Whether or not to squeeze.
        Returns:
            A new DataFrame resulting from the groupby.
        Note:
            NOTE(review): only ``axis`` is actually forwarded below;
            ``by``, ``level``, ``sort`` etc. are currently ignored by the
            shuffle — confirm whether that is intended.
        """
        # Group boundaries are the distinct row labels.
        indices = self.index.unique()
        # Rows-per-chunk target used by the remote shuffle.
        chunksize = int(len(indices) / len(self._df))
        # Shuffle each partition into per-group chunks, remotely.
        partitions = [_shuffle.remote(df, indices, chunksize)
                      for df in self._df]
        partitions = ray.get(partitions)
        # Transpose the list of dataframes
        # TODO find a better way
        shuffle = []
        for i in range(len(partitions[0])):
            shuffle.append([])
            for j in range(len(partitions)):
                shuffle[i].append(partitions[j][i])
        # Each transposed bucket holds one group's pieces from every
        # partition; group them locally on the workers.
        new_dfs = [_local_groupby.remote(part, axis=axis) for part in shuffle]
        return DataFrame(new_dfs, self.columns, index=indices)
def reduce_by_index(self, func, axis=0):
"""Perform a reduction based on the row index.
Args:
func (callable): The function to call on the partition
after the groupby.
Returns:
A new DataFrame with the result of the reduction.
"""
return self.groupby(axis=axis)._map_partitions(
func, index=pd.unique(self.index))
def sum(self, axis=None, skipna=True, level=None, numeric_only=None):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
intermediate_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
sum_of_partitions = self._map_partitions(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only),
index=intermediate_index)
return sum_of_partitions.reduce_by_index(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only))
def abs(self):
"""Apply an absolute value function to all numberic columns.
Returns:
A new DataFrame with the applied absolute value.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
return self._map_partitions(lambda df: df.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return self._map_partitions(lambda df: df.isin(values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return self._map_partitions(lambda df: df.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return self._map_partitions(lambda df: df.isnull)
    def keys(self):
        """Get the info axis for the DataFrame.
        Returns:
            A pandas Index for this DataFrame.
        """
        # Each partition should have the same index, so we'll use 0's
        # (the column labels are shared by every partition; no remote
        # work is required).
        return self.columns
    def transpose(self, *args, **kwargs):
        """Transpose columns and rows for the DataFrame.
        Note: Triggers a shuffle.
        Returns:
            A new DataFrame transposed from this DataFrame.
        """
        # After the per-partition transpose, every original column label
        # appears once per partition; the reduce below merges those pieces.
        temp_index = [idx
                      for _ in range(len(self._df))
                      for idx in self.columns]
        temp_columns = self.index
        local_transpose = self._map_partitions(
            lambda df: df.transpose(*args, **kwargs), index=temp_index)
        local_transpose.columns = temp_columns
        # Sum will collapse the NAs from the groupby
        df = local_transpose.reduce_by_index(
            lambda df: df.apply(lambda x: x), axis=1)
        # Reassign the columns within partition to self.index.
        # We have to use _depoly_func instead of _map_partition due to
        # new_labels argument
        def _reassign_columns(df, new_labels):
            df.columns = new_labels
            return df
        df._df = [
            _deploy_func.remote(
                _reassign_columns,
                part,
                self.index) for part in df._df]
        return df
    # Expose the pandas-style .T shorthand for transpose().
    T = property(transpose)
def dropna(self, axis, how, thresh=None, subset=[], inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
raise NotImplementedError("Not yet")
if how != 'any' and how != 'all':
raise ValueError("<how> not correctly set.")
    def add(self, other, axis='columns', level=None, fill_value=None):
        """Elementwise add: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def agg(self, func, axis=0, *args, **kwargs):
        """Aggregate with callable(s): not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def aggregate(self, func, axis=0, *args, **kwargs):
        """Alias of agg(): not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def align(self, other, join='outer', axis=None, level=None, copy=True,
              fill_value=None, method=None, limit=None, fill_axis=0,
              broadcast_axis=None):
        """Align two frames on their axes: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def all(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.all(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
def any(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.any(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
    def append(self, other, ignore_index=False, verify_integrity=False):
        """Append rows of other: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
              args=(), **kwds):
        """Apply func along an axis: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def as_blocks(self, copy=True):
        """Dtype-block representation: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def as_matrix(self, columns=None):
        """Convert to ndarray: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def asfreq(self, freq, method=None, how=None, normalize=False,
               fill_value=None):
        """Convert to the given frequency: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def asof(self, where, subset=None):
        """Last row without NaN before where: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def assign(self, **kwargs):
        """Assign new columns: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def astype(self, dtype, copy=True, errors='raise', **kwargs):
        """Cast to a dtype: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def at_time(self, time, asof=False):
        """Select rows at a time of day: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def between_time(self, start_time, end_time, include_start=True,
                     include_end=True):
        """Select rows between times of day: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
        """Backward-fill NA values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError("""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all().""")
else:
return to_pandas(self).bool()
    def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
                grid=True, figsize=None, layout=None, return_type=None,
                **kwds):
        """Box plot: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def clip(self, lower=None, upper=None, axis=None, inplace=False, *args,
             **kwargs):
        """Trim values at thresholds: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def clip_lower(self, threshold, axis=None, inplace=False):
        """Trim values below threshold: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def clip_upper(self, threshold, axis=None, inplace=False):
        """Trim values above threshold: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def combine(self, other, func, fill_value=None, overwrite=True):
        """Column-wise combine with other: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def combine_first(self, other):
        """Patch NAs with other's values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def compound(self, axis=None, skipna=None, level=None):
        """Compound percentage return: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def consolidate(self, inplace=False):
        """Consolidate internal blocks: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def convert_objects(self, convert_dates=True, convert_numeric=False,
                        convert_timedeltas=True, copy=True):
        """Convert object columns: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def corr(self, method='pearson', min_periods=1):
        """Pairwise column correlation: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def corrwith(self, other, axis=0, drop=False):
        """Correlation with other: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def count(self, axis=0, level=None, numeric_only=False):
if axis == 1:
return self.T.count(axis=0,
level=level,
numeric_only=numeric_only)
else:
temp_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
collapsed_df = sum(
ray.get(
self._map_partitions(
lambda df: df.count(
axis=axis,
level=level,
numeric_only=numeric_only),
index=temp_index)._df))
return collapsed_df
    def cov(self, min_periods=None):
        """Pairwise covariance: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def cummax(self, axis=None, skipna=True, *args, **kwargs):
        """Cumulative maximum: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def cummin(self, axis=None, skipna=True, *args, **kwargs):
        """Cumulative minimum: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def cumprod(self, axis=None, skipna=True, *args, **kwargs):
        """Cumulative product: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def cumsum(self, axis=None, skipna=True, *args, **kwargs):
        """Cumulative sum: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def describe(self, percentiles=None, include=None, exclude=None):
        """Summary statistics: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def diff(self, periods=1, axis=0):
        """Discrete difference: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def div(self, other, axis='columns', level=None, fill_value=None):
        """Elementwise division: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def divide(self, other, axis='columns', level=None, fill_value=None):
        """Alias of div(): not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def dot(self, other):
        """Matrix multiplication: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
             inplace=False, errors='raise'):
        """Drop labels from an axis: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def drop_duplicates(self, subset=None, keep='first', inplace=False):
        """Drop duplicate rows: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def duplicated(self, subset=None, keep='first'):
        """Mark duplicate rows: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def eq(self, other, axis='columns', level=None):
        """Elementwise equality: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def equals(self, other):
        """
        Checks if other DataFrame is elementwise equal to the current one
        Returns:
            Boolean: True if equal, otherwise False
        """
        # Compare one row of this frame against the matching row of
        # `other`, addressed by its (partition, offset) coordinates.
        def helper(df, index, other_series):
            return df.iloc[index['index_within_partition']] \
                    .equals(other_series)
        results = []
        other_partition = None
        other_df = None
        # Walk other's index metadata row by row, fetching each partition
        # from the object store only when the partition id changes.
        for i, idx in other._index.iterrows():
            if idx['partition'] != other_partition:
                other_df = ray.get(other._df[idx['partition']])
                other_partition = idx['partition']
            # TODO: group series here into full df partitions to reduce
            # the number of remote calls to helper
            other_series = other_df.iloc[idx['index_within_partition']]
            curr_index = self._index.iloc[i]
            curr_df = self._df[int(curr_index['partition'])]
            results.append(_deploy_func.remote(helper,
                                               curr_df,
                                               curr_index,
                                               other_series))
        # Every row comparison must succeed for the frames to be equal.
        for r in results:
            if not ray.get(r):
                return False
        return True
    def eval(self, expr, inplace=False, **kwargs):
        """Evaluate an expression string: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def ewm(self, com=None, span=None, halflife=None, alpha=None,
            min_periods=0, freq=None, adjust=True, ignore_na=False, axis=0):
        """Exponentially weighted window: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def expanding(self, min_periods=1, freq=None, center=False, axis=0):
        """Expanding window: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
        """Forward-fill NA values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def fillna(self, value=None, method=None, axis=None, inplace=False,
               limit=None, downcast=None, **kwargs):
        """Fill NA values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def filter(self, items=None, like=None, regex=None, axis=None):
        """Subset by label filter: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def first(self, offset):
        """First rows by date offset: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def first_valid_index(self):
        """First non-NA index label: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def floordiv(self, other, axis='columns', level=None, fill_value=None):
        """Elementwise floor division: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @classmethod
    def from_csv(self, path, header=0, sep=', ', index_col=0,
                 parse_dates=True, encoding=None, tupleize_cols=None,
                 infer_datetime_format=False):
        """Construct from a CSV file: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @classmethod
    def from_dict(self, data, orient='columns', dtype=None):
        """Construct from a dict: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @classmethod
    def from_items(self, items, columns=None, orient='columns'):
        """Construct from (key, value) pairs: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @classmethod
    def from_records(self, data, index=None, exclude=None, columns=None,
                     coerce_float=False, nrows=None):
        """Construct from records: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def ge(self, other, axis='columns', level=None):
        """Elementwise greater-or-equal: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
temp_df = self._map_partitions(lambda df: df.get(key, default=default))
return to_pandas(temp_df)
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_dtype_counts(), self._df[0]
)
)
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_ftype_counts(), self._df[0]
)
)
    def get_value(self, index, col, takeable=False):
        """Scalar lookup at (index, col): not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def get_values(self):
        """Values as ndarray: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def gt(self, other, axis='columns', level=None):
        """Elementwise greater-than: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def head(self, n=5):
        """Get the first n rows of the dataframe.
        Args:
            n (int): The number of rows to return.
        Returns:
            A new dataframe with the first n rows of the dataframe.
        """
        sizes = self._lengths
        # If the request covers every row, the frame itself is the answer.
        if n >= sum(sizes):
            return self
        # Running row totals; partition i is fully included while its
        # cumulative row count stays strictly below n.
        cumulative = np.cumsum(np.array(sizes))
        new_dfs = [self._df[i]
                   for i in range(len(cumulative))
                   if cumulative[i] < n]
        last_index = len(new_dfs)
        # this happens when we only need from the first partition
        if last_index == 0:
            num_to_transfer = n
        else:
            num_to_transfer = n - cumulative[last_index - 1]
        # Take the remaining rows from the first partly-included partition.
        new_dfs.append(_deploy_func.remote(lambda df: df.head(num_to_transfer),
                                           self._df[last_index]))
        index = self._index.head(n).index
        return DataFrame(new_dfs, self.columns, index=index)
    def hist(self, data, column=None, by=None, grid=True, xlabelsize=None,
             xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
             sharey=False, figsize=None, layout=None, bins=10, **kwds):
        """Histogram plot: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmax(axis=axis, skipna=skipna)))
else:
return self.T.idxmax(axis=1, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmin(axis=axis, skipna=skipna)))
else:
return self.T.idxmin(axis=1, skipna=skipna)
    def infer_objects(self):
        """Infer better dtypes for object columns: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
             null_counts=None):
        """Print a concise summary: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def insert(self, loc, column, value, allow_duplicates=False):
        """Insert column into DataFrame at specified location.
        Args:
            loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
            column (hashable object): Label of the inserted column.
            value (int, Series, or array-like): The values to insert.
            allow_duplicates (bool): Whether to allow duplicate column names.
        Raises:
            ValueError: If the value length mismatches the index, loc is out
                of range, or the column already exists (without
                allow_duplicates).
        """
        # Broadcast a scalar value across every row.
        try:
            len(value)
        except TypeError:
            value = [value for _ in range(len(self.index))]
        if len(value) != len(self.index):
            raise ValueError(
                "Column length provided does not match DataFrame length.")
        if loc < 0 or loc > len(self.columns):
            raise ValueError(
                "Location provided must be higher than 0 and lower than the "
                "number of columns.")
        if not allow_duplicates and column in self.columns:
            raise ValueError(
                "Column {} already exists in DataFrame.".format(column))
        # Slice `value` into chunks aligned with the row partitions.
        cumulative = np.cumsum(self._lengths)
        partitions = [value[cumulative[i-1]:cumulative[i]]
                      for i in range(len(cumulative))
                      if i != 0]
        partitions.insert(0, value[:cumulative[0]])
        # Because insert is always inplace, we have to create this temp fn.
        def _insert(_df, _loc, _column, _part, _allow_duplicates):
            _df.insert(_loc, _column, _part, _allow_duplicates)
            return _df
        # Rebuild every partition remotely with its chunk inserted.
        self._df = \
            [_deploy_func.remote(_insert,
                                 self._df[i],
                                 loc,
                                 column,
                                 partitions[i],
                                 allow_duplicates)
             for i in range(len(self._df))]
        self.columns = self.columns.insert(loc, column)
    def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
                    limit_direction='forward', downcast=None, **kwargs):
        """Interpolate NA values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickeled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.iterrows()), part) for part in self._df])
iters = itertools.chain.from_iterable(iters)
series = map(lambda idx_series_tuple: idx_series_tuple[1], iters)
return zip(self.index, series)
    def items(self):
        """Iterator over (column name, Series) pairs.
        Note:
            Generators can't be pickeled so from the remote function
            we expand the generator into a list before getting it.
            This is not that ideal.
        Returns:
            A generator that iterates over the columns of the frame.
        """
        # Materialize each partition's (name, Series) pairs remotely.
        iters = ray.get([_deploy_func.remote(
            lambda df: list(df.items()), part) for part in self._df])
        def concat_iters(iterables):
            # zip(*iterables) yields, per column, that column's piece from
            # every partition; concatenate the pieces into one Series.
            for partitions in zip(*iterables):
                series = pd.concat([_series for _, _series in partitions])
                series.index = self.index
                yield (series.name, series)
        return concat_iters(iters)
    def iteritems(self):
        """Iterator over (column name, Series) pairs.
        Note:
            Returns the same thing as .items()
        Returns:
            A generator that iterates over the columns of the frame.
        """
        # Alias kept for pandas API compatibility.
        return self.items()
    def itertuples(self, index=True, name='Pandas'):
        """Iterate over DataFrame rows as namedtuples.
        Args:
            index (boolean, default True): If True, return the index as the
                first element of the tuple.
            name (string, default "Pandas"): The name of the returned
                namedtuples or None to return regular tuples.
        Note:
            Generators can't be pickeled so from the remote function
            we expand the generator into a list before getting it.
            This is not that ideal.
        Returns:
            A tuple representing row data. See args for varying tuples.
        """
        # Materialize each partition's tuples remotely, then flatten.
        iters = ray.get([
            _deploy_func.remote(
                lambda df: list(df.itertuples(index=index, name=name)),
                part) for part in self._df])
        iters = itertools.chain.from_iterable(iters)
        def _replace_index(row_tuple, idx):
            # We need to use try-except here because
            # isinstance(row_tuple, namedtuple) won't work.
            try:
                row_tuple = row_tuple._replace(Index=idx)
            except AttributeError:  # Tuple not namedtuple
                row_tuple = (idx,) + row_tuple[1:]
            return row_tuple
        if index:
            # Swap each partition-local index for the global label.
            iters = itertools.starmap(_replace_index, zip(iters, self.index))
        return iters
    def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
             sort=False):
        """Join columns of another frame: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def kurt(self, axis=None, skipna=None, level=None, numeric_only=None,
             **kwargs):
        """Kurtosis: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None,
                 **kwargs):
        """Alias of kurt(): not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def last(self, offset):
        """Last rows by date offset: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def last_valid_index(self):
        """Last non-NA index label: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def le(self, other, axis='columns', level=None):
        """Elementwise less-or-equal: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def lookup(self, row_labels, col_labels):
        """Label-based fancy lookup: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def lt(self, other, axis='columns', level=None):
        """Elementwise less-than: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def mad(self, axis=None, skipna=None, level=None):
        """Mean absolute deviation: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
             errors='raise', try_cast=False, raise_on_error=None):
        """Replace where condition holds: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def max(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
if(axis == 1):
return self._map_partitions(
lambda df: df.max(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
return self.T.max(axis=1, skipna=None, level=None,
numeric_only=None, **kwargs)
    def mean(self, axis=None, skipna=None, level=None, numeric_only=None,
             **kwargs):
        """Mean of values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def median(self, axis=None, skipna=None, level=None, numeric_only=None,
               **kwargs):
        """Median of values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def melt(self, id_vars=None, value_vars=None, var_name=None,
             value_name='value', col_level=None):
        """Unpivot to long format: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def memory_usage(self, index=True, deep=False):
        """Per-column memory usage: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
              left_index=False, right_index=False, sort=False,
              suffixes=('_x', '_y'), copy=True, indicator=False,
              validate=None):
        """Database-style join: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def min(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
if(axis == 1):
return self._map_partitions(
lambda df: df.min(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
return self.T.min(axis=1, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
    def mod(self, other, axis='columns', level=None, fill_value=None):
        """Elementwise modulo: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def mode(self, axis=0, numeric_only=False):
        """Mode of values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def mul(self, other, axis='columns', level=None, fill_value=None):
        """Elementwise multiplication: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def multiply(self, other, axis='columns', level=None, fill_value=None):
        """Alias of mul(): not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def ne(self, other, axis='columns', level=None):
        """Elementwise inequality: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def nlargest(self, n, columns, keep='first'):
        """Top-n rows by columns: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def notna(self):
"""Perform notna across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notnull())
    def nsmallest(self, n, columns, keep='first'):
        """Bottom-n rows by columns: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def nunique(self, axis=0, dropna=True):
        """Count distinct values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
                   **kwargs):
        """Percentage change: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def pipe(self, func, *args, **kwargs):
        """Apply a chainable function: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def pivot(self, index=None, columns=None, values=None):
        """Reshape by column values: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def pivot_table(self, values=None, index=None, columns=None,
                    aggfunc='mean', fill_value=None, margins=False,
                    dropna=True, margins_name='All'):
        """Spreadsheet-style pivot: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def plot(self, x=None, y=None, kind='line', ax=None, subplots=False,
             sharex=None, sharey=False, layout=None, figsize=None,
             use_index=True, title=None, grid=None, legend=True, style=None,
             logx=False, logy=False, loglog=False, xticks=None, yticks=None,
             xlim=None, ylim=None, rot=None, fontsize=None, colormap=None,
             table=False, yerr=None, xerr=None, secondary_y=False,
             sort_columns=False, **kwds):
        """Plot the frame: not implemented on the Ray backend yet."""
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
popped = to_pandas(self._map_partitions(
lambda df: df.pop(item)))
self._df = self._map_partitions(lambda df: df.drop([item], axis=1))._df
self.columns = self.columns.drop(item)
return popped
    # --- Unimplemented pandas.DataFrame API (placeholders) ---
    # Each method below raises NotImplementedError unconditionally.
    def pow(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def prod(self, axis=None, skipna=None, level=None, numeric_only=None,
             min_count=0, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def product(self, axis=None, skipna=None, level=None, numeric_only=None,
                min_count=0, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def quantile(self, q=0.5, axis=0, numeric_only=True,
                 interpolation='linear'):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def query(self, expr, inplace=False, **kwargs):
        """Queries the Dataframe with a boolean expression
        Args:
            expr (str): The boolean expression to evaluate on each partition.
            inplace (bool): Whether to mutate this DataFrame in place.
        Returns:
            A new DataFrame if inplace=False
        """
        # Apply the query independently to each partition; row filtering never
        # crosses partition boundaries, so per-partition evaluation is safe.
        new_dfs = [_deploy_func.remote(lambda df: df.query(expr, **kwargs),
                                       part) for part in self._df]
        if inplace:
            self._update_inplace(new_dfs)
        else:
            return DataFrame(new_dfs, self.columns)
    # --- Unimplemented pandas.DataFrame API (placeholders) ---
    # Each method below raises NotImplementedError unconditionally.
    def radd(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rank(self, axis=0, method='average', numeric_only=None,
             na_option='keep', ascending=True, pct=False):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rdiv(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def reindex(self, labels=None, index=None, columns=None, axis=None,
                method=None, copy=True, level=None, fill_value=np.nan,
                limit=None, tolerance=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
                     limit=None, fill_value=np.nan):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def reindex_like(self, other, method=None, copy=True, limit=None,
                     tolerance=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rename(self, mapper=None, index=None, columns=None, axis=None,
               copy=True, inplace=False, level=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def reorder_levels(self, order, axis=0):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def replace(self, to_replace=None, value=None, inplace=False, limit=None,
                regex=False, method='pad', axis=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
                 label=None, convention='start', kind=None, loffset=None,
                 limit=None, base=0, on=None, level=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
                    col_fill=''):
        """Reset this index to default and create column from current index.
        Args:
            level: Only remove the given levels from the index. Removes all
                levels by default
            drop: Do not try to insert index into dataframe columns. This
                resets the index to the default integer index.
            inplace: Modify the DataFrame in place (do not create a new object)
            col_level : If the columns have multiple levels, determines which
                level the labels are inserted into. By default it is inserted
                into the first level.
            col_fill: If the columns have multiple levels, determines how the
                other levels are named. If None then the index name is
                repeated.
        Returns:
            A new DataFrame if inplace is False, None otherwise.
        """
        # NOTE(review): this is largely a port of pandas' own
        # DataFrame.reset_index adapted to the partitioned representation.
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if inplace:
            new_obj = self
        else:
            new_obj = self.copy()
        def _maybe_casted_values(index, labels=None):
            # Convert an index (level) into a column of values, honoring the
            # special representations of period and tz-aware datetime indexes.
            if isinstance(index, pd.PeriodIndex):
                values = index.asobject.values
            elif isinstance(index, pd.DatetimeIndex) and index.tz is not None:
                values = index
            else:
                values = index.values
                if values.dtype == np.object_:
                    values = lib.maybe_convert_objects(values)
            # if we have the labels, extract the values with a mask
            if labels is not None:
                mask = labels == -1
                # we can have situations where the whole mask is -1,
                # meaning there is nothing found in labels, so make all nan's
                if mask.all():
                    values = np.empty(len(mask))
                    values.fill(np.nan)
                else:
                    values = values.take(labels)
                    if mask.any():
                        values, changed = maybe_upcast_putmask(
                            values, mask, np.nan)
            return values
        # Recompute the default RangeIndex for the result from the partitions.
        _, new_index = _compute_length_and_index.remote(new_obj._df)
        new_index = ray.get(new_index).index
        if level is not None:
            if not isinstance(level, (tuple, list)):
                level = [level]
            level = [self.index._get_level_number(lev) for lev in level]
            if isinstance(self.index, pd.MultiIndex):
                if len(level) < self.index.nlevels:
                    new_index = self.index.droplevel(level)
        if not drop:
            if isinstance(self.index, pd.MultiIndex):
                names = [n if n is not None else ('level_%d' % i)
                         for (i, n) in enumerate(self.index.names)]
                to_insert = lzip(self.index.levels, self.index.labels)
            else:
                default = 'index' if 'index' not in self else 'level_0'
                names = ([default] if self.index.name is None
                         else [self.index.name])
                to_insert = ((self.index, None),)
            multi_col = isinstance(self.columns, pd.MultiIndex)
            # Insert levels in reverse so that level 0 ends up leftmost.
            for i, (lev, lab) in reversed(list(enumerate(to_insert))):
                if not (level is None or i in level):
                    continue
                name = names[i]
                if multi_col:
                    col_name = (list(name) if isinstance(name, tuple)
                                else [name])
                    if col_fill is None:
                        if len(col_name) not in (1, self.columns.nlevels):
                            raise ValueError("col_fill=None is incompatible "
                                             "with incomplete column name "
                                             "{}".format(name))
                        col_fill = col_name[0]
                    lev_num = self.columns._get_level_number(col_level)
                    name_lst = [col_fill] * lev_num + col_name
                    missing = self.columns.nlevels - len(name_lst)
                    name_lst += [col_fill] * missing
                    name = tuple(name_lst)
                # to ndarray and maybe infer different dtype
                level_values = _maybe_casted_values(lev, lab)
                new_obj.insert(0, name, level_values)
        new_obj.index = new_index
        if not inplace:
            return new_obj
    # --- Unimplemented pandas.DataFrame API (placeholders) ---
    # Each method below raises NotImplementedError unconditionally.
    def rfloordiv(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rmod(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rmul(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rolling(self, window, min_periods=None, freq=None, center=False,
                win_type=None, on=None, axis=0, closed=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def round(self, decimals=0, *args, **kwargs):
return self._map_partitions(lambda df: df.round(decimals=decimals,
*args,
**kwargs))
    # --- Unimplemented pandas.DataFrame API (placeholders) ---
    # Each method below raises NotImplementedError unconditionally.
    def rpow(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rsub(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def rtruediv(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def sample(self, n=None, frac=None, replace=False, weights=None,
               random_state=None, axis=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def select(self, crit, axis=0):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def select_dtypes(self, include=None, exclude=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def sem(self, axis=None, skipna=None, level=None, ddof=1,
            numeric_only=None, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def set_axis(self, labels, axis=0, inplace=None):
        """Assign desired index to given axis.
        Args:
            labels (pd.Index or list-like): The Index to assign.
            axis (string or int): The axis to reassign.
            inplace (bool): Whether to make these modifications inplace.
        Returns:
            If inplace is False, returns a new DataFrame, otherwise None.
        """
        # Support the deprecated call order set_axis(axis, labels): a scalar
        # first argument means the caller used the old form, so swap.
        if is_scalar(labels):
            warnings.warn(
                'set_axis now takes "labels" as first argument, and '
                '"axis" as named parameter. The old form, with "axis" as '
                'first parameter and \"labels\" as second, is still supported '
                'but will be deprecated in a future version of pandas.',
                FutureWarning, stacklevel=2)
            labels, axis = axis, labels
        # Mirrors pandas' transitional behavior: unspecified inplace defaults
        # to True with a deprecation warning.
        if inplace is None:
            warnings.warn(
                'set_axis currently defaults to operating inplace.\nThis '
                'will change in a future version of pandas, use '
                'inplace=True to avoid this warning.',
                FutureWarning, stacklevel=2)
            inplace = True
        if inplace:
            setattr(self, self._index._get_axis_name(axis), labels)
        else:
            obj = self.copy()
            obj.set_axis(labels, axis=axis, inplace=True)
            return obj
    def set_index(self, keys, drop=True, append=False, inplace=False,
                  verify_integrity=False):
        """Set the DataFrame index using one or more existing columns.
        Args:
            keys: column label or list of column labels / arrays.
            drop (boolean): Delete columns to be used as the new index.
            append (boolean): Whether to append columns to existing index.
            inplace (boolean): Modify the DataFrame in place.
            verify_integrity (boolean): Check the new index for duplicates.
                Otherwise defer the check until necessary. Setting to False
                will improve the performance of this method
        Returns:
            If inplace is set to false returns a new DataFrame, otherwise None.
        """
        # NOTE(review): ported from pandas' DataFrame.set_index.
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if not isinstance(keys, list):
            keys = [keys]
        if inplace:
            frame = self
        else:
            frame = self.copy()
        arrays = []
        names = []
        # When appending, seed the new index with the existing index levels.
        if append:
            names = [x for x in self.index.names]
            if isinstance(self.index, pd.MultiIndex):
                for i in range(self.index.nlevels):
                    arrays.append(self.index._get_level_values(i))
            else:
                arrays.append(self.index)
        to_remove = []
        # Each key may be a MultiIndex, Series, Index, array, or column label;
        # collect one array of values (plus a name) per resulting index level.
        for col in keys:
            if isinstance(col, pd.MultiIndex):
                # append all but the last column so we don't have to modify
                # the end of this loop
                for n in range(col.nlevels - 1):
                    arrays.append(col._get_level_values(n))
                level = col._get_level_values(col.nlevels - 1)
                names.extend(col.names)
            elif isinstance(col, pd.Series):
                level = col._values
                names.append(col.name)
            elif isinstance(col, pd.Index):
                level = col
                names.append(col.name)
            elif isinstance(col, (list, np.ndarray, pd.Index)):
                level = col
                names.append(None)
            else:
                # Plain column label: pull the values out of the frame and
                # remember the column for removal when drop=True.
                level = frame[col]._values
                names.append(col)
                if drop:
                    to_remove.append(col)
            arrays.append(level)
        index = _ensure_index_from_sequences(arrays, names)
        if verify_integrity and not index.is_unique:
            duplicates = index.get_duplicates()
            raise ValueError('Index has duplicate keys: %s' % duplicates)
        for c in to_remove:
            del frame[c]
        # clear up memory usage
        index._cleanup()
        frame.index = index
        if not inplace:
            return frame
    # --- Unimplemented pandas.DataFrame API (placeholders) ---
    # Each method below raises NotImplementedError unconditionally.
    def set_value(self, index, col, value, takeable=False):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def shift(self, periods=1, freq=None, axis=0):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def skew(self, axis=None, skipna=None, level=None, numeric_only=None,
             **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def slice_shift(self, periods=1, axis=0):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
                   kind='quicksort', na_position='last', sort_remaining=True,
                   by=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def sort_values(self, by, axis=0, ascending=True, inplace=False,
                    kind='quicksort', na_position='last'):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
                  sort_remaining=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def squeeze(self, axis=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def stack(self, level=-1, dropna=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def std(self, axis=None, skipna=None, level=None, ddof=1,
            numeric_only=None, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def sub(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def subtract(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def swapaxes(self, axis1, axis2, copy=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def swaplevel(self, i=-2, j=-1, axis=0):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def tail(self, n=5):
        """Get the last n rows of the dataframe.
        Args:
            n (int): The number of rows to return.
        Returns:
            A new dataframe with the last n rows of this dataframe.
        """
        sizes = self._lengths
        # Requesting at least as many rows as exist returns self unchanged.
        if n >= sum(sizes):
            return self
        # Work from the back: cumulative sizes over the reversed partitions
        # tell us how many whole partitions are fully inside the tail.
        cumulative = np.cumsum(np.array(sizes[::-1]))
        reverse_dfs = self._df[::-1]
        new_dfs = [reverse_dfs[i]
                   for i in range(len(cumulative))
                   if cumulative[i] < n]
        last_index = len(new_dfs)
        # this happens when we only need from the last partition
        if last_index == 0:
            num_to_transfer = n
        else:
            num_to_transfer = n - cumulative[last_index - 1]
        # Take only the needed rows from the first (partially used) partition.
        new_dfs.append(_deploy_func.remote(lambda df: df.tail(num_to_transfer),
                                           reverse_dfs[last_index]))
        # Restore original partition order before building the result.
        new_dfs.reverse()
        index = self._index.tail(n).index
        return DataFrame(new_dfs, self.columns, index=index)
    # --- Unimplemented pandas.DataFrame API (placeholders) ---
    # Every method below (take and the to_* exporters) raises
    # NotImplementedError unconditionally.
    def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_clipboard(self, excel=None, sep=None, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_csv(self, path_or_buf=None, sep=', ', na_rep='', float_format=None,
               columns=None, header=True, index=True, index_label=None,
               mode='w', encoding=None, compression=None, quoting=None,
               quotechar='"', line_terminator='\n', chunksize=None,
               tupleize_cols=None, date_format=None, doublequote=True,
               escapechar=None, decimal='.'):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_dense(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_dict(self, orient='dict', into=dict):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
                 float_format=None, columns=None, header=True, index=True,
                 index_label=None, startrow=0, startcol=0, engine=None,
                 merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
                 freeze_panes=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_feather(self, fname):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_gbq(self, destination_table, project_id, chunksize=10000,
               verbose=True, reauth=False, if_exists='fail',
               private_key=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_hdf(self, path_or_buf, key, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_html(self, buf=None, columns=None, col_space=None, header=True,
                index=True, na_rep='np.NaN', formatters=None,
                float_format=None, sparsify=None, index_names=True,
                justify=None, bold_rows=True, classes=None, escape=True,
                max_rows=None, max_cols=None, show_dimensions=False,
                notebook=False, decimal='.', border=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_json(self, path_or_buf=None, orient=None, date_format=None,
                double_precision=10, force_ascii=True, date_unit='ms',
                default_handler=None, lines=False, compression=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_latex(self, buf=None, columns=None, col_space=None, header=True,
                 index=True, na_rep='np.NaN', formatters=None,
                 float_format=None, sparsify=None, index_names=True,
                 bold_rows=False, column_format=None, longtable=None,
                 escape=None, encoding=None, decimal='.', multicolumn=None,
                 multicolumn_format=None, multirow=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_panel(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_parquet(self, fname, engine='auto', compression='snappy',
                   **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_period(self, freq=None, axis=0, copy=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_pickle(self, path, compression='infer', protocol=4):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_records(self, index=True, convert_datetime64=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_sparse(self, fill_value=None, kind='block'):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail',
               index=True, index_label=None, chunksize=None, dtype=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_stata(self, fname, convert_dates=None, write_index=True,
                 encoding='latin-1', byteorder=None, time_stamp=None,
                 data_label=None, variable_labels=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_string(self, buf=None, columns=None, col_space=None, header=True,
                  index=True, na_rep='np.NaN', formatters=None,
                  float_format=None, sparsify=None, index_names=True,
                  justify=None, line_width=None, max_rows=None, max_cols=None,
                  show_dimensions=False):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def to_xarray(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def transform(self, func, *args, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def truediv(self, other, axis='columns', level=None, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def truncate(self, before=None, after=None, axis=None, copy=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def tshift(self, periods=1, freq=None, axis=0):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def tz_convert(self, tz, axis=0, level=None, copy=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def tz_localize(self, tz, axis=0, level=None, copy=True,
                    ambiguous='raise'):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def unstack(self, level=-1, fill_value=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def update(self, other, join='left', overwrite=True, filter_func=None,
               raise_conflict=False):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def var(self, axis=None, skipna=None, level=None, ddof=1,
            numeric_only=None, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
              errors='raise', try_cast=False, raise_on_error=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def xs(self, key, axis=0, level=None, drop_level=True):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __getitem__(self, key):
        """Get the column specified by key for this DataFrame.
        Args:
            key : The column name.
        Returns:
            A Pandas Series representing the value of the column.
        """
        # Extract the column from each partition, then materialize the pieces
        # into a single pandas Series.
        result_column_chunks = self._map_partitions(
            lambda df: df.__getitem__(key))
        return to_pandas(result_column_chunks)
    # --- Unimplemented dunder methods (placeholders) ---
    # Each raises NotImplementedError unconditionally.  NOTE(review):
    # __hash__ raising here makes instances effectively unhashable.
    def __setitem__(self, key, value):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __len__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __unicode__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __invert__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __hash__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __iter__(self):
        """Iterate over the columns
        Returns:
            An Iterator over the columns of the dataframe.
        """
        # Matches pandas semantics: iterating a DataFrame yields column labels.
        return iter(self.columns)
    def __contains__(self, key):
        """Return True if `key` is a column label of this DataFrame."""
        return key in self.columns
    # Truth-value testing is unsupported; both the Python 2 (__nonzero__) and
    # Python 3 (__bool__) hooks raise unconditionally.
    def __nonzero__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __bool__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __abs__(self):
        """Creates a modified DataFrame by elementwise taking the absolute value
        Returns:
            A modified DataFrame
        """
        # Delegates to the public abs() implementation.
        return self.abs()
    # --- Unimplemented dunder methods (placeholders) ---
    # Each raises NotImplementedError unconditionally.
    def __round__(self, decimals=0):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __array__(self, dtype=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __array_wrap__(self, result, context=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __getstate__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __setstate__(self, state):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __delitem__(self, key):
        """Delete an item by key. `del a[key]` for example.
        Operation happens in place.
        Args:
            key: key to delete
        """
        # Drop the column on every partition, then keep the column metadata in
        # sync by removing the label from self.columns.
        def del_helper(df):
            df.__delitem__(key)
            return df
        self._df = self._map_partitions(del_helper)._df
        self.columns = self.columns.drop(key)
    # Unimplemented placeholder: raises NotImplementedError unconditionally.
    def __finalize__(self, other, method=None, **kwargs):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __copy__(self, deep=True):
        """Make a copy using Ray.DataFrame.copy method
        Args:
            deep: Boolean, deep copy or not.
                  Currently we do not support deep copy.
        Returns:
            A Ray DataFrame object.
        """
        return self.copy(deep=deep)
    def __deepcopy__(self, memo=None):
        """Make a -deep- copy using Ray.DataFrame.copy method
        This is equivalent to copy(deep=True).
        Args:
            memo: No effect. Just to comply with Pandas API.
        Returns:
            A Ray DataFrame object.
        """
        return self.copy(deep=True)
    # --- Unimplemented logical/comparison dunder methods (placeholders) ---
    # Each raises NotImplementedError unconditionally.
    def __and__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __or__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __xor__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __lt__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __le__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __gt__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __ge__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __eq__(self, other):
        """Computes the equality of this DataFrame with another
        Returns:
            True, if the DataFrames are equal. False otherwise.
        """
        # Delegates to self.equals. NOTE(review): __hash__ raises
        # NotImplementedError in this class, so instances cannot be dict keys.
        return self.equals(other)
    def __ne__(self, other):
        """Checks that this DataFrame is not equal to another
        Returns:
            True, if the DataFrames are not equal. False otherwise.
        """
        # Logical negation of __eq__ to keep the two consistent.
        return not self.equals(other)
    # --- Unimplemented arithmetic dunder methods (placeholders) ---
    # Each raises NotImplementedError unconditionally.
    def __add__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __iadd__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __mul__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __imul__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __pow__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __ipow__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __sub__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __isub__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def __neg__(self):
"""Computes an element wise negative DataFrame
Returns:
A modified DataFrame where every element is the negation of before
"""
for t in self.dtypes:
if not (is_bool_dtype(t)
or is_numeric_dtype(t)
or is_timedelta64_dtype(t)):
raise TypeError("Unary negative expects numeric dtype, not {}"
.format(t))
return self._map_partitions(lambda df: df.__neg__())
    # --- Unimplemented dunder methods (placeholders) ---
    # Each raises NotImplementedError unconditionally.
    def __floordiv__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __truediv__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __mod__(self, other):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    def __sizeof__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    # --- Unimplemented properties (placeholders) ---
    # Accessing any of these raises NotImplementedError unconditionally.
    @property
    def __doc__(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @property
    def blocks(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @property
    def style(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def iat(axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __rsub__(other, axis=None, level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    @property
    def loc(self):
        """Purely label-location based indexer for selection by label.
        We currently support: single label, list array, slice object
        We do not support: boolean array, callable
        """
        # Imported lazily to avoid a circular import with the indexing module.
        from .indexing import _Loc_Indexer
        return _Loc_Indexer(self)
    # Unimplemented property placeholder: accessing it raises.
    @property
    def is_copy(self):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def __itruediv__(other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __div__(other, axis=None, level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at(axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ix(axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    @property
    def iloc(self):
        """Purely integer-location based indexing for selection by position.
        We currently support: single label, list array, slice object
        We do not support: boolean array, callable
        """
        # Imported lazily to avoid a circular import with the indexing module.
        from .indexing import _iLoc_Indexer
        return _iLoc_Indexer(self)
def _get_lengths(df):
"""Gets the length of the dataframe.
Args:
df: A remote pd.DataFrame object.
Returns:
Returns an integer length of the dataframe object. If the attempt
fails, returns 0 as the length.
"""
try:
return len(df)
# Because we sometimes have cases where we have summary statistics in our
# DataFrames
except TypeError:
return 0
@ray.remote
def _shuffle(df, indices, chunksize):
    """Shuffle data by sending it through the Ray Store.
    Args:
        df (pd.DataFrame): The pandas DataFrame to shuffle.
        indices ([any]): The list of indices for the DataFrame.
        chunksize (int): The number of indices to send.
    Returns:
        The list of pd.DataFrame objects in order of their assignment. This
        order is important because it determines which task will get the data.
    """
    i = 0
    partition = []
    # Slice off full-size chunks of indices, reindexing the frame for each.
    while len(indices) > chunksize:
        oids = df.reindex(indices[:chunksize])
        partition.append(oids)
        indices = indices[chunksize:]
        i += 1
    else:
        # while/else: this branch runs once the loop exits normally and
        # handles the final, smaller-than-chunksize remainder of indices.
        oids = df.reindex(indices)
        partition.append(oids)
    return partition
@ray.remote
def _local_groupby(df_rows, axis=0):
    """Apply a groupby on this partition for the blocks sent to it.
    Args:
        df_rows ([pd.DataFrame]): A list of dataframes for this partition. Goes
            through the Ray object store.
    Returns:
        A DataFrameGroupBy object from the resulting groupby.
    """
    # Reassemble the blocks into one frame, then group by its index values.
    concat_df = pd.concat(df_rows, axis=axis)
    return concat_df.groupby(concat_df.index)
@ray.remote
def _deploy_func(func, dataframe, *args):
    """Deploys a function for the _map_partitions call.

    Args:
        func: The function to apply to the partition.
        dataframe (pandas.DataFrame): The pandas DataFrame for this partition.
        *args: Extra positional arguments forwarded to `func`.

    Returns:
        A futures object representing the return value of the function
        provided.
    """
    if args:
        return func(dataframe, *args)
    return func(dataframe)
def from_pandas(df, npartitions=None, chunksize=None, sort=True):
"""Converts a pandas DataFrame to a Ray DataFrame.
Args:
df (pandas.DataFrame): The pandas DataFrame to convert.
npartitions (int): The number of partitions to split the DataFrame
into. Has priority over chunksize.
chunksize (int): The number of rows to put in each partition.
sort (bool): Whether or not to sort the df as it is being converted.
Returns:
A new Ray DataFrame object.
"""
if sort and not df.index.is_monotonic_increasing:
df = df.sort_index(ascending=True)
if npartitions is not None:
chunksize = int(len(df) / npartitions)
elif chunksize is None:
raise ValueError("The number of partitions or chunksize must be set.")
temp_df = df
dataframes = []
lengths = []
while len(temp_df) > chunksize:
t_df = temp_df[:chunksize]
lengths.append(len(t_df))
# reset_index here because we want a pd.RangeIndex
# within the partitions. It is smaller and sometimes faster.
t_df = t_df.reset_index(drop=True)
top = ray.put(t_df)
dataframes.append(top)
temp_df = temp_df[chunksize:]
else:
temp_df = temp_df.reset_index(drop=True)
dataframes.append(ray.put(temp_df))
lengths.append(len(temp_df))
return DataFrame(dataframes, df.columns, index=df.index)
def to_pandas(df):
"""Converts a Ray DataFrame to a pandas DataFrame/Series.
Args:
df (ray.DataFrame): The Ray DataFrame to convert.
Returns:
A new pandas DataFrame.
"""
pd_df = pd.concat(ray.get(df._df))
pd_df.index = df.index
pd_df.columns = df.columns
return pd_df
@ray.remote(num_return_vals=2)
def _compute_length_and_index(dfs):
"""Create a default index, which is a RangeIndex
Returns:
The pd.RangeIndex object that represents this DataFrame.
"""
lengths = ray.get([_deploy_func.remote(_get_lengths, d)
for d in dfs])
dest_indices = {"partition":
[i for i in range(len(lengths))
for j in range(lengths[i])],
"index_within_partition":
[j for i in range(len(lengths))
for j in range(lengths[i])]}
return lengths, pd.DataFrame(dest_indices)
|
[
"pandas.core.dtypes.cast.maybe_upcast_putmask",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.common.is_numeric_dtype",
"ray.put",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.DataFrame",
"ray.remote",
"numpy.cumsum",
"pandas.concat",
"pandas.compat.lzip",
"pandas.api.types.is_scalar",
"pandas.core.index._ensure_index_from_sequences",
"pandas._libs.lib.maybe_convert_objects",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"ray.get",
"numpy.dtype",
"pandas.unique",
"numpy.array",
"warnings.warn",
"itertools.chain.from_iterable"
] |
[((93466, 93495), 'ray.remote', 'ray.remote', ([], {'num_return_vals': '(2)'}), '(num_return_vals=2)\n', (93476, 93495), False, 'import ray\n'), ((91212, 91241), 'pandas.concat', 'pd.concat', (['df_rows'], {'axis': 'axis'}), '(df_rows, axis=axis)\n', (91221, 91241), True, 'import pandas as pd\n'), ((9073, 9092), 'ray.get', 'ray.get', (['partitions'], {}), '(partitions)\n', (9080, 9092), False, 'import ray\n'), ((36808, 36832), 'numpy.cumsum', 'np.cumsum', (['self._lengths'], {}), '(self._lengths)\n', (36817, 36832), True, 'import numpy as np\n'), ((38467, 38503), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['iters'], {}), '(iters)\n', (38496, 38503), False, 'import itertools\n'), ((40498, 40534), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['iters'], {}), '(iters)\n', (40527, 40534), False, 'import itertools\n'), ((55313, 55352), 'pandas.util._validators.validate_bool_kwarg', 'validate_bool_kwarg', (['inplace', '"""inplace"""'], {}), "(inplace, 'inplace')\n", (55332, 55352), False, 'from pandas.util._validators import validate_bool_kwarg\n'), ((61969, 61986), 'pandas.api.types.is_scalar', 'is_scalar', (['labels'], {}), '(labels)\n', (61978, 61986), False, 'from pandas.api.types import is_scalar\n'), ((63750, 63789), 'pandas.util._validators.validate_bool_kwarg', 'validate_bool_kwarg', (['inplace', '"""inplace"""'], {}), "(inplace, 'inplace')\n", (63769, 63789), False, 'from pandas.util._validators import validate_bool_kwarg\n'), ((65283, 65326), 'pandas.core.index._ensure_index_from_sequences', '_ensure_index_from_sequences', (['arrays', 'names'], {}), '(arrays, names)\n', (65311, 65326), False, 'from pandas.core.index import _ensure_index_from_sequences\n'), ((92858, 92871), 'ray.put', 'ray.put', (['t_df'], {}), '(t_df)\n', (92865, 92871), False, 'import ray\n'), ((93371, 93386), 'ray.get', 'ray.get', (['df._df'], {}), '(df._df)\n', (93378, 93386), False, 'import ray\n'), ((94075, 94101), 'pandas.DataFrame', 
'pd.DataFrame', (['dest_indices'], {}), '(dest_indices)\n', (94087, 94101), True, 'import pandas as pd\n'), ((2390, 2416), 'ray.get', 'ray.get', (['self._index_cache'], {}), '(self._index_cache)\n', (2397, 2416), False, 'import ray\n'), ((3230, 3257), 'ray.get', 'ray.get', (['self._length_cache'], {}), '(self._length_cache)\n', (3237, 3257), False, 'import ray\n'), ((32692, 32707), 'numpy.array', 'np.array', (['sizes'], {}), '(sizes)\n', (32700, 32707), True, 'import numpy as np\n'), ((56620, 56638), 'ray.get', 'ray.get', (['new_index'], {}), '(new_index)\n', (56627, 56638), False, 'import ray\n'), ((62000, 62275), 'warnings.warn', 'warnings.warn', (['"""set_axis now takes "labels" as first argument, and "axis" as named parameter. The old form, with "axis" as first parameter and "labels" as second, is still supported but will be deprecated in a future version of pandas."""', 'FutureWarning'], {'stacklevel': '(2)'}), '(\n \'set_axis now takes "labels" as first argument, and "axis" as named parameter. 
The old form, with "axis" as first parameter and "labels" as second, is still supported but will be deprecated in a future version of pandas.\'\n , FutureWarning, stacklevel=2)\n', (62013, 62275), False, 'import warnings\n'), ((62439, 62636), 'warnings.warn', 'warnings.warn', (['"""set_axis currently defaults to operating inplace.\nThis will change in a future version of pandas, use inplace=True to avoid this warning."""', 'FutureWarning'], {'stacklevel': '(2)'}), '(\n """set_axis currently defaults to operating inplace.\nThis will change in a future version of pandas, use inplace=True to avoid this warning."""\n , FutureWarning, stacklevel=2)\n', (62452, 62636), False, 'import warnings\n'), ((69095, 69116), 'numpy.array', 'np.array', (['sizes[::-1]'], {}), '(sizes[::-1])\n', (69103, 69116), True, 'import numpy as np\n'), ((93026, 93042), 'ray.put', 'ray.put', (['temp_df'], {}), '(temp_df)\n', (93033, 93042), False, 'import ray\n'), ((3455, 3482), 'ray.get', 'ray.get', (['self._length_cache'], {}), '(self._length_cache)\n', (3462, 3482), False, 'import ray\n'), ((9896, 9917), 'pandas.unique', 'pd.unique', (['self.index'], {}), '(self.index)\n', (9905, 9917), True, 'import pandas as pd\n'), ((11004, 11017), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (11012, 11017), True, 'import numpy as np\n'), ((26753, 26789), 'ray.get', 'ray.get', (["other._df[idx['partition']]"], {}), "(other._df[idx['partition']])\n", (26760, 26789), False, 'import ray\n'), ((27420, 27430), 'ray.get', 'ray.get', (['r'], {}), '(r)\n', (27427, 27430), False, 'import ray\n'), ((34129, 34142), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (34137, 34142), True, 'import numpy as np\n'), ((34935, 34948), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (34943, 34948), True, 'import numpy as np\n'), ((39205, 39254), 'pandas.concat', 'pd.concat', (['[_series for _, _series in partitions]'], {}), '([_series for _, _series in partitions])\n', (39214, 39254), True, 'import 
pandas as pd\n'), ((57237, 57279), 'pandas.compat.lzip', 'lzip', (['self.index.levels', 'self.index.labels'], {}), '(self.index.levels, self.index.labels)\n', (57241, 57279), False, 'from pandas.compat import lzip\n'), ((86289, 86305), 'pandas.core.dtypes.common.is_bool_dtype', 'is_bool_dtype', (['t'], {}), '(t)\n', (86302, 86305), False, 'from pandas.core.dtypes.common import is_bool_dtype, is_numeric_dtype, is_timedelta64_dtype\n'), ((86329, 86348), 'pandas.core.dtypes.common.is_numeric_dtype', 'is_numeric_dtype', (['t'], {}), '(t)\n', (86345, 86348), False, 'from pandas.core.dtypes.common import is_bool_dtype, is_numeric_dtype, is_timedelta64_dtype\n'), ((86372, 86395), 'pandas.core.dtypes.common.is_timedelta64_dtype', 'is_timedelta64_dtype', (['t'], {}), '(t)\n', (86392, 86395), False, 'from pandas.core.dtypes.common import is_bool_dtype, is_numeric_dtype, is_timedelta64_dtype\n'), ((55842, 55875), 'pandas._libs.lib.maybe_convert_objects', 'lib.maybe_convert_objects', (['values'], {}), '(values)\n', (55867, 55875), False, 'from pandas._libs import lib\n'), ((56432, 56474), 'pandas.core.dtypes.cast.maybe_upcast_putmask', 'maybe_upcast_putmask', (['values', 'mask', 'np.nan'], {}), '(values, mask, np.nan)\n', (56452, 56474), False, 'from pandas.core.dtypes.cast import maybe_upcast_putmask\n')]
|
from typing import Tuple
import gym
import numpy as np
from gym_gathering.observations.base_observation_generator import ObservationGenerator
class SingleChannelObservationGenerator(ObservationGenerator):
def __init__(
self,
maze: np.ndarray,
random_goal: bool,
goal_range: int,
noise: float = 0.0,
noise_type: str = "gauss",
static_noise: float = 0.0,
static_noise_type: str = "s&p",
restrict_noise: bool = True,
):
super(SingleChannelObservationGenerator, self).__init__(
random_goal=random_goal,
goal_range=goal_range,
noise=noise,
noise_type=noise_type,
static_noise=static_noise,
static_noise_type=static_noise_type,
restrict_noise=restrict_noise,
)
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(*maze.shape, 1), dtype=np.uint8
)
def observation(self, particles: np.ndarray, goal: Tuple[int, int]):
observation = np.zeros(self.maze.shape)
observation = self.render_particles(particles, out=observation)
observation = self.generate_noise(observation)
if self.random_goal:
observation = self.render_goal(goal, out=observation)
return observation[:, :, np.newaxis] # Convert to single channel image
class MultiChannelObservationGenerator(ObservationGenerator):
def __init__(
self,
maze: np.ndarray,
random_goal: bool,
goal_range: int,
noise: float = 0.0,
noise_type: str = "gauss",
static_noise: float = 0.0,
static_noise_type: str = "s&p",
restrict_noise: bool = True,
):
super(MultiChannelObservationGenerator, self).__init__(
random_goal=random_goal,
goal_range=goal_range,
noise=noise,
noise_type=noise_type,
static_noise=static_noise,
static_noise_type=static_noise_type,
restrict_noise=restrict_noise,
)
self.n_channels = 3 if random_goal else 2
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(*maze.shape, self.n_channels), dtype=np.uint8
)
def observation(self, particles: np.ndarray, goal: Tuple[int, int]):
observation = np.zeros((*self.maze.shape, self.n_channels))
observation[:, :, 0] = self.render_maze()
particle_image = self.render_particles(particles)
particle_image = self.generate_noise(particle_image)
observation[:, :, 1] = particle_image
if self.random_goal:
observation[:, :, 2] = self.render_goal(goal)
return observation
|
[
"numpy.zeros",
"gym.spaces.Box"
] |
[((872, 943), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(*maze.shape, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(*maze.shape, 1), dtype=np.uint8)\n', (886, 943), False, 'import gym\n'), ((1062, 1087), 'numpy.zeros', 'np.zeros', (['self.maze.shape'], {}), '(self.maze.shape)\n', (1070, 1087), True, 'import numpy as np\n'), ((2169, 2259), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(*maze.shape, self.n_channels)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(*maze.shape, self.n_channels), dtype\n =np.uint8)\n', (2183, 2259), False, 'import gym\n'), ((2373, 2418), 'numpy.zeros', 'np.zeros', (['(*self.maze.shape, self.n_channels)'], {}), '((*self.maze.shape, self.n_channels))\n', (2381, 2418), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import time
'''
Parameters Used inside Code
'''
#Gaussian kernel size used for blurring
G_kernel_size = (3,3)
#canny thresholding parameters
canny_u_threshold = 200
canny_l_threshold = 80
# define the upper and lower boundaries of the HSV pixel
# intensities to be considered 'skin'
lower = np.array([0, 48, 80], dtype = "uint8")
upper = np.array([20, 255, 255], dtype = "uint8")
black_lower = np.array([0, 0, 0], dtype = "uint8")
black_upper = np.array([180, 255, 30], dtype = "uint8")
#threshhold for % of skin area detected
skinThresh = 0.00025
#Minimum number of whitepixels needed for square to be counted as occupied
min_white_count = 1
#minimum number of black detected pixels in square
min_black_pixels = 200
|
[
"numpy.array"
] |
[((325, 361), 'numpy.array', 'np.array', (['[0, 48, 80]'], {'dtype': '"""uint8"""'}), "([0, 48, 80], dtype='uint8')\n", (333, 361), True, 'import numpy as np\n'), ((372, 411), 'numpy.array', 'np.array', (['[20, 255, 255]'], {'dtype': '"""uint8"""'}), "([20, 255, 255], dtype='uint8')\n", (380, 411), True, 'import numpy as np\n'), ((430, 464), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': '"""uint8"""'}), "([0, 0, 0], dtype='uint8')\n", (438, 464), True, 'import numpy as np\n'), ((481, 520), 'numpy.array', 'np.array', (['[180, 255, 30]'], {'dtype': '"""uint8"""'}), "([180, 255, 30], dtype='uint8')\n", (489, 520), True, 'import numpy as np\n')]
|
from wordcloud import WordCloud, STOPWORDS
import os
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Patch
from loguru import logger
from GEN_Utils import FileHandling
from GEN_Utils.HDF5_Utils import hdf_to_dict
logger.info('Import OK')
input_path = 'analysis_results/summary_stats/summary_stats.xlsx'
output_folder = 'images/'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Print all lone variables during execution
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# Set plotting backgrounds to white
matplotlib.rcParams.update(_VSCode_defaultMatplotlib_Params)
matplotlib.rcParams.update({'figure.facecolor': (1,1,1,1)})
# Retrieve cleaned data from HDF5
raw_data = pd.read_excel(input_path, sheetname=None)
raw_data.keys()
gender_summary = raw_data['per_gender']
gender_summary = gender_summary.drop(
[col for col in gender_summary.columns.tolist() if 'Unnamed' in col], axis=1)
# As Leadership levels were maintained separately in this table, need to map these to level 3 for 2019
# Generate data for plotting
for_plotting = gender_summary.copy().reset_index(drop=True)
males = for_plotting[['Year', 'type_cat'] +
[col for col in for_plotting if 'm_' in col]]
males.columns = ['Year', 'type_cat',
'Applications', 'Funded', 'Rate', 'Amount']
males['gender'] = 'M'
females = for_plotting[['Year', 'type_cat'] +
[col for col in for_plotting if 'f_' in col]]
females.columns = ['Year', 'type_cat',
'Applications', 'Funded', 'Rate', 'Amount']
females['gender'] = 'F'
for_plotting = pd.concat([males, females]).reset_index(drop=True)
for_plotting = for_plotting.groupby(['Year', 'gender', 'type_cat']).sum().drop('Rate', axis=1).reset_index()
numeric_cols = ['Year', 'type_cat', 'Applications', 'Funded', 'Amount']
for_plotting[numeric_cols] = for_plotting[numeric_cols].astype(float)
year_dict = {2015: 0, 2016: 1, 2017: 2, 2018: 3, 2019: 4}
for_plotting['Year_num'] = for_plotting['Year'].map(year_dict)
for_plotting['Amount'] = for_plotting['Amount'] / 1000000
for_plotting['proportion_Funded'] = for_plotting['Funded'] / for_plotting['Applications'] *100
total_funded = for_plotting.groupby(['Year', 'type_cat']).sum()['Funded'].to_dict()
total_amounts = for_plotting.groupby(['Year', 'type_cat']).sum()[
'Amount'].to_dict()
for_plotting['mapper'] = tuple(zip(for_plotting['Year'], for_plotting['type_cat']))
for_plotting['total_amount'] = for_plotting['mapper'].map(total_amounts)
for_plotting['total_funded'] = for_plotting['mapper'].map(total_funded)
for_plotting['proportion_amount'] = for_plotting['Amount'] / for_plotting['total_amount'] * 100
for_plotting['proportion_total_funded'] = for_plotting['Funded'] / \
for_plotting['total_funded'] * 100
# Generate plot 1
# sns.palplot(sns.color_palette("Purples"))
# fem_colour = sns.color_palette("Purples")[4]
fem_colour = '#511751'
male_colour = sns.color_palette("Oranges")[4]
col_pal = [fem_colour, male_colour]
labels = ['Female', 'Male']
df = for_plotting.groupby(['Year_num', 'gender']).sum().reset_index()
fig, ax = plt.subplots(figsize=(12, 5))
sns.barplot(x='Year_num', y='Amount', data=df, hue='gender', ax=ax, palette=col_pal)
legend_elements = [Patch(facecolor=col_pal[x], label=labels[x]) for x in range(0, len(labels))]
ax.legend(handles=legend_elements, loc='upper left', title='Funding Amount', ncol=3)
ax2 = ax.twinx()
sns.lineplot(x='Year_num', y='Funded', data=df,
hue='gender', marker='o', markersize=10, palette=col_pal, ax=ax2)
ax2.set_ylim(0, 200)
# Fix all the adjusted elements
plt.legend(labels, loc='upper left', title='Number funded', ncol=3, bbox_to_anchor=(0.67, 1.0))
ax.set_xlabel('Year of funding')
ax.set_ylabel('Total funding amount ($M AUD)')
ax2.set_ylabel('Number of successful applications', rotation=-90, labelpad=15)
plt.xticks(np.arange(0, 5, 1), labels=list(year_dict.keys()))
plt.title('Total funding awarded according to gender.', loc='left',
fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
plt.tight_layout()
plt.savefig(f'{output_folder}gender_total.png', dpi=300)
plt.show()
# Generate plot 2
for level, df in for_plotting.groupby('type_cat'):
plotting = df[df['gender'] == 'F']
fig, ax = plt.subplots(figsize=(10, 4))
m = sns.barplot(orient='h', y=list(plotting['Year_num']), x=[100 for x in plotting['Year_num']], color=male_colour)
f = sns.barplot(x=plotting['proportion_total_funded'], y=plotting['Year_num'], color=fem_colour, orient='h')
# Fix all the adjusted elements
ax.set_ylabel('Year of funding')
ax.set_xlabel('Proportion of funded applications (%)')
ax2.set_ylabel('Success rate (%)', rotation=-90, labelpad=15)
plt.yticks(np.arange(0, 5, 1), labels=list(year_dict.keys()))
plt.title(f'Proportion of Fellowships awarded by gender at level {int(level)}.', loc='left',
fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)
ax.axvline(50, c='#636363', linestyle='--', linewidth=3)
plt.tight_layout()
plt.savefig(f'{output_folder}gender_proportion_level{level}.png', dpi=300)
plt.show()
|
[
"matplotlib.pyplot.title",
"seaborn.lineplot",
"matplotlib.pyplot.tight_layout",
"os.mkdir",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"seaborn.barplot",
"os.path.exists",
"pandas.read_excel",
"loguru.logger.info",
"numpy.arange",
"seaborn.color_palette",
"matplotlib.patches.Patch",
"matplotlib.pyplot.subplots",
"pandas.concat",
"matplotlib.pyplot.savefig"
] |
[((302, 326), 'loguru.logger.info', 'logger.info', (['"""Import OK"""'], {}), "('Import OK')\n", (313, 326), False, 'from loguru import logger\n'), ((841, 882), 'pandas.read_excel', 'pd.read_excel', (['input_path'], {'sheetname': 'None'}), '(input_path, sheetname=None)\n', (854, 882), True, 'import pandas as pd\n'), ((3259, 3288), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (3271, 3288), True, 'import matplotlib.pyplot as plt\n'), ((3289, 3378), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Year_num"""', 'y': '"""Amount"""', 'data': 'df', 'hue': '"""gender"""', 'ax': 'ax', 'palette': 'col_pal'}), "(x='Year_num', y='Amount', data=df, hue='gender', ax=ax, palette\n =col_pal)\n", (3300, 3378), True, 'import seaborn as sns\n'), ((3573, 3690), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Year_num"""', 'y': '"""Funded"""', 'data': 'df', 'hue': '"""gender"""', 'marker': '"""o"""', 'markersize': '(10)', 'palette': 'col_pal', 'ax': 'ax2'}), "(x='Year_num', y='Funded', data=df, hue='gender', marker='o',\n markersize=10, palette=col_pal, ax=ax2)\n", (3585, 3690), True, 'import seaborn as sns\n'), ((3753, 3852), 'matplotlib.pyplot.legend', 'plt.legend', (['labels'], {'loc': '"""upper left"""', 'title': '"""Number funded"""', 'ncol': '(3)', 'bbox_to_anchor': '(0.67, 1.0)'}), "(labels, loc='upper left', title='Number funded', ncol=3,\n bbox_to_anchor=(0.67, 1.0))\n", (3763, 3852), True, 'import matplotlib.pyplot as plt\n'), ((4070, 4198), 'matplotlib.pyplot.title', 'plt.title', (['"""Total funding awarded according to gender."""'], {'loc': '"""left"""', 'fontdict': "{'fontsize': 15, 'fontweight': 'bold'}", 'pad': '(20)'}), "('Total funding awarded according to gender.', loc='left',\n fontdict={'fontsize': 15, 'fontweight': 'bold'}, pad=20)\n", (4079, 4198), True, 'import matplotlib.pyplot as plt\n'), ((4203, 4221), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4219, 4221), True, 'import 
matplotlib.pyplot as plt\n'), ((4222, 4278), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{output_folder}gender_total.png"""'], {'dpi': '(300)'}), "(f'{output_folder}gender_total.png', dpi=300)\n", (4233, 4278), True, 'import matplotlib.pyplot as plt\n'), ((4279, 4289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4287, 4289), True, 'import matplotlib.pyplot as plt\n'), ((427, 456), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (441, 456), False, 'import os\n'), ((462, 485), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (470, 485), False, 'import os\n'), ((3081, 3109), 'seaborn.color_palette', 'sns.color_palette', (['"""Oranges"""'], {}), "('Oranges')\n", (3098, 3109), True, 'import seaborn as sns\n'), ((3394, 3438), 'matplotlib.patches.Patch', 'Patch', ([], {'facecolor': 'col_pal[x]', 'label': 'labels[x]'}), '(facecolor=col_pal[x], label=labels[x])\n', (3399, 3438), False, 'from matplotlib.patches import Patch\n'), ((4019, 4037), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (4028, 4037), True, 'import numpy as np\n'), ((4415, 4444), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (4427, 4444), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4681), 'seaborn.barplot', 'sns.barplot', ([], {'x': "plotting['proportion_total_funded']", 'y': "plotting['Year_num']", 'color': 'fem_colour', 'orient': '"""h"""'}), "(x=plotting['proportion_total_funded'], y=plotting['Year_num'],\n color=fem_colour, orient='h')\n", (4584, 4681), True, 'import seaborn as sns\n'), ((5178, 5196), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5194, 5196), True, 'import matplotlib.pyplot as plt\n'), ((5201, 5275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{output_folder}gender_proportion_level{level}.png"""'], {'dpi': '(300)'}), "(f'{output_folder}gender_proportion_level{level}.png', dpi=300)\n", (5212, 
5275), True, 'import matplotlib.pyplot as plt\n'), ((5280, 5290), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5288, 5290), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1771), 'pandas.concat', 'pd.concat', (['[males, females]'], {}), '([males, females])\n', (1753, 1771), True, 'import pandas as pd\n'), ((4893, 4911), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (4902, 4911), True, 'import numpy as np\n')]
|
# <NAME>
import numpy as np
import pylab as plt
from spectral.io import envi
import os, sys
sys.path.append('../utils')
from fpa import FPA
I = envi.open('../data/EMIT_LinearityMap_20220117.hdr').load()
thresh = 20
fpa = FPA('../config/tvac2_config.json')
for band in range(I.shape[2]):
x = np.squeeze(I[:,:,band])
# Remove anomalously high or low values
for row in range(1,x.shape[0]):
for col in range(x.shape[1]):
if abs(x[row,col])>thresh:
x[row,col] = x[row-1,col]
# Copy and paste linearity columns over the first aquisition zone,
# which is anomalous
for col in range(24,44):
x[:,col] = x[:,44]
# Copy and paste linearity columns over the goober zone,
# which is anomalous
for col in range(1020,1027):
x[:,col] = x[:,1027]
# Copy and paste linearity rows over the OSF filter,
# which is anomalous
for lo, hi in fpa.osf_seam_positions:
for row in range(lo, hi+1):
x[row,:] = x[lo-1,:]
I[:,:,band] = x.reshape((x.shape[0],x.shape[1],1))
envi.save_image('../data/EMIT_LinearityMap_20220117.hdr',I,ext='',force=True)
|
[
"sys.path.append",
"spectral.io.envi.save_image",
"spectral.io.envi.open",
"fpa.FPA",
"numpy.squeeze"
] |
[((92, 119), 'sys.path.append', 'sys.path.append', (['"""../utils"""'], {}), "('../utils')\n", (107, 119), False, 'import os, sys\n'), ((224, 258), 'fpa.FPA', 'FPA', (['"""../config/tvac2_config.json"""'], {}), "('../config/tvac2_config.json')\n", (227, 258), False, 'from fpa import FPA\n'), ((1019, 1104), 'spectral.io.envi.save_image', 'envi.save_image', (['"""../data/EMIT_LinearityMap_20220117.hdr"""', 'I'], {'ext': '""""""', 'force': '(True)'}), "('../data/EMIT_LinearityMap_20220117.hdr', I, ext='', force=True\n )\n", (1034, 1104), False, 'from spectral.io import envi\n'), ((297, 322), 'numpy.squeeze', 'np.squeeze', (['I[:, :, band]'], {}), '(I[:, :, band])\n', (307, 322), True, 'import numpy as np\n'), ((145, 196), 'spectral.io.envi.open', 'envi.open', (['"""../data/EMIT_LinearityMap_20220117.hdr"""'], {}), "('../data/EMIT_LinearityMap_20220117.hdr')\n", (154, 196), False, 'from spectral.io import envi\n')]
|
'''
util.py
'''
import os.path
import h5py
import numpy as np
import constants
import skimage.io
import skimage.transform
from scipy.io import loadmat
import glob
import os
import cPickle as pickle
import torch
from itertools import izip_longest
from glove import Glove
import torch
import torch.nn as nn
# Makes the directories of they don't already exist
def make_directories():
output_path = constants.SAVE_PATH
if not os.path.exists(output_path):
os.makedirs(output_path)
print("Made output directory")
else:
print("WARNING: starting training with an existing outputs directory")
if not os.path.exists(output_path + 'weights/'):
os.makedirs(output_path + 'weights/')
print("Made weights directory")
if not os.path.exists(output_path + 'images/'):
os.makedirs(output_path + 'images/')
print("Made images directory")
# Loads a map from image file names to 'test', 'train', or 'val'
# Used in other functions to split data
def load_dataset_map():
ids = loadmat('data_constants/setid.mat')
# Flip train and test examples since otherwise there would be 6000 test
train_ids = ids['tstid'][0] - 1
test_ids = ids['trnid'][0] - 1
val_ids = ids['valid'][0] - 1
print(len(train_ids), len(val_ids), len(test_ids), "Train, val, test examples, respectively")
filenames = [name for name in os.listdir('Data/' + constants.ENTIRE_DATASET) if name.endswith('.jpg')]
image_paths = sorted(filenames)
dataset_map = {}
for i, name in enumerate(image_paths):
if i in train_ids:
dataset_map[name] = 'train'
elif i in test_ids:
dataset_map[name] ='test'
elif i in val_ids:
dataset_map[name] ='val'
else:
print("Invalid ID!")
return dataset_map
def load_flowers_capt_dict():
"""Use pickle to load the flowers captions"""
flowers_capt_dict = pickle.load(open( constants.FLOWERS_CAP_DICT, "rb" ))
return flowers_capt_dict
def load_coco_capt_dict():
"""Use pickle to load the MSCOCO captions"""
coco_capt_dict = pickle.load(open(constants.COCO_CAP_DICT, "rb"))
return coco_capt_dict
# Adapted from https://github.com/paarthneekhara/text-to-image
# Takes the directoy and file name of the hdf5 file that contains the word vectors
# Returns a dict from image to list of captions
def load_text_vec(directory, file_name, dataset_map):
h = h5py.File(os.path.join(directory, file_name))
train_captions, val_captions, test_captions = {}, {}, {}
for item in h.iteritems():
name = item[0]
if dataset_map[name] == 'train':
train_captions[name] = np.array(item[1])
elif dataset_map[name] =='val':
val_captions[name] = np.array(item[1])
elif dataset_map[name] =='test':
test_captions[name] = np.array(item[1])
else:
print("Invalid name")
return train_captions, val_captions, test_captions
# Gets images for the main function
def get_images(directory, file_name, save_path):
if os.path.exists(save_path):
image_dicts = torch.load(save_path)
train_image_dict, val_image_dict, test_image_dict = image_dicts
print("Loaded images")
else:
print("Loading images and separating into train/val/test sets")
path = os.path.join(directory, file_name)
filenames = train_captions.keys() + val_captions.keys() + test_captions.keys()
train_image_dict, val_image_dict, test_image_dict = util.load_images(path, filenames, dataset_map)
image_dicts = [train_image_dict, val_image_dict, test_image_dict]
torch.save(image_dicts, save_path)
return train_image_dict, val_image_dict, test_image_dict
# Takes in the directory and a list of file names and returns a dict of file name -> images
def load_images(directory, filenames, dataset_map):
train_image_dict, val_image_dict, test_image_dict = {}, {}, {}
for name in filenames:
image_file = os.path.join(directory + name)
curr_image = skimage.io.imread(image_file)
# Resize image to correct size as float 32
resized_image = skimage.transform.resize(curr_image, (constants.IMAGE_SIZE, constants.IMAGE_SIZE)).astype('float32')
if dataset_map[name] =='train':
train_image_dict[name] = resized_image
elif dataset_map[name] =='val':
val_image_dict[name] = resized_image
elif dataset_map[name] =='test':
test_image_dict[name] = resized_image
else:
print("Invalid name")
return train_image_dict, val_image_dict, test_image_dict
# custom weights initialization called on netG and netD
# from https://github.com/pytorch/examples/blob/master/dcgan/main.py
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Embedding') != -1:
m.weight.data.fill_(1.0)
elif classname.find('LSTM') != -1:
nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0)
def preprocess2(batch_input):
"""Inputs for self.embeddings in TextModel(). Batch_input must be numpy padded"""
batch_size, sent_len = batch_input.shape
offsets = [sent_len * i for i in range(batch_size)]
return batch_input.flatten(), offsets
def preprocess(batch_input):
"""If batch_input isn't numpy"""
glove = Glove()
flatten, offsets = [], []
index = 0
for ex in batch_input:
ex = ex.replace(',', ' ')
words = ex.strip('.').split()
result = []
for w in words:
try:
idx = glove.get_index(w)
result.append(idx)
except:
continue
# words = [glove.get_index(word) for word in words]
offsets.append(index)
flatten.extend(result)
index += len(result)
return torch.LongTensor(flatten), torch.LongTensor(offsets)
# https://github.com/sunshineatnoon/Paper-Implementations/blob/master/BEGAN/began.py
def adjust_learning_rate(optimizer, niter):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = constants.LR * (0.95 ** (niter // constants.LR_DECAY_EVERY))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
# From https://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks
# Iterates over an array in chunks
def grouper(array, n):
args = [iter(array)] * n
return izip_longest(*args)
# Show the generated image improves over time
def print_images(generated):
for img in generated:
image_done = img.data.numpy()
swap_image = np.swapaxes(image_done,1,2)
swap_image = np.swapaxes(swap_image,2,3)
plt.imshow(swap_image[0])
plt.show()
def get_text_description(text_caption_dict, batch_keys):
g_idx = [np.random.randint(len(text_caption_dict[batch_keys[0]])) for i in range(len(batch_keys))]
g_text_des = np.array([text_caption_dict[k][i] for k,i in zip(batch_keys, g_idx)])
# g_text_des = np.expand_dims(g_text_des, axis=0) ONLY NEED FOR 1 DIM
return g_text_des
def choose_wrong_image(image_dict, batch_keys):
wrong_image = []
for k in batch_keys:
wrong_key = np.random.choice(image_dict.keys())
while wrong_key == k:
wrong_key = np.random.choice(image_dict.keys())
wrong_image.append(image_dict[wrong_key])
wrong_image = np.array(wrong_image)
wrong_image = augment_image_batch(wrong_image)
wrong_image = np.swapaxes(wrong_image, 2, 3)
wrong_image = np.swapaxes(wrong_image, 1, 2)
return wrong_image
# Finds the real image for the given batch data
def choose_real_image(image_dict, batch_keys):
    """Stack the matching image for every batch key, augmented, with the
    channel axis moved to position 1."""
    batch = np.array([image_dict[key] for key in batch_keys])
    batch = augment_image_batch(batch)
    return np.swapaxes(np.swapaxes(batch, 2, 3), 1, 2)
def augment_image_batch(images):
    """Randomly mirror each image of the batch along axis 1, in place.

    Each image is flipped with probability 0.5 (one np.random.rand() draw
    per image); the mutated batch is returned.
    """
    for idx in range(images.shape[0]):
        if np.random.rand() > .5:
            images[idx, :, :, :] = np.flip(images[idx, :, :, :], 1)
    return images
# https://github.com/sunshineatnoon/Paper-Implementations/blob/master/BEGAN/began.py
def adjust_learning_rate(optimizer, niter):
    """Apply a 0.95-per-step exponential LR decay to all param groups.

    One decay step elapses every constants.LR_DECAY_EVERY iterations.
    (Duplicate of the earlier definition; kept for compatibility.)
    """
    decayed = constants.LR * pow(0.95, niter // constants.LR_DECAY_EVERY)
    for pg in optimizer.param_groups:
        pg['lr'] = decayed
    return optimizer
|
[
"numpy.flip",
"os.makedirs",
"scipy.io.loadmat",
"torch.LongTensor",
"itertools.izip_longest",
"torch.load",
"os.path.exists",
"torch.save",
"torch.nn.init.xavier_uniform",
"numpy.array",
"glove.Glove",
"numpy.swapaxes",
"numpy.random.rand",
"os.path.join",
"os.listdir"
] |
[((1039, 1074), 'scipy.io.loadmat', 'loadmat', (['"""data_constants/setid.mat"""'], {}), "('data_constants/setid.mat')\n", (1046, 1074), False, 'from scipy.io import loadmat\n'), ((3085, 3110), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (3099, 3110), False, 'import os\n'), ((5568, 5575), 'glove.Glove', 'Glove', ([], {}), '()\n', (5573, 5575), False, 'from glove import Glove\n'), ((6708, 6727), 'itertools.izip_longest', 'izip_longest', (['*args'], {}), '(*args)\n', (6720, 6727), False, 'from itertools import izip_longest\n'), ((7674, 7695), 'numpy.array', 'np.array', (['wrong_image'], {}), '(wrong_image)\n', (7682, 7695), True, 'import numpy as np\n'), ((7765, 7795), 'numpy.swapaxes', 'np.swapaxes', (['wrong_image', '(2)', '(3)'], {}), '(wrong_image, 2, 3)\n', (7776, 7795), True, 'import numpy as np\n'), ((7814, 7844), 'numpy.swapaxes', 'np.swapaxes', (['wrong_image', '(1)', '(2)'], {}), '(wrong_image, 1, 2)\n', (7825, 7844), True, 'import numpy as np\n'), ((7979, 8024), 'numpy.array', 'np.array', (['[image_dict[k] for k in batch_keys]'], {}), '([image_dict[k] for k in batch_keys])\n', (7987, 8024), True, 'import numpy as np\n'), ((8085, 8112), 'numpy.swapaxes', 'np.swapaxes', (['real_img', '(2)', '(3)'], {}), '(real_img, 2, 3)\n', (8096, 8112), True, 'import numpy as np\n'), ((8128, 8155), 'numpy.swapaxes', 'np.swapaxes', (['real_img', '(1)', '(2)'], {}), '(real_img, 1, 2)\n', (8139, 8155), True, 'import numpy as np\n'), ((433, 460), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (447, 460), False, 'import os\n'), ((470, 494), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (481, 494), False, 'import os\n'), ((634, 674), 'os.path.exists', 'os.path.exists', (["(output_path + 'weights/')"], {}), "(output_path + 'weights/')\n", (648, 674), False, 'import os\n'), ((684, 721), 'os.makedirs', 'os.makedirs', (["(output_path + 'weights/')"], {}), "(output_path + 'weights/')\n", (695, 
721), False, 'import os\n'), ((773, 812), 'os.path.exists', 'os.path.exists', (["(output_path + 'images/')"], {}), "(output_path + 'images/')\n", (787, 812), False, 'import os\n'), ((822, 858), 'os.makedirs', 'os.makedirs', (["(output_path + 'images/')"], {}), "(output_path + 'images/')\n", (833, 858), False, 'import os\n'), ((2459, 2493), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (2471, 2493), False, 'import os\n'), ((3134, 3155), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (3144, 3155), False, 'import torch\n'), ((3356, 3390), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (3368, 3390), False, 'import os\n'), ((3667, 3701), 'torch.save', 'torch.save', (['image_dicts', 'save_path'], {}), '(image_dicts, save_path)\n', (3677, 3701), False, 'import torch\n'), ((4025, 4055), 'os.path.join', 'os.path.join', (['(directory + name)'], {}), '(directory + name)\n', (4037, 4055), False, 'import os\n'), ((6063, 6088), 'torch.LongTensor', 'torch.LongTensor', (['flatten'], {}), '(flatten)\n', (6079, 6088), False, 'import torch\n'), ((6090, 6115), 'torch.LongTensor', 'torch.LongTensor', (['offsets'], {}), '(offsets)\n', (6106, 6115), False, 'import torch\n'), ((6889, 6918), 'numpy.swapaxes', 'np.swapaxes', (['image_done', '(1)', '(2)'], {}), '(image_done, 1, 2)\n', (6900, 6918), True, 'import numpy as np\n'), ((6938, 6967), 'numpy.swapaxes', 'np.swapaxes', (['swap_image', '(2)', '(3)'], {}), '(swap_image, 2, 3)\n', (6949, 6967), True, 'import numpy as np\n'), ((1389, 1435), 'os.listdir', 'os.listdir', (["('Data/' + constants.ENTIRE_DATASET)"], {}), "('Data/' + constants.ENTIRE_DATASET)\n", (1399, 1435), False, 'import os\n'), ((2686, 2703), 'numpy.array', 'np.array', (['item[1]'], {}), '(item[1])\n', (2694, 2703), True, 'import numpy as np\n'), ((8320, 8336), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8334, 8336), True, 'import numpy as np\n'), 
((8362, 8378), 'numpy.flip', 'np.flip', (['curr', '(1)'], {}), '(curr, 1)\n', (8369, 8378), True, 'import numpy as np\n'), ((2777, 2794), 'numpy.array', 'np.array', (['item[1]'], {}), '(item[1])\n', (2785, 2794), True, 'import numpy as np\n'), ((2870, 2887), 'numpy.array', 'np.array', (['item[1]'], {}), '(item[1])\n', (2878, 2887), True, 'import numpy as np\n'), ((5166, 5198), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['m.weight'], {}), '(m.weight)\n', (5188, 5198), True, 'import torch.nn as nn\n')]
|
from __future__ import print_function, division
import matplotlib.pyplot as plt
import math
from sklearn.metrics import auc
import numpy as np
import cv2
import os, sys
def int_(x):
    """Round x to the nearest integer (Python 3 banker's rounding) as an int."""
    return int(round(x))
def IoU(r1, r2):
    """Jaccard overlap (intersection over union) of two (x, y, w, h) boxes."""
    x11, y11, w1, h1 = r1
    x21, y21, w2, h2 = r2
    x12, y12 = x11 + w1, y11 + h1
    x22, y22 = x21 + w2, y21 + h2
    inter_w = max(0, min(x12, x22) - max(x11, x21))
    inter_h = max(0, min(y12, y22) - max(y11, y21))
    inter = 1. * inter_w * inter_h
    union = (y12 - y11) * (x12 - x11) + (y22 - y21) * (x22 - x21) - inter
    return inter / union
def evaluate_iou(rect_gt, rect_pred):
    """Per-pair IoU scores between ground-truth and predicted rectangles."""
    return list(map(IoU, rect_gt, rect_pred))
def compute_score(x, w, h):
    """Box-filter response of x with an all-ones (h, w) kernel.

    Responses within half a kernel of any border (where a full w-by-h box
    cannot fit inside the map) are zeroed out.
    """
    kernel = np.ones((h, w))
    score = cv2.filter2D(x, -1, kernel)
    # zero the half-kernel margin on every side
    score[:h // 2, :] = 0
    score[math.ceil(-h / 2):, :] = 0
    score[:, :w // 2] = 0
    score[:, math.ceil(-w / 2):] = 0
    return score
def locate_bbox(a, w, h):
    """Turn the peak of score map `a` into an (x, y, w, h) box centred on it.

    The peak row/column are the argmax of the per-row and per-column maxima,
    matching the original tie-breaking behaviour.
    """
    peak_row = np.argmax(np.max(a, axis=1))
    peak_col = np.argmax(np.max(a, axis=0))
    x = peak_col - 1. * w / 2
    y = peak_row - 1. * h / 2
    return x, y, w, h
def score2curve(score, thres_delta=0.01):
    """Success-rate curve of `score` over thresholds 0..1 (step thres_delta).

    Returns (thresholds, fraction of scores strictly above each threshold);
    the 1e-6 epsilon keeps values exactly at a threshold from counting.
    """
    thres = np.linspace(0, 1, int(1. / thres_delta) + 1)
    hits = [np.sum(score >= (th + 1e-6)) for th in thres]
    success_rate = np.array(hits) / len(score)
    return thres, success_rate
def all_sample_iou(score_list, gt_list):
    """IoU between each ground-truth box and the box predicted from its score map."""
    iou_list = []
    for score, image_gt in zip(score_list, gt_list):
        w, h = image_gt[2:]
        pred_rect = locate_bbox(score, w, h)
        iou_list.append(IoU(image_gt, pred_rect))
    return iou_list
def plot_success_curve(iou_score, title=''):
    """Plot the IoU success curve and report its AUC in the figure title."""
    thres, success_rate = score2curve(iou_score, thres_delta=0.05)
    # mean over all but the last threshold -- the AUC protocol used by
    # previous template-matching papers (not the trapezoidal sklearn auc)
    auc_ = np.mean(success_rate[:-1])
    plt.figure()
    plt.grid(True)
    plt.xticks(np.linspace(0, 1, 11))
    plt.yticks(np.linspace(0, 1, 11))
    plt.ylim(0, 1)
    plt.title(title + 'auc={}'.format(auc_))
    plt.plot(thres, success_rate)
    plt.show()
|
[
"matplotlib.pyplot.show",
"numpy.sum",
"cv2.filter2D",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"math.ceil",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.max",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.grid"
] |
[((777, 792), 'numpy.ones', 'np.ones', (['(h, w)'], {}), '((h, w))\n', (784, 792), True, 'import numpy as np\n'), ((807, 829), 'cv2.filter2D', 'cv2.filter2D', (['x', '(-1)', 'k'], {}), '(x, -1, k)\n', (819, 829), False, 'import cv2\n'), ((1921, 1947), 'numpy.mean', 'np.mean', (['success_rate[:-1]'], {}), '(success_rate[:-1])\n', (1928, 1947), True, 'import numpy as np\n'), ((2087, 2099), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2097, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2104, 2118), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2112, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2195, 2209), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (2203, 2209), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2288), 'matplotlib.pyplot.plot', 'plt.plot', (['thres', 'success_rate'], {}), '(thres, success_rate)\n', (2267, 2288), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2303, 2305), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1033), 'numpy.max', 'np.max', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (1022, 1033), True, 'import numpy as np\n'), ((1057, 1074), 'numpy.max', 'np.max', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (1063, 1074), True, 'import numpy as np\n'), ((1370, 1391), 'numpy.array', 'np.array', (['success_num'], {}), '(success_num)\n', (1378, 1391), True, 'import numpy as np\n'), ((2134, 2155), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (2145, 2155), True, 'import numpy as np\n'), ((2170, 2191), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (2181, 2191), True, 'import numpy as np\n'), ((1322, 1349), 'numpy.sum', 'np.sum', (['(score >= th + 1e-06)'], {}), '(score >= th + 1e-06)\n', (1328, 1349), True, 'import numpy as np\n'), ((867, 884), 'math.ceil', 'math.ceil', (['(-w / 2)'], {}), '(-w / 2)\n', (876, 884), False, 'import 
math\n'), ((923, 940), 'math.ceil', 'math.ceil', (['(-h / 2)'], {}), '(-h / 2)\n', (932, 940), False, 'import math\n')]
|
#
# Compare lithium-ion battery models with and without particle size distibution
#
import numpy as np
import pybamm
# verbose logging so model build/solve progress is visible
pybamm.set_logging_level("INFO")
# load models: two identical DFN models; the second is given x-dependent
# particle-size distributions via the parameter overrides applied below
models = [
    pybamm.lithium_ion.DFN(name="standard DFN"),
    pybamm.lithium_ion.DFN(name="particle DFN"),
]
# load parameter values (a separate default set for each model)
params = [models[0].default_parameter_values, models[1].default_parameter_values]
def negative_distribution(x):
    """Particle-size distribution in the negative electrode, linear in x."""
    l_n = models[1].param.l_n
    return 1 + 2 * x / l_n
def positive_distribution(x):
    """Particle-size distribution in the positive electrode, linear in (1 - x)."""
    l_p = models[1].param.l_p
    return 1 + 2 * (1 - x) / l_p
# give the second ("particle DFN") model x-dependent size distributions
params[1]["Negative particle distribution in x"] = negative_distribution
params[1]["Positive particle distribution in x"] = positive_distribution
# set up and solve simulations
t_eval = np.linspace(0, 3600, 100)  # 1 h window, 100 output times
sols = []
for model, param in zip(models, params):
    sim = pybamm.Simulation(model, parameter_values=param)
    sol = sim.solve(t_eval)
    sols.append(sol)
# variables to compare side by side for the two models
output_variables = [
    "Negative particle surface concentration",
    "Electrolyte concentration",
    "Positive particle surface concentration",
    "Current [A]",
    "Negative electrode potential [V]",
    "Electrolyte potential [V]",
    "Positive electrode potential [V]",
    "Terminal voltage [V]",
    "Negative particle distribution in x",
    "Positive particle distribution in x",
]
# plot both solutions together
plot = pybamm.QuickPlot(sols, output_variables=output_variables)
plot.dynamic_plot()
|
[
"pybamm.set_logging_level",
"pybamm.Simulation",
"numpy.linspace",
"pybamm.QuickPlot",
"pybamm.lithium_ion.DFN"
] |
[((118, 150), 'pybamm.set_logging_level', 'pybamm.set_logging_level', (['"""INFO"""'], {}), "('INFO')\n", (142, 150), False, 'import pybamm\n'), ((730, 755), 'numpy.linspace', 'np.linspace', (['(0)', '(3600)', '(100)'], {}), '(0, 3600, 100)\n', (741, 755), True, 'import numpy as np\n'), ((1327, 1384), 'pybamm.QuickPlot', 'pybamm.QuickPlot', (['sols'], {'output_variables': 'output_variables'}), '(sols, output_variables=output_variables)\n', (1343, 1384), False, 'import pybamm\n'), ((181, 224), 'pybamm.lithium_ion.DFN', 'pybamm.lithium_ion.DFN', ([], {'name': '"""standard DFN"""'}), "(name='standard DFN')\n", (203, 224), False, 'import pybamm\n'), ((230, 273), 'pybamm.lithium_ion.DFN', 'pybamm.lithium_ion.DFN', ([], {'name': '"""particle DFN"""'}), "(name='particle DFN')\n", (252, 273), False, 'import pybamm\n'), ((817, 865), 'pybamm.Simulation', 'pybamm.Simulation', (['model'], {'parameter_values': 'param'}), '(model, parameter_values=param)\n', (834, 865), False, 'import pybamm\n')]
|
from scipy.misc import imread
from tqdm import tqdm
import numpy as np
import os
import random
import warnings
class SetList(object):
    '''A class to hold lists of inputs for a network'''

    def __init__(self, source='', target=None):
        '''Constructs a new SetList.

        Args:
            source (str): The path to the list file (or a directory)
            target (str, optional): The path written to by write();
                defaults to source
        '''
        self.source = source
        if target is None:
            self.target = source
        else:
            self.target = target
        self.list = []
        self.mean = []
        if source != '':
            self.load()

    @property
    def set(self):
        '''The contents as a set (duplicates removed).'''
        return set(self.list)

    @set.setter
    def set(self, set):
        self.list = list(set)

    def __len__(self):
        '''Returns the length of this Set'''
        return len(self.list)

    def __str__(self):
        '''Returns a str-description of this Set'''
        return '{}[{}] → {}'.format(self.source, len(self.list), self.target)

    def __iter__(self):
        '''Returns the iterator for the contained list'''
        return iter(self.list)

    def load(self):
        '''Loads the contents of self.source into the list. If source is a dir
        it will list all files in it without extensions. It does replace the
        whole content and does not append to it.'''
        if os.path.isdir(self.source):
            self.load_directory(self.source)
            self.source = ''
            self.target = ''
        elif not os.path.exists(self.source):
            self.list = []
        else:
            with open(self.source) as f:
                # BUGFIX: the previous l[:-1] chopped the last character of a
                # final line without a trailing newline; rstrip('\n') removes
                # only the newline itself.
                self.list = [l.rstrip('\n') for l in f.readlines() if l.strip()]

    def load_directory(self, dir):
        '''Loads the file names (without extension) of a directory into the list.

        Args:
            dir (str): The path to the dir
        '''
        self.list = [os.path.splitext(f)[0] for f in next(os.walk(dir))[2]]

    def write(self):
        '''Saves the list to the path set in self.target. This is normally set
        to self.source'''
        with open(self.target, 'w') as f:
            for row in self:
                f.write("{}\n".format(row))
        print('List {} written...'.format(self.target))

    def shuffle(self):
        '''Shuffles the list in place.'''
        random.shuffle(self.list)

    def add_pre_suffix(self, prefix='', suffix=''):
        '''Adds a prefix and a suffix to every element of the list.

        Args:
            prefix (str,optional): The prefix to prepend
            suffix (str,optional): The suffix to append
        '''
        self.list = [prefix + x + suffix for x in self]

    def rm_pre_suffix(self, prefix='', suffix=''):
        '''Removes a prefix and a suffix from every element of the list.

        Args:
            prefix (str,optional): The prefix to remove
            suffix (str,optional): The suffix to remove
        '''
        # BUGFIX: x[len(prefix):-len(suffix)] with the default suffix=''
        # evaluates to x[len(prefix):0] and wiped the entries; compute the
        # end index explicitly instead.
        self.list = [x[len(prefix):len(x) - len(suffix)] for x in self]

    def calculate_mean(self):
        '''Calculates the mean pixel for this set. The list has to contain full
        paths obviously so you probably have to append Prefixes and suffixes
        before running this.

        Returns:
            The mean pixel. As BGR! (NOTE(review): the channel reversal only
            happens on the non-(3,) branch -- confirm intended channel order.)
        '''
        self.mean = [[], [], []]
        print('Calculating mean pixel...')
        for row in tqdm(self):
            im = imread(row)
            # accumulate the per-image mean of each channel separately
            self.mean[0].append(np.mean(im[..., 0]))
            self.mean[1].append(np.mean(im[..., 1]))
            self.mean[2].append(np.mean(im[..., 2]))
        self.mean = np.mean(self.mean, axis=1)
        if self.mean.shape == (3,):
            return self.mean
        else:
            return self.mean[:, :, ::-1]

    def each(self, callback):
        '''Applies a callable to every element of the list.

        Args:
            callback (func): The callback function to use

        Returns:
            True if successfull and False if not
        '''
        if not callable(callback):
            warnings.warn('Not callable object')
            return False
        print('Each of {}'.format(self.source))
        for row in tqdm(self):
            callback(row)
        return True
|
[
"tqdm.tqdm",
"os.path.isdir",
"random.shuffle",
"os.walk",
"os.path.exists",
"numpy.mean",
"os.path.splitext",
"warnings.warn",
"scipy.misc.imread"
] |
[((1365, 1391), 'os.path.isdir', 'os.path.isdir', (['self.source'], {}), '(self.source)\n', (1378, 1391), False, 'import os\n'), ((2331, 2356), 'random.shuffle', 'random.shuffle', (['self.list'], {}), '(self.list)\n', (2345, 2356), False, 'import random\n'), ((3380, 3390), 'tqdm.tqdm', 'tqdm', (['self'], {}), '(self)\n', (3384, 3390), False, 'from tqdm import tqdm\n'), ((3600, 3626), 'numpy.mean', 'np.mean', (['self.mean'], {'axis': '(1)'}), '(self.mean, axis=1)\n', (3607, 3626), True, 'import numpy as np\n'), ((4165, 4175), 'tqdm.tqdm', 'tqdm', (['self'], {}), '(self)\n', (4169, 4175), False, 'from tqdm import tqdm\n'), ((3409, 3420), 'scipy.misc.imread', 'imread', (['row'], {}), '(row)\n', (3415, 3420), False, 'from scipy.misc import imread\n'), ((4036, 4072), 'warnings.warn', 'warnings.warn', (['"""Not callable object"""'], {}), "('Not callable object')\n", (4049, 4072), False, 'import warnings\n'), ((1529, 1556), 'os.path.exists', 'os.path.exists', (['self.source'], {}), '(self.source)\n', (1543, 1556), False, 'import os\n'), ((1914, 1933), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1930, 1933), False, 'import os\n'), ((3453, 3472), 'numpy.mean', 'np.mean', (['im[..., 0]'], {}), '(im[..., 0])\n', (3460, 3472), True, 'import numpy as np\n'), ((3506, 3525), 'numpy.mean', 'np.mean', (['im[..., 1]'], {}), '(im[..., 1])\n', (3513, 3525), True, 'import numpy as np\n'), ((3559, 3578), 'numpy.mean', 'np.mean', (['im[..., 2]'], {}), '(im[..., 2])\n', (3566, 3578), True, 'import numpy as np\n'), ((1951, 1963), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (1958, 1963), False, 'import os\n')]
|
from Source import ModelsIO as MIO
import numpy as np
from h5py import File
def E_fit(_cube, data, seg, noise):
    """Reduced chi-squared of every model in the cube against one object.

    Args:
        _cube: model cube, shape (n_i, n_j, n_k, ny, nx) -- e.g. (10, 13, 21, 128, 128)
        data: object image, shape (ny, nx)
        seg: segmentation mask, shape (ny, nx)
        noise: noise (rms) map, shape (ny, nx)

    Returns:
        chi cube of shape (n_i, n_j, n_k): per-model chi-squared summed over
        the segmented pixels and normalised by the segment area.

    Note: the original signature annotated every parameter with
    np.ndarray((10, 13, 21, 128, 128), '>f4') etc. -- annotations are
    evaluated when the def executes, so importing the module allocated
    ~180 MB of throwaway arrays. Shapes are documented here instead.
    """
    flux_models = np.einsum("ijkxy,xy->ijk", _cube, seg)
    flux_data = np.einsum("xy,xy", data, seg)
    # per-model scale factor matching each model's segmented flux to the data
    X = flux_data / flux_models
    scaled_models = X[:, :, :, np.newaxis, np.newaxis] * _cube
    resta = data - scaled_models
    residuo = (resta ** 2) / (scaled_models + noise ** 2)
    chi = np.einsum("ijkxy,xy->ijk", residuo, seg)
    area = seg.sum()
    return chi / area
def read_obj_h5(name):
    """Read the 'obj', 'seg' and 'rms' datasets from an HDF5 object file.

    Returns (data, seg, rms), or (False, False, False) when the file
    cannot be opened.
    """
    try:
        with File(name, 'r') as f:
            return f['obj'][:], f['seg'][:], f['rms'][:]
    except IOError:
        print("{} not found".format(name))
        return False, False, False
# se necesita esta funcion??
def read_obj(name):
    """Load an object image, segmentation mask and noise level from FITS files.

    Returns (data, seg, noise) where noise is the median of the rms map;
    returns (False, False, False) when any of the files is missing.
    """
    try:
        data = MIO.fits.open(name)[1].data
        rms = MIO.fits.open(name.replace('objs', 'noise'))[1].data
        seg_name = name.replace('object', "segment").replace("objs", "segs")
        seg = MIO.fits.open(seg_name)[1].data
    except IOError:
        print("{} not found".format(name))
        return False, False, False
    return data, seg, np.median(rms)
def feed(name, cube):
    """
    Run one object (HDF5 file `name`) through the fit against `cube`.

    Returns the chi cube from E_fit, or False when the object file is
    missing.
    """
    data, seg, rms = read_obj_h5(name)
    if data is False:
        return False
    return E_fit(cube, data, seg, noise=rms)
def save_chi(name, cube):
    """
    Write a chi cube to a FITS image extension, overwriting any existing file.

    Parameters
        name : str of output file
        cube : crunch.feed output
    """
    hdu = MIO.fits.ImageHDU(data=cube)
    hdu.writeto(name, overwrite=True)
    return True
def get_cube(name):
    """Load the model cube and reorder it to shape (10, 13, 21, 128, 128)."""
    raw = MIO.ModelsCube(name)
    reshaped = raw.data.reshape((10, 13, 128, 21, 128))
    return np.swapaxes(reshaped, 2, 3)
def chi_index(chi_name):
    """
    Parameters
    ----------
    chi_name : chi_cube fits filename.

    Returns
    -------
    tuple (i, j, k) of the index which minimizes the residuals.
    """
    chi_cube = MIO.fits.open(chi_name)
    flat_argmin = np.argmin(chi_cube[1].data)
    return np.unravel_index(flat_argmin, shape=(10, 13, 21))
def pond_rad_like(chi_name, logh):
    """Weighted mean log-radius and its variance along the best-fit (i, j) slice.

    Weights are exp(chi) and the sums divide by them, so low-chi radii
    contribute most.
    """
    i, j, k = chi_index(chi_name)
    chi_cubo = MIO.fits.open(chi_name)[1].data
    weights = np.e ** (chi_cubo[i, j, :])
    norm = np.sum(1. / weights)
    r_weight = sum((10 ** logh[r]) / weights[r] for r in range(21))
    r_chi = np.log10(r_weight / norm)
    r_var = sum(((logh[r] - r_chi) ** 2) / weights[r] for r in range(21)) / norm
    return r_chi, r_var
def pond_rad(chi_name, logh):
    """Chi-weighted mean log-radius and variance along the best-fit (i, j) slice.

    Same structure as pond_rad_like but weights by the raw chi values
    instead of exp(chi).
    """
    i, j, k = chi_index(chi_name)
    chi_cubo = MIO.fits.open(chi_name)[1].data
    weights = chi_cubo[i, j, :]
    norm = np.sum(1. / weights)
    r_weight = sum((10 ** logh[r]) / weights[r] for r in range(21))
    r_chi = np.log10(r_weight / norm)
    r_var = sum(((logh[r] - r_chi) ** 2) / weights[r] for r in range(21)) / norm
    return r_chi, r_var
def pond_rad_3d(chi_name, logh):
    """Weighted mean log-radius over the entire (10, 13, 21) chi cube.

    The mean weights by sqrt(chi); the variance weights by chi itself,
    mirroring the original implementation.
    """
    chi_cubo = MIO.fits.open(chi_name)[1].data
    sqrt_chi = np.sqrt(chi_cubo)
    r_weight = sum((10 ** logh[r]) / sqrt_chi[e, t, r]
                   for e in range(10) for t in range(13) for r in range(21))
    r_chi = np.log10(r_weight / np.sum(1. / sqrt_chi))
    r_var = sum(((logh[r] - r_chi) ** 2) / chi_cubo[e, t, r]
                for e in range(10) for t in range(13) for r in range(21))
    r_var = r_var / np.sum(1. / chi_cubo)
    return r_chi, r_var
def make_mosaic(obj, chi, cube):
    """
    Build and write a (data | segment | model | residual) mosaic.

    Parameters
    ----------
    obj : str
        Object FITS filename, read through read_obj.
    chi : str
        chi_cube filename; its minimum selects the model from `cube`.
    cube : numpy array
        Model cube indexed as cube[i, j, k] (each model 128 x 128).

    Returns
    -------
    bool
        True after the mosaic FITS extension is written.
    """
    i, j, k = chi_index(chi)
    model = cube[i, j, k]
    gal, seg, noise = read_obj(obj)
    output = chi.replace('chi_cube', 'mosaic').replace('cut_object', 'mosaic')
    flux_gal = np.sum(gal * seg)
    flux_model = np.sum(model * seg)
    mosaic = np.zeros((128, 128 * 4))
    mosaic[:, 0:128] = gal
    mosaic[:, 128:256] = seg * (flux_gal / seg.sum())
    mosaic[:, 256:384] = model * (flux_gal / flux_model)
    mosaic[:, 384:] = gal - model * (flux_gal / flux_model)
    MIO.fits.ImageHDU(data=mosaic).writeto(output, overwrite=True)
    return True
def make_mosaic_h5(obj, chi, cube):
    """
    Build and write a (data | segment | model | residual) mosaic for an
    HDF5 object.

    Parameters
    ----------
    obj : str
        HDF5 object filename with 'obj' and 'seg' datasets.
    chi : str
        chi_cube filename; its minimum selects the model from `cube`.
    cube : numpy array
        Model cube indexed as cube[i, j, k] (each model 128 x 128).

    Returns
    -------
    bool
        True after the mosaic FITS extension is written.
    """
    i, j, k = chi_index(chi)
    model = cube[i, j, k]
    output = chi.replace('chi_cube', 'mosaic').replace('cut', 'mosaic')
    with File(obj, 'r') as f:
        gal = f['obj'][:]
        seg = f['seg'][:]
        flux_gal = np.sum(gal * seg)
        flux_model = np.sum(model * seg)
        mosaic = np.zeros((128, 128 * 4))
        mosaic[:, 0:128] = gal
        mosaic[:, 128:256] = seg * (flux_gal / seg.sum())
        mosaic[:, 256:384] = model * (flux_gal / flux_model)
        mosaic[:, 384:] = gal - model * (flux_gal / flux_model)
        MIO.fits.ImageHDU(data=mosaic).writeto(output, overwrite=True)
    return True
|
[
"h5py.File",
"Source.ModelsIO.fits.ImageHDU",
"numpy.sum",
"Source.ModelsIO.ModelsCube",
"numpy.median",
"numpy.einsum",
"numpy.float",
"numpy.zeros",
"Source.ModelsIO.fits.open",
"numpy.argmin",
"numpy.swapaxes",
"numpy.ndarray",
"numpy.sqrt"
] |
[((282, 313), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21)', '""">f4"""'], {}), "((10, 13, 21), '>f4')\n", (292, 313), True, 'import numpy as np\n'), ((335, 376), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21, 128, 128)', '""">f4"""'], {}), "((10, 13, 21, 128, 128), '>f4')\n", (345, 376), True, 'import numpy as np\n'), ((394, 425), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21)', '""">f4"""'], {}), "((10, 13, 21), '>f4')\n", (404, 425), True, 'import numpy as np\n'), ((441, 456), 'numpy.float', 'np.float', (['""">f4"""'], {}), "('>f4')\n", (449, 456), True, 'import numpy as np\n'), ((464, 495), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21)', '""">f4"""'], {}), "((10, 13, 21), '>f4')\n", (474, 495), True, 'import numpy as np\n'), ((507, 548), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21, 128, 128)', '""">f4"""'], {}), "((10, 13, 21, 128, 128), '>f4')\n", (517, 548), True, 'import numpy as np\n'), ((562, 603), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21, 128, 128)', '""">f4"""'], {}), "((10, 13, 21, 128, 128), '>f4')\n", (572, 603), True, 'import numpy as np\n'), ((613, 644), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21)', '""">f4"""'], {}), "((10, 13, 21), '>f4')\n", (623, 644), True, 'import numpy as np\n'), ((678, 716), 'numpy.einsum', 'np.einsum', (['"""ijkxy,xy->ijk"""', '_cube', 'seg'], {}), "('ijkxy,xy->ijk', _cube, seg)\n", (687, 716), True, 'import numpy as np\n'), ((733, 762), 'numpy.einsum', 'np.einsum', (['"""xy,xy"""', 'data', 'seg'], {}), "('xy,xy', data, seg)\n", (742, 762), True, 'import numpy as np\n'), ((959, 999), 'numpy.einsum', 'np.einsum', (['"""ijkxy,xy->ijk"""', 'residuo', 'seg'], {}), "('ijkxy,xy->ijk', residuo, seg)\n", (968, 999), True, 'import numpy as np\n'), ((1743, 1757), 'numpy.median', 'np.median', (['rms'], {}), '(rms)\n', (1752, 1757), True, 'import numpy as np\n'), ((2348, 2376), 'Source.ModelsIO.fits.ImageHDU', 'MIO.fits.ImageHDU', ([], {'data': 'cube'}), '(data=cube)\n', (2365, 2376), True, 'from Source import ModelsIO as 
MIO\n'), ((2467, 2487), 'Source.ModelsIO.ModelsCube', 'MIO.ModelsCube', (['name'], {}), '(name)\n', (2481, 2487), True, 'from Source import ModelsIO as MIO\n'), ((2552, 2575), 'numpy.swapaxes', 'np.swapaxes', (['cube', '(2)', '(3)'], {}), '(cube, 2, 3)\n', (2563, 2575), True, 'import numpy as np\n'), ((2844, 2867), 'Source.ModelsIO.fits.open', 'MIO.fits.open', (['chi_name'], {}), '(chi_name)\n', (2857, 2867), True, 'from Source import ModelsIO as MIO\n'), ((3986, 4003), 'numpy.sqrt', 'np.sqrt', (['chi_cubo'], {}), '(chi_cubo)\n', (3993, 4003), True, 'import numpy as np\n'), ((4944, 4961), 'numpy.sum', 'np.sum', (['(gal * seg)'], {}), '(gal * seg)\n', (4950, 4961), True, 'import numpy as np\n'), ((4972, 4991), 'numpy.sum', 'np.sum', (['(model * seg)'], {}), '(model * seg)\n', (4978, 4991), True, 'import numpy as np\n'), ((5002, 5026), 'numpy.zeros', 'np.zeros', (['(128, 128 * 4)'], {}), '((128, 128 * 4))\n', (5010, 5026), True, 'import numpy as np\n'), ((5191, 5218), 'Source.ModelsIO.fits.ImageHDU', 'MIO.fits.ImageHDU', ([], {'data': 'aux'}), '(data=aux)\n', (5208, 5218), True, 'from Source import ModelsIO as MIO\n'), ((95, 136), 'numpy.ndarray', 'np.ndarray', (['(10, 13, 21, 128, 128)', '""">f4"""'], {}), "((10, 13, 21, 128, 128), '>f4')\n", (105, 136), True, 'import numpy as np\n'), ((154, 183), 'numpy.ndarray', 'np.ndarray', (['(128, 128)', '""">f4"""'], {}), "((128, 128), '>f4')\n", (164, 183), True, 'import numpy as np\n'), ((200, 229), 'numpy.ndarray', 'np.ndarray', (['(128, 128)', '""">f4"""'], {}), "((128, 128), '>f4')\n", (210, 229), True, 'import numpy as np\n'), ((248, 277), 'numpy.ndarray', 'np.ndarray', (['(128, 128)', '""">f4"""'], {}), "((128, 128), '>f4')\n", (258, 277), True, 'import numpy as np\n'), ((2899, 2926), 'numpy.argmin', 'np.argmin', (['chi_cube[1].data'], {}), '(chi_cube[1].data)\n', (2908, 2926), True, 'import numpy as np\n'), ((3390, 3411), 'numpy.sum', 'np.sum', (['(1.0 / weights)'], {}), '(1.0 / weights)\n', (3396, 3411), True, 
'import numpy as np\n'), ((3844, 3865), 'numpy.sum', 'np.sum', (['(1.0 / weights)'], {}), '(1.0 / weights)\n', (3850, 3865), True, 'import numpy as np\n'), ((4419, 4441), 'numpy.sum', 'np.sum', (['(1.0 / chi_cubo)'], {}), '(1.0 / chi_cubo)\n', (4425, 4441), True, 'import numpy as np\n'), ((5712, 5726), 'h5py.File', 'File', (['obj', '"""r"""'], {}), "(obj, 'r')\n", (5716, 5726), False, 'from h5py import File\n'), ((5799, 5816), 'numpy.sum', 'np.sum', (['(gal * seg)'], {}), '(gal * seg)\n', (5805, 5816), True, 'import numpy as np\n'), ((5831, 5850), 'numpy.sum', 'np.sum', (['(model * seg)'], {}), '(model * seg)\n', (5837, 5850), True, 'import numpy as np\n'), ((5865, 5889), 'numpy.zeros', 'np.zeros', (['(128, 128 * 4)'], {}), '((128, 128 * 4))\n', (5873, 5889), True, 'import numpy as np\n'), ((6074, 6101), 'Source.ModelsIO.fits.ImageHDU', 'MIO.fits.ImageHDU', ([], {'data': 'aux'}), '(data=aux)\n', (6091, 6101), True, 'from Source import ModelsIO as MIO\n'), ((1120, 1135), 'h5py.File', 'File', (['name', '"""r"""'], {}), "(name, 'r')\n", (1124, 1135), False, 'from h5py import File\n'), ((3053, 3076), 'Source.ModelsIO.fits.open', 'MIO.fits.open', (['chi_name'], {}), '(chi_name)\n', (3066, 3076), True, 'from Source import ModelsIO as MIO\n'), ((3252, 3273), 'numpy.sum', 'np.sum', (['(1.0 / weights)'], {}), '(1.0 / weights)\n', (3258, 3273), True, 'import numpy as np\n'), ((3517, 3540), 'Source.ModelsIO.fits.open', 'MIO.fits.open', (['chi_name'], {}), '(chi_name)\n', (3530, 3540), True, 'from Source import ModelsIO as MIO\n'), ((3706, 3727), 'numpy.sum', 'np.sum', (['(1.0 / weights)'], {}), '(1.0 / weights)\n', (3712, 3727), True, 'import numpy as np\n'), ((3939, 3962), 'Source.ModelsIO.fits.open', 'MIO.fits.open', (['chi_name'], {}), '(chi_name)\n', (3952, 3962), True, 'from Source import ModelsIO as MIO\n'), ((4204, 4226), 'numpy.sum', 'np.sum', (['(1.0 / sqrt_chi)'], {}), '(1.0 / sqrt_chi)\n', (4210, 4226), True, 'import numpy as np\n'), ((1442, 1461), 
'Source.ModelsIO.fits.open', 'MIO.fits.open', (['name'], {}), '(name)\n', (1455, 1461), True, 'from Source import ModelsIO as MIO\n')]
|
import nltk
import os
import torch
import torch.utils.data as data
import numpy as np
import json
from .vocabulary import Vocabulary
from pycocotools.coco import COCO
from PIL import Image
from tqdm import tqdm
class CoCoDataset(data.Dataset):
    """MS-COCO dataset of images and tokenized captions.

    In 'train' mode __getitem__ yields (image_tensor, caption_tensor);
    in any other mode it yields (original_image_array, image_tensor).
    """
    def __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word,
        end_word, unk_word, annotations_file, vocab_from_file, img_folder):
        # transform: callable applied to each PIL image (e.g. torchvision transforms)
        self.transform = transform
        self.mode = mode
        self.batch_size = batch_size
        # Vocabulary maps tokens <-> integer ids (built or loaded from file)
        self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,
            end_word, unk_word, annotations_file, vocab_from_file)
        self.img_folder = img_folder
        if self.mode == 'train':
            self.coco = COCO(annotations_file)
            self.ids = list(self.coco.anns.keys())
            print('Obtaining caption lengths...')
            # tokenize every caption once so batches can later be grouped by length
            all_tokens = [nltk.tokenize.word_tokenize(str(self.coco.anns[self.ids[index]]['caption']).lower()) for index in tqdm(np.arange(len(self.ids)))]
            self.caption_lengths = [len(token) for token in all_tokens]
        else:
            # non-train mode only needs the image file names from the JSON
            test_info = json.loads(open(annotations_file).read())
            self.paths = [item['file_name'] for item in test_info['images']]
    def __getitem__(self, index):
        # obtain image and caption if in training mode
        if self.mode == 'train':
            ann_id = self.ids[index]
            caption = self.coco.anns[ann_id]['caption']
            img_id = self.coco.anns[ann_id]['image_id']
            path = self.coco.loadImgs(img_id)[0]['file_name']
            # Convert image to tensor and pre-process using transform
            image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')
            image = self.transform(image)
            # Convert caption to tensor of word ids, wrapped in start/end tokens.
            tokens = nltk.tokenize.word_tokenize(str(caption).lower())
            caption = []
            caption.append(self.vocab(self.vocab.start_word))
            caption.extend([self.vocab(token) for token in tokens])
            caption.append(self.vocab(self.vocab.end_word))
            caption = torch.Tensor(caption).long()
            # return pre-processed image and caption tensors
            return image, caption
        # obtain image if in test mode
        else:
            path = self.paths[index]
            # Convert image to tensor and pre-process using transform
            PIL_image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')
            orig_image = np.array(PIL_image)
            image = self.transform(PIL_image)
            # return original image and pre-processed image tensor
            return orig_image, image
    def get_train_indices(self):
        # Sample one caption length, then sample batch_size caption indices
        # that all share that length (so padded batches waste no space).
        sel_length = np.random.choice(self.caption_lengths)
        all_indices = np.where([self.caption_lengths[i] == sel_length for i in np.arange(len(self.caption_lengths))])[0]
        indices = list(np.random.choice(all_indices, size=self.batch_size))
        return indices
    def __len__(self):
        # number of captions in train mode, number of images otherwise
        if self.mode == 'train':
            return len(self.ids)
        else:
            return len(self.paths)
|
[
"pycocotools.coco.COCO",
"torch.Tensor",
"numpy.array",
"numpy.random.choice",
"os.path.join"
] |
[((2802, 2840), 'numpy.random.choice', 'np.random.choice', (['self.caption_lengths'], {}), '(self.caption_lengths)\n', (2818, 2840), True, 'import numpy as np\n'), ((776, 798), 'pycocotools.coco.COCO', 'COCO', (['annotations_file'], {}), '(annotations_file)\n', (780, 798), False, 'from pycocotools.coco import COCO\n'), ((2576, 2595), 'numpy.array', 'np.array', (['PIL_image'], {}), '(PIL_image)\n', (2584, 2595), True, 'import numpy as np\n'), ((2985, 3036), 'numpy.random.choice', 'np.random.choice', (['all_indices'], {'size': 'self.batch_size'}), '(all_indices, size=self.batch_size)\n', (3001, 3036), True, 'import numpy as np\n'), ((2177, 2198), 'torch.Tensor', 'torch.Tensor', (['caption'], {}), '(caption)\n', (2189, 2198), False, 'import torch\n'), ((1721, 1756), 'os.path.join', 'os.path.join', (['self.img_folder', 'path'], {}), '(self.img_folder, path)\n', (1733, 1756), False, 'import os\n'), ((2499, 2534), 'os.path.join', 'os.path.join', (['self.img_folder', 'path'], {}), '(self.img_folder, path)\n', (2511, 2534), False, 'import os\n')]
|
import os
import json
from six import iteritems
import h5py
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from vdgnn.dataset.readers import DenseAnnotationsReader, ImageFeaturesHdfReader
TRAIN_VAL_SPLIT = {'0.9': 80000, '1.0': 123287}
class VisDialDataset(Dataset):
    """Visual Dialog dataset (v0.9 / v1.0) for one split.

    Wraps the preprocessed VisDial artifacts (vocabulary JSON, question/
    answer/option HDF5, image-feature HDF5, dialog JSON, optional dense
    annotations) and serves per-image items: image features, tokenized
    question/answer/option tensors with their lengths, and dialog history.
    """

    def __init__(self, args, split, isTrain=True):
        r"""
        Initialize the dataset with split taken from ['train', 'val', 'test']
        We follow the protocal as specified in `https://arxiv.org/pdf/1611.08669.pdf`, namely
        For VisDial v1.0:
            train split:
                img_feat: train split
                dialog_data: trainval split (top 123287)
            val split:
                img_feat: val split
                dialog_data: trainval split (last 2064)
            test split:
                img_feat: test split
                dialog_data: test split
        For VisDial v0.9:
            train split:
                img_feat: train split
                dialog_data: trainval split (top 80000)
            val split (isTrain=True):
                img_feat: train split
                dialog_data: trainval split (last 2783)
            val split (isTrain=False):
                img_feat: val split
                dialog_data: val split
        """
        super(VisDialDataset, self).__init__()
        self.args = args
        self.__split = split
        self.__in_memory = args.in_memory
        self.__version = args.version
        self.isTrain = isTrain
        # v0.9 'val'-while-training reads from the *train* image features,
        # starting after the training portion (see TRAIN_VAL_SPLIT).
        if self.__split == 'val' and self.__version == '0.9' and self.isTrain:
            input_img_path = args.img_train
            img_split = 'train'
            self.img_start_idx = TRAIN_VAL_SPLIT[self.__version]
        else:
            input_img_path = getattr(args, 'img_%s' % split)
            img_split = self.__split
            self.img_start_idx = 0
        # Dialog data for 'val'-while-training also comes from the trainval
        # file, offset past the training portion.
        if self.__split == 'val' and self.isTrain:
            self.data_start_idx = TRAIN_VAL_SPLIT[self.__version]
            data_split = 'train'
        else:
            self.data_start_idx = 0
            data_split = self.__split
        self.input_img = os.path.join(args.dataroot, input_img_path)
        self.input_json = os.path.join(args.dataroot, args.visdial_params)
        self.input_ques = os.path.join(args.dataroot, args.visdial_data)
        self.input_dialog = os.path.join(
            args.dataroot, getattr(args, 'dialog_%s' % split))
        self.dense_annotations_jsonpath = os.path.join(
            args.dataroot, args.dense_annotations)
        self.num_data = getattr(args, 'num_%s' % split)
        self.use_img_id_idx = None
        # preprocessing split
        print("\nProcessing split [{}]...".format(self.__split))
        print("Dataloader loading json file: {}".format(self.input_json))
        with open(self.input_json, 'r') as info_file:
            info = json.load(info_file)
            # possible keys: {'ind2word', 'word2ind', 'unique_img_(split)'}
            for key, value in iteritems(info):
                setattr(self, key, value)
        # add <START> and <END> to vocabulary
        word_count = len(self.word2ind)
        self.word2ind['<START>'] = word_count + 1
        self.word2ind['<END>'] = word_count + 2
        self.start_token = self.word2ind['<START>']
        self.end_token = self.word2ind['<END>']
        # padding + <START> + <END> token
        self.vocab_size = word_count + 3
        print("Vocab size with <START>, <END>: {}".format(self.vocab_size))
        # construct reverse of word2ind after adding tokens
        # (bugfix: previously every index mapped to the constant
        # ``word_count`` instead of the word itself)
        self.ind2word = {
            int(ind): word
            for word, ind in iteritems(self.word2ind)
        }
        print("Dataloader loading image h5 file: {}".format(self.input_img))
        # Either img_feats or img_reader will be set.
        if self.__version == '0.9':
            # trainval image features
            with h5py.File(self.input_img, 'r') as img_hdf5:
                img_feats_h5 = img_hdf5.get('images_%s' % img_split)
                self.num_data_points = len(img_feats_h5) - self.img_start_idx
            self.img_reader = None
            if self.__split == 'train':
                self.num_data_points = min(self.num_data_points, TRAIN_VAL_SPLIT[self.__version])
        else:
            # split image features
            self.use_img_id_idx = True
            self.img_reader = ImageFeaturesHdfReader(
                self.input_img, in_memory=self.__in_memory)
            self.num_data_points = len(self.img_reader)
        if self.num_data is not None:
            self.num_data_points = min(self.num_data, self.num_data_points)
        self.img_end_idx = self.img_start_idx + self.num_data_points
        self.data_end_idx = self.data_start_idx + self.num_data_points
        if self.img_reader is None:
            # v0.9: load the feature slice for this split eagerly into memory.
            with h5py.File(self.input_img, 'r') as img_hdf5:
                img_feats_h5 = img_hdf5.get('images_%s' % img_split)
                self.img_feats = torch.from_numpy(
                    np.array(img_feats_h5[self.img_start_idx:self.img_end_idx]))
        if 'val' == self.__split and os.path.exists(self.dense_annotations_jsonpath):
            self.use_img_id_idx = True
            self.annotations_reader = DenseAnnotationsReader(
                self.dense_annotations_jsonpath)
        else:
            self.annotations_reader = None
        if self.use_img_id_idx:
            # Build dataset-index -> image-id mapping from the dialog json.
            print('Loading input dialog json: {}'.format(self.input_dialog))
            with open(self.input_dialog, 'r') as dialog_json:
                visdial_data = json.load(dialog_json)
            self.idx2imgid = [dialog_for_image['image_id']
                              for dialog_for_image in visdial_data['data']['dialogs']]
        print("Dataloader loading h5 file: {}".format(self.input_ques))
        ques_file = h5py.File(self.input_ques, 'r')
        # load all data mats from ques_file into this
        self.data = {}
        self.img_norm = args.img_norm
        img_fnames = getattr(self, 'unique_img_' + data_split)
        self.data[self.__split + '_img_fnames'] = img_fnames[self.data_start_idx:self.data_end_idx]
        # map from load to save labels
        io_map = {
            'ques_{}': '{}_ques',
            'ques_length_{}': '{}_ques_len',
            'ans_{}': '{}_ans',
            'ans_length_{}': '{}_ans_len',
            'img_pos_{}': '{}_img_pos',
            'cap_{}': '{}_cap',
            'cap_length_{}': '{}_cap_len',
            'opt_{}': '{}_opt',
            'opt_length_{}': '{}_opt_len',
            'opt_list_{}': '{}_opt_list',
            'num_rounds_{}': '{}_num_rounds',
            'ans_index_{}': '{}_ans_ind'
        }
        # read the question, answer, option related information
        for load_label, save_label in iteritems(io_map):
            label = load_label.format(data_split)
            if load_label.format(data_split) not in ques_file:
                continue
            if label.startswith('opt_list') or label.startswith('opt_length'):
                # Option lists are shared tables; v1.0 val stores them under
                # the 'test' label, and they are never index-sliced.
                if self.__version == '1.0' and self.__split == 'val':
                    label = load_label.format('test')
                self.data[save_label.format(self.__split)] = torch.from_numpy(
                    np.array(ques_file[label], dtype='int64'))
            else:
                self.data[save_label.format(self.__split)] = torch.from_numpy(
                    np.array(ques_file[label][self.data_start_idx:self.data_end_idx], dtype='int64'))
        ques_file.close()
        # record some stats, will be transferred to encoder/decoder later
        # assume similar stats across multiple data subsets
        # maximum number of questions per image, ideally 10
        self.max_ques_count = self.data[self.__split + '_ques'].size(1)
        # maximum length of question
        self.max_ques_len = self.data[self.__split + '_ques'].size(2)
        # maximum length of answer
        self.max_ans_len = self.data[self.__split + '_ans'].size(2)
        print("[{0}] no. of data points: {1}".format(
            self.__split, self.num_data_points))
        print("\tMax no. of rounds: {}".format(self.max_ques_count))
        print("\tMax ques len: {}".format(self.max_ques_len))
        print("\tMax ans len: {}".format(self.max_ans_len))
        # prepare history
        self._process_history(self.__split)
        # 1 indexed to 0 indexed
        self.data[self.__split + '_opt'] -= 1
        if self.__split + '_ans_ind' in self.data:
            self.data[self.__split + '_ans_ind'] -= 1

    @property
    def split(self):
        return self.__split

    # ------------------------------------------------------------------------
    # methods to override - __len__ and __getitem__ methods
    # ------------------------------------------------------------------------
    def __len__(self):
        return self.num_data_points

    def __getitem__(self, idx):
        """Return one training/evaluation item as a dict of tensors."""
        dtype = self.__split
        item = {'index': idx}
        item['num_rounds'] = self.data[dtype + '_num_rounds'][idx]
        # get image features
        if self.use_img_id_idx:
            image_id = self.idx2imgid[idx]
            # bugfix: assignment moved inside the guard -- when
            # use_img_id_idx is unset (v0.9), ``image_id`` is never bound
            # and the unconditional assignment raised NameError.
            item['image_id'] = torch.tensor(image_id).long()
        if self.img_reader is None:
            img_feats = self.img_feats[idx]
        else:
            img_feats = torch.tensor(self.img_reader[image_id])
        if self.img_norm:
            # L2-normalize the feature vector.
            img_feats = F.normalize(img_feats, dim=0, p=2)
        item['img_feat'] = img_feats
        item['img_fnames'] = self.data[dtype + '_img_fnames'][idx]
        # get question tokens
        item['ques'] = self.data[dtype + '_ques'][idx]
        item['ques_len'] = self.data[dtype + '_ques_len'][idx]
        # get history tokens
        item['hist_len'] = self.data[dtype + '_hist_len'][idx]
        item['hist'] = self.data[dtype + '_hist'][idx]
        # get caption tokens
        item['cap'] = self.data[dtype + '_cap'][idx]
        item['cap_len'] = self.data[dtype + '_cap_len'][idx]
        # get answer tokens
        item['ans'] = self.data[dtype + '_ans'][idx]
        item['ans_len'] = self.data[dtype + '_ans_len'][idx]
        # get options tokens: gather each round's candidate answers from the
        # shared option-list table.
        opt_inds = self.data[dtype + '_opt'][idx]
        opt_size = list(opt_inds.size())
        new_size = torch.Size(opt_size + [-1])
        ind_vector = opt_inds.view(-1)
        option_in = self.data[dtype + '_opt_list'].index_select(0, ind_vector)
        option_in = option_in.view(new_size)
        opt_len = self.data[dtype + '_opt_len'].index_select(0, ind_vector)
        opt_len = opt_len.view(opt_size)
        item['opt'] = option_in
        item['opt_len'] = opt_len
        if dtype != 'test':
            ans_ind = self.data[dtype + '_ans_ind'][idx]
            item['ans_ind'] = ans_ind.view(-1)
        if dtype == 'val' and self.annotations_reader is not None:
            # annotations_reader implies use_img_id_idx, so image_id is bound.
            dense_annotations = self.annotations_reader[image_id]
            item['gt_relevance'] = torch.tensor(
                dense_annotations["gt_relevance"]).float()
            item['round_id'] = torch.tensor(
                dense_annotations['round_id']).long()
        # convert zero length sequences to one length
        # this is for handling empty rounds of v1.0 test, they will be dropped anyway
        if dtype == 'test':
            item['ques_len'][item['ques_len'] == 0] += 1
            item['opt_len'][item['opt_len'] == 0] += 1
            item['hist_len'][item['hist_len'] == 0] += 1
        return item

    # -------------------------------------------------------------------------
    # collate function utilized by dataloader for batching
    # -------------------------------------------------------------------------
    def collate_fn(self, batch):
        """Stack a list of items into batched tensors, trimming padding."""
        dtype = self.__split
        merged_batch = {key: [d[key] for d in batch] for key in batch[0]}
        out = {}
        for key in merged_batch:
            if key in {'index', 'num_rounds', 'img_fnames'}:
                out[key] = merged_batch[key]
            elif key in {'cap_len'}:
                out[key] = torch.Tensor(merged_batch[key]).long()
            else:
                out[key] = torch.stack(merged_batch[key], 0)
        # Dynamic shaping of padded batch
        out['hist'] = out['hist'][:, :, :torch.max(out['hist_len'])].contiguous()
        out['ques'] = out['ques'][:, :, :torch.max(out['ques_len'])].contiguous()
        out['ans'] = out['ans'][:, :, :torch.max(out['ans_len'])].contiguous()
        out['cap'] = out['cap'][:, :torch.max(out['cap_len'])].contiguous()
        out['opt'] = out['opt'][:, :, :, :torch.max(out['opt_len'])].contiguous()
        batch_keys = ['num_rounds', 'img_feat', 'img_fnames', 'hist', 'hist_len', 'ques', 'ques_len',
                      'ans', 'ans_len', 'cap', 'cap_len', 'opt', 'opt_len']
        if dtype != 'test':
            batch_keys.append('ans_ind')
        if dtype == 'val' and self.annotations_reader is not None:
            batch_keys.append('gt_relevance')
            batch_keys.append('round_id')
        return {key: out[key] for key in batch_keys}

    # -------------------------------------------------------------------------
    # preprocessing functions
    # -------------------------------------------------------------------------
    def _process_history(self, dtype):
        """
        Process caption as well as history. Optionally, concatenate history
        for lf-encoder.
        """
        captions = self.data[dtype + '_cap']
        questions = self.data[dtype + '_ques']
        ques_len = self.data[dtype + '_ques_len']
        cap_len = self.data[dtype + '_cap_len']
        max_ques_len = questions.size(2)
        answers = self.data[dtype + '_ans']
        ans_len = self.data[dtype + '_ans_len']
        num_convs, num_rounds, max_ans_len = answers.size()
        if self.args.concat_history:
            # Cap concatenated history at 300 tokens.
            self.max_hist_len = min(
                num_rounds * (max_ques_len + max_ans_len), 300)
            history = torch.zeros(num_convs, num_rounds,
                                  self.max_hist_len).long()
        else:
            history = torch.zeros(num_convs, num_rounds,
                                  max_ques_len + max_ans_len).long()
        hist_len = torch.zeros(num_convs, num_rounds).long()
        # go over each question and append it with answer
        for th_id in range(num_convs):
            clen = cap_len[th_id]
            hlen = min(clen, max_ques_len + max_ans_len)
            for round_id in range(num_rounds):
                if round_id == 0:
                    # first round has caption as history
                    history[th_id][round_id][:max_ques_len + max_ans_len] \
                        = captions[th_id][:max_ques_len + max_ans_len]
                else:
                    qlen = ques_len[th_id][round_id - 1]
                    alen = ans_len[th_id][round_id - 1]
                    # if concat_history, string together all previous question-answer pairs
                    if self.args.concat_history:
                        history[th_id][round_id][:hlen] = history[th_id][round_id - 1][:hlen]
                        history[th_id][round_id][hlen] = self.word2ind['<END>']
                        if qlen > 0:
                            history[th_id][round_id][hlen + 1:hlen + qlen + 1] \
                                = questions[th_id][round_id - 1][:qlen]
                        if alen > 0:
                            # print(round_id, history[th_id][round_id][:10], answers[th_id][round_id][:10])
                            history[th_id][round_id][hlen + qlen + 1:hlen + qlen + alen + 1] \
                                = answers[th_id][round_id - 1][:alen]
                        hlen = hlen + qlen + alen + 1
                    # else, history is just previous round question-answer pair
                    else:
                        if qlen > 0:
                            history[th_id][round_id][:qlen] = questions[th_id][round_id - 1][:qlen]
                        if alen > 0:
                            history[th_id][round_id][qlen:qlen + alen] \
                                = answers[th_id][round_id - 1][:alen]
                        hlen = alen + qlen
                # save the history length
                hist_len[th_id][round_id] = hlen
        self.data[dtype + '_hist'] = history
        self.data[dtype + '_hist_len'] = hist_len
|
[
"h5py.File",
"json.load",
"torch.stack",
"os.path.exists",
"vdgnn.dataset.readers.DenseAnnotationsReader",
"torch.zeros",
"vdgnn.dataset.readers.ImageFeaturesHdfReader",
"torch.Tensor",
"numpy.array",
"torch.max",
"torch.Size",
"torch.nn.functional.normalize",
"six.iteritems",
"os.path.join",
"torch.tensor"
] |
[((2280, 2323), 'os.path.join', 'os.path.join', (['args.dataroot', 'input_img_path'], {}), '(args.dataroot, input_img_path)\n', (2292, 2323), False, 'import os\n'), ((2350, 2398), 'os.path.join', 'os.path.join', (['args.dataroot', 'args.visdial_params'], {}), '(args.dataroot, args.visdial_params)\n', (2362, 2398), False, 'import os\n'), ((2425, 2471), 'os.path.join', 'os.path.join', (['args.dataroot', 'args.visdial_data'], {}), '(args.dataroot, args.visdial_data)\n', (2437, 2471), False, 'import os\n'), ((2619, 2670), 'os.path.join', 'os.path.join', (['args.dataroot', 'args.dense_annotations'], {}), '(args.dataroot, args.dense_annotations)\n', (2631, 2670), False, 'import os\n'), ((6029, 6060), 'h5py.File', 'h5py.File', (['self.input_ques', '"""r"""'], {}), "(self.input_ques, 'r')\n", (6038, 6060), False, 'import h5py\n'), ((6986, 7003), 'six.iteritems', 'iteritems', (['io_map'], {}), '(io_map)\n', (6995, 7003), False, 'from six import iteritems\n'), ((10476, 10503), 'torch.Size', 'torch.Size', (['(opt_size + [-1])'], {}), '(opt_size + [-1])\n', (10486, 10503), False, 'import torch\n'), ((3027, 3047), 'json.load', 'json.load', (['info_file'], {}), '(info_file)\n', (3036, 3047), False, 'import json\n'), ((3154, 3169), 'six.iteritems', 'iteritems', (['info'], {}), '(info)\n', (3163, 3169), False, 'from six import iteritems\n'), ((4559, 4625), 'vdgnn.dataset.readers.ImageFeaturesHdfReader', 'ImageFeaturesHdfReader', (['self.input_img'], {'in_memory': 'self.__in_memory'}), '(self.input_img, in_memory=self.__in_memory)\n', (4581, 4625), False, 'from vdgnn.dataset.readers import DenseAnnotationsReader, ImageFeaturesHdfReader\n'), ((5300, 5347), 'os.path.exists', 'os.path.exists', (['self.dense_annotations_jsonpath'], {}), '(self.dense_annotations_jsonpath)\n', (5314, 5347), False, 'import os\n'), ((5426, 5481), 'vdgnn.dataset.readers.DenseAnnotationsReader', 'DenseAnnotationsReader', (['self.dense_annotations_jsonpath'], {}), '(self.dense_annotations_jsonpath)\n', (5448, 
5481), False, 'from vdgnn.dataset.readers import DenseAnnotationsReader, ImageFeaturesHdfReader\n'), ((9523, 9562), 'torch.tensor', 'torch.tensor', (['self.img_reader[image_id]'], {}), '(self.img_reader[image_id])\n', (9535, 9562), False, 'import torch\n'), ((9613, 9647), 'torch.nn.functional.normalize', 'F.normalize', (['img_feats'], {'dim': '(0)', 'p': '(2)'}), '(img_feats, dim=0, p=2)\n', (9624, 9647), True, 'import torch.nn.functional as F\n'), ((3807, 3831), 'six.iteritems', 'iteritems', (['self.word2ind'], {}), '(self.word2ind)\n', (3816, 3831), False, 'from six import iteritems\n'), ((4065, 4095), 'h5py.File', 'h5py.File', (['self.input_img', '"""r"""'], {}), "(self.input_img, 'r')\n", (4074, 4095), False, 'import h5py\n'), ((5009, 5039), 'h5py.File', 'h5py.File', (['self.input_img', '"""r"""'], {}), "(self.input_img, 'r')\n", (5018, 5039), False, 'import h5py\n'), ((5759, 5781), 'json.load', 'json.load', (['dialog_json'], {}), '(dialog_json)\n', (5768, 5781), False, 'import json\n'), ((14434, 14468), 'torch.zeros', 'torch.zeros', (['num_convs', 'num_rounds'], {}), '(num_convs, num_rounds)\n', (14445, 14468), False, 'import torch\n'), ((5193, 5252), 'numpy.array', 'np.array', (['img_feats_h5[self.img_start_idx:self.img_end_idx]'], {}), '(img_feats_h5[self.img_start_idx:self.img_end_idx])\n', (5201, 5252), True, 'import numpy as np\n'), ((7445, 7486), 'numpy.array', 'np.array', (['ques_file[label]'], {'dtype': '"""int64"""'}), "(ques_file[label], dtype='int64')\n", (7453, 7486), True, 'import numpy as np\n'), ((7601, 7686), 'numpy.array', 'np.array', (['ques_file[label][self.data_start_idx:self.data_end_idx]'], {'dtype': '"""int64"""'}), "(ques_file[label][self.data_start_idx:self.data_end_idx], dtype='int64'\n )\n", (7609, 7686), True, 'import numpy as np\n'), ((9375, 9397), 'torch.tensor', 'torch.tensor', (['image_id'], {}), '(image_id)\n', (9387, 9397), False, 'import torch\n'), ((11153, 11200), 'torch.tensor', 'torch.tensor', 
(["dense_annotations['gt_relevance']"], {}), "(dense_annotations['gt_relevance'])\n", (11165, 11200), False, 'import torch\n'), ((11257, 11300), 'torch.tensor', 'torch.tensor', (["dense_annotations['round_id']"], {}), "(dense_annotations['round_id'])\n", (11269, 11300), False, 'import torch\n'), ((12344, 12377), 'torch.stack', 'torch.stack', (['merged_batch[key]', '(0)'], {}), '(merged_batch[key], 0)\n', (12355, 12377), False, 'import torch\n'), ((14180, 14233), 'torch.zeros', 'torch.zeros', (['num_convs', 'num_rounds', 'self.max_hist_len'], {}), '(num_convs, num_rounds, self.max_hist_len)\n', (14191, 14233), False, 'import torch\n'), ((14311, 14373), 'torch.zeros', 'torch.zeros', (['num_convs', 'num_rounds', '(max_ques_len + max_ans_len)'], {}), '(num_convs, num_rounds, max_ques_len + max_ans_len)\n', (14322, 14373), False, 'import torch\n'), ((12260, 12291), 'torch.Tensor', 'torch.Tensor', (['merged_batch[key]'], {}), '(merged_batch[key])\n', (12272, 12291), False, 'import torch\n'), ((12462, 12488), 'torch.max', 'torch.max', (["out['hist_len']"], {}), "(out['hist_len'])\n", (12471, 12488), False, 'import torch\n'), ((12544, 12570), 'torch.max', 'torch.max', (["out['ques_len']"], {}), "(out['ques_len'])\n", (12553, 12570), False, 'import torch\n'), ((12624, 12649), 'torch.max', 'torch.max', (["out['ans_len']"], {}), "(out['ans_len'])\n", (12633, 12649), False, 'import torch\n'), ((12700, 12725), 'torch.max', 'torch.max', (["out['cap_len']"], {}), "(out['cap_len'])\n", (12709, 12725), False, 'import torch\n'), ((12783, 12808), 'torch.max', 'torch.max', (["out['opt_len']"], {}), "(out['opt_len'])\n", (12792, 12808), False, 'import torch\n')]
|
import matplotlib
matplotlib.use('TkAgg') # noqa
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.cm as cm
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import cmocean
import numpy as np
import os
import ast
import pickle
import pandas as pd
from collections import defaultdict
from oggm import workflow, cfg, tasks, utils
from oggm.core.flowline import FileModel
from oggm.graphics import plot_centerlines
from relic.postprocessing import (mae_weighted, optimize_cov, calc_coverage,
get_ensemble_length, get_rcp_ensemble_length)
from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT
def paramplots(df, glid, pout, y_len=None):
    """Plot glacier length-change sensitivity to each calibration parameter.

    Builds a three-panel figure (precip scaling factor, MB bias, Glen A
    factor). Each panel shows observed length change plus the model runs
    where the *other two* parameters sit at the OGGM HISTALP defaults,
    colored by the varied parameter's value. Saves
    ``calibration_<glid>.png`` into ``pout``.

    df      : DataFrame of length-change series; one column per run (column
              name is a dict-literal fragment of its parameters) plus 'obs'.
    glid    : glacier id string, possibly with a merge suffix after '_'.
    pout    : output directory for the figure.
    y_len   : rolling-mean window in years.
              NOTE(review): the default None would fail inside .rolling();
              callers appear to always pass a value -- confirm.
    """
    # take care of merged glaciers
    rgi_id = glid.split('_')[0]
    fig1, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=[20, 7])
    allvars = ['prcp_scaling_factor', 'mbbias', 'glena_factor']
    # Discrete value grids explored for each parameter.
    varcols = {'mbbias': np.array([-1400, -1200, -1000, -800, -600, -400, -200,
                                   -100, 0, 100, 200, 400, 600, 800, 1000]),
               'prcp_scaling_factor': np.arange(0.5, 4.1, 0.25),
               'glena_factor': np.arange(1, 4.1, 0.5)}
    for var, ax in zip(allvars, [ax1, ax2, ax3]):
        notvars = allvars.copy()
        notvars.remove(var)
        # lets use OGGM HISTALP default
        papar = {'glena_factor': 1.0, 'mbbias': 0, 'prcp_scaling_factor': 1.75}
        # store specific runs
        dfvar = pd.DataFrame([], columns=varcols[var], index=df.index)
        # OGGM standard
        for run in df.columns:
            if run == 'obs':
                continue
            # Column names are dict-literal fragments; rebuild the dict.
            para = ast.literal_eval('{' + run + '}')
            # Keep only runs where the two non-varied parameters are at
            # their default values.
            if ((np.isclose(para[notvars[0]],
                            papar[notvars[0]], atol=0.01)) and
                (np.isclose(para[notvars[1]],
                            papar[notvars[1]], atol=0.01))):
                dfvar.loc[:, para[var]] = df.loc[:, run]
        if var == 'prcp_scaling_factor':
            lbl = 'Precip scaling factor'
            cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
                cmocean.cm.deep))
            normalize = mcolors.Normalize(vmin=0,
                                          vmax=4.5)
            bounds = np.arange(0.375, 4.2, 0.25)
            cbarticks = np.arange(1, 4.1, 1)
        elif var == 'glena_factor':
            lbl = 'Glen A factor'
            cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
                cmocean.cm.matter))
            normalize = mcolors.Normalize(vmin=0,
                                          vmax=4.5)
            bounds = np.arange(0.75, 4.3, 0.5)
            cbarticks = np.arange(1, 4.1, 1)
        elif var == 'mbbias':
            cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
                cmocean.cm.balance))
            cmaplist = [cmap(i) for i in range(cmap.N)]
            # Re-color the center bin (zero bias) to a distinct green.
            cmaplist[128] = (0.412, 0.847, 0.655, 1.0)
            cmap = mcolors.LinearSegmentedColormap.from_list('mcm', cmaplist,
                                                             cmap.N)
            cbarticks = np.array([-1400, -1000, -600, -200,
                                  0, 200, 600, 1000])
            bounds = np.array([-1500, -1300, -1100, -900, -700, -500, -300,
                               -150, -50, 50, 100, 300, 500, 700, 900, 1100])
            normalize = mcolors.Normalize(vmin=-1600,
                                          vmax=1600)
            lbl = 'MB bias [mm w.e.]'
        colors = [cmap(normalize(n)) for n in varcols[var]]
        scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap)
        cbaxes = inset_axes(ax, width="3%", height="40%", loc=3)
        cbar = plt.colorbar(scalarmappaple, cax=cbaxes,
                            label=lbl,
                            boundaries=bounds)
        cbar.set_ticks(cbarticks)
        cbaxes.tick_params(axis='both', which='major', labelsize=16)
        cbar.set_label(label=lbl, size=16)
        # plot observations
        df.loc[:, 'obs'].rolling(1, min_periods=1).mean(). \
            plot(ax=ax, color='k', style='.',
                 marker='o', label='Observed length change',
                 markersize=6)
        dfvar = dfvar.sort_index(axis=1)
        # default parameter column
        dc = np.where(dfvar.columns == papar[var])[0][0]
        dfvar.loc[:, varcols[var][dc]].rolling(y_len, center=True).mean(). \
            plot(ax=ax, color=colors[dc], linewidth=5,
                 label='{}: {} (OGGM default)'.
                 format(lbl, str(varcols[var][dc])))
        # all parameters
        nolbl = ['' for i in np.arange(len(dfvar.columns))]
        dfvar.columns = nolbl
        dfvar.rolling(y_len, center=True).mean().plot(ax=ax, color=colors,
                                                      linewidth=2)
        ax.set_xlabel('Year', fontsize=26)
        ax.set_xlim([1850, 2010])
        ax.set_ylim([-4000, 2000])
        ax.tick_params(axis='both', which='major', labelsize=22)
        if not ax == ax1:
            ax.set_yticklabels([])
        ax.grid(True)
        ax.set_xticks(np.arange(1880, 2010, 40))
        ax.legend(fontsize=16, loc=2)
    ax1.set_ylabel('relative length change [m]', fontsize=26)
    name = name_plus_id(rgi_id)
    fig1.suptitle('%s' % name, fontsize=28)
    fig1.subplots_adjust(left=0.09, right=0.99, bottom=0.12, top=0.89,
                         wspace=0.05)
    fn1 = os.path.join(pout, 'calibration_%s.png' % glid)
    fig1.savefig(fn1)
def past_simulation_and_params(glcdict, pout, y_len=5):
    """Plot HISTALP ensemble hindcasts and their calibration parameters.

    For every glacier in ``glcdict``, draws (left) the observed length
    change, the OGGM default-parameter run, the minimum-wMAE run and the
    optimized ensemble mean +/- one std; and (right) a parallel-coordinates
    view of the ensemble members' calibration parameters. Saves
    ``histalp_<glid>.png`` and pickles the selected run names to
    ``runs_<glid>.p`` in ``pout``.

    glcdict : mapping glacier-id -> DataFrame of runs (columns are
              dict-literal parameter fragments) plus an 'obs' column.
    pout    : output directory.
    y_len   : rolling-mean window in years (default 5).
    """
    for glid, df in glcdict.items():
        # take care of merged glaciers
        rgi_id = glid.split('_')[0]
        fig = plt.figure(figsize=[20, 7])
        gs = GridSpec(1, 4)  # 1 rows, 4 columns
        ax1 = fig.add_subplot(gs[0, 0:3])
        ax2 = fig.add_subplot(gs[0, 3])
        df.loc[:, 'obs'].plot(ax=ax1, color='k', marker='o',
                              label='Observations')
        # OGGM default parameter run: psf=1.75, mbbias=0, glena=1.
        # bugfix: initialize so the pickle below cannot hit a NameError
        # when no run matches the default parameters.
        oggmdefault = None
        for run in df.columns:
            if run == 'obs':
                continue
            para = ast.literal_eval('{' + run + '}')
            if ((np.abs(para['prcp_scaling_factor'] - 1.75) < 0.01) and
                    (para['mbbias'] == 0) and
                    (para['glena_factor'] == 1)):
                df.loc[:, run].rolling(y_len, center=True). \
                    mean().plot(ax=ax1, linewidth=2, color='k',
                                label='OGGM default parameter run')
                oggmdefault = run
        # Rank runs by weighted MAE and pick a covering ensemble from the
        # best 150 candidates.
        maes = mae_weighted(df).sort_values()
        idx2plot = optimize_cov(df.loc[:, maes.index[:150]],
                                df.loc[:, 'obs'], glid, minuse=5)
        ensmean = df.loc[:, idx2plot].mean(axis=1)
        ensmeanmean = ensmean.rolling(y_len, center=True).mean()
        ensstdmean = df.loc[:, idx2plot].std(axis=1).rolling(y_len,
                                                             center=True).mean()
        # coverage
        cov = calc_coverage(df, idx2plot, df['obs'])
        ax1.fill_between(ensmeanmean.index, ensmeanmean - ensstdmean,
                         ensmeanmean + ensstdmean, color='xkcd:teal', alpha=0.5)
        # plot ensemble mean
        ensmeanmean.plot(ax=ax1, linewidth=4.0, color='xkcd:teal',
                         label='ensemble parameters runs')
        # reference run (basically min mae)
        df.loc[:, maes.index[0]].rolling(y_len, center=True).mean(). \
            plot(ax=ax1, linewidth=3, color='xkcd:lavender',
                 label='minimum wMAE parameter run')
        name = name_plus_id(rgi_id)
        mae_ens = mae_weighted(pd.concat([ensmean, df['obs']], axis=1))[0]
        mae_best = maes[0]
        ax1.set_title('%s' % name, fontsize=28)
        ax1.text(2030, -4900, 'wMAE ensemble mean = %.2f m\n'
                 'wMAE minimum run = %.2f m' %
                 (mae_ens, mae_best), fontsize=18,
                 horizontalalignment='right')
        ax1.text(2040, -4900, '%d ensemble members\n'
                 'coverage = %.2f' %
                 (len(idx2plot), cov), fontsize=18)
        ax1.set_ylabel('relative length change [m]', fontsize=26)
        ax1.set_xlabel('Year', fontsize=26)
        ax1.set_xlim([1850, 2020])
        ax1.set_ylim([-3500, 1000])
        ax1.tick_params(axis='both', which='major', labelsize=22)
        ax1.grid(True)
        ax1.legend(bbox_to_anchor=(-0.1, -0.15), loc='upper left',
                   fontsize=18, ncol=2)
        # parameter plots (parallel coordinates of the ensemble members)
        from colorspace import sequential_hcl
        col = sequential_hcl('Blue-Yellow').colors(len(idx2plot) + 3)
        for i, run in enumerate(idx2plot):
            para = ast.literal_eval('{' + run + '}')
            psf = para['prcp_scaling_factor']
            gla = para['glena_factor']
            mbb = para['mbbias']
            # Rescale MB bias from [-1400, 1000] onto the [0.5, 4] axis so
            # all three parameters share one y-range.
            mbb = (mbb - -1400) * (4-0.5) / (1000 - -1400) + 0.5
            ax2.plot([1, 2, 3], [psf, gla, mbb], color=col[i], linewidth=2)
        ax2.set_xlabel('calibration parameters', fontsize=18)
        ax2.set_ylabel('Precipitation scaling factor\nGlen A factor',
                       fontsize=18)
        ax2.set_xlim([0.8, 3.2])
        ax2.set_ylim([0.3, 4.2])
        ax2.set_xticks([1, 2, 3])
        ax2.set_xticklabels(['Psf', 'GlenA', 'MB bias'], fontsize=16)
        ax2.tick_params(axis='y', which='major', labelsize=16)
        ax2.grid(True)
        ax3 = ax2.twinx()
        # scale to same y lims
        scale = (4.2-0.3)/(4.0-0.5)
        dy = (2400*scale-2400)/2
        ax3.set_ylim([-1400-dy, 1000+dy])
        ax3.set_ylabel('mass balance bias [m w.e. ]', fontsize=18)
        ax3.set_yticks(np.arange(-1400, 1100, 400))
        ax3.set_yticklabels(['-1.4', '-1.0', '-0.6', '-0.2',
                             '0.2', '0.6', '1.0'])
        ax3.tick_params(axis='both', which='major', labelsize=16)
        fig.subplots_adjust(left=0.08, right=0.95, bottom=0.24, top=0.93,
                            wspace=0.5)
        fn1 = os.path.join(pout, 'histalp_%s.png' % glid)
        fig.savefig(fn1)
        used = dict()
        used['oggmdefault'] = oggmdefault
        used['minmae'] = idx2plot[0]
        used['ensemble'] = idx2plot
        # bugfix: close the pickle file handle (previously leaked via
        # pickle.dump(used, open(..., 'wb'))).
        with open(os.path.join(pout, 'runs_%s.p' % glid), 'wb') as pfile:
            pickle.dump(used, pfile)
def past_simulation_and_commitment(rgi, allobs, allmeta, histalp_storage,
                                   comit_storage, comit_storage_noseed,
                                   pout, y_len=5, comyears=300):
    """Plot the HISTALP hindcast plus committed-length projections.

    Shows observed length change, the HISTALP-driven ensemble up to 2015,
    three random-climate commitment ensembles (1984-2014, 1960-1980,
    1870-1900 periods) for ``comyears`` years, and their no-seed
    equilibrium lengths as bars at the right edge. Saves
    ``commit_<rgi>.png`` into ``pout``.

    rgi                  : glacier id, possibly with a merge suffix.
    allobs, allmeta      : observation series / metadata indexed by rgi id.
    histalp_storage      : storage path of the HISTALP runs.
    comit_storage        : storage path of the (seeded) commitment runs.
    comit_storage_noseed : storage path of the unseeded commitment runs
                           used for the equilibrium estimates.
    y_len                : rolling-mean window in years (default 5).
    comyears             : length of the commitment period in years.
    """
    # Fixed color palette for the different climate ensembles.
    cols = ['xkcd:teal',
            'xkcd:orange',
            'xkcd:azure',
            'xkcd:tomato',
            'xkcd:blue',
            'xkcd:chartreuse',
            'xkcd:green'
            ]
    obs = allobs.loc[rgi.split('_')[0]]
    meta = allmeta.loc[rgi.split('_')[0]]
    fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
    df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
    fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
    df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
    fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
    df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
    # plot
    fig, ax1 = plt.subplots(1, figsize=[20, 7])
    obs.plot(ax=ax1, color='k', marker='o',
             label='Observations')
    # past (the pre-2015 part is identical in all commitment files; df99
    # is used as the representative)
    ensmean = df99.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
    ax1.fill_between(ensmeanmean.loc[:2015].index,
                     ensmeanmean.loc[:2015] - ensstdmean.loc[:2015],
                     ensmeanmean.loc[:2015] + ensstdmean.loc[:2015],
                     color=cols[0], alpha=0.5)
    ensmeanmean.loc[:2015].plot(ax=ax1, linewidth=4.0, color=cols[0],
                                label='HISTALP climate')
    # dummy (invisible line to shape the legend layout)
    ax1.plot(0, 0, 'w-', label=' ')
    # 1999
    ax1.fill_between(ensmeanmean.loc[2015:].index,
                     ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
                     ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
                     color=cols[1], alpha=0.5)
    ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[1],
                                label='Random climate (1984-2014)')
    # 1970
    ensmean = df70.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
    ax1.fill_between(ensmeanmean.loc[2015:].index,
                     ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
                     ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
                     color=cols[5], alpha=0.5)
    ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[5],
                                label='Random climate (1960-1980)')
    # 1885
    ensmean = df85.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
    ax1.fill_between(ensmeanmean.loc[2015:].index,
                     ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
                     ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
                     color=cols[2], alpha=0.5)
    ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[2],
                                label='Random climate (1870-1900)')
    # ---------------------------------------------------------------------
    # plot commitment ensemble length
    # (no-seed runs; the mean/std over the last 30 valid years is taken as
    # the equilibrium length and drawn as a bar right of the time series)
    # 1984
    efn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
    edf99 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
                                efn99, meta)
    ensmean = edf99.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = edf99.std(axis=1).rolling(y_len, center=True).mean()
    postlength = ensmeanmean.dropna().iloc[-30:].mean()
    poststd = ensstdmean.dropna().iloc[-30:].mean()
    ax1.fill_between([2014+comyears+10, 2014+comyears+25],
                     postlength + poststd, postlength - poststd,
                     color=cols[3], alpha=0.5)
    ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [postlength, postlength], linewidth=4.0,
             color=cols[3],
             label=('Random climate (1984-2014) '
                    'equlibrium length'))
    # 1970
    efn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
    edf70 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
                                efn70, meta)
    ensmean = edf70.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = edf70.std(axis=1).rolling(y_len, center=True).mean()
    prelength = ensmeanmean.dropna().iloc[-30:].mean()
    prestd = ensstdmean.dropna().iloc[-30:].mean()
    ax1.fill_between([2014+comyears+10, 2014+comyears+25],
                     prelength + prestd, prelength - prestd,
                     color=cols[6], alpha=0.5)
    ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
             linewidth=4.0,
             color=cols[6],
             label=('Random climate (1960-1980) '
                    'equlibrium length'))
    # 1885
    efn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
    edf85 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
                                efn85, meta)
    ensmean = edf85.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = edf85.std(axis=1).rolling(y_len, center=True).mean()
    prelength = ensmeanmean.dropna().iloc[-30:].mean()
    prestd = ensstdmean.dropna().iloc[-30:].mean()
    ax1.fill_between([2014+comyears+10, 2014+comyears+25],
                     prelength + prestd, prelength - prestd,
                     color=cols[4], alpha=0.5)
    ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
             linewidth=4.0,
             color=cols[4],
             label=('Random climate (1870-1900) '
                    'equlibrium length'))
    # ---------------------------------------------------------------------
    ylim = ax1.get_ylim()
    #ax1.plot([2015, 2015], ylim, 'k-', linewidth=2)
    ax1.set_xlim([1850, 2014+comyears+30])
    #ax1.set_ylim(ylim)
    # Secondary axis with approximate absolute glacier length.
    ax2 = ax1.twinx()
    ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
    y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
    ax2.tick_params(axis='both', which='major', labelsize=22)
    ax2.set_ylim([y1, y2])
    name = name_plus_id(rgi)
    ax1.set_title('%s' % name, fontsize=28)
    ax1.set_ylabel('relative length change [m]', fontsize=26)
    ax1.set_xlabel('Year', fontsize=26)
    ax1.tick_params(axis='both', which='major', labelsize=22)
    ax1.set_xticks([1850, 1950, 2014, 2114, 2214, 2314])
    ax1.set_xticklabels(['1850', '1950', '2014/0', '100', '200', '300'])
    ax1.grid(True)
    ax1.legend(bbox_to_anchor=(-0.0, -0.17), loc='upper left', fontsize=18,
               ncol=3)
    fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
                        wspace=0.5)
    fn1 = os.path.join(pout, 'commit_%s.png' % rgi)
    fig.savefig(fn1)
def past_simulation_and_projection(rgi, allobs, allmeta, histalp_storage,
                                   proj_storage, comit_storage,
                                   pout, y_len=5):
    """Plot the HISTALP past simulation together with RCP projections.

    Creates one figure per glacier showing the length observations, the
    HISTALP ensemble mean (with a one-standard-deviation band), the four
    RCP projection ensembles and the equilibrium lengths of three random
    climate (commitment) runs, and saves it as ``proj_<rgi>.png``.

    Parameters
    ----------
    rgi : str
        RGI identifier, possibly with a ``'_merged'`` suffix.
    allobs : pandas object
        Length observations for all glaciers, indexed by plain RGI id.
    allmeta : pandas object
        Glacier meta data, indexed by plain RGI id.
    histalp_storage : str
        Path to the stored HISTALP ensemble runs.
    proj_storage : str
        Path to the stored RCP projection runs.
    comit_storage : str
        Path to the stored commitment (random climate) runs.
    pout : str
        Output directory for the figure.
    y_len : int, optional
        Rolling-mean window length in years (default 5).
    """
    cols = ['xkcd:teal',
            'xkcd:azure',
            'xkcd:lime',
            'xkcd:orange',
            'xkcd:magenta',
            'xkcd:tomato',
            'xkcd:blue',
            'xkcd:green'
            ]
    # merged glaciers carry a '_merged' suffix; obs/meta are keyed by the
    # plain RGI id
    obs = allobs.loc[rgi.split('_')[0]]
    meta = allmeta.loc[rgi.split('_')[0]]
    # rolling ensemble mean and std per RCP scenario, 1850-2100
    dfall = pd.DataFrame([], index=np.arange(1850, 2101))
    dfallstd = pd.DataFrame([], index=np.arange(1850, 2101))
    for rcp in ['rcp26', 'rcp45', 'rcp60', 'rcp85']:
        dfrcp = get_rcp_ensemble_length(rgi, histalp_storage, proj_storage,
                                        rcp, meta)
        ensmean = dfrcp.mean(axis=1)
        dfall.loc[:, rcp] = ensmean.rolling(y_len, center=True).mean()
        dfallstd.loc[:, rcp] = dfrcp.std(axis=1).\
            rolling(y_len, center=True).mean()
    # plot
    fig, ax1 = plt.subplots(1, figsize=[20, 7])
    obs.plot(ax=ax1, color='k', marker='o',
             label='Observations')
    # past (pre-2015) part
    # NOTE: 'rcp' still holds 'rcp85' from the loop above; the historic part
    # is identical for all scenarios, so any column works here.
    ax1.fill_between(dfall.loc[:2015, rcp].index,
                     dfall.loc[:2015, rcp] - dfallstd.loc[:2015, rcp],
                     dfall.loc[:2015, rcp] + dfallstd.loc[:2015, rcp],
                     color=cols[0], alpha=0.5)
    dfall.loc[:2015, rcp].plot(ax=ax1, linewidth=4.0, color=cols[0],
                               label='HISTALP climate')
    # dummy plot for a blank legend entry (keeps the legend grid aligned)
    ax1.plot(0, 0, 'w-', label=' ')
    # projections
    # rcp26
    ax1.fill_between(dfall.loc[2015:, 'rcp26'].index,
                     dfall.loc[2015:, 'rcp26'] - dfallstd.loc[2015:, 'rcp26'],
                     dfall.loc[2015:, 'rcp26'] + dfallstd.loc[2015:, 'rcp26'],
                     color=cols[1], alpha=0.5)
    dfall.loc[2015:, 'rcp26'].plot(ax=ax1, linewidth=4.0, color=cols[1],
                                   label='RCP 2.6 climate')
    # rcp45
    dfall.loc[2015:, 'rcp45'].plot(ax=ax1, linewidth=4.0, color=cols[2],
                                   label='RCP 4.5 climate')
    # dummy
    ax1.plot(0, 0, 'w-', label=' ')
    # rcp60
    dfall.loc[2015:, 'rcp60'].plot(ax=ax1, linewidth=4.0, color=cols[3],
                                   label='RCP 6.0 climate')
    # rcp85
    ax1.fill_between(dfall.loc[2015:, 'rcp85'].index,
                     dfall.loc[2015:, 'rcp85'] - dfallstd.loc[2015:, 'rcp85'],
                     dfall.loc[2015:, 'rcp85'] + dfallstd.loc[2015:, 'rcp85'],
                     color=cols[4], alpha=0.5)
    dfall.loc[2015:, 'rcp85'].plot(ax=ax1, linewidth=4.0, color=cols[4],
                                   label='RCP 8.5 climate')
    # dummy
    ax1.plot(0, 0, 'w-', label=' ')
    # plot commitment lengths: mean +/- std over the last 30 valid years of
    # each random climate run, drawn as a short bar right of the projections
    # 1984-2014 climate
    fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
    df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
    ensmean = df99.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
    postlength = ensmeanmean.dropna().iloc[-30:].mean()
    poststd = ensstdmean.dropna().iloc[-30:].mean()
    ax1.fill_between([2105, 2111],
                     postlength + poststd, postlength - poststd,
                     color=cols[5], alpha=0.5)
    ax1.plot([2105.5, 2110.5], [postlength, postlength], linewidth=4.0,
             color=cols[5],
             label=('Random climate (1984-2014) '
                    'equilibrium length'))
    # 1960-1980 climate
    fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
    df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
    ensmean = df70.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
    prelength = ensmeanmean.dropna().iloc[-30:].mean()
    prestd = ensstdmean.dropna().iloc[-30:].mean()
    ax1.fill_between([2105, 2111],
                     prelength + prestd, prelength - prestd,
                     color=cols[7], alpha=0.5)
    ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
             color=cols[7],
             label=('Random climate (1960-1980) '
                    'equilibrium length'))
    # 1870-1900 climate
    fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
    df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
    ensmean = df85.mean(axis=1)
    ensmeanmean = ensmean.rolling(y_len, center=True).mean()
    ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
    prelength = ensmeanmean.dropna().iloc[-30:].mean()
    prestd = ensstdmean.dropna().iloc[-30:].mean()
    ax1.fill_between([2105, 2111],
                     prelength + prestd, prelength - prestd,
                     color=cols[6], alpha=0.5)
    ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
             color=cols[6],
             label=('Random climate (1870-1900) '
                    'equilibrium length'))
    ylim = ax1.get_ylim()
    ax1.set_xlim([1850, 2112])
    # secondary y-axis with an approximate absolute glacier length scale
    ax2 = ax1.twinx()
    # typo fix: was 'apporixmate' (matches the spelling used in the
    # commitment plot above)
    ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
    y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
    ax2.tick_params(axis='both', which='major', labelsize=22)
    ax2.set_ylim([y1, y2])
    name = name_plus_id(rgi)
    ax1.set_title('%s' % name, fontsize=28)
    ax1.set_ylabel('relative length change [m]', fontsize=26)
    ax1.set_xlabel('Year', fontsize=26)
    ax1.tick_params(axis='both', which='major', labelsize=22)
    ax1.grid(True)
    ax1.legend(bbox_to_anchor=(0.0, -0.17), loc='upper left', fontsize=18,
               ncol=4)
    fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
                        wspace=0.5)
    fn1 = os.path.join(pout, 'proj_%s.png' % rgi)
    fig.savefig(fn1)
def get_mean_temps_eq(rgi, histalp_storage, comit_storage, ensmembers):
    """Return mean temperatures of the two commitment climate periods.

    Averages the monthly temperature of months 9-11 over the mean glacier
    surface of simulation years 270-300 of the commitment runs, for the
    1870-1900 period (``t85``) and the 1984-2014 period (``t99``).

    Parameters
    ----------
    rgi : str
        RGI identifier of the glacier.
    histalp_storage : str
        Path to the stored HISTALP ensemble runs (glacier directories).
    comit_storage : str
        Path to the stored commitment runs.
    ensmembers : int
        Number of ensemble members to read.

    Returns
    -------
    (t85, t99) : tuple of float
        Mean temperature for 1870-1900 and 1984-2014 respectively.
    """
    from oggm import cfg, utils, GlacierDirectory
    from oggm.core.massbalance import MultipleFlowlineMassBalance
    from oggm.core.flowline import FileModel
    import shutil
    # 1. get mean surface heights over the equilibrium part of the runs
    df85 = pd.DataFrame([])
    df99 = pd.DataFrame([])
    for i in range(ensmembers):
        fnc1 = os.path.join(comit_storage, rgi,
                            'model_run_commitment1885_{:02d}.nc'.format(i))
        fnc2 = os.path.join(comit_storage, rgi,
                            'model_run_commitment1999_{:02d}.nc'.format(i))
        tmpmod1 = FileModel(fnc1)
        tmpmod2 = FileModel(fnc2)
        for j in np.arange(270, 301):
            tmpmod1.run_until(j)
            df85.loc[:, '{}{}'.format(i, j)] = tmpmod1.fls[-1].surface_h
            tmpmod2.run_until(j)
            df99.loc[:, '{}{}'.format(i, j)] = tmpmod2.fls[-1].surface_h
    meanhgt99 = df99.mean(axis=1).values
    meanhgt85 = df85.mean(axis=1).values
    # 2. get the climate
    # Initialize OGGM
    cfg.initialize()
    wd = utils.gettempdir(reset=True)
    cfg.PATHS['working_dir'] = wd
    utils.mkdir(wd, reset=True)
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # and set standard histalp values
    cfg.PARAMS['temp_melt'] = -1.75
    # reuse ensemble member 0's glacier directory for the climate files
    i = 0
    storage_dir = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
                               rgi[:8], rgi[:11], rgi)
    new_dir = os.path.join(cfg.PATHS['working_dir'], 'per_glacier',
                           rgi[:8], rgi[:11], rgi)
    shutil.copytree(storage_dir, new_dir)
    gdir = GlacierDirectory(rgi)
    mb = MultipleFlowlineMassBalance(gdir, filename='climate_monthly',
                                     check_calib_params=False)
    # need to do the above for every ensemble member if I consider PRECIP!
    # and set cfg.PARAMS['prcp_scaling_factor'] = pdict['prcp_scaling_factor']
    df99_2 = pd.DataFrame()
    df85_2 = pd.DataFrame()
    for i in np.arange(9, 12):
        for y in np.arange(1870, 1901):
            flyear = utils.date_to_floatyear(y, i)
            tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt85,
                                                               flyear)[0]
            df85_2.loc[y, i] = tmp.mean()
        for y in np.arange(1984, 2015):
            # BUG FIX: 'flyear' was not recomputed inside this loop, so
            # every iteration reused the stale value from the 1870-1900
            # loop above (y=1900).
            flyear = utils.date_to_floatyear(y, i)
            tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt99,
                                                               flyear)[0]
            df99_2.loc[y, i] = tmp.mean()
    t99 = df99_2.mean().mean()
    t85 = df85_2.mean().mean()
    return t85, t99
def get_mean_temps_2k(rgi, return_prcp):
    """Return mean HISTALP temperatures (and optionally precipitation).

    Runs the HISTALP climate processing for one glacier and averages the
    monthly temperature of months 9-11 at the reference height over three
    periods: 1870-1900, 1984-2014 and 1900-2000. If ``return_prcp`` is
    true, the precipitation of months 3-5 is averaged over the same
    periods as well.

    Parameters
    ----------
    rgi : str
        RGI identifier, possibly with a ``'_merged'`` suffix.
    return_prcp : bool
        Also compute and return the precipitation means.

    Returns
    -------
    (t85, t99, t2k) or (t85, t99, t2k, p85, p99, p2k) : tuple of float
    """
    from oggm import cfg, utils, workflow, tasks
    from oggm.core.massbalance import PastMassBalance
    # Initialize OGGM
    cfg.initialize()
    wd = utils.gettempdir(reset=True)
    cfg.PATHS['working_dir'] = wd
    utils.mkdir(wd, reset=True)
    cfg.PARAMS['baseline_climate'] = 'HISTALP'
    # and set standard histalp values
    cfg.PARAMS['temp_melt'] = -1.75
    cfg.PARAMS['prcp_scaling_factor'] = 1.75
    gdir = workflow.init_glacier_regions(rgidf=rgi.split('_')[0],
                                         from_prepro_level=3,
                                         prepro_border=10)[0]
    # run histalp climate on glacier!
    tasks.process_histalp_data(gdir)
    f = gdir.get_filepath('climate_historical')
    with utils.ncDataset(f) as nc:
        refhgt = nc.ref_hgt
    mb = PastMassBalance(gdir, check_calib_params=False)
    df = pd.DataFrame()
    df2 = pd.DataFrame()
    for y in np.arange(1870, 2015):
        for i in np.arange(9, 12):
            flyear = utils.date_to_floatyear(y, i)
            tmp = mb.get_monthly_climate([refhgt], flyear)[0]
            df.loc[y, i] = tmp.mean()
        if return_prcp:
            for i in np.arange(3, 6):
                flyear = utils.date_to_floatyear(y, i)
                pcp = mb.get_monthly_climate([refhgt], flyear)[3]
                # BUG FIX: the temperature 'tmp' was stored here instead of
                # the precipitation 'pcp' fetched on the line above, so the
                # precipitation means were actually temperature values.
                df2.loc[y, i] = pcp.mean()
    t99 = df.loc[1984:2014, :].mean().mean()
    t85 = df.loc[1870:1900, :].mean().mean()
    t2k = df.loc[1900:2000, :].mean().mean()
    if return_prcp:
        p99 = df2.loc[1984:2014, :].mean().mean()
        p85 = df2.loc[1870:1900, :].mean().mean()
        p2k = df2.loc[1900:2000, :].mean().mean()
        return t85, t99, t2k, p85, p99, p2k
    return t85, t99, t2k
def get_absolute_length(y0, y1, rgi, df, storage):
    """Translate two relative length-change values to absolute lengths.

    Reads ensemble member 0 of the stored HISTALP run, takes the model's
    absolute length at its final year as reference, and shifts ``y0`` and
    ``y1`` by the difference between that reference and the relative
    length change recorded in ``df`` for the same year.

    Parameters
    ----------
    y0, y1 : float
        Relative length-change values (e.g. axis limits) to convert.
    rgi : str
        RGI identifier of the glacier.
    df : pandas object
        Relative length changes, indexed by year, member 0 in column 0.
    storage : str
        Path to the stored HISTALP ensemble runs.

    Returns
    -------
    (abs_y0, abs_y1) : tuple of float
    """
    member_dir = os.path.join(storage, rgi, '{:02d}'.format(0),
                              rgi[:8], rgi[:11], rgi)
    run_file = os.path.join(member_dir,
                            'model_run_histalp_{:02d}.nc'.format(0))
    model = FileModel(run_file)
    reference = model.length_m
    # relative length change at the model's (final) year for member 0
    delta = df.loc[int(model.yr.values), 0]
    return reference + (y0 - delta), reference + (y1 - delta)
def elevation_profiles(rgi, meta, histalp_storage, pout):
    """Plot elevation profiles along the major flowline of one glacier.

    Reads all available HISTALP ensemble members, extracts bed and surface
    elevations at the initialization year and at 2003, and plots mean and
    min/max envelopes together with the RGI-initialization surface. Saves
    ``profile_<rgi>.png`` (and for four selected glaciers also ``.svg``).

    Parameters
    ----------
    rgi : str
        RGI identifier of the glacier.
    meta : mapping
        Glacier meta data; ``meta['first']`` is used as the
        initialization year in the legend.
    histalp_storage : str
        Path to the stored HISTALP ensemble runs.
    pout : str
        Output directory for the figure.
    """
    name = name_plus_id(rgi)
    # per-member surface/bed elevations, one column per ensemble member
    df1850 = pd.DataFrame()
    df2003 = pd.DataFrame()
    df2003b = pd.DataFrame()
    dfbed = pd.DataFrame()
    # loop over ensemble members until a file is missing (at most 999)
    for i in np.arange(999):
        # Local working directory (where OGGM will write its output)
        rgipath = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
                               rgi[:8], rgi[:11], rgi)
        fn = os.path.join(rgipath, 'model_run_histalp_{:02d}.nc'.format(i))
        try:
            tmpmod = FileModel(fn)
        except FileNotFoundError:
            # first missing member marks the end of the ensemble
            break
        # surface at the model's initial state (after spinup)
        df1850.loc[:, i] = tmpmod.fls[-1].surface_h
        # get bed surface
        dfbed.loc[:, i] = tmpmod.fls[-1].bed_h
        # HISTALP surface at year 2003
        tmpmod.run_until(2003)
        df2003.loc[:, i] = tmpmod.fls[-1].surface_h
        df2003b.loc[:, i] = tmpmod.fls[-1].thick
    # RGI init surface, once is enough
    fn2 = os.path.join(histalp_storage, rgi, '00', rgi[:8], rgi[:11],
                       rgi, 'model_run_spinup_00.nc')
    tmpmod2 = FileModel(fn2)
    initsfc = tmpmod2.fls[-1].surface_h
    # get distance on line (grid spacing of the last member read)
    dx_meter = tmpmod.fls[-1].dx_meter
    # ensemble statistics of the bed elevation
    meanbed = dfbed.mean(axis=1).values
    maxbed = dfbed.max(axis=1).values
    minbed = dfbed.min(axis=1).values
    # 1850 (initialization) surface statistics
    mean1850 = df1850.mean(axis=1).values
    # where is mean glacier thinner than 1m: treat as glacier terminus and
    # mask everything downstream of it
    ix50 = np.where(mean1850-meanbed < 1)[0][0]
    mean1850[ix50:] = np.nan
    min1850 = df1850.min(axis=1).values
    min1850[ix50:] = np.nan
    # clip envelopes to the bed so they never plot below it
    min1850[min1850 <= meanbed] = meanbed[min1850 <= meanbed]
    max1850 = df1850.max(axis=1).values
    max1850[max1850 <= meanbed] = meanbed[max1850 <= meanbed]
    # 2003 surface statistics, same masking/clipping as above
    mean2003 = df2003.mean(axis=1).values
    # where is mean glacier thinner than 1m
    ix03 = np.where(mean2003-meanbed < 1)[0][0]
    mean2003[ix03:] = np.nan
    min2003 = df2003.min(axis=1).values
    min2003[ix03:] = np.nan
    min2003[min2003 <= meanbed] = meanbed[min2003 <= meanbed]
    max2003 = df2003.max(axis=1).values
    max2003[max2003 <= meanbed] = meanbed[max2003 <= meanbed]
    # mask the RGI init surface beyond its own terminus; pin the terminus
    # point to the bed so the curve visually ends on it
    lastx = np.where(initsfc-meanbed < 1)[0][0]
    initsfc[lastx:] = np.nan
    initsfc[lastx] = meanbed[lastx]
    # distance along the flowline in km
    dis = np.arange(len(meanbed)) * dx_meter / 1000
    # axis extents derived from the 1850 profile
    xmax = sum(np.isfinite(mean1850))
    ymax = np.nanmax(mean1850) + 50
    ymin = minbed[np.where(np.isfinite(mean1850))].min() - 50
    fig, ax = plt.subplots(1, figsize=[15, 9])
    # grey fill below the bed, tan band for the bed min/max envelope
    ax.fill_between(dis[:xmax+1], dis[:xmax+1] * 0 + ymin, minbed[:xmax+1],
                    color='0.7', alpha=0.5)
    ax.fill_between(dis[:xmax+1], minbed[:xmax+1], maxbed[:xmax+1],
                    color='xkcd:tan', alpha=0.5)
    ax.plot(dis[:xmax+1], meanbed[:xmax+1], 'k-', color='xkcd:tan',
            linewidth=3, label='Glacier bed elevation [m]')
    # initialization-year surface (mean line + min/max band)
    ax.fill_between(dis, min1850, max1850, color='xkcd:azure', alpha=0.5)
    ax.plot(dis, mean1850, 'k-', color='xkcd:azure', linewidth=4,
            label=('Surface elevation [m] year {:d}\n'
                   '(initialization state after spinup)'.
                   format(meta['first'])))
    # 2003 surface from the ensemble (mean line + min/max band)
    ax.fill_between(dis, min2003, max2003, color='xkcd:teal', alpha=0.5)
    ax.plot(dis, mean2003, 'k-', color='xkcd:teal', linewidth=4,
            label=('Surface elevation [m] year 2003\n'
                   '(from HISTALP ensemble simulations)'))
    # 2003 surface as given by the RGI initialization
    ax.plot(dis, initsfc, 'k-', color='xkcd:crimson', linewidth=4,
            label=('Surface elevation [m] year 2003\n'
                   '(from RGI initialization)'))
    ax.legend(loc=1, fontsize=20)
    ax.set_ylim(ymin, ymax)
    ax.set_xlim(0, dis[xmax])
    ax.set_xlabel('Distance along major flowline [km]', fontsize=28)
    ax.set_ylabel('Elevation [m a.s.l.]', fontsize=28)
    ax.tick_params(axis='both', which='major', labelsize=26)
    ax.grid(True)
    ax.set_title(name, fontsize=30)
    fig.tight_layout()
    fn = os.path.join(pout, 'profile_%s' % rgi)
    # selected glaciers additionally get a vector version
    if ('3643' in rgi) or ('1450' in rgi) or ('2051' in rgi) or ('897' in rgi):
        fig.savefig('{}.svg'.format(fn))
    fig.savefig('{}.png'.format(fn))
def grey_madness(glcdict, pout, y_len=5):
    """Plot every calibration run of each glacier as thin grey lines.

    For every glacier in ``glcdict``, plots all parameter-combination runs
    in grey, highlights the OGGM-default parameter run in black, overlays
    the observations, and saves the figure as ``all_<glid>.png``.

    Parameters
    ----------
    glcdict : dict
        Maps glacier id (possibly with '_merged' suffix) to a DataFrame
        whose columns are parameter-combination strings plus 'obs'.
    pout : str
        Output directory for the figures.
    y_len : int
        Rolling-mean window length in years (default 5).
    """
    for glid, df in glcdict.items():
        # take care of merged glaciers: obs/meta are keyed by plain RGI id
        rgi_id = glid.split('_')[0]
        fig, ax1 = plt.subplots(figsize=[20, 7])
        # find the run with OGGM standard parameters
        # (prcp_scaling_factor ~ 1.75, mbbias 0, glena_factor 1)
        for run in df.columns:
            if run == 'obs':
                continue
            # column names encode the parameter dict without braces
            para = ast.literal_eval('{' + run + '}')
            if ((np.abs(para['prcp_scaling_factor'] - 1.75) < 0.01) and
                    (para['mbbias'] == 0) and
                    (para['glena_factor'] == 1)):
                oggmdefault = run
                break
        # NOTE(review): if no column matches the default parameters,
        # 'oggmdefault' stays unbound and the plot calls below raise
        # NameError — this relies on every glcdict entry containing the
        # default run; confirm against the run generation.
        # all runs in grey, without legend labels
        nolbl = df.loc[:, df.columns != 'obs'].\
            rolling(y_len, center=True).mean().copy()
        nolbl.columns = ['' for i in range(len(nolbl.columns))]
        nolbl.plot(ax=ax1, linewidth=0.8, color='0.7')
        # replot one grey line with a label so the legend gets one entry
        df.loc[:, oggmdefault].rolling(y_len, center=True).mean().plot(
            ax=ax1, linewidth=0.8, color='0.7',
            label='Every possible calibration parameter combination')
        # highlight the default-parameter run in black
        df.loc[:, oggmdefault].rolling(y_len, center=True).mean().\
            plot(ax=ax1, color='k', linewidth=2,
                 label='OGGM default parameters')
        df.loc[:, 'obs'].plot(ax=ax1, color='k', marker='o',
                              label='Observations')
        name = name_plus_id(rgi_id)
        ax1.set_title('%s' % name, fontsize=28)
        ax1.set_ylabel('relative length change [m]', fontsize=26)
        ax1.set_xlabel('Year', fontsize=26)
        ax1.set_xlim([1850, 2014])
        ax1.set_ylim([-7500, 4000])
        ax1.tick_params(axis='both', which='major', labelsize=22)
        ax1.grid(True)
        ax1.legend(bbox_to_anchor=(-0.0, -0.15), loc='upper left',
                   fontsize=18, ncol=2)
        fig.subplots_adjust(left=0.09, right=0.99, bottom=0.24, top=0.93,
                            wspace=0.5)
        fn1 = os.path.join(pout, 'all_%s.png' % glid)
        fig.savefig(fn1)
def run_and_plot_merged_montmine(pout):
    """Run and plot the Mont Mine entity-vs-merged glacier experiment.

    Initializes OGGM, merges Glacier du Mont Mine with Glacier de
    Ferpecle, plots both centerline maps (``merged_montmine.png``), then
    runs constant-climate experiments for the entity and the merged
    glacier and plots the resulting length time series
    (``merged_montmine_timeseries.png``).

    Parameters
    ----------
    pout : str
        Output directory for the two figures.
    """
    # Set-up
    cfg.initialize(logging_level='WORKFLOW')
    cfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-merging',
                                                reset=True)
    # Use a suitable border size for your domain
    cfg.PARAMS['border'] = 80
    cfg.PARAMS['use_intersects'] = False
    # Mont Mine alone ...
    montmine = workflow.init_glacier_directories(['RGI60-11.02709'],
                                                 from_prepro_level=3)[0]
    # ... and together with Ferpecle for merging
    gdirs = workflow.init_glacier_directories(['RGI60-11.02709',
                                               'RGI60-11.02715'],
                                              from_prepro_level=3)
    workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)
    gdirs_merged = workflow.merge_glacier_tasks(gdirs, 'RGI60-11.02709',
                                                return_all=False,
                                                filename='climate_monthly',
                                                buffer=2.5)
    # plot centerlines: entity (left) vs merged (right)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[20, 10])
    plot_centerlines(montmine, ax=ax1, use_flowlines=True)
    # thin out the x ticks (every second one)
    xt = ax1.get_xticks()
    ax1.set_xticks(xt[::2])
    ax1.tick_params(axis='both', which='major', labelsize=20)
    ax1.set_title('entity glacier', fontsize=24)
    plot_centerlines(gdirs_merged, ax=ax2, use_model_flowlines=True)
    ax2.tick_params(axis='both', which='major', labelsize=20)
    ax2.set_title('merged with Glacier de Ferpecle', fontsize=24)
    # drop the second colorbar axis and restyle the remaining one
    axs = fig.get_axes()
    axs[3].remove()
    axs[2].tick_params(axis='y', labelsize=16)
    axs[2].set_ylabel('Altitude [m]', fontsize=18)
    fig.suptitle('Glacier du Mont Mine', fontsize=24)
    fig.subplots_adjust(left=0.04, right=0.99, bottom=0.08, top=0.89,
                        wspace=0.3)
    fn = os.path.join(pout, 'merged_montmine.png')
    fig.savefig(fn)
    # run glaciers with negative t bias
    # some model settings
    years = 125
    tbias = -1.5
    # model Mont Mine glacier as entity and compile the output
    tasks.run_constant_climate(montmine, nyears=years,
                               output_filesuffix='_entity',
                               temperature_bias=tbias)
    ds_entity = utils.compile_run_output([montmine], path=False,
                                         filesuffix='_entity')
    # model the merged glacier and compile the output
    tasks.run_constant_climate(gdirs_merged, nyears=years,
                               output_filesuffix='_merged',
                               temperature_bias=tbias,
                               climate_filename='climate_monthly')
    ds_merged = utils.compile_run_output([gdirs_merged], path=False,
                                         filesuffix='_merged')
    #
    # bring them to same size again (colder climate, entity only)
    tbias = -2.2
    years = 125
    tasks.run_constant_climate(montmine, nyears=years,
                               output_filesuffix='_entity1',
                               temperature_bias=tbias)
    ds_entity1 = utils.compile_run_output([montmine], path=False,
                                          filesuffix='_entity1')
    # and let them shrink again
    # some model settings
    tbias = -0.5
    years = 100
    # load the previous entity run as the initial state
    tmp_mine = FileModel(
        montmine.get_filepath('model_run', filesuffix='_entity1'))
    tmp_mine.run_until(years)
    tasks.run_constant_climate(montmine, nyears=years,
                               output_filesuffix='_entity2',
                               init_model_fls=tmp_mine.fls,
                               temperature_bias=tbias)
    ds_entity2 = utils.compile_run_output([montmine], path=False,
                                          filesuffix='_entity2')
    # model the merged glacier and compile the output
    tmp_merged = FileModel(
        gdirs_merged.get_filepath('model_run', filesuffix='_merged'))
    tmp_merged.run_until(years)
    tasks.run_constant_climate(gdirs_merged, nyears=years,
                               output_filesuffix='_merged2',
                               init_model_fls=tmp_merged.fls,
                               temperature_bias=tbias,
                               climate_filename='climate_monthly')
    ds_merged2 = utils.compile_run_output([gdirs_merged], path=False,
                                          filesuffix='_merged2')
    # time series figure: growth phase (left) and shrink phase (right)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[20, 7])
    dse = ds_entity.length.to_series().rolling(5, center=True).mean()
    dsm = ds_merged.length.to_series().rolling(5, center=True).mean()
    ax1.plot(dse.values, 'C1', label='Entity glacier', linewidth=3)
    ax1.plot(dsm.values, 'C2', label='Merged glacier', linewidth=3)
    ax1.set_xlabel('Simulation time [yr]', fontsize=20)
    ax1.set_ylabel('Glacier length[m]', fontsize=20)
    ax1.grid(True)
    ax1.legend(loc=2, fontsize=18)
    dse2 = ds_entity2.length.to_series().rolling(5, center=True).mean()
    dsm2 = ds_merged2.length.to_series().rolling(5, center=True).mean()
    ax2.plot(dse2.values, 'C1', label='Entity glacier', linewidth=3)
    ax2.plot(dsm2.values, 'C2', label='Merged glacier', linewidth=3)
    ax2.set_xlabel('Simulation time [yr]', fontsize=22)
    ax2.set_ylabel('Glacier length [m]', fontsize=22)
    ax2.grid(True)
    ax2.legend(loc=1, fontsize=18)
    ax1.set_xlim([0, 120])
    ax2.set_xlim([0, 100])
    ax1.set_ylim([7500, 12000])
    ax2.set_ylim([7500, 12000])
    ax1.tick_params(axis='both', which='major', labelsize=20)
    ax2.tick_params(axis='both', which='major', labelsize=20)
    fig.subplots_adjust(left=0.08, right=0.96, bottom=0.11, top=0.93,
                        wspace=0.3)
    fn = os.path.join(pout, 'merged_montmine_timeseries.png')
    fig.savefig(fn)
def climate_vs_lengthchange(dfout, pout):
    """Scatter equilibrium length differences against climate differences.

    Builds a 2x2 panel (winter/summer temperature on top, winter/summer
    precipitation below), with glaciers split into an eastern and a
    western group at 9.5 deg E longitude, and saves the figure as
    ``climate_vs_length.png``.

    Parameters
    ----------
    dfout : pandas object
        Per-glacier table with columns 'lon', 'dl 1885-1970' and the
        'dt ...'/'dp ...' climate-difference columns plotted below.
    pout : str
        Output directory for the figure.
    """
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=[20, 15])
    # split glaciers into an eastern and a western group at 9.5 deg E
    ost = dfout.loc[dfout['lon'] >= 9.5]
    west = dfout.loc[dfout['lon'] < 9.5]
    # one entry per scatter call: (axis, group, y-column, colour, marker,
    # label); the order within each axis fixes the legend order
    panels = [
        # ax1: temp, winter
        (ax1, ost, 'dt win', 'C1', None, 'Temp. Oct-Apr (East)'),
        (ax1, ost, 'dt djf', 'C3', None, 'Temp. DJF (East)'),
        (ax1, west, 'dt win', 'C2', 's', 'Temp. Oct-Apr (West)'),
        (ax1, west, 'dt djf', 'C4', 's', 'Temp. DJF (West)'),
        # ax2: temp, summer
        (ax2, ost, 'dt som', 'C1', None, 'Temp. Mai-Sep (East)'),
        (ax2, ost, 'dt jja', 'C3', None, 'Temp. JJA (East)'),
        (ax2, west, 'dt som', 'C2', 's', 'Temp. Mai-Sep (West)'),
        (ax2, west, 'dt jja', 'C4', 's', 'Temp. JJA (West)'),
        # ax3: pcp, winter
        (ax3, west, 'dp win', 'C2', 's', 'Prcp. Oct-Apr (West)'),
        (ax3, west, 'dp djf', 'C4', 's', 'Prcp. DJF (West)'),
        (ax3, ost, 'dp win', 'C1', None, 'Prcp. Oct-Apr (East)'),
        (ax3, ost, 'dp djf', 'C3', None, 'Prcp. DJF (East)'),
        # ax4: pcp, summer
        (ax4, west, 'dp jja', 'C4', 's', 'Prcp. JJA (West)'),
        (ax4, west, 'dp som', 'C2', 's', 'Prcp. Mai-Sep (West)'),
        (ax4, ost, 'dp jja', 'C3', None, 'Prcp. JJA (East)'),
        (ax4, ost, 'dp som', 'C1', None, 'Prcp. Mai-Sep (East)'),
    ]
    for axis, group, ycol, colr, marker, lbl in panels:
        # only pass 'marker' when the original call did (western squares)
        extra = {} if marker is None else {'marker': marker}
        group.plot.scatter(x='dl 1885-1970', y=ycol, color=colr,
                           ax=axis, s=80, label=lbl, **extra)
    dl_label = ('Equilibrium length difference\nbetween 1870-1900 '
                'and 1960-1980 climate')
    ax3.set_xlabel(dl_label, fontsize=20)
    ax4.set_xlabel(dl_label, fontsize=20)
    ax1.set_ylabel(('Temperature difference between\n 1870-1900 and '
                    '1960-1980 climate'), fontsize=20)
    ax3.set_ylabel(('Precipitation difference between\n 1870-1900 and '
                    '1960-1980 climate'), fontsize=20)
    # clear the redundant labels on the right column / top row
    ax2.set_ylabel('')
    ax4.set_ylabel('')
    ax1.set_xlabel('')
    ax2.set_xlabel('')
    # shared value ranges: temperature on top, precipitation below
    ax1.set_ylim([-1.0, 0.2])
    ax2.set_ylim([-1.0, 0.2])
    ax3.set_ylim([-350, 50])
    ax4.set_ylim([-350, 50])
    for ax in [ax1, ax2, ax3, ax4]:
        ax.grid(True)
        ax.legend(loc=3, ncol=2, fontsize=18)
        ax.set_xlim([-4, 2])
        ax.tick_params(axis='both', which='major', labelsize=20)
    fig.subplots_adjust(left=0.08, right=0.98, bottom=0.11, top=0.93,
                        wspace=0.2, hspace=0.2)
    fig.savefig(os.path.join(pout, 'climate_vs_length.png'))
def histogram(pin, pout):
    """Plot histograms of the calibration parameters used in the ensembles.

    Reads the pickled run dictionary of every glacier in ``GLCDICT``,
    counts how often each Glen A factor, precipitation scaling factor and
    mass-balance bias appears in the selected ensembles, and saves bar
    plots of the three counts as ``histo.png``.

    Parameters
    ----------
    pin : str
        Directory containing the pickled run dictionaries
        (``runs_<glid>.p``).
    pout : str
        Output directory for the figure.
    """
    glena = defaultdict(int)
    mbbias = defaultdict(int)
    prcpsf = defaultdict(int)
    for glc in GLCDICT.keys():
        glid = str(glc)
        # merged glaciers are stored under a '_merged' suffix
        if MERGEDICT.get(glc):
            glid += '_merged'
        rundictpath = os.path.join(pin, 'runs_%s.p' % glid)
        # FIX: use a context manager so the pickle file is closed
        # deterministically (was pickle.load(open(...)), which left the
        # handle to the garbage collector)
        with open(rundictpath, 'rb') as fh:
            rundict = pickle.load(fh)
        ens = rundict['ensemble']
        for run in ens:
            # run strings encode the parameter dict without braces
            para = ast.literal_eval('{' + run + '}')
            prcpsf[para['prcp_scaling_factor']] += 1
            glena[para['glena_factor']] += 1
            mbbias[para['mbbias']] += 1
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=[20, 7])
    ax1.bar(list(glena.keys()), glena.values(), width=0.4)
    ax1.set_xlabel('Glen A factor', fontsize=22)
    ax1.set_ylabel('# used in ensemble', fontsize=22)
    ax2.bar(list(prcpsf.keys()), prcpsf.values(), width=0.2)
    ax2.set_xlabel('Prcp SF factor', fontsize=22)
    ax2.set_ylabel('# used in ensemble', fontsize=22)
    ax3.bar(list(mbbias.keys()), mbbias.values(), width=150)
    ax3.set_xlabel('MB bias', fontsize=22)
    ax3.set_ylabel('# used in ensemble', fontsize=22)
    for ax in [ax1, ax2, ax3]:
        ax.tick_params(axis='both', which='major', labelsize=20)
        ax.grid(True)
    fig.subplots_adjust(left=0.08, right=0.98, bottom=0.11, top=0.93,
                        wspace=0.2, hspace=0.2)
    fig.savefig(os.path.join(pout, 'histo.png'))
|
[
"numpy.abs",
"oggm.cfg.initialize",
"relic.preprocessing.GLCDICT.keys",
"relic.postprocessing.get_ensemble_length",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"mpl_toolkits.axes_grid1.inset_locator.inset_axes",
"numpy.arange",
"oggm.utils.ncDataset",
"numpy.isclose",
"relic.preprocessing.name_plus_id",
"oggm.workflow.merge_glacier_tasks",
"os.path.join",
"oggm.tasks.run_constant_climate",
"pandas.DataFrame",
"relic.postprocessing.mae_weighted",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.colors.Normalize",
"matplotlib.cm.ScalarMappable",
"numpy.isfinite",
"matplotlib.pyplot.colorbar",
"relic.postprocessing.optimize_cov",
"relic.postprocessing.get_rcp_ensemble_length",
"oggm.core.massbalance.MultipleFlowlineMassBalance",
"relic.postprocessing.calc_coverage",
"oggm.core.massbalance.PastMassBalance",
"matplotlib.pyplot.subplots",
"pandas.concat",
"oggm.core.flowline.FileModel",
"ast.literal_eval",
"oggm.tasks.process_histalp_data",
"cmocean.tools.get_dict",
"matplotlib.gridspec.GridSpec",
"matplotlib.use",
"numpy.nanmax",
"oggm.workflow.execute_entity_task",
"oggm.utils.compile_run_output",
"relic.preprocessing.MERGEDICT.get",
"oggm.utils.date_to_floatyear",
"oggm.GlacierDirectory",
"oggm.workflow.init_glacier_directories",
"numpy.where",
"numpy.array",
"colorspace.sequential_hcl",
"oggm.utils.gettempdir",
"shutil.copytree",
"oggm.utils.mkdir",
"oggm.graphics.plot_centerlines"
] |
[((19, 42), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (33, 42), False, 'import matplotlib\n'), ((923, 958), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '[20, 7]'}), '(1, 3, figsize=[20, 7])\n', (935, 958), True, 'import matplotlib.pyplot as plt\n'), ((5423, 5443), 'relic.preprocessing.name_plus_id', 'name_plus_id', (['rgi_id'], {}), '(rgi_id)\n', (5435, 5443), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((5607, 5654), 'os.path.join', 'os.path.join', (['pout', "('calibration_%s.png' % glid)"], {}), "(pout, 'calibration_%s.png' % glid)\n", (5619, 5654), False, 'import os\n'), ((11319, 11387), 'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage', 'fn99', 'meta'], {}), '(rgi, histalp_storage, comit_storage, fn99, meta)\n', (11338, 11387), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((11455, 11523), 'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage', 'fn85', 'meta'], {}), '(rgi, histalp_storage, comit_storage, fn85, meta)\n', (11474, 11523), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((11591, 11659), 'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage', 'fn70', 'meta'], {}), '(rgi, histalp_storage, comit_storage, fn70, meta)\n', (11610, 11659), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((11687, 11719), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '[20, 7]'}), '(1, figsize=[20, 7])\n', (11699, 11719), True, 'import matplotlib.pyplot as plt\n'), ((14065, 14141), 'relic.postprocessing.get_ensemble_length', 
'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage_noseed', 'efn99', 'meta'], {}), '(rgi, histalp_storage, comit_storage_noseed, efn99, meta)\n', (14084, 14141), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((14915, 14991), 'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage_noseed', 'efn70', 'meta'], {}), '(rgi, histalp_storage, comit_storage_noseed, efn70, meta)\n', (14934, 14991), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((15769, 15845), 'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage_noseed', 'efn85', 'meta'], {}), '(rgi, histalp_storage, comit_storage_noseed, efn85, meta)\n', (15788, 15845), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((17046, 17063), 'relic.preprocessing.name_plus_id', 'name_plus_id', (['rgi'], {}), '(rgi)\n', (17058, 17063), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((17638, 17679), 'os.path.join', 'os.path.join', (['pout', "('commit_%s.png' % rgi)"], {}), "(pout, 'commit_%s.png' % rgi)\n", (17650, 17679), False, 'import os\n'), ((18735, 18767), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '[20, 7]'}), '(1, figsize=[20, 7])\n', (18747, 18767), True, 'import matplotlib.pyplot as plt\n'), ((20598, 20666), 'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage', 'fn99', 'meta'], {}), '(rgi, histalp_storage, comit_storage, fn99, meta)\n', (20617, 20666), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((21357, 21425), 
'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage', 'fn70', 'meta'], {}), '(rgi, histalp_storage, comit_storage, fn70, meta)\n', (21376, 21425), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((22107, 22175), 'relic.postprocessing.get_ensemble_length', 'get_ensemble_length', (['rgi', 'histalp_storage', 'comit_storage', 'fn85', 'meta'], {}), '(rgi, histalp_storage, comit_storage, fn85, meta)\n', (22126, 22175), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((23116, 23133), 'relic.preprocessing.name_plus_id', 'name_plus_id', (['rgi'], {}), '(rgi)\n', (23128, 23133), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((23578, 23617), 'os.path.join', 'os.path.join', (['pout', "('proj_%s.png' % rgi)"], {}), "(pout, 'proj_%s.png' % rgi)\n", (23590, 23617), False, 'import os\n'), ((23939, 23955), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (23951, 23955), True, 'import pandas as pd\n'), ((23967, 23983), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (23979, 23983), True, 'import pandas as pd\n'), ((24717, 24733), 'oggm.cfg.initialize', 'cfg.initialize', ([], {}), '()\n', (24731, 24733), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((24743, 24771), 'oggm.utils.gettempdir', 'utils.gettempdir', ([], {'reset': '(True)'}), '(reset=True)\n', (24759, 24771), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((24810, 24837), 'oggm.utils.mkdir', 'utils.mkdir', (['wd'], {'reset': '(True)'}), '(wd, reset=True)\n', (24821, 24837), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((25112, 25189), 'os.path.join', 'os.path.join', (["cfg.PATHS['working_dir']", '"""per_glacier"""', 'rgi[:8]', 'rgi[:11]', 'rgi'], {}), "(cfg.PATHS['working_dir'], 'per_glacier', rgi[:8], 
rgi[:11], rgi)\n", (25124, 25189), False, 'import os\n'), ((25221, 25258), 'shutil.copytree', 'shutil.copytree', (['storage_dir', 'new_dir'], {}), '(storage_dir, new_dir)\n', (25236, 25258), False, 'import shutil\n'), ((25270, 25291), 'oggm.GlacierDirectory', 'GlacierDirectory', (['rgi'], {}), '(rgi)\n', (25286, 25291), False, 'from oggm import cfg, utils, GlacierDirectory\n'), ((25301, 25392), 'oggm.core.massbalance.MultipleFlowlineMassBalance', 'MultipleFlowlineMassBalance', (['gdir'], {'filename': '"""climate_monthly"""', 'check_calib_params': '(False)'}), "(gdir, filename='climate_monthly',\n check_calib_params=False)\n", (25328, 25392), False, 'from oggm.core.massbalance import MultipleFlowlineMassBalance\n'), ((25594, 25608), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (25606, 25608), True, 'import pandas as pd\n'), ((25622, 25636), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (25634, 25636), True, 'import pandas as pd\n'), ((25650, 25666), 'numpy.arange', 'np.arange', (['(9)', '(12)'], {}), '(9, 12)\n', (25659, 25666), True, 'import numpy as np\n'), ((26439, 26455), 'oggm.cfg.initialize', 'cfg.initialize', ([], {}), '()\n', (26453, 26455), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((26465, 26493), 'oggm.utils.gettempdir', 'utils.gettempdir', ([], {'reset': '(True)'}), '(reset=True)\n', (26481, 26493), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((26532, 26559), 'oggm.utils.mkdir', 'utils.mkdir', (['wd'], {'reset': '(True)'}), '(wd, reset=True)\n', (26543, 26559), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((26959, 26991), 'oggm.tasks.process_histalp_data', 'tasks.process_histalp_data', (['gdir'], {}), '(gdir)\n', (26985, 26991), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((27114, 27161), 'oggm.core.massbalance.PastMassBalance', 'PastMassBalance', (['gdir'], {'check_calib_params': '(False)'}), '(gdir, check_calib_params=False)\n', (27129, 27161), False, 'from oggm.core.massbalance 
import PastMassBalance\n'), ((27172, 27186), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (27184, 27186), True, 'import pandas as pd\n'), ((27197, 27211), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (27209, 27211), True, 'import pandas as pd\n'), ((27226, 27247), 'numpy.arange', 'np.arange', (['(1870)', '(2015)'], {}), '(1870, 2015)\n', (27235, 27247), True, 'import numpy as np\n'), ((28292, 28308), 'oggm.core.flowline.FileModel', 'FileModel', (['mfile'], {}), '(mfile)\n', (28301, 28308), False, 'from oggm.core.flowline import FileModel\n'), ((28550, 28567), 'relic.preprocessing.name_plus_id', 'name_plus_id', (['rgi'], {}), '(rgi)\n', (28562, 28567), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((28582, 28596), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28594, 28596), True, 'import pandas as pd\n'), ((28610, 28624), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28622, 28624), True, 'import pandas as pd\n'), ((28639, 28653), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28651, 28653), True, 'import pandas as pd\n'), ((28666, 28680), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28678, 28680), True, 'import pandas as pd\n'), ((28695, 28709), 'numpy.arange', 'np.arange', (['(999)'], {}), '(999)\n', (28704, 28709), True, 'import numpy as np\n'), ((29421, 29515), 'os.path.join', 'os.path.join', (['histalp_storage', 'rgi', '"""00"""', 'rgi[:8]', 'rgi[:11]', 'rgi', '"""model_run_spinup_00.nc"""'], {}), "(histalp_storage, rgi, '00', rgi[:8], rgi[:11], rgi,\n 'model_run_spinup_00.nc')\n", (29433, 29515), False, 'import os\n'), ((29549, 29563), 'oggm.core.flowline.FileModel', 'FileModel', (['fn2'], {}), '(fn2)\n', (29558, 29563), False, 'from oggm.core.flowline import FileModel\n'), ((30924, 30956), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '[15, 9]'}), '(1, figsize=[15, 9])\n', (30936, 30956), True, 'import matplotlib.pyplot as plt\n'), ((32410, 32448), 
'os.path.join', 'os.path.join', (['pout', "('profile_%s' % rgi)"], {}), "(pout, 'profile_%s' % rgi)\n", (32422, 32448), False, 'import os\n'), ((34616, 34656), 'oggm.cfg.initialize', 'cfg.initialize', ([], {'logging_level': '"""WORKFLOW"""'}), "(logging_level='WORKFLOW')\n", (34630, 34656), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((34688, 34740), 'oggm.utils.gettempdir', 'utils.gettempdir', ([], {'dirname': '"""OGGM-merging"""', 'reset': '(True)'}), "(dirname='OGGM-merging', reset=True)\n", (34704, 34740), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((35065, 35161), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (["['RGI60-11.02709', 'RGI60-11.02715']"], {'from_prepro_level': '(3)'}), "(['RGI60-11.02709', 'RGI60-11.02715'],\n from_prepro_level=3)\n", (35098, 35161), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((35255, 35323), 'oggm.workflow.execute_entity_task', 'workflow.execute_entity_task', (['tasks.init_present_time_glacier', 'gdirs'], {}), '(tasks.init_present_time_glacier, gdirs)\n', (35283, 35323), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((35343, 35458), 'oggm.workflow.merge_glacier_tasks', 'workflow.merge_glacier_tasks', (['gdirs', '"""RGI60-11.02709"""'], {'return_all': '(False)', 'filename': '"""climate_monthly"""', 'buffer': '(2.5)'}), "(gdirs, 'RGI60-11.02709', return_all=False,\n filename='climate_monthly', buffer=2.5)\n", (35371, 35458), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((35645, 35681), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[20, 10]'}), '(1, 2, figsize=[20, 10])\n', (35657, 35681), True, 'import matplotlib.pyplot as plt\n'), ((35686, 35740), 'oggm.graphics.plot_centerlines', 'plot_centerlines', (['montmine'], {'ax': 'ax1', 'use_flowlines': '(True)'}), '(montmine, ax=ax1, use_flowlines=True)\n', (35702, 35740), False, 'from oggm.graphics import plot_centerlines\n'), ((35912, 35976), 
'oggm.graphics.plot_centerlines', 'plot_centerlines', (['gdirs_merged'], {'ax': 'ax2', 'use_model_flowlines': '(True)'}), '(gdirs_merged, ax=ax2, use_model_flowlines=True)\n', (35928, 35976), False, 'from oggm.graphics import plot_centerlines\n'), ((36420, 36461), 'os.path.join', 'os.path.join', (['pout', '"""merged_montmine.png"""'], {}), "(pout, 'merged_montmine.png')\n", (36432, 36461), False, 'import os\n'), ((36651, 36759), 'oggm.tasks.run_constant_climate', 'tasks.run_constant_climate', (['montmine'], {'nyears': 'years', 'output_filesuffix': '"""_entity"""', 'temperature_bias': 'tbias'}), "(montmine, nyears=years, output_filesuffix=\n '_entity', temperature_bias=tbias)\n", (36677, 36759), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((36833, 36903), 'oggm.utils.compile_run_output', 'utils.compile_run_output', (['[montmine]'], {'path': '(False)', 'filesuffix': '"""_entity"""'}), "([montmine], path=False, filesuffix='_entity')\n", (36857, 36903), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((37005, 37153), 'oggm.tasks.run_constant_climate', 'tasks.run_constant_climate', (['gdirs_merged'], {'nyears': 'years', 'output_filesuffix': '"""_merged"""', 'temperature_bias': 'tbias', 'climate_filename': '"""climate_monthly"""'}), "(gdirs_merged, nyears=years, output_filesuffix=\n '_merged', temperature_bias=tbias, climate_filename='climate_monthly')\n", (37031, 37153), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((37258, 37332), 'oggm.utils.compile_run_output', 'utils.compile_run_output', (['[gdirs_merged]'], {'path': '(False)', 'filesuffix': '"""_merged"""'}), "([gdirs_merged], path=False, filesuffix='_merged')\n", (37282, 37332), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((37455, 37564), 'oggm.tasks.run_constant_climate', 'tasks.run_constant_climate', (['montmine'], {'nyears': 'years', 'output_filesuffix': '"""_entity1"""', 'temperature_bias': 'tbias'}), "(montmine, nyears=years, output_filesuffix=\n '_entity1', 
temperature_bias=tbias)\n", (37481, 37564), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((37639, 37710), 'oggm.utils.compile_run_output', 'utils.compile_run_output', (['[montmine]'], {'path': '(False)', 'filesuffix': '"""_entity1"""'}), "([montmine], path=False, filesuffix='_entity1')\n", (37663, 37710), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((38009, 38147), 'oggm.tasks.run_constant_climate', 'tasks.run_constant_climate', (['montmine'], {'nyears': 'years', 'output_filesuffix': '"""_entity2"""', 'init_model_fls': 'tmp_mine.fls', 'temperature_bias': 'tbias'}), "(montmine, nyears=years, output_filesuffix=\n '_entity2', init_model_fls=tmp_mine.fls, temperature_bias=tbias)\n", (38035, 38147), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((38253, 38324), 'oggm.utils.compile_run_output', 'utils.compile_run_output', (['[montmine]'], {'path': '(False)', 'filesuffix': '"""_entity2"""'}), "([montmine], path=False, filesuffix='_entity2')\n", (38277, 38324), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((38558, 38742), 'oggm.tasks.run_constant_climate', 'tasks.run_constant_climate', (['gdirs_merged'], {'nyears': 'years', 'output_filesuffix': '"""_merged2"""', 'init_model_fls': 'tmp_merged.fls', 'temperature_bias': 'tbias', 'climate_filename': '"""climate_monthly"""'}), "(gdirs_merged, nyears=years, output_filesuffix=\n '_merged2', init_model_fls=tmp_merged.fls, temperature_bias=tbias,\n climate_filename='climate_monthly')\n", (38584, 38742), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((38875, 38950), 'oggm.utils.compile_run_output', 'utils.compile_run_output', (['[gdirs_merged]'], {'path': '(False)', 'filesuffix': '"""_merged2"""'}), "([gdirs_merged], path=False, filesuffix='_merged2')\n", (38899, 38950), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((39016, 39051), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[20, 7]'}), '(1, 2, figsize=[20, 7])\n', (39028, 
39051), True, 'import matplotlib.pyplot as plt\n'), ((40299, 40351), 'os.path.join', 'os.path.join', (['pout', '"""merged_montmine_timeseries.png"""'], {}), "(pout, 'merged_montmine_timeseries.png')\n", (40311, 40351), False, 'import os\n'), ((40452, 40488), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '[20, 15]'}), '(2, 2, figsize=[20, 15])\n', (40464, 40488), True, 'import matplotlib.pyplot as plt\n'), ((43948, 43964), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (43959, 43964), False, 'from collections import defaultdict\n'), ((43978, 43994), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (43989, 43994), False, 'from collections import defaultdict\n'), ((44008, 44024), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (44019, 44024), False, 'from collections import defaultdict\n'), ((44041, 44055), 'relic.preprocessing.GLCDICT.keys', 'GLCDICT.keys', ([], {}), '()\n', (44053, 44055), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((44534, 44569), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '[20, 7]'}), '(1, 3, figsize=[20, 7])\n', (44546, 44569), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1149), 'numpy.array', 'np.array', (['[-1400, -1200, -1000, -800, -600, -400, -200, -100, 0, 100, 200, 400, 600, \n 800, 1000]'], {}), '([-1400, -1200, -1000, -800, -600, -400, -200, -100, 0, 100, 200, \n 400, 600, 800, 1000])\n', (1057, 1149), True, 'import numpy as np\n'), ((1219, 1244), 'numpy.arange', 'np.arange', (['(0.5)', '(4.1)', '(0.25)'], {}), '(0.5, 4.1, 0.25)\n', (1228, 1244), True, 'import numpy as np\n'), ((1277, 1299), 'numpy.arange', 'np.arange', (['(1)', '(4.1)', '(0.5)'], {}), '(1, 4.1, 0.5)\n', (1286, 1299), True, 'import numpy as np\n'), ((1581, 1635), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': 'varcols[var]', 'index': 'df.index'}), '([], columns=varcols[var], index=df.index)\n', (1593, 
1635), True, 'import pandas as pd\n'), ((3748, 3792), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'normalize', 'cmap': 'cmap'}), '(norm=normalize, cmap=cmap)\n', (3765, 3792), True, 'import matplotlib.cm as cm\n'), ((3810, 3857), 'mpl_toolkits.axes_grid1.inset_locator.inset_axes', 'inset_axes', (['ax'], {'width': '"""3%"""', 'height': '"""40%"""', 'loc': '(3)'}), "(ax, width='3%', height='40%', loc=3)\n", (3820, 3857), False, 'from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n'), ((3873, 3943), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['scalarmappaple'], {'cax': 'cbaxes', 'label': 'lbl', 'boundaries': 'bounds'}), '(scalarmappaple, cax=cbaxes, label=lbl, boundaries=bounds)\n', (3885, 3943), True, 'import matplotlib.pyplot as plt\n'), ((5863, 5890), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 7]'}), '(figsize=[20, 7])\n', (5873, 5890), True, 'import matplotlib.pyplot as plt\n'), ((5905, 5919), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(1)', '(4)'], {}), '(1, 4)\n', (5913, 5919), False, 'from matplotlib.gridspec import GridSpec\n'), ((6764, 6839), 'relic.postprocessing.optimize_cov', 'optimize_cov', (['df.loc[:, maes.index[:150]]', "df.loc[:, 'obs']", 'glid'], {'minuse': '(5)'}), "(df.loc[:, maes.index[:150]], df.loc[:, 'obs'], glid, minuse=5)\n", (6776, 6839), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((7173, 7211), 'relic.postprocessing.calc_coverage', 'calc_coverage', (['df', 'idx2plot', "df['obs']"], {}), "(df, idx2plot, df['obs'])\n", (7186, 7211), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((8019, 8039), 'relic.preprocessing.name_plus_id', 'name_plus_id', (['rgi_id'], {}), '(rgi_id)\n', (8031, 8039), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((10468, 10511), 'os.path.join', 
'os.path.join', (['pout', "('histalp_%s.png' % glid)"], {}), "(pout, 'histalp_%s.png' % glid)\n", (10480, 10511), False, 'import os\n'), ((18390, 18460), 'relic.postprocessing.get_rcp_ensemble_length', 'get_rcp_ensemble_length', (['rgi', 'histalp_storage', 'proj_storage', 'rcp', 'meta'], {}), '(rgi, histalp_storage, proj_storage, rcp, meta)\n', (18413, 18460), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((24282, 24297), 'oggm.core.flowline.FileModel', 'FileModel', (['fnc1'], {}), '(fnc1)\n', (24291, 24297), False, 'from oggm.core.flowline import FileModel\n'), ((24316, 24331), 'oggm.core.flowline.FileModel', 'FileModel', (['fnc2'], {}), '(fnc2)\n', (24325, 24331), False, 'from oggm.core.flowline import FileModel\n'), ((24349, 24368), 'numpy.arange', 'np.arange', (['(270)', '(301)'], {}), '(270, 301)\n', (24358, 24368), True, 'import numpy as np\n'), ((25685, 25706), 'numpy.arange', 'np.arange', (['(1870)', '(1901)'], {}), '(1870, 1901)\n', (25694, 25706), True, 'import numpy as np\n'), ((25968, 25989), 'numpy.arange', 'np.arange', (['(1984)', '(2015)'], {}), '(1984, 2015)\n', (25977, 25989), True, 'import numpy as np\n'), ((27050, 27068), 'oggm.utils.ncDataset', 'utils.ncDataset', (['f'], {}), '(f)\n', (27065, 27068), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((27266, 27282), 'numpy.arange', 'np.arange', (['(9)', '(12)'], {}), '(9, 12)\n', (27275, 27282), True, 'import numpy as np\n'), ((30788, 30809), 'numpy.isfinite', 'np.isfinite', (['mean1850'], {}), '(mean1850)\n', (30799, 30809), True, 'import numpy as np\n'), ((30822, 30841), 'numpy.nanmax', 'np.nanmax', (['mean1850'], {}), '(mean1850)\n', (30831, 30841), True, 'import numpy as np\n'), ((32784, 32813), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[20, 7]'}), '(figsize=[20, 7])\n', (32796, 32813), True, 'import matplotlib.pyplot as plt\n'), ((33913, 33933), 
'relic.preprocessing.name_plus_id', 'name_plus_id', (['rgi_id'], {}), '(rgi_id)\n', (33925, 33933), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((34492, 34531), 'os.path.join', 'os.path.join', (['pout', "('all_%s.png' % glid)"], {}), "(pout, 'all_%s.png' % glid)\n", (34504, 34531), False, 'import os\n'), ((34925, 34999), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (["['RGI60-11.02709']"], {'from_prepro_level': '(3)'}), "(['RGI60-11.02709'], from_prepro_level=3)\n", (34958, 34999), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((43863, 43906), 'os.path.join', 'os.path.join', (['pout', '"""climate_vs_length.png"""'], {}), "(pout, 'climate_vs_length.png')\n", (43875, 43906), False, 'import os\n'), ((44092, 44110), 'relic.preprocessing.MERGEDICT.get', 'MERGEDICT.get', (['glc'], {}), '(glc)\n', (44105, 44110), False, 'from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT\n'), ((44164, 44201), 'os.path.join', 'os.path.join', (['pin', "('runs_%s.p' % glid)"], {}), "(pin, 'runs_%s.p' % glid)\n", (44176, 44201), False, 'import os\n'), ((45312, 45343), 'os.path.join', 'os.path.join', (['pout', '"""histo.png"""'], {}), "(pout, 'histo.png')\n", (45324, 45343), False, 'import os\n'), ((1765, 1798), 'ast.literal_eval', 'ast.literal_eval', (["('{' + run + '}')"], {}), "('{' + run + '}')\n", (1781, 1798), False, 'import ast\n'), ((2299, 2334), 'matplotlib.colors.Normalize', 'mcolors.Normalize', ([], {'vmin': '(0)', 'vmax': '(4.5)'}), '(vmin=0, vmax=4.5)\n', (2316, 2334), True, 'import matplotlib.colors as mcolors\n'), ((2398, 2425), 'numpy.arange', 'np.arange', (['(0.375)', '(4.2)', '(0.25)'], {}), '(0.375, 4.2, 0.25)\n', (2407, 2425), True, 'import numpy as np\n'), ((2450, 2470), 'numpy.arange', 'np.arange', (['(1)', '(4.1)', '(1)'], {}), '(1, 4.1, 1)\n', (2459, 2470), True, 'import numpy as np\n'), ((5283, 5308), 'numpy.arange', 'np.arange', (['(1880)', '(2010)', '(40)'], {}), '(1880, 
2010, 40)\n', (5292, 5308), True, 'import numpy as np\n'), ((6267, 6300), 'ast.literal_eval', 'ast.literal_eval', (["('{' + run + '}')"], {}), "('{' + run + '}')\n", (6283, 6300), False, 'import ast\n'), ((9153, 9186), 'ast.literal_eval', 'ast.literal_eval', (["('{' + run + '}')"], {}), "('{' + run + '}')\n", (9169, 9186), False, 'import ast\n'), ((10131, 10158), 'numpy.arange', 'np.arange', (['(-1400)', '(1100)', '(400)'], {}), '(-1400, 1100, 400)\n', (10140, 10158), True, 'import numpy as np\n'), ((18235, 18256), 'numpy.arange', 'np.arange', (['(1850)', '(2101)'], {}), '(1850, 2101)\n', (18244, 18256), True, 'import numpy as np\n'), ((18296, 18317), 'numpy.arange', 'np.arange', (['(1850)', '(2101)'], {}), '(1850, 2101)\n', (18305, 18317), True, 'import numpy as np\n'), ((25729, 25758), 'oggm.utils.date_to_floatyear', 'utils.date_to_floatyear', (['y', 'i'], {}), '(y, i)\n', (25752, 25758), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((27305, 27334), 'oggm.utils.date_to_floatyear', 'utils.date_to_floatyear', (['y', 'i'], {}), '(y, i)\n', (27328, 27334), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((27481, 27496), 'numpy.arange', 'np.arange', (['(3)', '(6)'], {}), '(3, 6)\n', (27490, 27496), True, 'import numpy as np\n'), ((29019, 29032), 'oggm.core.flowline.FileModel', 'FileModel', (['fn'], {}), '(fn)\n', (29028, 29032), False, 'from oggm.core.flowline import FileModel\n'), ((29897, 29929), 'numpy.where', 'np.where', (['(mean1850 - meanbed < 1)'], {}), '(mean1850 - meanbed < 1)\n', (29905, 29929), True, 'import numpy as np\n'), ((30306, 30338), 'numpy.where', 'np.where', (['(mean2003 - meanbed < 1)'], {}), '(mean2003 - meanbed < 1)\n', (30314, 30338), True, 'import numpy as np\n'), ((30619, 30650), 'numpy.where', 'np.where', (['(initsfc - meanbed < 1)'], {}), '(initsfc - meanbed < 1)\n', (30627, 30650), True, 'import numpy as np\n'), ((32943, 32976), 'ast.literal_eval', 'ast.literal_eval', (["('{' + run + '}')"], {}), "('{' + run + 
'}')\n", (32959, 32976), False, 'import ast\n'), ((44334, 44367), 'ast.literal_eval', 'ast.literal_eval', (["('{' + run + '}')"], {}), "('{' + run + '}')\n", (44350, 44367), False, 'import ast\n'), ((1817, 1875), 'numpy.isclose', 'np.isclose', (['para[notvars[0]]', 'papar[notvars[0]]'], {'atol': '(0.01)'}), '(para[notvars[0]], papar[notvars[0]], atol=0.01)\n', (1827, 1875), True, 'import numpy as np\n'), ((1930, 1988), 'numpy.isclose', 'np.isclose', (['para[notvars[1]]', 'papar[notvars[1]]'], {'atol': '(0.01)'}), '(para[notvars[1]], papar[notvars[1]], atol=0.01)\n', (1940, 1988), True, 'import numpy as np\n'), ((2217, 2256), 'cmocean.tools.get_dict', 'cmocean.tools.get_dict', (['cmocean.cm.deep'], {}), '(cmocean.cm.deep)\n', (2239, 2256), False, 'import cmocean\n'), ((2678, 2713), 'matplotlib.colors.Normalize', 'mcolors.Normalize', ([], {'vmin': '(0)', 'vmax': '(4.5)'}), '(vmin=0, vmax=4.5)\n', (2695, 2713), True, 'import matplotlib.colors as mcolors\n'), ((2777, 2802), 'numpy.arange', 'np.arange', (['(0.75)', '(4.3)', '(0.5)'], {}), '(0.75, 4.3, 0.5)\n', (2786, 2802), True, 'import numpy as np\n'), ((2827, 2847), 'numpy.arange', 'np.arange', (['(1)', '(4.1)', '(1)'], {}), '(1, 4.1, 1)\n', (2836, 2847), True, 'import numpy as np\n'), ((4465, 4502), 'numpy.where', 'np.where', (['(dfvar.columns == papar[var])'], {}), '(dfvar.columns == papar[var])\n', (4473, 4502), True, 'import numpy as np\n'), ((6713, 6729), 'relic.postprocessing.mae_weighted', 'mae_weighted', (['df'], {}), '(df)\n', (6725, 6729), False, 'from relic.postprocessing import mae_weighted, optimize_cov, calc_coverage, get_ensemble_length, get_rcp_ensemble_length\n'), ((8072, 8111), 'pandas.concat', 'pd.concat', (["[ensmean, df['obs']]"], {'axis': '(1)'}), "([ensmean, df['obs']], axis=1)\n", (8081, 8111), True, 'import pandas as pd\n'), ((9035, 9064), 'colorspace.sequential_hcl', 'sequential_hcl', (['"""Blue-Yellow"""'], {}), "('Blue-Yellow')\n", (9049, 9064), False, 'from colorspace import 
sequential_hcl\n'), ((10707, 10745), 'os.path.join', 'os.path.join', (['pout', "('runs_%s.p' % glid)"], {}), "(pout, 'runs_%s.p' % glid)\n", (10719, 10745), False, 'import os\n'), ((27523, 27552), 'oggm.utils.date_to_floatyear', 'utils.date_to_floatyear', (['y', 'i'], {}), '(y, i)\n', (27546, 27552), False, 'from oggm import cfg, utils, workflow, tasks\n'), ((2594, 2635), 'cmocean.tools.get_dict', 'cmocean.tools.get_dict', (['cmocean.cm.matter'], {}), '(cmocean.cm.matter)\n', (2616, 2635), False, 'import cmocean\n'), ((3121, 3187), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mcolors.LinearSegmentedColormap.from_list', (['"""mcm"""', 'cmaplist', 'cmap.N'], {}), "('mcm', cmaplist, cmap.N)\n", (3162, 3187), True, 'import matplotlib.colors as mcolors\n'), ((3273, 3328), 'numpy.array', 'np.array', (['[-1400, -1000, -600, -200, 0, 200, 600, 1000]'], {}), '([-1400, -1000, -600, -200, 0, 200, 600, 1000])\n', (3281, 3328), True, 'import numpy as np\n'), ((3384, 3490), 'numpy.array', 'np.array', (['[-1500, -1300, -1100, -900, -700, -500, -300, -150, -50, 50, 100, 300, 500,\n 700, 900, 1100]'], {}), '([-1500, -1300, -1100, -900, -700, -500, -300, -150, -50, 50, 100, \n 300, 500, 700, 900, 1100])\n', (3392, 3490), True, 'import numpy as np\n'), ((3541, 3581), 'matplotlib.colors.Normalize', 'mcolors.Normalize', ([], {'vmin': '(-1600)', 'vmax': '(1600)'}), '(vmin=-1600, vmax=1600)\n', (3558, 3581), True, 'import matplotlib.colors as mcolors\n'), ((6318, 6360), 'numpy.abs', 'np.abs', (["(para['prcp_scaling_factor'] - 1.75)"], {}), "(para['prcp_scaling_factor'] - 1.75)\n", (6324, 6360), True, 'import numpy as np\n'), ((32994, 33036), 'numpy.abs', 'np.abs', (["(para['prcp_scaling_factor'] - 1.75)"], {}), "(para['prcp_scaling_factor'] - 1.75)\n", (33000, 33036), True, 'import numpy as np\n'), ((2930, 2972), 'cmocean.tools.get_dict', 'cmocean.tools.get_dict', (['cmocean.cm.balance'], {}), '(cmocean.cm.balance)\n', (2952, 2972), False, 'import cmocean\n'), ((30874, 30895), 
'numpy.isfinite', 'np.isfinite', (['mean1850'], {}), '(mean1850)\n', (30885, 30895), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from . import common
def main(debug=False):
    """Render figure 3: runtime heatmaps for the four methods I, A, S, C.

    For each method label, reads the precision and timing CSVs from
    ``results/``, averages over the (v, x) grid, converts runtime to
    milliseconds, and masks out cells whose mean log-error is >= 3.
    Draws a 2x2 grid of heatmaps with one shared colorbar and writes the
    result to ``figs/fig3.pdf``.

    Parameters
    ----------
    debug : bool
        Passed through to ``common.figure`` as ``box`` to show layout boxes.
    """
    labels = ['I', 'A', 'S', 'C']
    suffixes = ['', '', '', '']
    columns = []
    for label, suffix in zip(labels, suffixes):
        err = pd.read_csv(f'results/logk_prec_{label}{suffix}.csv')
        err = err.groupby(['v', 'x'])['log_err'].mean()
        elapsed = pd.read_csv(f'results/logk_time_{label}{suffix}.csv')
        elapsed = elapsed.groupby(['v', 'x'])['time'].mean()
        joined = pd.concat([err, elapsed], axis=1)
        # keep runtime (in ms) only where the error is acceptable
        joined['time'] = np.where(joined['log_err'] < 3,
                                  1000 * joined['time'], np.nan)
        series = joined['time']
        series.name = label
        columns.append(series)
    table = pd.concat(columns, axis=1)
    panel = [['I', 'A'], ['S', 'C']]
    anchor = [[[0.1, 0.85], [0.85, 0.1]], [[0.1, 0.1], [0.1, 0.85]]]
    fig = common.figure(figsize=(5.5, 4), box=debug)
    axes = fig.subplots(
        2, 3, sharex=True, sharey=True,
        gridspec_kw=dict(width_ratios=(1, 1, 0.15)),
    )
    # third column only reserves room for the shared colorbar
    axes[0, 2].set_visible(False)
    axes[1, 2].set_visible(False)
    cbar_ax = fig.add_axes([0.93, 0.1, 0.02, 0.85])
    xticks = [0, 1, 5, 10, 50]
    yticks = [0.1, 0.5, 1, 5, 10, 50]
    cmap = plt.get_cmap('Greys').copy()
    cmap.set_bad(color='gray')  # masked (too-inaccurate) cells show gray
    for row in range(2):
        for col in range(2):
            grid = table[panel[row][col]].unstack(0)
            # only the first panel draws into the shared colorbar axes
            if row == 0 and col == 0:
                extra = dict(cbar_ax=cbar_ax)
            else:
                extra = dict(cbar=False)
            sns.heatmap(grid, vmin=0, vmax=28, cmap=cmap,
                        ax=axes[row, col], **extra)
            a = axes[row, col]
            a.invert_yaxis()
            a.text(*anchor[row][col], panel[row][col], transform=a.transAxes)
            # ticks sit on the log-spaced heatmap grid (40 cells per decade)
            a.set_xticks([40 * np.log10(x + 1) for x in xticks])
            a.set_xticklabels([f"${k}$" for k in xticks], rotation=0)
            a.xaxis.set_ticks_position('both')
            a.set_yticks([40 * (np.log10(y) + 1) for y in yticks])
            a.set_yticklabels([f"${k}$" for k in yticks])
            a.yaxis.set_ticks_position('both')
            a.set_xlabel('$v$' if row == 1 else '')
            a.set_ylabel('$x$' if col == 0 else '')
    colorbar = axes[0, 0].collections[0].colorbar
    colorbar.set_ticks([0, 10, 20])
    colorbar.set_ticklabels([f'${{{l}}}$' for l in [0, 10, 20]])
    fig.savefig('figs/fig3.pdf')
if __name__ == '__main__':
    # Entry point when the script is executed directly.
    main(debug=False)
|
[
"seaborn.heatmap",
"matplotlib.pyplot.get_cmap",
"pandas.read_csv",
"numpy.where",
"numpy.log10",
"pandas.concat"
] |
[((690, 712), 'pandas.concat', 'pd.concat', (['df0'], {'axis': '(1)'}), '(df0, axis=1)\n', (699, 712), True, 'import pandas as pd\n'), ((266, 310), 'pandas.read_csv', 'pd.read_csv', (['f"""results/logk_prec_{n}{s}.csv"""'], {}), "(f'results/logk_prec_{n}{s}.csv')\n", (277, 310), True, 'import pandas as pd\n'), ((384, 428), 'pandas.read_csv', 'pd.read_csv', (['f"""results/logk_time_{n}{s}.csv"""'], {}), "(f'results/logk_time_{n}{s}.csv')\n", (395, 428), True, 'import pandas as pd\n'), ((498, 529), 'pandas.concat', 'pd.concat', (['[prec, time]'], {'axis': '(1)'}), '([prec, time], axis=1)\n', (507, 529), True, 'import pandas as pd\n'), ((552, 608), 'numpy.where', 'np.where', (["(tmp['log_err'] < 3)", "(1000 * tmp['time'])", 'np.nan'], {}), "(tmp['log_err'] < 3, 1000 * tmp['time'], np.nan)\n", (560, 608), True, 'import numpy as np\n'), ((1184, 1205), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Greys"""'], {}), "('Greys')\n", (1196, 1205), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1542), 'seaborn.heatmap', 'sns.heatmap', (['hm'], {'vmin': '(0)', 'vmax': '(28)', 'cmap': 'cmap', 'ax': 'ax[i, j]'}), '(hm, vmin=0, vmax=28, cmap=cmap, ax=ax[i, j], **args)\n', (1489, 1542), True, 'import seaborn as sns\n'), ((1695, 1710), 'numpy.log10', 'np.log10', (['(x + 1)'], {}), '(x + 1)\n', (1703, 1710), True, 'import numpy as np\n'), ((1895, 1906), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (1903, 1906), True, 'import numpy as np\n')]
|
import argparse
from datetime import datetime
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from torch_model import SizedGenerator
import os
from tqdm import trange
from torchvision.utils import save_image, make_grid
import params as P
from utils import save_img_tensorboard, load_trained_generator, load_target_image, psnr
def output_to_imshow(v):
    """Convert a batched CHW tensor into an HWC numpy array for imshow.

    Drops the leading batch axis of *v*, detaches it from autograd, moves
    it to the CPU, and reorders the channel axis last.
    """
    arr = v.squeeze(0).detach().to('cpu').numpy()
    return arr.transpose(1, 2, 0)
def main(args):
    """Reconstruct a target image by optimizing a GAN latent vector.

    Runs ``args.n_restarts`` random restarts. Each restart seeds torch and
    numpy, initializes a latent vector ``z`` (uniform / normal / ones per
    ``args.initialization``), then minimizes the MSE between the generator
    output and the target image for ``args.n_steps`` Adam steps with cosine
    LR annealing. MSE, PSNR, and intermediate reconstructions are logged to
    TensorBoard under ``tensorboard_logs/search/{args.run_name}``, and a
    final side-by-side grid is saved as ``{args.run_name}.png``.

    Args:
        args: argparse.Namespace with run_name, image, generator_checkpoint,
            latent_dim, initialization, std, n_restarts, n_steps. The field
            ``args.skip_linear_layer`` is set as a side effect here.
    """
    logdir = f'tensorboard_logs/search/{args.run_name}'
    os.makedirs(logdir,
                exist_ok=True)  # TODO - decide whether to clobber or what?
    writer = SummaryWriter(logdir)
    # Requires a CUDA device; all tensors below live on GPU 0.
    device = 'cuda:0'
    x = load_target_image(args.image).to(device)
    save_img_tensorboard(x.squeeze(0).detach().cpu(), writer, f'original')
    g = load_trained_generator(SizedGenerator,
                               args.generator_checkpoint,
                               latent_dim=64,
                               num_filters=P.num_filters,
                               image_size=P.size,
                               num_ups=P.num_ups).to(device)
    g.eval()
    # If the search dimension differs from the generator's native latent_dim,
    # bypass the generator's own linear layer and feed our vector in directly.
    if args.latent_dim != g.latent_dim:
        args.skip_linear_layer = True
    else:
        args.skip_linear_layer = False
    if args.skip_linear_layer and args.latent_dim < 8192:
        # Then we need a new linear layer to map to dimension 8192
        linear_layer = torch.nn.Linear(args.latent_dim, 8192).to(device)
    else:
        # Identity mapping: z already has the size the generator expects.
        linear_layer = lambda x: x
    save_every_n = 50  # log an intermediate reconstruction every 50 steps
    for i in trange(args.n_restarts):
        # Seed both torch and numpy so each restart is reproducible.
        seed = i
        torch.manual_seed(seed)
        np.random.seed(seed)
        # NOTE - based on quick experiments:
        # - std=1.0 better than std=0.1 or std=0.01
        # - uniform and normal performed nearly identical
        if args.initialization == 'uniform':
            z = (2 * args.std) * torch.rand(args.latent_dim,
                                             device=device) - args.std
        elif args.initialization == 'normal':
            z = torch.randn(args.latent_dim, device=device) * args.std
        elif args.initialization == 'ones':
            # NOTE(review): mask is built on the CPU while z lives on
            # `device` — confirm this cross-device boolean assignment is
            # accepted by the torch version in use.
            mask = torch.rand(args.latent_dim) < 0.5
            z = torch.ones(args.latent_dim, device=device)
            z[mask] = -1
        else:
            raise NotImplementedError(args.initialization)
        # network only saw [-1, 1] during training
        z = torch.nn.Parameter(torch.clamp(z, -1, 1))
        z_initial = z.data.clone()
        optimizer = torch.optim.Adam([z], lr=0.05, betas=(0.5, 0.999))
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, args.n_steps)
        # Log the reconstruction produced by the untouched initial latent.
        with torch.no_grad():
            model_input = linear_layer(z_initial)
            save_img_tensorboard(
                g(model_input,
                  skip_linear_layer=args.skip_linear_layer).squeeze(
                      0).detach().cpu(), writer, f'restart_{i}/beginning')
        for j in trange(args.n_steps, leave=False):
            optimizer.zero_grad()
            model_input = linear_layer(z)
            x_hat = g(model_input,
                      skip_linear_layer=args.skip_linear_layer).squeeze(0)
            mse = F.mse_loss(x_hat, x)
            mse.backward()
            optimizer.step()
            scheduler.step()
            writer.add_scalar(f'MSE/{i}', mse, j)
            writer.add_scalar(f'PSNR/{i}', psnr(x, x_hat), j)
            if j % save_every_n == 0:
                save_img_tensorboard(
                    x_hat.squeeze(0).detach().cpu(), writer,
                    f'restart_{i}/reconstruction', j)
        save_img_tensorboard(
            x_hat.squeeze(0).detach().cpu(), writer, f'restart_{i}/final')
        save_image(make_grid([x, x_hat.squeeze(0)], nrow=2),
                   f'{args.run_name}.png')
def get_latent_dims(x):
    """Argparse type: parse *x* as an int in the inclusive range [1, 8192].

    Raises:
        ValueError: if the value falls outside [1, 8192] (argparse reports
            this as a usage error for ``--latent_dim``).
    """
    x = int(x)
    # The help text promises [1, 8192]; previously only the upper bound was
    # enforced, so 0 and negative values slipped through to torch calls.
    if not 1 <= x <= 8192:
        raise ValueError('give a latent_dim between [1, 8192]')
    return x
if __name__ == '__main__':
    # Command-line interface for the latent-vector search.
    parser = argparse.ArgumentParser()
    parser.add_argument('--generator_checkpoint',
                        default='./checkpoints/celeba_cropped/gen_ckpt.49.pt',
                        help="Path to generator checkpoint")
    parser.add_argument('--image', required=True)
    parser.add_argument('--run_name', default=datetime.now().isoformat())
    parser.add_argument('--n_restarts', type=int, default=3)
    parser.add_argument('--n_steps', type=int, default=3000)
    parser.add_argument('--initialization',
                        choices=['uniform', 'normal', 'ones'],
                        default='normal')
    parser.add_argument(
        '--std',
        type=float,
        default=1.0,
        help='for normal dist, the std. for uniform, the min and max val')
    parser.add_argument('--latent_dim',
                        type=get_latent_dims,
                        default=4096,
                        help='int between [1, 8192]')
    cli_args = parser.parse_args()
    # TODO - if the model used latent_dim=64 and you also want to reconstruct
    # from 64, does it hurt to just skip the linear layer?
    main(cli_args)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.randn",
"utils.psnr",
"torch.no_grad",
"torch.ones",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.Linear",
"datetime.datetime.now",
"utils.load_trained_generator",
"tqdm.trange",
"torch.manual_seed",
"torch.nn.functional.mse_loss",
"torch.optim.Adam",
"torch.clamp",
"torch.rand",
"os.makedirs",
"utils.load_target_image"
] |
[((568, 602), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (579, 602), False, 'import os\n'), ((678, 699), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['logdir'], {}), '(logdir)\n', (691, 699), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1591, 1614), 'tqdm.trange', 'trange', (['args.n_restarts'], {}), '(args.n_restarts)\n', (1597, 1614), False, 'from tqdm import trange\n'), ((4051, 4076), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4074, 4076), False, 'import argparse\n'), ((1641, 1664), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1658, 1664), False, 'import torch\n'), ((1673, 1693), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1687, 1693), True, 'import numpy as np\n'), ((2559, 2609), 'torch.optim.Adam', 'torch.optim.Adam', (['[z]'], {'lr': '(0.05)', 'betas': '(0.5, 0.999)'}), '([z], lr=0.05, betas=(0.5, 0.999))\n', (2575, 2609), False, 'import torch\n'), ((2630, 2697), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer', 'args.n_steps'], {}), '(optimizer, args.n_steps)\n', (2672, 2697), False, 'import torch\n'), ((3019, 3052), 'tqdm.trange', 'trange', (['args.n_steps'], {'leave': '(False)'}), '(args.n_steps, leave=False)\n', (3025, 3052), False, 'from tqdm import trange\n'), ((732, 761), 'utils.load_target_image', 'load_target_image', (['args.image'], {}), '(args.image)\n', (749, 761), False, 'from utils import save_img_tensorboard, load_trained_generator, load_target_image, psnr\n'), ((857, 1011), 'utils.load_trained_generator', 'load_trained_generator', (['SizedGenerator', 'args.generator_checkpoint'], {'latent_dim': '(64)', 'num_filters': 'P.num_filters', 'image_size': 'P.size', 'num_ups': 'P.num_ups'}), '(SizedGenerator, args.generator_checkpoint,\n latent_dim=64, num_filters=P.num_filters, image_size=P.size, num_ups=P.\n num_ups)\n', (879, 
1011), False, 'from utils import save_img_tensorboard, load_trained_generator, load_target_image, psnr\n'), ((2480, 2501), 'torch.clamp', 'torch.clamp', (['z', '(-1)', '(1)'], {}), '(z, -1, 1)\n', (2491, 2501), False, 'import torch\n'), ((2725, 2740), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2738, 2740), False, 'import torch\n'), ((3258, 3278), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['x_hat', 'x'], {}), '(x_hat, x)\n', (3268, 3278), True, 'import torch.nn.functional as F\n'), ((1459, 1497), 'torch.nn.Linear', 'torch.nn.Linear', (['args.latent_dim', '(8192)'], {}), '(args.latent_dim, 8192)\n', (1474, 1497), False, 'import torch\n'), ((3458, 3472), 'utils.psnr', 'psnr', (['x', 'x_hat'], {}), '(x, x_hat)\n', (3462, 3472), False, 'from utils import save_img_tensorboard, load_trained_generator, load_target_image, psnr\n'), ((1928, 1970), 'torch.rand', 'torch.rand', (['args.latent_dim'], {'device': 'device'}), '(args.latent_dim, device=device)\n', (1938, 1970), False, 'import torch\n'), ((2088, 2131), 'torch.randn', 'torch.randn', (['args.latent_dim'], {'device': 'device'}), '(args.latent_dim, device=device)\n', (2099, 2131), False, 'import torch\n'), ((2256, 2298), 'torch.ones', 'torch.ones', (['args.latent_dim'], {'device': 'device'}), '(args.latent_dim, device=device)\n', (2266, 2298), False, 'import torch\n'), ((4338, 4352), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4350, 4352), False, 'from datetime import datetime\n'), ((2206, 2233), 'torch.rand', 'torch.rand', (['args.latent_dim'], {}), '(args.latent_dim)\n', (2216, 2233), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import pathlib as _pl
import pandas as _pd
import s3fs as _s3fs
# import urllib as _urllib
# import html2text as _html2text
import psutil as _psutil
import numpy as _np
# import xarray as _xr
def readme():
    """Print a link to the AWS open-data readme describing the GOES products."""
    readme_url = 'https://docs.opendata.aws/noaa-goes16/cics-readme.html'
    print(f'follow link for readme: {readme_url}')
def available_products():
    """List the product folders available on AWS for GOES 16 and 17.

    Queries the public (anonymous) S3 buckets ``noaa-goes16`` and
    ``noaa-goes17``, prints whether the two product lists are identical,
    and returns a DataFrame with one column per satellite.
    """
    aws = _s3fs.S3FileSystem(anon=True)
    df = _pd.DataFrame()
    for satellite in (16, 17):
        base_folder = _pl.Path(f'noaa-goes{satellite}')
        found = aws.glob(base_folder.joinpath('*').as_posix())
        # keep folder names only; drop stray documentation pdfs
        df[satellite] = [p.split('/')[-1] for p in found if '.pdf' not in p]
    ins = '' if _np.all(df[16] == df[17]) else ' !!_NOT_!!'
    print(f'goes 16 and 17 products are{ins} identical')
    return df
class AwsQuery(object):
    """Query and download NOAA GOES satellite products from the AWS open-data buckets.

    Builds a workplan (a pandas DataFrame, one row per remote file) for the
    requested product / scan sector / time range, can estimate the disk usage
    of a download, and downloads — optionally post-processing — the files.
    """
    def __init__(self,
                 path2folder_local = '/mnt/telg/tmp/aws_tmp/',
                 satellite = '16',
                 product = 'ABI-L2-AOD',
                 scan_sector = 'C',
                 start = '2020-08-08 20:00:00',
                 end = '2020-08-09 18:00:00',
                 process = None,
                 keep_files = None,
                 ):
        """
        This will initialize a search on AWS.

        Parameters
        ----------
        path2folder_local : str, optional
            Where downloaded files are stored. The default is '/mnt/telg/tmp/aws_tmp/'.
        satellite : str, optional
            '16' (east) or '17' (west). The default is '16'.
        product : str, optional
            Product name as described at
            https://docs.opendata.aws/noaa-goes16/cics-readme.html
            but without the scan sector (a trailing sector letter is stripped
            automatically). The default is 'ABI-L2-AOD'.
        scan_sector : str, optional
            (C)onus, (F)ull_disk, (M)eso. The default is 'C'.
        start, end : str or datetime-like, optional
            Time range of interest (inclusive).
        process : dict, optional
            Still in development and might be buggy. Example:
                dict(function = lambda row: some_function(row, *args, **kwargs),
                     prefix = 'ABI_L2_AOD_processed',
                     path2processed = '/path2processed/')
        keep_files : bool, optional
            Whether to keep the raw downloaded files. Defaults to True unless
            ``process`` is given, in which case it defaults to False.

        Returns
        -------
        None.
        """
        self.satellite = satellite
        self.path2folder_aws = _pl.Path(f'noaa-goes{self.satellite}')
        self.scan_sector = scan_sector
        self.product = product
        self.start = _pd.to_datetime(start)
        self.end = _pd.to_datetime(end)
        self.path2folder_local = _pl.Path(path2folder_local)
        if isinstance(process, dict):
            self._process = True
            self._process_function = process['function']
            self._process_name_prefix = process['prefix']
            self._process_path2processed = _pl.Path(process['path2processed'])
            # default: remove raw files once processed, unless explicitly overridden
            self.keep_files = False if keep_files is None else keep_files
        else:
            self._process = False
            # fix: this branch previously left self.keep_files unset entirely
            self.keep_files = True if keep_files is None else keep_files
        self.aws = _s3fs.S3FileSystem(anon=True)
        self.aws.clear_instance_cache() # strange things happen if this is not the only query one is doing during a session
        # properties
        self._workplan = None
    @property
    def product(self):
        """str: product name without the scan-sector suffix."""
        return self._product
    @product.setter
    def product(self, value):
        # accept both 'ABI-L2-AOD' and 'ABI-L2-AODC'; strip the trailing sector letter
        if value[-1] == self.scan_sector:
            value = value[:-1]
        self._product = value
    def info_on_current_query(self):
        """Return a human-readable summary (str) of the pending download."""
        nooffiles = self.workplan.shape[0]
        if nooffiles == 0:
            info = 'no file found or all files already on disk.'
        else:
            du = self.estimate_disk_usage()
            disk_space_needed = du['disk_space_needed'] * 1e-6  # bytes -> MB
            disk_space_free_after_download = du['disk_space_free_after_download']
            info = (f'no of files: {nooffiles}\n'
                    f'estimated disk usage: {disk_space_needed:0.0f} mb\n'
                    f'remaining disk space after download: {disk_space_free_after_download:0.0f} %\n')
        return info
    def estimate_disk_usage(self, sample_size = 10):
        """Estimate the total download size by sampling ~sample_size files.

        Returns a dict with 'disk_space_needed' (bytes) and
        'disk_space_free_after_download' (percent of total disk that would
        remain free).
        """
        step_size = int(self.workplan.shape[0]/sample_size)
        if step_size < 1:
            step_size = 1
        sizes = self.workplan.iloc[::step_size].apply(lambda row: self.aws.disk_usage(row.path2file_aws), axis = 1)
        disk_space_needed = sizes.mean() * self.workplan.shape[0]
        # remaining disk space after download
        du = _psutil.disk_usage(self.path2folder_local)
        disk_space_free_after_download = 100 - (100* (du.used + disk_space_needed)/du.total )
        out = {}
        out['disk_space_needed'] = disk_space_needed
        out['disk_space_free_after_download'] = disk_space_free_after_download
        return out
    @property
    def workplan(self):
        """pandas.DataFrame: one row per file still to be downloaded/processed.

        Indexed by the file's scan-start timestamp; built lazily on first access.
        """
        if isinstance(self._workplan, type(None)):
            #### make a dataframe with one row per hour in the time range
            # (iterating hour folders directly avoids julian-day arithmetic that
            # would break for ranges spanning a year boundary)
            df = _pd.DataFrame(index = _pd.date_range(self.start, self.end, freq='h'), columns=['path'])
            # create the path to the directory of each row above (one per hour)
            product_folder = self.path2folder_aws.joinpath(f'{self.product}{self.scan_sector}')
            df['path'] = df.apply(lambda row: product_folder.joinpath(str(row.name.year)).joinpath(f'{row.name.day_of_year:03d}').joinpath(f'{row.name.hour:02d}').joinpath('*'), axis= 1)
            # get the path to each file in all the folders
            files_available = []
            for idx,row in df.iterrows():
                files_available += self.aws.glob(row.path.as_posix())
            #### Make workplan
            workplan = _pd.DataFrame([_pl.Path(f) for f in files_available], columns=['path2file_aws'])
            workplan['path2file_local'] = workplan.apply(lambda row: self.path2folder_local.joinpath(row.path2file_aws.name), axis = 1)
            #### remove if local file exists
            if not self._process:
                workplan = workplan[~workplan.apply(lambda row: row.path2file_local.is_file(), axis = 1)]
            #### get the timestamp from the aws file name
            def row2timestamp(row):
                # third-from-last underscore field is the scan start, e.g. s20202211801feff
                sos = row.path2file_aws.name.split('_')[-3]
                assert(sos[0] == 's'), f'Something needs fixing, this string ({sos}) should start with s.'
                ts = _pd.to_datetime(sos[1:-1],format = '%Y%j%H%M%S')
                return ts
            workplan.index = workplan.apply(lambda row: row2timestamp(row), axis = 1)
            #### truncate ... so far only whole days were considered, not the times in start/end
            workplan = workplan.sort_index()
            workplan = workplan.truncate(self.start, self.end)
            #### processing additions
            if self._process:
                ### add path to processed file names
                workplan["path2file_local_processed"] = workplan.apply(lambda row: self._process_path2processed.joinpath(f'{self._process_name_prefix}_{row.name.year}{row.name.month:02d}{row.name.day:02d}_{row.name.hour:02d}{row.name.minute:02d}{row.name.second:02d}.nc'), axis = 1)
                ### remove if processed file exists (fix: axis must be 1, per-row; was axis=True)
                workplan = workplan[~workplan.apply(lambda row: row.path2file_local_processed.is_file(), axis = 1)]
            self._workplan = workplan
        return self._workplan
    @workplan.setter
    def workplan(self, new_workplan):
        self._workplan = new_workplan
    @property
    def product_available_since(self):
        """pandas.Timestamp: first day for which this product exists on AWS."""
        product_folder = self.path2folder_aws.joinpath(f'{self.product}{self.scan_sector}')
        years = self.aws.glob(product_folder.joinpath('*').as_posix())
        years.sort()
        # some buckets contain a bogus '2000' year folder; skip past it
        is2000 = True
        while is2000:
            yearfolder = years.pop(0)
            firstyear = yearfolder.split('/')[-1]
            if firstyear != '2000':
                is2000 = False
        yearfolder = _pl.Path(yearfolder)
        days = self.aws.glob(yearfolder.joinpath('*').as_posix())
        days.sort()
        firstday = int(days[0].split('/')[-1])
        firstday_ts = _pd.to_datetime(firstyear) + _pd.to_timedelta(firstday, "D")
        return firstday_ts
    def download(self, test = False, overwrite = False, alternative_workplan = False,
                 error_if_low_disk_space = True):
        """
        Download all files in the workplan to the local folder.

        Parameters
        ----------
        test : bool, optional
            Stop after the first file. The default is False.
        overwrite : bool, optional
            Re-download files that already exist locally. The default is False.
        alternative_workplan : pandas.DataFrame, optional
            This will ignore the instance workplan and use the provided one
            instead. The default is False.
        error_if_low_disk_space : bool, optional
            Raise if the download would leave less than 10% of the disk free.
            The default is True.

        Returns
        -------
        Result of the last s3fs ``get`` call, or None if nothing was downloaded.
        """
        if isinstance(alternative_workplan, _pd.DataFrame):
            workplan = alternative_workplan
        else:
            workplan = self.workplan
        if error_if_low_disk_space:
            disk_space_free_after_download = self.estimate_disk_usage()['disk_space_free_after_download']
            # fix: this previously asserted `< 90`, which raised on a nearly-empty
            # disk and passed on a nearly-full one. We want to stop when less than
            # 10% would remain free (i.e. usage above 90%).
            assert disk_space_free_after_download >= 10, f"This download will bring the disk usage above 90% ({100 - disk_space_free_after_download:0.0f}% used). Turn off this error by setting error_if_low_disk_space to False."
        out = None  # fix: previously unbound (UnboundLocalError) when workplan was empty
        for idx, row in workplan.iterrows():
            if not overwrite:
                if row.path2file_local.is_file():
                    continue
            out = self.aws.get(row.path2file_aws.as_posix(), row.path2file_local.as_posix())
            if test:
                break
        return out
    def process(self):
        """Download (if necessary) and process every file in the workplan.

        Raw files are removed after processing unless ``self.keep_files`` is True.
        """
        for dt, row in self.workplan.iterrows():
            if row.path2file_local_processed.is_file():
                continue
            if not row.path2file_local.is_file():
                #### download
                self.aws.get(row.path2file_aws.as_posix(), row.path2file_local.as_posix())
            #### process; best-effort — a failing file must not abort the batch
            try:
                self._process_function(row)
            except Exception:
                print(f'error applying function on one file {row.path2file_local.name}. The raw fill will still be removed (unless keep_files is True) to avoid storage issues')
            #### remove raw file
            if not self.keep_files:
                row.path2file_local.unlink()
|
[
"pandas.DataFrame",
"pandas.date_range",
"psutil.disk_usage",
"pathlib.Path",
"s3fs.S3FileSystem",
"pandas.to_datetime",
"pandas.to_timedelta",
"numpy.all"
] |
[((502, 531), 's3fs.S3FileSystem', '_s3fs.S3FileSystem', ([], {'anon': '(True)'}), '(anon=True)\n', (520, 531), True, 'import s3fs as _s3fs\n'), ((542, 557), 'pandas.DataFrame', '_pd.DataFrame', ([], {}), '()\n', (555, 557), True, 'import pandas as _pd\n'), ((865, 890), 'numpy.all', '_np.all', (['(df[16] == df[17])'], {}), '(df[16] == df[17])\n', (872, 890), True, 'import numpy as _np\n'), ((657, 690), 'pathlib.Path', '_pl.Path', (['f"""noaa-goes{satellite}"""'], {}), "(f'noaa-goes{satellite}')\n", (665, 690), True, 'import pathlib as _pl\n'), ((2996, 3034), 'pathlib.Path', '_pl.Path', (['f"""noaa-goes{self.satellite}"""'], {}), "(f'noaa-goes{self.satellite}')\n", (3004, 3034), True, 'import pathlib as _pl\n'), ((3145, 3167), 'pandas.to_datetime', '_pd.to_datetime', (['start'], {}), '(start)\n', (3160, 3167), True, 'import pandas as _pd\n'), ((3188, 3208), 'pandas.to_datetime', '_pd.to_datetime', (['end'], {}), '(end)\n', (3203, 3208), True, 'import pandas as _pd\n'), ((3251, 3278), 'pathlib.Path', '_pl.Path', (['path2folder_local'], {}), '(path2folder_local)\n', (3259, 3278), True, 'import pathlib as _pl\n'), ((3943, 3972), 's3fs.S3FileSystem', '_s3fs.S3FileSystem', ([], {'anon': '(True)'}), '(anon=True)\n', (3961, 3972), True, 'import s3fs as _s3fs\n'), ((5853, 5895), 'psutil.disk_usage', '_psutil.disk_usage', (['self.path2folder_local'], {}), '(self.path2folder_local)\n', (5871, 5895), True, 'import psutil as _psutil\n'), ((11345, 11365), 'pathlib.Path', '_pl.Path', (['yearfolder'], {}), '(yearfolder)\n', (11353, 11365), True, 'import pathlib as _pl\n'), ((3582, 3617), 'pathlib.Path', '_pl.Path', (["process['path2processed']"], {}), "(process['path2processed'])\n", (3590, 3617), True, 'import pathlib as _pl\n'), ((11521, 11547), 'pandas.to_datetime', '_pd.to_datetime', (['firstyear'], {}), '(firstyear)\n', (11536, 11547), True, 'import pandas as _pd\n'), ((11550, 11581), 'pandas.to_timedelta', '_pd.to_timedelta', (['firstday', '"""D"""'], {}), "(firstday, 
'D')\n", (11566, 11581), True, 'import pandas as _pd\n'), ((9507, 9554), 'pandas.to_datetime', '_pd.to_datetime', (['sos[1:-1]'], {'format': '"""%Y%j%H%M%S"""'}), "(sos[1:-1], format='%Y%j%H%M%S')\n", (9522, 9554), True, 'import pandas as _pd\n'), ((7913, 7959), 'pandas.date_range', '_pd.date_range', (['self.start', 'self.end'], {'freq': '"""h"""'}), "(self.start, self.end, freq='h')\n", (7927, 7959), True, 'import pandas as _pd\n'), ((8645, 8656), 'pathlib.Path', '_pl.Path', (['f'], {}), '(f)\n', (8653, 8656), True, 'import pathlib as _pl\n')]
|
"""
CanvasItem module contains classes related to canvas items.
"""
from __future__ import annotations
# standard libraries
import collections
import concurrent.futures
import contextlib
import copy
import datetime
import enum
import functools
import imageio
import logging
import operator
import sys
import threading
import types
import typing
import warnings
import weakref
# third party libraries
import numpy
# local libraries
from nion.ui import DrawingContext
from nion.utils import Event
from nion.utils import Geometry
from nion.utils import Observable
from nion.utils import Stream
if typing.TYPE_CHECKING:
from nion.ui import UserInterface
from nion.ui import MouseTrackingCanvasItem
from nion.ui import Widgets
# Sentinel for "no maximum": the largest native int on this platform.
MAX_VALUE = sys.maxsize
class Orientation(enum.Enum):
    """Enumeration of the two layout orientations."""
    Vertical = 0
    Horizontal = 1
class Constraint:
    """ A constraint on an item in a layout. Preferred is only used when free sizing. """

    def __init__(self) -> None:
        # None means "unconstrained" for each bound
        self.minimum: typing.Optional[int] = None
        self.maximum: typing.Optional[int] = None
        self.preferred: typing.Optional[int] = None

    def __repr__(self) -> str:
        return f"Constraint (min={self.minimum}, max={self.maximum}, pref={self.preferred})"
class SolverItem:
    """Mutable per-item state used by the layout solver."""

    def __init__(self, constraint: Constraint) -> None:
        # size stays None until the solver assigns one; is_constrained marks
        # items pinned to a min/max bound and excluded from redistribution.
        self.size: typing.Optional[int] = None
        self.is_constrained = False
        self.constraint = constraint
# Result of constraint_solve: (origins, sizes) — two parallel lists of ints, one entry per item.
ConstraintResultType = typing.Tuple[typing.List[int], typing.List[int]]
def constraint_solve(canvas_origin: int, canvas_size: int, canvas_item_constraints: typing.Sequence[Constraint], spacing: int = 0) -> ConstraintResultType:
    """
    Solve the layout by assigning space and enforcing constraints.

    canvas_origin: origin of the first item along the axis being solved.
    canvas_size: total space available for all items plus spacing.
    canvas_item_constraints: one Constraint (min/max/preferred) per item.
    spacing: fixed gap added after each item when computing origins.

    Strategy: honor preferred sizes (clamped to min/max), divide the leftover
    evenly among unsized items — iterating whenever a clamp invalidates the
    division — then shrink or grow unconstrained items to fix any remaining
    over/undersize caused by clamping and integer division.

    Returns origins, sizes tuple.
    """
    # setup information from each item
    solver_items = [SolverItem(constraint) for constraint in canvas_item_constraints]
    # assign preferred size, if any, to each item. items with preferred size are still
    # free to change as long as they don't become constrained.
    for solver_item in solver_items:
        if solver_item.constraint.preferred is not None:
            solver_item.size = solver_item.constraint.preferred
            assert solver_item.constraint.minimum is not None
            assert solver_item.constraint.maximum is not None
            # clamp preferred into [minimum, maximum]; only mark the item as
            # constrained when min/max conflict forces both clamps to fire
            if solver_item.size < solver_item.constraint.minimum:
                solver_item.size = solver_item.constraint.minimum
                if solver_item.size > solver_item.constraint.maximum:
                    solver_item.size = solver_item.constraint.maximum
                    solver_item.is_constrained = True
            if solver_item.size > solver_item.constraint.maximum:
                solver_item.size = solver_item.constraint.maximum
                if solver_item.size < solver_item.constraint.minimum:
                    solver_item.size = solver_item.constraint.minimum
                    solver_item.is_constrained = True
    # put these here to avoid linter warnings
    remaining_canvas_size = canvas_size
    remaining_count = len(solver_items)
    # assign the free space to the remaining items. first figure out how much space is left
    # and how many items remain. then divide the space up.
    finished = False
    while not finished:
        finished = True
        remaining_canvas_size = canvas_size
        remaining_count = len(solver_items)
        # reset the items that we can, i.e. those that aren't already constrained and don't have a preferred size
        for solver_item in solver_items:
            if not solver_item.is_constrained and solver_item.constraint.preferred is None:
                solver_item.size = None
        # figure out how many free range items there are, i.e. those that don't already have a size assigned
        for solver_item in solver_items:
            if solver_item.size is not None:
                remaining_canvas_size -= solver_item.size
                remaining_count -= 1
        # again attempt to assign sizes
        for solver_item in solver_items:
            if solver_item.size is None:
                size = remaining_canvas_size // remaining_count
                assert solver_item.constraint.minimum is not None
                assert solver_item.constraint.maximum is not None
                # a clamp here changes the even division, so restart the
                # while-loop with this item now marked as constrained
                if size < solver_item.constraint.minimum:
                    size = solver_item.constraint.minimum
                    solver_item.is_constrained = True
                    finished = False
                if size > solver_item.constraint.maximum:
                    size = solver_item.constraint.maximum
                    solver_item.is_constrained = True
                    finished = False
                solver_item.size = size
                remaining_canvas_size -= size
                remaining_count -= 1
            if not finished:
                break
    # go through again and assign any remaining space
    for solver_item in solver_items:
        if solver_item.size is None:
            solver_item.size = remaining_canvas_size // remaining_count
    # check if we're oversized. if so divide among unconstrained items, but honor minimum size.
    finished = False
    while not finished:
        finished = True
        actual_canvas_size = sum([solver_item.size for solver_item in solver_items])
        assert actual_canvas_size is not None
        if actual_canvas_size > canvas_size:
            remaining_count = sum([not solver_item.is_constrained for solver_item in solver_items])
            remaining_canvas_size = actual_canvas_size - canvas_size
            if remaining_count > 0:
                for solver_item in solver_items:
                    if not solver_item.is_constrained:
                        assert solver_item.size is not None
                        assert solver_item.constraint.minimum is not None
                        size = solver_item.size - remaining_canvas_size // remaining_count
                        if size < solver_item.constraint.minimum:
                            size = solver_item.constraint.minimum
                            solver_item.is_constrained = True
                            finished = False
                        adjustment = solver_item.size - size
                        solver_item.size = size
                        remaining_canvas_size -= adjustment
                        remaining_count -= 1
                    if not finished:
                        break
    # check if we're undersized. if so add among unconstrained items, but honor maximum size.
    finished = False
    while not finished:
        finished = True
        actual_canvas_size = sum([solver_item.size for solver_item in solver_items])
        assert actual_canvas_size is not None
        if actual_canvas_size < canvas_size:
            remaining_count = sum([not solver_item.is_constrained for solver_item in solver_items])
            remaining_canvas_size = canvas_size - actual_canvas_size
            if remaining_count > 0:
                for solver_item in solver_items:
                    if not solver_item.is_constrained:
                        assert solver_item.size is not None
                        assert solver_item.constraint.maximum is not None
                        size = solver_item.size + remaining_canvas_size // remaining_count
                        if size > solver_item.constraint.maximum:
                            size = solver_item.constraint.maximum
                            solver_item.is_constrained = True
                            finished = False
                        adjustment = size - solver_item.size
                        solver_item.size = size
                        remaining_canvas_size -= adjustment
                        remaining_count -= 1
                    if not finished:
                        break
    # assign layouts
    # TODO: allow for various justification options (start - default, end, center, space-between, space-around)
    # see https://css-tricks.com/snippets/css/a-guide-to-flexbox/
    sizes = [(solver_item.size or 0) for solver_item in solver_items]
    origins = list()
    for index in range(len(canvas_item_constraints)):
        origins.append(canvas_origin)
        canvas_origin += sizes[index] + spacing
    return origins, sizes
class Sizing:
    """
    Describes the sizing for a particular canvas item.

    Aspect ratio, width, and height can each specify minimums, maximums, and preferred values.

    Width and height can be integer or floats. If floats, they specify a percentage of their
    respective maximum.

    Preferred values are only used when free sizing.

    Collapsible items collapse to fixed size of 0 if they don't have children.

    Instances follow an immutable "with_*" pattern: each ``with_*`` method
    deep-copies the sizing and returns the modified copy; the ``_*`` setters
    are the internal mutators backing them.
    """
    def __init__(self) -> None:
        self.__preferred_width: typing.Optional[typing.Union[int, float]] = None
        self.__preferred_height: typing.Optional[typing.Union[int, float]] = None
        self.__preferred_aspect_ratio: typing.Optional[float] = None
        self.__minimum_width: typing.Optional[typing.Union[int, float]] = None
        self.__minimum_height: typing.Optional[typing.Union[int, float]] = None
        self.__minimum_aspect_ratio: typing.Optional[float] = None
        self.__maximum_width: typing.Optional[typing.Union[int, float]] = None
        self.__maximum_height: typing.Optional[typing.Union[int, float]] = None
        self.__maximum_aspect_ratio: typing.Optional[float] = None
        self.__collapsible: bool = False
    def __repr__(self) -> str:
        format_str = "Sizing (min_w={0}, max_w={1}, pref_w={2}, min_h={3}, max_h={4}, pref_h={5}, min_a={6}, max_a={7}, pref_a={8}, collapsible={9})"
        return format_str.format(self.__minimum_width, self.__maximum_width, self.__preferred_width,
                                 self.__minimum_height, self.__maximum_height, self.__preferred_height,
                                 self.__minimum_aspect_ratio, self.__maximum_aspect_ratio, self.__preferred_aspect_ratio,
                                 self.__collapsible)
    def __eq__(self, other: typing.Any) -> bool:
        # fix: this previously accessed sizing properties on `other` directly,
        # raising AttributeError for any non-Sizing operand. Returning
        # NotImplemented lets Python fall back to the default comparison,
        # so `sizing == some_other_type` is simply False.
        if not isinstance(other, Sizing):
            return NotImplemented
        return (self.__preferred_width == other.preferred_width
                and self.__preferred_height == other.preferred_height
                and self.__preferred_aspect_ratio == other.preferred_aspect_ratio
                and self.__minimum_width == other.minimum_width
                and self.__minimum_height == other.minimum_height
                and self.__minimum_aspect_ratio == other.minimum_aspect_ratio
                and self.__maximum_width == other.maximum_width
                and self.__maximum_height == other.maximum_height
                and self.__maximum_aspect_ratio == other.maximum_aspect_ratio
                and self.__collapsible == other.collapsible)
    def __deepcopy__(self, memo: typing.Dict[typing.Any, typing.Any]) -> Sizing:
        deepcopy = Sizing()
        deepcopy._copy_from(self)
        memo[id(self)] = deepcopy
        return deepcopy
    # read-only public accessors
    @property
    def preferred_width(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__preferred_width
    @property
    def preferred_height(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__preferred_height
    @property
    def preferred_aspect_ratio(self) -> typing.Optional[float]:
        return self.__preferred_aspect_ratio
    @property
    def minimum_width(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__minimum_width
    @property
    def minimum_height(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__minimum_height
    @property
    def minimum_aspect_ratio(self) -> typing.Optional[float]:
        return self.__minimum_aspect_ratio
    @property
    def maximum_width(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__maximum_width
    @property
    def maximum_height(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__maximum_height
    @property
    def maximum_aspect_ratio(self) -> typing.Optional[float]:
        return self.__maximum_aspect_ratio
    @property
    def collapsible(self) -> bool:
        return self.__collapsible
    # internal mutators and immutable "with" variants, one pair per field
    @property
    def _preferred_width(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__preferred_width
    @_preferred_width.setter
    def _preferred_width(self, value: typing.Optional[typing.Union[int, float]]) -> None:
        self.__preferred_width = value
    def with_preferred_width(self, width: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._preferred_width = width
        return sizing
    @property
    def _preferred_height(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__preferred_height
    @_preferred_height.setter
    def _preferred_height(self, value: typing.Optional[typing.Union[int, float]]) -> None:
        self.__preferred_height = value
    def with_preferred_height(self, height: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._preferred_height = height
        return sizing
    @property
    def _preferred_aspect_ratio(self) -> typing.Optional[float]:
        return self.__preferred_aspect_ratio
    @_preferred_aspect_ratio.setter
    def _preferred_aspect_ratio(self, value: typing.Optional[float]) -> None:
        self.__preferred_aspect_ratio = value
    def with_preferred_aspect_ratio(self, aspect_ratio: typing.Optional[float]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._preferred_aspect_ratio = aspect_ratio
        return sizing
    @property
    def _minimum_width(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__minimum_width
    @_minimum_width.setter
    def _minimum_width(self, value: typing.Optional[typing.Union[int, float]]) -> None:
        self.__minimum_width = value
    def with_minimum_width(self, width: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._minimum_width = width
        return sizing
    @property
    def _minimum_height(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__minimum_height
    @_minimum_height.setter
    def _minimum_height(self, value: typing.Optional[typing.Union[int, float]]) -> None:
        self.__minimum_height = value
    def with_minimum_height(self, height: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._minimum_height = height
        return sizing
    @property
    def _minimum_aspect_ratio(self) -> typing.Optional[float]:
        return self.__minimum_aspect_ratio
    @_minimum_aspect_ratio.setter
    def _minimum_aspect_ratio(self, value: typing.Optional[float]) -> None:
        self.__minimum_aspect_ratio = value
    def with_minimum_aspect_ratio(self, aspect_ratio: typing.Optional[float]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._minimum_aspect_ratio = aspect_ratio
        return sizing
    @property
    def _maximum_width(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__maximum_width
    @_maximum_width.setter
    def _maximum_width(self, value: typing.Optional[typing.Union[int, float]]) -> None:
        self.__maximum_width = value
    def with_maximum_width(self, width: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._maximum_width = width
        return sizing
    @property
    def _maximum_height(self) -> typing.Optional[typing.Union[int, float]]:
        return self.__maximum_height
    @_maximum_height.setter
    def _maximum_height(self, value: typing.Optional[typing.Union[int, float]]) -> None:
        self.__maximum_height = value
    def with_maximum_height(self, height: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._maximum_height = height
        return sizing
    @property
    def _maximum_aspect_ratio(self) -> typing.Optional[float]:
        return self.__maximum_aspect_ratio
    @_maximum_aspect_ratio.setter
    def _maximum_aspect_ratio(self, value: typing.Optional[float]) -> None:
        self.__maximum_aspect_ratio = value
    def with_maximum_aspect_ratio(self, aspect_ratio: typing.Optional[float]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._maximum_aspect_ratio = aspect_ratio
        return sizing
    @property
    def _collapsible(self) -> bool:
        return self.__collapsible
    @_collapsible.setter
    def _collapsible(self, value: bool) -> None:
        self.__collapsible = value
    def with_collapsible(self, collapsible: bool) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._collapsible = collapsible
        return sizing
    def _copy_from(self, other: Sizing) -> None:
        """Copy all sizing fields from another Sizing into this one."""
        self.__preferred_width = other.preferred_width
        self.__preferred_height = other.preferred_height
        self.__preferred_aspect_ratio = other.preferred_aspect_ratio
        self.__minimum_width = other.minimum_width
        self.__minimum_height = other.minimum_height
        self.__minimum_aspect_ratio = other.minimum_aspect_ratio
        self.__maximum_width = other.maximum_width
        self.__maximum_height = other.maximum_height
        self.__maximum_aspect_ratio = other.maximum_aspect_ratio
        self.__collapsible = other.collapsible
    def _clear_height_constraint(self) -> None:
        self.__preferred_height = None
        self.__minimum_height = None
        self.__maximum_height = None
    def with_unconstrained_height(self) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._clear_height_constraint()
        return sizing
    def _clear_width_constraint(self) -> None:
        self.__preferred_width = None
        self.__minimum_width = None
        self.__maximum_width = None
    def with_unconstrained_width(self) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._clear_width_constraint()
        return sizing
    def _set_fixed_height(self, height: typing.Optional[typing.Union[int, float]]) -> None:
        self.__preferred_height = height
        self.__minimum_height = height
        self.__maximum_height = height
    def with_fixed_height(self, height: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._set_fixed_height(height)
        return sizing
    def _set_fixed_width(self, width: typing.Optional[typing.Union[int, float]]) -> None:
        self.__preferred_width = width
        self.__minimum_width = width
        self.__maximum_width = width
    def with_fixed_width(self, width: typing.Optional[typing.Union[int, float]]) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._set_fixed_width(width)
        return sizing
    def _set_fixed_size(self, size: Geometry.IntSizeTuple) -> None:
        size_ = Geometry.IntSize.make(size)
        self._set_fixed_height(size_.height)
        self._set_fixed_width(size_.width)
    def with_fixed_size(self, size: Geometry.IntSizeTuple) -> Sizing:
        sizing = copy.deepcopy(self)
        sizing._set_fixed_size(size)
        return sizing
    def get_width_constraint(self, width: typing.Union[int, float]) -> Constraint:
        """ Create and return a new width Constraint object made from this sizing object. """
        # float values <= 1.0 are interpreted as a fraction of the passed-in width
        constraint = Constraint()
        if self.minimum_width is not None:
            if isinstance(self.minimum_width, float) and self.minimum_width <= 1.0:
                constraint.minimum = int(width * self.minimum_width)
            else:
                constraint.minimum = int(self.minimum_width)
        else:
            constraint.minimum = 0
        if self.maximum_width is not None:
            if isinstance(self.maximum_width, float) and self.maximum_width <= 1.0:
                constraint.maximum = int(width * self.maximum_width)
            else:
                constraint.maximum = int(self.maximum_width)
        else:
            constraint.maximum = MAX_VALUE
        if self.preferred_width is not None:
            if isinstance(self.preferred_width, float) and self.preferred_width <= 1.0:
                constraint.preferred = int(width * self.preferred_width)
            else:
                constraint.preferred = int(self.preferred_width)
        else:
            constraint.preferred = None
        return constraint
    def get_height_constraint(self, height: typing.Union[int, float]) -> Constraint:
        """ Create and return a new height Constraint object made from this sizing object. """
        # float values <= 1.0 are interpreted as a fraction of the passed-in height
        constraint = Constraint()
        if self.minimum_height is not None:
            if isinstance(self.minimum_height, float) and self.minimum_height <= 1.0:
                constraint.minimum = int(height * self.minimum_height)
            else:
                constraint.minimum = int(self.minimum_height)
        else:
            constraint.minimum = 0
        if self.maximum_height is not None:
            if isinstance(self.maximum_height, float) and self.maximum_height <= 1.0:
                constraint.maximum = int(height * self.maximum_height)
            else:
                constraint.maximum = int(self.maximum_height)
        else:
            constraint.maximum = MAX_VALUE
        if self.preferred_height is not None:
            if isinstance(self.preferred_height, float) and self.preferred_height <= 1.0:
                constraint.preferred = int(height * self.preferred_height)
            else:
                constraint.preferred = int(self.preferred_height)
        else:
            constraint.preferred = None
        return constraint
    def get_unrestrained_width(self, maximum_width: typing.Union[int, float]) -> int:
        """Return this sizing's width capped at maximum_width (fractional floats scale it)."""
        if self.maximum_width is not None:
            if isinstance(self.maximum_width, float) and self.maximum_width < 1.0:
                return int(self.maximum_width * maximum_width)
            return int(min(self.maximum_width, maximum_width))
        return int(maximum_width)
    def get_unrestrained_height(self, maximum_height: typing.Union[int, float]) -> int:
        """Return this sizing's height capped at maximum_height (fractional floats scale it)."""
        if self.maximum_height is not None:
            if isinstance(self.maximum_height, float) and self.maximum_height < 1.0:
                return int(self.maximum_height * maximum_height)
            return int(min(self.maximum_height, maximum_height))
        return int(maximum_height)
class KeyboardModifiers:
    """Immutable snapshot of the keyboard modifier state (shift/control/alt/meta/keypad)."""
    def __init__(self, shift: bool = False, control: bool = False, alt: bool = False, meta: bool = False, keypad: bool = False) -> None:
        self.__shift_down = shift
        self.__control_down = control
        self.__alt_down = alt
        self.__meta_down = meta
        self.__keypad_down = keypad
    @property
    def any_modifier(self) -> bool:
        """Return True when any of shift/control/alt/meta is down (keypad does not count)."""
        return self.shift or self.control or self.alt or self.meta
    # shift
    @property
    def shift(self) -> bool:
        return self.__shift_down
    @property
    def only_shift(self) -> bool:
        """Return True when shift is the sole modifier down."""
        return self.__shift_down and not (self.__control_down or self.__alt_down or self.__meta_down)
    # control (command key on mac)
    @property
    def control(self) -> bool:
        return self.__control_down
    @property
    def only_control(self) -> bool:
        """Return True when control is the sole modifier down."""
        return self.__control_down and not (self.__shift_down or self.__alt_down or self.__meta_down)
    # alt (option key on mac)
    @property
    def alt(self) -> bool:
        return self.__alt_down
    @property
    def only_alt(self) -> bool:
        """Return True when alt is the sole modifier down."""
        return self.__alt_down and not (self.__control_down or self.__shift_down or self.__meta_down)
    # option (alt key on windows)
    @property
    def option(self) -> bool:
        return self.__alt_down
    @property
    def only_option(self) -> bool:
        """Return True when option (alt) is the sole modifier down."""
        return self.__alt_down and not (self.__control_down or self.__shift_down or self.__meta_down)
    # meta (control key on mac)
    @property
    def meta(self) -> bool:
        return self.__meta_down
    @property
    def only_meta(self) -> bool:
        """Return True when meta is the sole modifier down."""
        return self.__meta_down and not (self.__control_down or self.__shift_down or self.__alt_down)
    # keypad
    @property
    def keypad(self) -> bool:
        return self.__keypad_down
    @property
    def only_keypad(self) -> bool:
        return self.__keypad_down
    @property
    def native_control(self) -> bool:
        # the platform-native "control" maps to the control property
        return self.control
def visible_canvas_item(canvas_item: typing.Optional[AbstractCanvasItem]) -> typing.Optional[AbstractCanvasItem]:
    """Return canvas_item when it exists and is visible; otherwise return None."""
    if canvas_item and canvas_item.visible:
        return canvas_item
    return None
class AbstractCanvasItem:
    """An item drawn on a canvas supporting mouse and keyboard actions.
    CONTAINERS
    A canvas item should be added to a container. It is an error to add a particular canvas item to more than one
    container. The container in which the canvas item resides is accessible via the ``container`` property.
    LAYOUT
    The container is responsible for layout and will set the canvas bounds of this canvas item as a function of the
    container layout algorithm and this canvas item's sizing information.
    The ``sizing`` property is the intrinsic sizing constraints of this canvas item.
    The ``layout_sizing`` property is a the sizing information used by the container layout algorithm.
    If this canvas item is non-composite, then ``layout_sizing`` will be identical to this canvas item's ``sizing``.
    However, if this canvas item is composite, then ``layout_sizing`` is determined by the layout algorithm and then
    additionally constrained by this canvas item's ``sizing``. In this way, by leaving ``sizing`` unconstrained, the
    layout can determine the sizing of this canvas item. Alternatively, by adding a constraint to ``sizing``, the layout
    can be constrained. This corresponds to the contents determining the size of the container vs. the container
    determining the size of the layout.
    Unpredictable layout may occur if an unconstrained item is placed into an unrestrained container. Be sure to
    either restrain (implicitly or explicitly) the content or the container.
    Layout occurs when the structure of the item hierarchy changes, such as when a new canvas item is added to a
    container. Clients can also call ``refresh_layout`` explicitly as needed.
    UPDATES AND DRAWING
    Update is the mechanism by which the container is notified that one of its child canvas items needs updating.
    The update message will ultimately end up at the root container at which point the root container will trigger a
    repaint on a thread.
    Subclasses should override _repaint or _repaint_visible to implement drawing. Drawing should take place within the
    canvas bounds.
    """
    def __init__(self) -> None:
        super().__init__()
        # parent composition; set/cleared via the container property when added to or removed from a container
        self.__container: typing.Optional[CanvasItemComposition] = None
        # layout state (external coordinates); None until layout has been performed
        self.__canvas_size: typing.Optional[Geometry.IntSize] = None
        self.__canvas_origin: typing.Optional[Geometry.IntPoint] = None
        # intrinsic sizing constraints; exposed via the sizing/layout_sizing properties
        self.__sizing = Sizing()
        self.__focused = False
        self.__focusable = False
        self.wants_mouse_events = False
        self.wants_drag_events = False
        self.on_focus_changed: typing.Optional[typing.Callable[[bool], None]] = None
        self.on_layout_updated: typing.Optional[typing.Callable[[typing.Optional[Geometry.IntPoint], typing.Optional[Geometry.IntSize], bool], None]] = None
        self.__cursor_shape: typing.Optional[str] = None
        self.__tool_tip: typing.Optional[str] = None
        self.__background_color: typing.Optional[str] = None
        self.__border_color: typing.Optional[str] = None
        self.__visible = True
        self._has_layout = False
        # thread on which this item was created; close() warns when called from a different thread
        self.__thread = threading.current_thread()
        # True when a repaint is needed; consumed and cleared in _repaint_if_needed
        self.__pending_update = True
        # cached drawing context from the last repaint, reused until the next pending update
        self.__repaint_drawing_context: typing.Optional[DrawingContext.DrawingContext] = None
        # stats for testing
        self._update_count = 0
        self._repaint_count = 0
        self.is_root_opaque = False
    def close(self) -> None:
        """ Close the canvas object. """
        if threading.current_thread() != self.__thread:
            warnings.warn('CanvasItem closed on different thread')
            import traceback
            traceback.print_stack()
        self.__container = None
        self.on_focus_changed = None
        self.on_layout_updated = None
    @property
    def is_ui_interaction_active(self) -> bool:
        """ Return whether the root container reports an active UI interaction; False without a root container. """
        root_container = self.root_container
        if root_container:
            return root_container.is_ui_interaction_active
        return False
    @property
    def canvas_size(self) -> typing.Optional[Geometry.IntSize]:
        """ Returns size of canvas_rect (external coordinates). """
        return self.__canvas_size
    def _set_canvas_size(self, canvas_size: typing.Optional[Geometry.IntSizeTuple]) -> None:
        # only trigger an update when the size actually changes (including None <-> value transitions)
        canvas_size_ = Geometry.IntSize.make(canvas_size) if canvas_size is not None else None
        if ((self.__canvas_size is None) != (canvas_size_ is None)) or (self.__canvas_size != canvas_size_):
            self.__canvas_size = canvas_size_
            self.update()
    @property
    def canvas_origin(self) -> typing.Optional[Geometry.IntPoint]:
        """ Returns origin of canvas_rect (external coordinates). """
        return self.__canvas_origin
    def _set_canvas_origin(self, canvas_origin: typing.Optional[Geometry.IntPointTuple]) -> None:
        # only trigger an update when the origin actually changes (including None <-> value transitions)
        canvas_origin_ = Geometry.IntPoint.make(canvas_origin) if canvas_origin is not None else None
        if ((self.__canvas_origin is None) != (canvas_origin_ is None)) or (self.__canvas_origin != canvas_origin_):
            self.__canvas_origin = canvas_origin_
            self.update()
    def _begin_container_layout_changed(self) -> None:
        """ Subclass hook; default does nothing. """
        pass
    def _finish_container_layout_changed(self) -> None:
        """ Subclass hook; default does nothing. """
        pass
    def _container_layout_changed(self) -> None:
        """ Subclass hook; default does nothing. """
        pass
    @property
    def canvas_widget(self) -> typing.Optional[UserInterface.CanvasWidget]:
        """ Return the canvas widget from the container, if any. """
        return self.container.canvas_widget if self.container else None
    @property
    def canvas_bounds(self) -> typing.Optional[Geometry.IntRect]:
        """ Returns a rect of the internal coordinates. """
        if self.canvas_size is not None:
            return Geometry.IntRect((0, 0), self.canvas_size)
        return None
    @property
    def canvas_rect(self) -> typing.Optional[Geometry.IntRect]:
        """ Returns a rect of the external coordinates. """
        if self.canvas_origin is not None and self.canvas_size is not None:
            return Geometry.IntRect(self.canvas_origin, self.canvas_size)
        return None
    @property
    def container(self) -> typing.Optional[CanvasItemComposition]:
        """ Return the container, if any. """
        return self.__container
    @container.setter
    def container(self, container: typing.Optional[CanvasItemComposition]) -> None:
        """ Set container. """
        # an item may move from no container to a container or back, never directly between containers
        assert self.__container is None or container is None
        self.__container = container
    @property
    def layer_container(self) -> typing.Optional[CanvasItemComposition]:
        """ Return the layer container, if any. """
        return self.__container.layer_container if self.__container else None
    @property
    def root_container(self) -> typing.Optional[RootCanvasItem]:
        """ Return the root container, if any. """
        return self.__container.root_container if self.__container else None
    @property
    def background_color(self) -> typing.Optional[str]:
        return self.__background_color
    @background_color.setter
    def background_color(self, background_color: typing.Optional[str]) -> None:
        self.__background_color = background_color
        self.update()
    @property
    def border_color(self) -> typing.Optional[str]:
        return self.__border_color
    @border_color.setter
    def border_color(self, border_color: typing.Optional[str]) -> None:
        self.__border_color = border_color
        self.update()
    @property
    def focusable(self) -> bool:
        """ Return whether the canvas item is focusable. """
        return self.__focusable
    @focusable.setter
    def focusable(self, focusable: bool) -> None:
        """
        Set whether the canvas item is focusable.
        If this canvas item is focusable and contains other canvas items, they should
        not be focusable.
        """
        self.__focusable = focusable
    @property
    def focused(self) -> bool:
        """ Return whether the canvas item is focused. """
        return self.__focused
    def _set_focused(self, focused: bool) -> None:
        """ Set whether the canvas item is focused. Only called from container. """
        if focused != self.__focused:
            self.__focused = focused
            self.update()
            if callable(self.on_focus_changed):
                self.on_focus_changed(focused)
    def _request_focus(self, p: typing.Optional[Geometry.IntPoint] = None,
                       modifiers: typing.Optional[UserInterface.KeyboardModifiers] = None) -> None:
        # protected method
        if not self.focused:
            root_container = self.root_container
            if root_container:
                root_container._request_root_focus(self, p, modifiers)
    def request_focus(self) -> None:
        """Request focus.
        Subclasses should not override. Override _request_focus instead."""
        self._request_focus()
    def adjust_secondary_focus(self, p: Geometry.IntPoint, modifiers: UserInterface.KeyboardModifiers) -> None:
        """Adjust secondary focus. Default does nothing."""
        pass
    def clear_focus(self) -> None:
        """ Relinquish focus. """
        if self.focused:
            root_container = self.root_container
            if root_container:
                root_container._set_focused_item(None)
    def drag(self, mime_data: UserInterface.MimeData, thumbnail: typing.Optional[DrawingContext.RGBA32Type] = None,
             hot_spot_x: typing.Optional[int] = None, hot_spot_y: typing.Optional[int] = None,
             drag_finished_fn: typing.Optional[typing.Callable[[str], None]] = None) -> None:
        """ Initiate a drag by delegating to the root container, if any. """
        root_container = self.root_container
        if root_container:
            root_container.drag(mime_data, thumbnail, hot_spot_x, hot_spot_y, drag_finished_fn)
    def show_tool_tip_text(self, text: str, gx: int, gy: int) -> None:
        """ Show tool tip text (gx, gy are global coordinates) via the root container, if any. """
        root_container = self.root_container
        if root_container:
            root_container.show_tool_tip_text(text, gx, gy)
    @property
    def tool_tip(self) -> typing.Optional[str]:
        return self.__tool_tip
    @tool_tip.setter
    def tool_tip(self, value: typing.Optional[str]) -> None:
        self.__tool_tip = value
    @property
    def cursor_shape(self) -> typing.Optional[str]:
        return self.__cursor_shape
    @cursor_shape.setter
    def cursor_shape(self, cursor_shape: typing.Optional[str]) -> None:
        self.__cursor_shape = cursor_shape
        # notify the root container so it can update the platform cursor
        root_container = self.root_container
        if root_container:
            root_container._cursor_shape_changed(self)
    def map_to_canvas_item(self, p: Geometry.IntPointTuple, canvas_item: AbstractCanvasItem) -> Geometry.IntPoint:
        """ Map the point to the local coordinates of canvas_item. """
        o1 = self.map_to_root_container(Geometry.IntPoint())
        o2 = canvas_item.map_to_root_container(Geometry.IntPoint())
        return Geometry.IntPoint.make(p) + o1 - o2
    def map_to_root_container(self, p: Geometry.IntPoint) -> Geometry.IntPoint:
        """ Map the point to the coordinates of the root container. """
        canvas_item: typing.Optional[AbstractCanvasItem] = self
        while canvas_item:  # handle case where last canvas item was root
            canvas_item_origin = canvas_item.canvas_origin
            if canvas_item_origin is not None:  # handle case where canvas item is not root but has no parent
                p = p + canvas_item_origin
                canvas_item = canvas_item.container
            else:
                break
        return p
    def map_to_container(self, p: Geometry.IntPoint) -> Geometry.IntPoint:
        """ Map the point to the coordinates of the container. """
        canvas_origin = self.canvas_origin
        assert canvas_origin
        return p + canvas_origin
    def map_to_global(self, p: Geometry.IntPoint) -> Geometry.IntPoint:
        """ Map the point to global coordinates via the root container. Requires a root container. """
        root_container = self.root_container
        assert root_container
        return root_container.map_to_global(self.map_to_root_container(p))
    def _inserted(self, container: typing.Optional[AbstractCanvasItem]) -> None:
        """Subclasses may override to know when inserted into a container."""
        pass
    def _removed(self, container: typing.Optional[AbstractCanvasItem]) -> None:
        """Subclasses may override to know when removed from a container."""
        pass
    def prepare_render(self) -> None:
        """Subclasses may override to prepare for layout and repaint. DEPRECATED see _prepare_render."""
        pass
    def _prepare_render(self) -> None:
        """Subclasses may override to prepare for layout and repaint."""
        self._prepare_render_self()
    def _prepare_render_self(self) -> None:
        """Subclasses may override to prepare for layout and repaint."""
        pass
    def update_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint],
                      canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> None:
        """Update the layout with a new canvas_origin and canvas_size.
        canvas_origin and canvas_size are the external bounds.
        This method will be called on the render thread.
        Subclasses can override this method to take action when the size of the canvas item changes, but they should
        typically call super to do the actual layout.
        The on_layout_updated callable will be called with the new canvas_origin and canvas_size.
        The canvas_origin and canvas_size properties are valid after calling this method and _has_layout is True.
        """
        self._update_self_layout(canvas_origin, canvas_size, immediate=immediate)
        self._has_layout = self.canvas_origin is not None and self.canvas_size is not None
    def _update_self_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint],
                            canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> None:
        """Update the canvas origin and size and call notification methods."""
        self._set_canvas_origin(canvas_origin)
        self._set_canvas_size(canvas_size)
        if callable(self.on_layout_updated):
            self.on_layout_updated(self.canvas_origin, self.canvas_size, immediate)
        self._has_layout = self.canvas_origin is not None and self.canvas_size is not None
    def refresh_layout_immediate(self) -> None:
        """Immediate re-layout the item."""
        self.refresh_layout()
        self.update_layout(self.canvas_origin, self.canvas_size, immediate=True)
    def refresh_layout(self) -> None:
        """Invalidate the layout and trigger layout.
        Items get layout from their container, so the default implementation asks the container to layout.
        """
        if self.__container:
            self.__container._needs_layout(self)
    def _needs_layout(self, canvas_item: AbstractCanvasItem) -> None:
        # pass the needs layout up the chain.
        if self.__container:
            self.__container._needs_layout(canvas_item)
    @property
    def visible(self) -> bool:
        return self.__visible
    @visible.setter
    def visible(self, value: bool) -> None:
        # layout depends on visibility, so a change forces a container re-layout
        if self.__visible != value:
            self.__visible = value
            if self.__container:
                self.__container.refresh_layout()
    @property
    def sizing(self) -> Sizing:
        """
        Return sizing information for this canvas item.
        The sizing property is read only, but the object itself
        can be modified.
        """
        return copy.deepcopy(self.__sizing)
    @property
    def layout_sizing(self) -> Sizing:
        """
        Return layout sizing information for this canvas item.
        The layout sizing is read only and cannot be modified. It is
        used from the layout engine.
        """
        return copy.deepcopy(self.sizing)
    def copy_sizing(self) -> Sizing:
        """ Return a copy of the sizing information (sizing already returns a deep copy). """
        return self.sizing
    def update_sizing(self, new_sizing: Sizing) -> None:
        """ Replace the sizing information and refresh layout if it changed. """
        if new_sizing != self.sizing:
            self.__sizing._copy_from(new_sizing)
            self.refresh_layout()
    def update(self) -> None:
        """Mark canvas item as needing a display update.
        The canvas item will be repainted by the root canvas item.
        """
        self._update_with_items()
    def _update_with_items(self, canvas_items: typing.Optional[typing.Sequence[AbstractCanvasItem]] = None) -> None:
        self._update_count += 1
        self._updated(canvas_items)
    def _updated(self, canvas_items: typing.Optional[typing.Sequence[AbstractCanvasItem]] = None) -> None:
        # Notify this canvas item that a child has been updated, repaint if needed at next opportunity.
        self.__pending_update = True
        self._update_container(canvas_items)
    def _update_container(self, canvas_items: typing.Optional[typing.Sequence[AbstractCanvasItem]] = None) -> None:
        # if not in the middle of a nested update, and if this canvas item has
        # a layout, update the container.
        container = self.__container
        if container and self._has_layout:
            canvas_items = list(canvas_items) if canvas_items else list()
            canvas_items.append(self)
            container._update_with_items(canvas_items)
    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Repaint the canvas item to the drawing context.
        Subclasses should override this method to paint.
        This method will be called on a thread.
        The drawing should take place within the canvas_bounds.
        """
        assert self.canvas_size is not None
        self._repaint_count += 1
    def _repaint_template(self, drawing_context: DrawingContext.DrawingContext, immediate: bool) -> None:
        """A wrapper method for _repaint.
        Callers should always call this method instead of _repaint directly. This helps keep the _repaint
        implementations simple and easy to understand.
        """
        self._repaint(drawing_context)
    def _repaint_if_needed(self, drawing_context: DrawingContext.DrawingContext, *, immediate: bool = False) -> None:
        # Repaint if no cached version of the last paint is available.
        # If no cached drawing context is available, regular _repaint is used to make a new one which is then cached.
        # The cached drawing context is typically cleared during the update method.
        # Subclasses will typically not need to override this method, except in special cases.
        pending_update, self.__pending_update = self.__pending_update, False
        if pending_update:
            repaint_drawing_context = DrawingContext.DrawingContext()
            self._repaint_template(repaint_drawing_context, immediate)
            self.__repaint_drawing_context = repaint_drawing_context
        if self.__repaint_drawing_context:
            drawing_context.add(self.__repaint_drawing_context)
    def _repaint_finished(self, drawing_context: DrawingContext.DrawingContext) -> None:
        # when the thread finishes the repaint, this method gets called. the normal container update
        # has not been called yet since the repaint wasn't finished until now. this method performs
        # the container update.
        self._update_container()
    def repaint_immediate(self, drawing_context: DrawingContext.DrawingContext, canvas_size: Geometry.IntSize) -> None:
        """ Lay out at the given canvas_size and repaint synchronously into drawing_context. """
        self.update_layout(Geometry.IntPoint(), canvas_size)
        self._repaint_template(drawing_context, immediate=True)
    def _draw_background(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Draw the background. Subclasses can call this."""
        background_color = self.__background_color
        if background_color:
            rect = self.canvas_bounds
            if rect:
                with drawing_context.saver():
                    drawing_context.begin_path()
                    drawing_context.rect(rect.left, rect.top, rect.width, rect.height)
                    drawing_context.fill_style = background_color
                    drawing_context.fill()
    def _draw_border(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Draw the border. Subclasses can call this."""
        border_color = self.__border_color
        if border_color:
            rect = self.canvas_bounds
            if rect:
                with drawing_context.saver():
                    drawing_context.begin_path()
                    drawing_context.rect(rect.left, rect.top, rect.width, rect.height)
                    drawing_context.stroke_style = border_color
                    drawing_context.stroke()
    def _repaint_visible(self, drawing_context: DrawingContext.DrawingContext, visible_rect: Geometry.IntRect) -> None:
        """
        Repaint the canvas item to the drawing context within the visible area.
        Subclasses can override this method to paint.
        This method will be called on a thread.
        The drawing should take place within the canvas_bounds.
        The default implementation calls _repaint(drawing_context)
        """
        self._repaint_if_needed(drawing_context)
    def canvas_item_at_point(self, x: int, y: int) -> typing.Optional[AbstractCanvasItem]:
        """ Return the top-most canvas item at the point, or None. """
        canvas_items = self.canvas_items_at_point(x, y)
        return canvas_items[0] if len(canvas_items) > 0 else None
    def canvas_items_at_point(self, x: int, y: int) -> typing.List[AbstractCanvasItem]:
        """ Return the canvas item at the point. May return None. """
        canvas_bounds = self.canvas_bounds
        if canvas_bounds and canvas_bounds.contains_point(Geometry.IntPoint(x=x, y=y)):
            return [self]
        return []
    def get_root_opaque_canvas_items(self) -> typing.List[AbstractCanvasItem]:
        """ Return [self] when this item is marked root-opaque, else an empty list. """
        return [self] if self.is_root_opaque else list()
    def mouse_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """ Handle a mouse click within this canvas item. Return True if handled. """
        return False
    def mouse_double_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """ Handle a mouse double click within this canvas item. Return True if handled. """
        return False
    def mouse_entered(self) -> bool:
        """ Handle a mouse entering this canvas item. Return True if handled. """
        return False
    def mouse_exited(self) -> bool:
        """ Handle a mouse exiting this canvas item. Return True if handled. """
        return False
    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """ Handle a mouse press within this canvas item. Return True if handled. """
        return False
    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """ Handle a mouse release within this canvas item. Return True if handled. """
        return False
    def mouse_position_changed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """ Handle a mouse move within this canvas item. Return True if handled. """
        return False
    def wheel_changed(self, x: int, y: int, dx: int, dy: int, is_horizontal: bool) -> bool:
        """ Handle a mouse wheel changed within this canvas item. Return True if handled. """
        return False
    def context_menu_event(self, x: int, y: int, gx: int, gy: int) -> bool:
        """ Handle a context menu event. x, y are local coordinates. gx, gy are global coordinates. """
        return False
    def key_pressed(self, key: UserInterface.Key) -> bool:
        """ Handle a key pressed while this canvas item has focus. Return True if handled. """
        return False
    def key_released(self, key: UserInterface.Key) -> bool:
        """ Handle a key released while this canvas item has focus. Return True if handled. """
        return False
    def wants_drag_event(self, mime_data: UserInterface.MimeData, x: int, y: int) -> bool:
        """ Determines if the item should handle certain mime_data at a certain point. Return True if handled."""
        return self.wants_drag_events
    def drag_enter(self, mime_data: UserInterface.MimeData) -> str:
        """ Handle a drag event entering this canvas item. Return action if handled. """
        return "ignore"
    def drag_leave(self) -> str:
        """ Handle a drag event leaving this canvas item. Return action if handled. """
        return "ignore"
    def drag_move(self, mime_data: UserInterface.MimeData, x: int, y: int) -> str:
        """ Handle a drag event moving within this canvas item. Return action if handled. """
        return "ignore"
    def drop(self, mime_data: UserInterface.MimeData, x: int, y: int) -> str:
        """ Handle a drop event in this canvas item. Return action if handled. """
        return "ignore"
    def handle_tool_tip(self, x: int, y: int, gx: int, gy: int) -> bool:
        """ Handle a tool tip request. Return True if handled. """
        return False
    def pan_gesture(self, dx: int, dy: int) -> bool:
        """ Handle a pan gesture in this canvas item. Return action if handled. """
        return False
    def _dispatch_any(self, method: str, *args: typing.Any, **kwargs: typing.Any) -> bool:
        """ Call the named method on this item if present; return its boolean result, else False. """
        if hasattr(self, method):
            return typing.cast(bool, getattr(self, method)(*args, **kwargs))
        return False
    def _can_dispatch_any(self, method: str) -> bool:
        """ Return whether this item defines the named method. """
        return hasattr(self, method)
    def _get_menu_item_state(self, command_id: str) -> typing.Optional[UserInterface.MenuItemState]:
        # prefer an explicit get_<command_id>_menu_item_state method; fall back to
        # presence of handle_<command_id>, which enables a default menu item state.
        handle_method = "handle_" + command_id
        menu_item_state_method = "get_" + command_id + "_menu_item_state"
        if hasattr(self, menu_item_state_method):
            menu_item_state = getattr(self, menu_item_state_method)()
            if menu_item_state:
                return typing.cast(UserInterface.MenuItemState, menu_item_state)
        if hasattr(self, handle_method):
            return UserInterface.MenuItemState(title=None, enabled=True, checked=False)
        return None
    def simulate_click(self, p: Geometry.IntPointTuple, modifiers: typing.Optional[UserInterface.KeyboardModifiers] = None) -> None:
        """ Simulate a mouse click at p; p is a (y, x) point tuple (for testing). """
        modifiers_ = modifiers or typing.cast("UserInterface.KeyboardModifiers", KeyboardModifiers())
        self.mouse_pressed(p[1], p[0], modifiers_)
        self.mouse_released(p[1], p[0], modifiers_)
    def simulate_drag(self, p1: Geometry.IntPointTuple, p2: Geometry.IntPointTuple, modifiers: typing.Optional[UserInterface.KeyboardModifiers] = None) -> None:
        """ Simulate a mouse drag from p1 to p2 (each a (y, x) point tuple), including a midpoint move (for testing). """
        modifiers_ = modifiers or typing.cast("UserInterface.KeyboardModifiers", KeyboardModifiers())
        self.mouse_pressed(p1[1], p1[0], modifiers_)
        self.mouse_position_changed(p1[1], p1[0], modifiers_)
        midpoint = Geometry.midpoint(Geometry.IntPoint.make(p1).to_float_point(), Geometry.IntPoint.make(p2).to_float_point())
        self.mouse_position_changed(round(midpoint[1]), round(midpoint[0]), modifiers_)
        self.mouse_position_changed(p2[1], p2[0], modifiers_)
        self.mouse_released(p2[1], p2[0], modifiers_)
    def simulate_press(self, p: Geometry.IntPointTuple, modifiers: typing.Optional[UserInterface.KeyboardModifiers] = None) -> None:
        """ Simulate a mouse press at p; p is a (y, x) point tuple (for testing). """
        modifiers_ = modifiers or typing.cast("UserInterface.KeyboardModifiers", KeyboardModifiers())
        self.mouse_pressed(p[1], p[0], modifiers_)
    def simulate_move(self, p: Geometry.IntPointTuple, modifiers: typing.Optional[UserInterface.KeyboardModifiers] = None) -> None:
        """ Simulate a mouse move to p; p is a (y, x) point tuple (for testing). """
        modifiers_ = modifiers or typing.cast("UserInterface.KeyboardModifiers", KeyboardModifiers())
        self.mouse_position_changed(p[1], p[0], modifiers_)
    def simulate_release(self, p: Geometry.IntPointTuple, modifiers: typing.Optional[UserInterface.KeyboardModifiers] = None) -> None:
        """ Simulate a mouse release at p; p is a (y, x) point tuple (for testing). """
        modifiers_ = modifiers or typing.cast("UserInterface.KeyboardModifiers", KeyboardModifiers())
        self.mouse_released(p[1], p[0], modifiers_)
class CanvasItemAbstractLayout:
"""
Layout canvas items within a larger space.
Subclasses must implement layout method.
NOTE: origin=0 is at the top
"""
def __init__(self, margins: typing.Optional[Geometry.Margins] = None, spacing: typing.Optional[int] = None) -> None:
self.margins = margins if margins is not None else Geometry.Margins(0, 0, 0, 0)
self.spacing = spacing if spacing else 0
def calculate_row_layout(self, canvas_origin: Geometry.IntPoint, canvas_size: Geometry.IntSize,
canvas_items: typing.Sequence[AbstractCanvasItem]) -> ConstraintResultType:
""" Use constraint_solve to return the positions of canvas items as if they are in a row. """
canvas_item_count = len(canvas_items)
spacing_count = canvas_item_count - 1
content_left = canvas_origin.x + self.margins.left
content_width = canvas_size.width - self.margins.left - self.margins.right - self.spacing * spacing_count
constraints = [canvas_item.layout_sizing.get_width_constraint(content_width) for canvas_item in canvas_items]
return constraint_solve(content_left, content_width, constraints, self.spacing)
def calculate_column_layout(self, canvas_origin: Geometry.IntPoint, canvas_size: Geometry.IntSize,
canvas_items: typing.Sequence[AbstractCanvasItem]) -> ConstraintResultType:
""" Use constraint_solve to return the positions of canvas items as if they are in a column. """
canvas_item_count = len(canvas_items)
spacing_count = canvas_item_count - 1
content_top = canvas_origin.y + self.margins.top
content_height = canvas_size.height - self.margins.top - self.margins.bottom - self.spacing * spacing_count
constraints = [canvas_item.layout_sizing.get_height_constraint(content_height) for canvas_item in canvas_items]
return constraint_solve(content_top, content_height, constraints, self.spacing)
def update_canvas_item_layout(self, canvas_item_origin: Geometry.IntPoint, canvas_item_size: Geometry.IntSize,
canvas_item: AbstractCanvasItem, *, immediate: bool = False) -> None:
""" Given a container box, adjust a single canvas item within the box according to aspect_ratio constraints. """
# TODO: Also adjust canvas items for maximums, and positioning
aspect_ratio = canvas_item_size.aspect_ratio
rect = Geometry.IntRect(origin=canvas_item_origin, size=canvas_item_size)
layout_sizing = canvas_item.layout_sizing
if layout_sizing.minimum_aspect_ratio is not None and aspect_ratio < layout_sizing.minimum_aspect_ratio:
rect = Geometry.fit_to_aspect_ratio(rect, layout_sizing.minimum_aspect_ratio).to_int_rect()
elif layout_sizing.maximum_aspect_ratio is not None and aspect_ratio > layout_sizing.maximum_aspect_ratio:
rect = Geometry.fit_to_aspect_ratio(rect, layout_sizing.maximum_aspect_ratio).to_int_rect()
elif layout_sizing.preferred_aspect_ratio is not None:
rect = Geometry.fit_to_aspect_ratio(rect, layout_sizing.preferred_aspect_ratio).to_int_rect()
canvas_item.update_layout(rect.origin, rect.size, immediate=immediate)
def layout_canvas_items(self, x_positions: typing.Sequence[int], y_positions: typing.Sequence[int],
widths: typing.Sequence[int], heights: typing.Sequence[int],
canvas_items: typing.Sequence[AbstractCanvasItem], *, immediate: bool = False) -> None:
""" Set the container boxes for the canvas items using update_canvas_item_layout on the individual items. """
for index, canvas_item in enumerate(canvas_items):
if canvas_item is not None:
canvas_item_origin = Geometry.IntPoint(x=x_positions[index], y=y_positions[index])
canvas_item_size = Geometry.IntSize(width=widths[index], height=heights[index])
self.update_canvas_item_layout(canvas_item_origin, canvas_item_size, canvas_item, immediate=immediate)
def _combine_sizing_property(self, sizing: Sizing, canvas_item_sizing: Sizing, property: str,
combiner: typing.Callable[[typing.Any, typing.Any], typing.Any],
clear_if_missing: bool = False) -> None:
""" Utility method for updating the property of the sizing object using the combiner function and the canvas_item_sizing. """
property = "_" + property
canvas_item_value = getattr(canvas_item_sizing, property)
value = getattr(sizing, property)
if canvas_item_value is not None:
if clear_if_missing:
setattr(sizing, property, combiner(value, canvas_item_value) if value is not None else None)
else:
setattr(sizing, property, combiner(value, canvas_item_value) if value is not None else canvas_item_value)
elif clear_if_missing:
setattr(sizing, property, None)
def _get_overlap_sizing(self, canvas_items: typing.Sequence[typing.Optional[AbstractCanvasItem]]) -> Sizing:
"""
A commonly used sizing method to determine the preferred/min/max assuming everything is stacked/overlapping.
Does not include spacing or margins.
"""
sizing = Sizing()
sizing._maximum_width = 0
sizing._maximum_height = 0
sizing._preferred_width = 0
sizing._preferred_height = 0
for canvas_item in canvas_items:
if canvas_item is not None:
canvas_item_sizing = canvas_item.layout_sizing
self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_width", max, True)
self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_height", max, True)
self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_width", max) # if any minimum_width is present, take the maximum one
self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_height", max)
self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_width", max, True)
self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_height", max, True)
if sizing.maximum_width == 0 or len(canvas_items) == 0:
sizing._maximum_width = None
if sizing.maximum_height == 0 or len(canvas_items) == 0:
sizing._maximum_height = None
if sizing.preferred_width == 0 or len(canvas_items) == 0:
sizing._preferred_width = None
if sizing.preferred_height == 0 or len(canvas_items) == 0:
sizing._preferred_height = None
return sizing
def _get_column_sizing(self, canvas_items: typing.Sequence[AbstractCanvasItem])-> Sizing:
"""
A commonly used sizing method to determine the preferred/min/max assuming everything is a column.
Does not include spacing or margins.
"""
sizing = Sizing()
sizing._maximum_width = 0
sizing._maximum_height = 0
sizing._preferred_width = 0
for canvas_item in canvas_items:
if canvas_item is not None:
canvas_item_sizing = canvas_item.layout_sizing
self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_width", max, True)
self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_height", operator.add)
self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_width", max)
self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_height", operator.add)
self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_width", max, True)
self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_height", operator.add, True)
if sizing.maximum_width == 0 or len(canvas_items) == 0:
sizing._maximum_width = None
if sizing.preferred_width == 0 or len(canvas_items) == 0:
sizing._preferred_width = None
if sizing.maximum_height == MAX_VALUE or len(canvas_items) == 0:
sizing._maximum_height = None
return sizing
def _get_row_sizing(self, canvas_items: typing.Sequence[AbstractCanvasItem]) -> Sizing:
"""
A commonly used sizing method to determine the preferred/min/max assuming everything is a column.
Does not include spacing or margins.
"""
sizing = Sizing()
sizing._maximum_width = 0
sizing._maximum_height = 0
sizing._preferred_height = 0
for canvas_item in canvas_items:
if canvas_item is not None:
canvas_item_sizing = canvas_item.layout_sizing
self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_width", operator.add)
self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_height", max, True)
self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_width", operator.add)
self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_height", max)
self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_width", operator.add, True)
self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_height", max, True)
if sizing.maximum_width == MAX_VALUE or len(canvas_items) == 0:
sizing._maximum_width = None
if sizing.maximum_height == 0 or len(canvas_items) == 0:
sizing._maximum_height = None
if sizing.preferred_height == 0 or len(canvas_items) == 0:
sizing._preferred_height = None
return sizing
def _adjust_sizing(self, sizing: Sizing, x_spacing: int, y_spacing: int) -> None:
""" Adjust the sizing object by adding margins and spacing. Spacing is total, not per item. """
if sizing._minimum_width is not None:
sizing._minimum_width += self.margins.left + self.margins.right + x_spacing
if sizing._maximum_width is not None:
sizing._maximum_width += self.margins.left + self.margins.right + x_spacing
if sizing._preferred_width is not None:
sizing._preferred_width += self.margins.left + self.margins.right + x_spacing
if sizing._minimum_height is not None:
sizing._minimum_height += self.margins.top + self.margins.bottom + y_spacing
if sizing._maximum_height is not None:
sizing._maximum_height += self.margins.top + self.margins.bottom + y_spacing
if sizing._preferred_height is not None:
sizing._preferred_height += self.margins.top + self.margins.bottom + y_spacing
    def add_canvas_item(self, canvas_item: AbstractCanvasItem, pos: typing.Optional[Geometry.IntPoint]) -> None:
        """
        Subclasses may override this method to get position specific information when a canvas item is added to
        the layout.
        """
        # default: layouts that do not track per-item positions ignore pos.
        pass
    def remove_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        """
        Subclasses may override this method to clean up position specific information when a canvas item is removed
        from the layout.
        """
        # default: layouts that do not track per-item positions have nothing to clean up.
        pass
    def layout(self, canvas_origin: Geometry.IntPoint, canvas_size: Geometry.IntSize,
               canvas_items: typing.Sequence[AbstractCanvasItem], *, immediate: bool = False) -> None:
        """ Subclasses must override this method to layout canvas item. """
        # abstract: concrete layouts position canvas_items within the given origin/size.
        raise NotImplementedError()
    def get_sizing(self, canvas_items: typing.Sequence[AbstractCanvasItem]) -> Sizing:
        """
        Return the sizing object for this layout. Includes spacing and margins.
        Subclasses must implement.
        """
        # contrast with the _get_*_sizing helpers, which exclude spacing/margins.
        raise NotImplementedError()
    def create_spacing_item(self, spacing: int) -> AbstractCanvasItem:
        """Return a canvas item representing fixed spacing in this layout. Subclasses must implement."""
        raise NotImplementedError()
    def create_stretch_item(self) -> AbstractCanvasItem:
        """Return a canvas item used as a stretchable filler in this layout. Subclasses must implement."""
        raise NotImplementedError()
class CanvasItemLayout(CanvasItemAbstractLayout):
    """
    Default layout which overlays all items on one another.
    Pass margins.
    """

    def __init__(self, margins: typing.Optional[Geometry.Margins] = None, spacing: typing.Optional[int] = None) -> None:
        super().__init__(margins, spacing)

    def layout(self, canvas_origin: Geometry.IntPoint, canvas_size: Geometry.IntSize,
               canvas_items: typing.Sequence[AbstractCanvasItem], *, immediate: bool = False) -> None:
        """Give every item the full canvas; items are stacked, not tiled."""
        for item in canvas_items:
            self.update_canvas_item_layout(canvas_origin, canvas_size, item, immediate=immediate)

    def get_sizing(self, canvas_items: typing.Sequence[AbstractCanvasItem]) -> Sizing:
        """Overlap sizing adjusted for margins; overlays have no spacing."""
        overlap_sizing = self._get_overlap_sizing(canvas_items)
        self._adjust_sizing(overlap_sizing, 0, 0)
        return overlap_sizing

    def create_spacing_item(self, spacing: int) -> AbstractCanvasItem:
        """Spacing items are meaningless in an overlay layout."""
        raise NotImplementedError()

    def create_stretch_item(self) -> AbstractCanvasItem:
        """Stretch items are meaningless in an overlay layout."""
        raise NotImplementedError()
class CanvasItemColumnLayout(CanvasItemAbstractLayout):
    """
    Layout items in a column.
    Pass margins and spacing.
    """

    def __init__(self, margins: typing.Optional[Geometry.Margins] = None, spacing: typing.Optional[int] = None,
                 alignment: typing.Optional[str] = None) -> None:
        super().__init__(margins, spacing)
        self.alignment = alignment

    def layout(self, canvas_origin: Geometry.IntPoint, canvas_size: Geometry.IntSize,
               canvas_items: typing.Sequence[AbstractCanvasItem], *, immediate: bool = False) -> None:
        """Stack the items vertically; place each horizontally per self.alignment."""
        # the shared column algorithm determines the vertical placement
        y_positions, heights = self.calculate_column_layout(canvas_origin, canvas_size, canvas_items)
        usable_width = canvas_size.width - self.margins.left - self.margins.right
        widths = [item.layout_sizing.get_unrestrained_width(usable_width) for item in canvas_items]
        left_edge = canvas_origin.x + self.margins.left
        if self.alignment == "start":
            x_positions = [left_edge for _ in widths]
        elif self.alignment == "end":
            x_positions = [left_edge + (usable_width - width) for width in widths]
        else:
            # centered (default); round the full expression to preserve historical rounding
            x_positions = [round(left_edge + (usable_width - width) * 0.5) for width in widths]
        self.layout_canvas_items(x_positions, y_positions, widths, heights, canvas_items, immediate=immediate)

    def get_sizing(self, canvas_items: typing.Sequence[AbstractCanvasItem]) -> Sizing:
        """Column sizing adjusted for margins plus total inter-item spacing."""
        sizing = self._get_column_sizing(canvas_items)
        total_spacing = self.spacing * (len(canvas_items) - 1)
        self._adjust_sizing(sizing, 0, total_spacing)
        return sizing

    def create_spacing_item(self, spacing: int) -> AbstractCanvasItem:
        """Return an empty zero-width item with a fixed height, used as vertical spacing."""
        item = EmptyCanvasItem()
        item.update_sizing(item.sizing.with_fixed_height(spacing).with_fixed_width(0))
        return item

    def create_stretch_item(self) -> AbstractCanvasItem:
        """Return an empty zero-width item whose height is free to stretch."""
        item = EmptyCanvasItem()
        item.update_sizing(item.sizing.with_fixed_width(0))
        return item
class CanvasItemRowLayout(CanvasItemAbstractLayout):
    """
    Layout items in a row.
    Pass margins and spacing.
    """

    def __init__(self, margins: typing.Optional[Geometry.Margins] = None, spacing: typing.Optional[int] = None,
                 alignment: typing.Optional[str] = None) -> None:
        super().__init__(margins, spacing)
        self.alignment = alignment

    def layout(self, canvas_origin: Geometry.IntPoint, canvas_size: Geometry.IntSize,
               canvas_items: typing.Sequence[AbstractCanvasItem], *, immediate: bool = False) -> None:
        """Place the items side by side; position each vertically per self.alignment."""
        # the shared row algorithm determines the horizontal placement
        x_positions, widths = self.calculate_row_layout(canvas_origin, canvas_size, canvas_items)
        usable_height = canvas_size.height - self.margins.top - self.margins.bottom
        heights = [item.layout_sizing.get_unrestrained_height(usable_height) for item in canvas_items]
        top_edge = canvas_origin.y + self.margins.top
        if self.alignment == "start":
            y_positions = [top_edge for _ in heights]
        elif self.alignment == "end":
            y_positions = [top_edge + (usable_height - height) for height in heights]
        else:
            # centered (default); integer division preserves historical rounding
            y_positions = [round(top_edge + (usable_height - height) // 2) for height in heights]
        self.layout_canvas_items(x_positions, y_positions, widths, heights, canvas_items, immediate=immediate)

    def get_sizing(self, canvas_items: typing.Sequence[AbstractCanvasItem]) -> Sizing:
        """Row sizing adjusted for margins plus total inter-item spacing."""
        sizing = self._get_row_sizing(canvas_items)
        total_spacing = self.spacing * (len(canvas_items) - 1)
        self._adjust_sizing(sizing, total_spacing, 0)
        return sizing

    def create_spacing_item(self, spacing: int) -> AbstractCanvasItem:
        """Return an empty zero-height item with a fixed width, used as horizontal spacing."""
        item = EmptyCanvasItem()
        item.update_sizing(item.sizing.with_fixed_width(spacing).with_fixed_height(0))
        return item

    def create_stretch_item(self) -> AbstractCanvasItem:
        """Return an empty zero-height item whose width is free to stretch."""
        item = EmptyCanvasItem()
        item.update_sizing(item.sizing.with_fixed_height(0))
        return item
class CanvasItemGridLayout(CanvasItemAbstractLayout):
    """
    Layout items in a grid specified by size (IntSize).
    Pass margins and spacing.
    Canvas items must be added to container canvas item using
    add_canvas_item with the position (IntPoint) passed as pos
    parameter.
    """
    def __init__(self, size: Geometry.IntSize, margins: typing.Optional[Geometry.Margins] = None, spacing: typing.Optional[int] = None) -> None:
        super().__init__(margins, spacing)
        assert size.width > 0 and size.height > 0
        self.__size = size
        # grid storage is column-major: __columns[x][y] holds the item at grid position (x, y)
        self.__columns: typing.List[typing.List[typing.Optional[AbstractCanvasItem]]] = [[None for _ in range(self.__size.height)] for _ in range(self.__size.width)]
    def add_canvas_item(self, canvas_item: AbstractCanvasItem, pos: typing.Optional[Geometry.IntPoint]) -> None:
        # pos is required for grids and must fall inside the declared grid size
        assert pos
        assert pos.x >= 0 and pos.x < self.__size.width
        assert pos.y >= 0 and pos.y < self.__size.height
        self.__columns[pos.x][pos.y] = canvas_item
    def remove_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        canvas_item.close()
        # clear every grid cell referencing this item
        for x in range(self.__size.width):
            for y in range(self.__size.height):
                if self.__columns[x][y] == canvas_item:
                    self.__columns[x][y] = None
    def layout(self, canvas_origin: Geometry.IntPoint, canvas_size: Geometry.IntSize,
               canvas_items: typing.Sequence[AbstractCanvasItem], *, immediate: bool = False) -> None:
        # calculate the horizontal placement
        # calculate the sizing (x, width) for each column
        canvas_item_count = self.__size.width
        spacing_count = canvas_item_count - 1
        content_left = canvas_origin.x + self.margins.left
        content_width = canvas_size.width - self.margins.left - self.margins.right - self.spacing * spacing_count
        constraints = list()
        # each column's width constraint is the overlap sizing of the cells it contains
        for x in range(self.__size.width):
            sizing = self._get_overlap_sizing([visible_canvas_item(self.__columns[x][y]) for y in range(self.__size.height)])
            constraints.append(sizing.get_width_constraint(content_width))
        # run the layout engine
        x_positions, widths = constraint_solve(content_left, content_width, constraints, self.spacing)
        # calculate the vertical placement
        # calculate the sizing (y, height) for each row
        canvas_item_count = self.__size.height
        spacing_count = canvas_item_count - 1
        content_top = canvas_origin.y + self.margins.top
        content_height = canvas_size.height - self.margins.top - self.margins.bottom - self.spacing * spacing_count
        constraints = list()
        for y in range(self.__size.height):
            sizing = self._get_overlap_sizing([visible_canvas_item(self.__columns[x][y]) for x in range(self.__size.width)])
            constraints.append(sizing.get_height_constraint(content_height))
        # run the layout engine
        y_positions, heights = constraint_solve(content_top, content_height, constraints, self.spacing)
        # do the layout
        # flatten occupied cells into parallel position/size/item lists for layout_canvas_items
        combined_xs = list()
        combined_ys = list()
        combined_widths = list()
        combined_heights = list()
        combined_canvas_items = list()
        for x in range(self.__size.width):
            for y in range(self.__size.height):
                canvas_item = visible_canvas_item(self.__columns[x][y])
                if canvas_item is not None:
                    combined_xs.append(x_positions[x])
                    combined_ys.append(y_positions[y])
                    combined_widths.append(widths[x])
                    combined_heights.append(heights[y])
                    combined_canvas_items.append(canvas_item)
        self.layout_canvas_items(combined_xs, combined_ys, combined_widths, combined_heights, combined_canvas_items, immediate=immediate)
    def get_sizing(self, canvas_items: typing.Sequence[AbstractCanvasItem]) -> Sizing:
        """
        Calculate the sizing for the grid. Treat columns and rows independently.
        Override from abstract layout.
        """
        sizing = Sizing().with_maximum_width(0).with_maximum_height(0).with_preferred_height(0)
        # the widths
        canvas_item_sizings = list()
        for x in range(self.__size.width):
            canvas_items_ = [visible_canvas_item(self.__columns[x][y]) for y in range(self.__size.height)]
            canvas_item_sizings.append(self._get_overlap_sizing(canvas_items_))
        # column widths accumulate across the grid
        for canvas_item_sizing in canvas_item_sizings:
            self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_width", operator.add)
            self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_width", operator.add)
            self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_width", operator.add, True)
        # the heights
        canvas_item_sizings = list()
        for y in range(self.__size.height):
            canvas_items_ = [visible_canvas_item(self.__columns[x][y]) for x in range(self.__size.width)]
            canvas_item_sizings.append(self._get_overlap_sizing(canvas_items_))
        # row heights accumulate across the grid
        for canvas_item_sizing in canvas_item_sizings:
            self._combine_sizing_property(sizing, canvas_item_sizing, "preferred_height", operator.add)
            self._combine_sizing_property(sizing, canvas_item_sizing, "minimum_height", operator.add)
            self._combine_sizing_property(sizing, canvas_item_sizing, "maximum_height", operator.add, True)
        # normalization: both the MAX_VALUE sentinel and 0 are treated as "unconstrained".
        # NOTE(review): canvas_items_ here is the leftover list from the row loop above
        # (length == grid width, which __init__ asserts is > 0), not the canvas_items
        # parameter — so the len(canvas_items_) == 0 guards can never fire; confirm intent.
        if sizing.maximum_width == MAX_VALUE or len(canvas_items_) == 0:
            sizing._maximum_width = None
        if sizing.maximum_height == MAX_VALUE or len(canvas_items_) == 0:
            sizing._maximum_height = None
        if sizing.maximum_width == 0 or len(canvas_items_) == 0:
            sizing._maximum_width = None
        if sizing.preferred_width == 0 or len(canvas_items_) == 0:
            sizing._preferred_width = None
        if sizing.maximum_height == 0 or len(canvas_items_) == 0:
            sizing._maximum_height = None
        if sizing.preferred_height == 0 or len(canvas_items_) == 0:
            sizing._preferred_height = None
        self._adjust_sizing(sizing, self.spacing * (self.__size.width - 1), self.spacing * (self.__size.height - 1))
        return sizing
class CompositionLayoutRenderTrait:
    """A trait (a set of methods for extending a class) allow customization of composition layout/rendering.
    Since traits aren't supported directly in Python, this works by having associated methods in the
    CanvasItemComposition class directly invoke the methods of this or a subclass of this object.
    """
    def __init__(self, canvas_item_composition: CanvasItemComposition):
        self._canvas_item_composition = canvas_item_composition
    def close(self) -> None:
        self._stop_render_behavior()
        # release the back-reference to the composition after stopping render behavior
        self._canvas_item_composition = None  # type: ignore
    def _stop_render_behavior(self) -> None:
        # base trait has no render thread to stop; subclasses override
        pass
    @property
    def _needs_layout_for_testing(self) -> bool:
        return False
    @property
    def is_layer_container(self) -> bool:
        return False
    def register_prepare_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        pass
    def unregister_prepare_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        pass
    def _container_layout_changed(self) -> None:
        pass
    # the _try_* hooks return True when this trait handled the operation; returning
    # False makes the composition fall back to its default (synchronous) behavior.
    def _try_update_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint], canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> bool:
        return False
    def _try_needs_layout(self, canvas_item: AbstractCanvasItem) -> bool:
        return False
    def _try_update_with_items(self, canvas_items: typing.Optional[typing.Sequence[AbstractCanvasItem]] = None) -> bool:
        return False
    def _try_updated(self) -> bool:
        return False
    def _try_repaint_template(self, drawing_context: DrawingContext.DrawingContext, immediate: bool) -> bool:
        return False
    def _try_repaint_if_needed(self, drawing_context: DrawingContext.DrawingContext, *, immediate: bool = False) -> bool:
        return False
    def layout_immediate(self, canvas_size: Geometry.IntSize, force: bool=True) -> None:
        # synchronously prepare, lay out self, then lay out children
        self._canvas_item_composition._prepare_render()
        self._canvas_item_composition._update_self_layout(Geometry.IntPoint(), canvas_size, immediate=True)
        self._canvas_item_composition._update_child_layouts(canvas_size, immediate=True)
    def _try_repaint_immediate(self, drawing_context: DrawingContext.DrawingContext, canvas_size: Geometry.IntSize) -> bool:
        return False
class CanvasItemComposition(AbstractCanvasItem):
    """A composite canvas item comprised of other canvas items.
    Optionally includes a layout. Compositions without an explicit layout are stacked to fit this container.
    Access child canvas items using canvas_items.
    Child canvas items with higher indexes are considered to be foremost.
    """
    def __init__(self, layout_render_trait: typing.Optional[CompositionLayoutRenderTrait] = None) -> None:
        super().__init__()
        self.__canvas_items: typing.List[AbstractCanvasItem] = list()
        self.layout: CanvasItemAbstractLayout = CanvasItemLayout()
        # guards access to __canvas_items (layout may run on another thread)
        self.__layout_lock = threading.RLock()
        # fall back to the base (no-op) trait when none is supplied
        self.__layout_render_trait = layout_render_trait or CompositionLayoutRenderTrait(self)
        self.__container_layout_changed_count = 0
    def close(self) -> None:
        self.__layout_render_trait.close()
        self.__layout_render_trait = typing.cast(typing.Any, None)
        with self.__layout_lock:
            canvas_items = self.canvas_items
            for canvas_item in canvas_items:
                canvas_item.close()
            # this goes after closing; if this goes before closing, threaded canvas items don't get closed properly
            # since they notify their container (to cull). to reproduce the bug, create a 1x2, then a 4x3 in the bottom.
            # then close several panels and undo. not sure if this is the permanent fix or not.
            self.__canvas_items = typing.cast(typing.Any, None)
        super().close()
    def _stop_render_behavior(self) -> None:
        # trait may be None during closing
        if self.__layout_render_trait:
            self.__layout_render_trait._stop_render_behavior()
    @property
    def _needs_layout_for_testing(self) -> bool:
        return self.__layout_render_trait._needs_layout_for_testing
    @property
    def layer_container(self) -> typing.Optional[CanvasItemComposition]:
        return self if self.__layout_render_trait.is_layer_container else super().layer_container
    def register_prepare_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        """DEPRECATED see _prepare_render."""
        self.__layout_render_trait.register_prepare_canvas_item(canvas_item)
    def unregister_prepare_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        """DEPRECATED see _prepare_render."""
        self.__layout_render_trait.unregister_prepare_canvas_item(canvas_item)
    def _begin_container_layout_changed(self) -> None:
        # recursively increase the changed count
        self.__container_layout_changed_count += 1
        for canvas_item in self.canvas_items:
            canvas_item._begin_container_layout_changed()
    def _finish_container_layout_changed(self) -> None:
        # recursively decrease the changed count
        self.__container_layout_changed_count -= 1
        for canvas_item in self.canvas_items:
            canvas_item._finish_container_layout_changed()
        # when the change count is zero, call container layout changed.
        # the effect is this will occur once per composite item. only
        # layers will actually do something (re-render with new layout).
        if self.__container_layout_changed_count == 0:
            self._container_layout_changed()
    def _redraw_container(self) -> None:
        self.__layout_render_trait._container_layout_changed()
    def _prepare_render(self) -> None:
        # give each child a chance to prepare before rendering, then self
        for canvas_item in self.__canvas_items:
            canvas_item._prepare_render()
        super()._prepare_render()
    @property
    def canvas_items_count(self) -> int:
        """Return count of canvas items managed by this composition."""
        return len(self.__canvas_items)
    @property
    def canvas_items(self) -> typing.List[AbstractCanvasItem]:
        """ Return a copy of the canvas items managed by this composition. """
        return copy.copy(self.__canvas_items)
    @property
    def visible_canvas_items(self) -> typing.List[AbstractCanvasItem]:
        with self.__layout_lock:
            # __canvas_items is None once close() has run
            if self.__canvas_items is not None:
                return [canvas_item for canvas_item in self.__canvas_items if canvas_item and canvas_item.visible]
        return list()
    def update_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint],
                      canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> None:
        """Override from abstract canvas item."""
        # the trait gets first chance to handle layout; fall back to direct layout
        if immediate or not self.__layout_render_trait._try_update_layout(canvas_origin, canvas_size, immediate=immediate):
            self._update_layout(canvas_origin, canvas_size, immediate=immediate)
    def layout_immediate(self, canvas_size: Geometry.IntSize, force: bool = True) -> None:
        # useful for tests
        self.__layout_render_trait.layout_immediate(canvas_size, force)
    def _update_with_items(self, canvas_items: typing.Optional[typing.Sequence[AbstractCanvasItem]] = None) -> None:
        # extra check for behavior during closing
        if self.__layout_render_trait and not self.__layout_render_trait._try_update_with_items(canvas_items):
            super()._update_with_items(canvas_items)
    def _updated(self, canvas_items: typing.Optional[typing.Sequence[AbstractCanvasItem]] = None) -> None:
        # extra check for behavior during closing
        if self.__layout_render_trait and not self.__layout_render_trait._try_updated():
            super()._updated(canvas_items)
    def _update_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint],
                       canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> None:
        """Private method, but available to tests."""
        with self.__layout_lock:
            if self.__canvas_items is not None:
                assert canvas_origin is not None
                assert canvas_size is not None
                canvas_origin_ = Geometry.IntPoint.make(canvas_origin)
                canvas_size_ = Geometry.IntSize.make(canvas_size)
                self._update_self_layout(canvas_origin_, canvas_size_, immediate=immediate)
                self._update_child_layouts(canvas_size_, immediate=immediate)
    def _update_child_layouts(self, canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> None:
        with self.__layout_lock:
            if self.__canvas_items is not None:
                assert canvas_size is not None
                canvas_size = Geometry.IntSize.make(canvas_size)
                # children are laid out relative to this composition's origin
                self.layout.layout(Geometry.IntPoint(), canvas_size, self.visible_canvas_items, immediate=immediate)
    def _needs_layout(self, canvas_item: AbstractCanvasItem) -> None:
        # extra check for behavior during closing
        if self.__layout_render_trait and not self.__layout_render_trait._try_needs_layout(canvas_item):
            super()._needs_layout(canvas_item)
    # override sizing information. let layout provide it.
    @property
    def layout_sizing(self) -> Sizing:
        # explicit sizing set on this item overrides the layout-derived sizing, field by field
        sizing = self.sizing
        layout_sizing = self.layout.get_sizing(self.visible_canvas_items)
        if sizing.minimum_width is not None:
            layout_sizing._minimum_width = sizing.minimum_width
        if sizing.maximum_width is not None:
            layout_sizing._maximum_width = sizing.maximum_width
        if sizing.preferred_width is not None:
            layout_sizing._preferred_width = sizing.preferred_width
        if sizing.minimum_height is not None:
            layout_sizing._minimum_height = sizing.minimum_height
        if sizing.maximum_height is not None:
            layout_sizing._maximum_height = sizing.maximum_height
        if sizing.preferred_height is not None:
            layout_sizing._preferred_height = sizing.preferred_height
        if sizing.minimum_aspect_ratio is not None:
            layout_sizing._minimum_aspect_ratio = sizing.minimum_aspect_ratio
        if sizing.maximum_aspect_ratio is not None:
            layout_sizing._maximum_aspect_ratio = sizing.maximum_aspect_ratio
        if sizing.preferred_aspect_ratio is not None:
            layout_sizing._preferred_aspect_ratio = sizing.preferred_aspect_ratio
        # a collapsible empty composition takes no space at all
        if len(self.visible_canvas_items) == 0 and sizing.collapsible:
            layout_sizing._minimum_width = 0
            layout_sizing._preferred_width = 0
            layout_sizing._maximum_width = 0
            layout_sizing._minimum_height = 0
            layout_sizing._preferred_height = 0
            layout_sizing._maximum_height = 0
        return layout_sizing
    def canvas_item_layout_sizing_changed(self, canvas_item: AbstractCanvasItem) -> None:
        """ Contained canvas items call this when their layout_sizing changes. """
        self.refresh_layout()
    def _insert_canvas_item_direct(self, before_index: int, canvas_item: AbstractCanvasItem,
                                   pos: typing.Optional[Geometry.IntPoint] = None) -> None:
        self.insert_canvas_item(before_index, canvas_item, pos)
    def insert_canvas_item(self, before_index: int, canvas_item: AbstractCanvasItem,
                           pos: typing.Optional[typing.Any] = None) -> AbstractCanvasItem:
        """ Insert canvas item into layout. pos parameter is layout specific. """
        self.__canvas_items.insert(before_index, canvas_item)
        canvas_item.container = self
        canvas_item._inserted(self)
        self.layout.add_canvas_item(canvas_item, pos)
        self.refresh_layout()
        self.update()
        return canvas_item
    def insert_spacing(self, before_index: int, spacing: int) -> AbstractCanvasItem:
        spacing_item = self.layout.create_spacing_item(spacing)
        return self.insert_canvas_item(before_index, spacing_item)
    def insert_stretch(self, before_index: int) -> AbstractCanvasItem:
        stretch_item = self.layout.create_stretch_item()
        return self.insert_canvas_item(before_index, stretch_item)
    def add_canvas_item(self, canvas_item: AbstractCanvasItem, pos: typing.Optional[typing.Any] = None) -> AbstractCanvasItem:
        """ Add canvas item to layout. pos parameter is layout specific. """
        return self.insert_canvas_item(len(self.__canvas_items), canvas_item, pos)
    def add_spacing(self, spacing: int) -> AbstractCanvasItem:
        return self.insert_spacing(len(self.__canvas_items), spacing)
    def add_stretch(self) -> AbstractCanvasItem:
        return self.insert_stretch(len(self.__canvas_items))
    def _remove_canvas_item_direct(self, canvas_item: AbstractCanvasItem) -> None:
        self.__canvas_items.remove(canvas_item)
    def _remove_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        canvas_item._removed(self)
        canvas_item.close()
        self.layout.remove_canvas_item(canvas_item)
        canvas_item.container = None
        self.__canvas_items.remove(canvas_item)
        self.refresh_layout()
        self.update()
    def remove_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        """ Remove canvas item from layout. Canvas item is closed. """
        self._remove_canvas_item(canvas_item)
    def remove_all_canvas_items(self) -> None:
        """ Remove all canvas items from layout. Canvas items are closed. """
        # iterate a copy in reverse since _remove_canvas_item mutates __canvas_items
        for canvas_item in reversed(copy.copy(self.__canvas_items)):
            self._remove_canvas_item(canvas_item)
    def replace_canvas_item(self, old_canvas_item: AbstractCanvasItem, new_canvas_item: AbstractCanvasItem) -> None:
        """ Replace the given canvas item with the new one. Canvas item is closed. """
        index = self.__canvas_items.index(old_canvas_item)
        self.remove_canvas_item(old_canvas_item)
        self.insert_canvas_item(index, new_canvas_item)
    def wrap_canvas_item(self, canvas_item: AbstractCanvasItem, canvas_item_container: CanvasItemComposition) -> None:
        """ Replace the given canvas item with the container and move the canvas item into the container. """
        canvas_origin = canvas_item.canvas_origin
        canvas_size = canvas_item.canvas_size
        index = self.__canvas_items.index(canvas_item)
        # remove the existing canvas item, but without closing it.
        self.layout.remove_canvas_item(canvas_item)
        canvas_item.container = None
        self._remove_canvas_item_direct(canvas_item)
        # insert the canvas item container
        # self.insert_canvas_item(index, canvas_item_container)  # this would adjust splitters. don't do it.
        self._insert_canvas_item_direct(index, canvas_item_container)
        # insert the canvas item into the container
        canvas_item_container.add_canvas_item(canvas_item)
        # perform the layout using existing origin/size.
        if canvas_origin is not None and canvas_size is not None:
            canvas_item_container._set_canvas_origin(canvas_origin)
            canvas_item_container._set_canvas_size(canvas_size)
            canvas_item._set_canvas_origin(Geometry.IntPoint())
        self.refresh_layout()
    def unwrap_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        """ Replace the canvas item container with the canvas item. """
        container = canvas_item.container
        assert container
        assert len(container.canvas_items) == 1
        assert container.canvas_items[0] == canvas_item
        enclosing_container = container.container
        assert enclosing_container
        index = enclosing_container.canvas_items.index(container)
        # remove the existing canvas item from the container, but without closing it.
        container.layout.remove_canvas_item(canvas_item)
        canvas_item.container = None
        container._remove_canvas_item_direct(canvas_item)
        # remove container from enclosing container
        enclosing_container._remove_canvas_item_direct(container)
        # insert canvas item into the enclosing container
        # enclosing_container.insert_canvas_item(index, canvas_item)  # this would adjust splitters. don't do it.
        enclosing_container._insert_canvas_item_direct(index, canvas_item)
        # update the layout if origin and size already known
        self.refresh_layout()
    def _repaint_template(self, drawing_context: DrawingContext.DrawingContext, immediate: bool) -> None:
        # trait gets first chance; default paints children then self
        if not self.__layout_render_trait._try_repaint_template(drawing_context, immediate):
            self._repaint_children(drawing_context, immediate=immediate)
            self._repaint(drawing_context)
    def _repaint_if_needed(self, drawing_context: DrawingContext.DrawingContext, *, immediate: bool = False) -> None:
        if self.__layout_render_trait:
            if not self.__layout_render_trait._try_repaint_if_needed(drawing_context, immediate=immediate):
                super()._repaint_if_needed(drawing_context, immediate=immediate)
    def repaint_immediate(self, drawing_context: DrawingContext.DrawingContext, canvas_size: Geometry.IntSize) -> None:
        if not self.__layout_render_trait._try_repaint_immediate(drawing_context, canvas_size):
            super().repaint_immediate(drawing_context, canvas_size)
    def _repaint_children(self, drawing_context: DrawingContext.DrawingContext, *, immediate: bool = False) -> None:
        """Paint items from back to front."""
        self._draw_background(drawing_context)
        for canvas_item in self.visible_canvas_items:
            if canvas_item._has_layout:
                with drawing_context.saver():
                    canvas_item_rect = canvas_item.canvas_rect
                    if canvas_item_rect:
                        # translate into the child's coordinate system before painting it
                        drawing_context.translate(canvas_item_rect.left, canvas_item_rect.top)
                        canvas_item._repaint_if_needed(drawing_context, immediate=immediate)
        self._draw_border(drawing_context)
    def _canvas_items_at_point(self, visible_canvas_items: typing.Sequence[AbstractCanvasItem], x: int, y: int) -> typing.List[AbstractCanvasItem]:
        """Returns list of canvas items under x, y, ordered from back to front."""
        canvas_items: typing.List[AbstractCanvasItem] = []
        point = Geometry.IntPoint(x=x, y=y)
        for canvas_item in reversed(visible_canvas_items):
            # the visible items can be changed while this method is running from the layout thread.
            # and yet we don't want to allow this to occur; maybe the layout thread should have some
            # sort of pending system, where once methods like this exit, they're allowed to update...?
            canvas_item_rect = canvas_item.canvas_rect
            if canvas_item_rect and canvas_item_rect.contains_point(point):
                canvas_origin = typing.cast(Geometry.IntPoint, canvas_item.canvas_origin)
                canvas_point = point - canvas_origin
                canvas_items.extend(canvas_item.canvas_items_at_point(canvas_point.x, canvas_point.y))
        canvas_items.extend(super().canvas_items_at_point(x, y))
        return canvas_items
    def canvas_items_at_point(self, x: int, y: int) -> typing.List[AbstractCanvasItem]:
        """Returns list of canvas items under x, y, ordered from back to front."""
        return self._canvas_items_at_point(self.visible_canvas_items, x, y)
    def get_root_opaque_canvas_items(self) -> typing.List[AbstractCanvasItem]:
        # an opaque root stops the recursion; otherwise gather from all children
        if self.is_root_opaque:
            return [self]
        canvas_items = list()
        for canvas_item in self.canvas_items:
            canvas_items.extend(canvas_item.get_root_opaque_canvas_items())
        return canvas_items
    def pan_gesture(self, dx: int, dy: int) -> bool:
        # offer the gesture to children front-to-back; first taker wins
        for canvas_item in reversed(self.visible_canvas_items):
            if canvas_item.pan_gesture(dx, dy):
                return True
        return False
# when False, layer render traits suppress their render thread (see _layer_thread_suppress usage)
_threaded_rendering_enabled = True
class LayerLayoutRenderTrait(CompositionLayoutRenderTrait):
    """Render trait that performs layout and repainting of a composition on a thread.

    Layout/repaint requests are coalesced into flags (__needs_layout, __needs_repaint)
    guarded by __layer_thread_condition, and serviced one at a time by a future
    submitted to a shared executor. The finished drawing is cached in
    __layer_drawing_context and published via _repaint_finished.
    """

    _layer_id = 0  # class-wide counter used to assign a unique id per layer instance

    # shared thread pool servicing repaint jobs for all layer instances
    _executor = concurrent.futures.ThreadPoolExecutor()

    def __init__(self, canvas_item_composition: CanvasItemComposition):
        super().__init__(canvas_item_composition)
        LayerLayoutRenderTrait._layer_id += 1
        self.__layer_id = LayerLayoutRenderTrait._layer_id
        self.__layer_lock = threading.RLock()
        # cached result of the most recent threaded repaint
        self.__layer_drawing_context: typing.Optional[DrawingContext.DrawingContext] = None
        # incremented each time a new layer drawing context is produced
        self.__layer_seed = 0
        self.__executing = False
        self.__cancel = False
        self.__needs_layout = False
        self.__needs_repaint = False
        self.__prepare_canvas_items: typing.List[AbstractCanvasItem] = list()
        self._layer_thread_suppress = not _threaded_rendering_enabled  # for testing
        self.__layer_thread_condition = threading.Condition()
        # Python 3.9+: Optional[concurrent.futures.Future[Any]]
        self.__repaint_one_future: typing.Optional[typing.Any] = None

    def close(self) -> None:
        # wait for any in-flight repaint before closing
        self._sync_repaint()
        super().close()

    def _stop_render_behavior(self) -> None:
        # cancel future repaints, wait for the current one, and drop the cached drawing
        self.__cancel = True
        self._sync_repaint()
        self.__layer_drawing_context = None

    @property
    def _needs_layout_for_testing(self) -> bool:
        return self.__needs_layout

    @property
    def is_layer_container(self) -> bool:
        return True

    def register_prepare_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        assert canvas_item not in self.__prepare_canvas_items
        self.__prepare_canvas_items.append(canvas_item)

    def unregister_prepare_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        assert canvas_item in self.__prepare_canvas_items
        self.__prepare_canvas_items.remove(canvas_item)

    def _container_layout_changed(self) -> None:
        # the section drawing code has no layout information; so it's possible for the sections to
        # overlap, particularly during resizing, resulting in one layer drawing only to be overwritten
        # by an older layer whose size hasn't been updated. this method is called quickly when the
        # enclosing container changes layout and helps ensure that all layers in the container are drawn
        # with the correct size.
        if self.__layer_drawing_context:
            self._canvas_item_composition._repaint_finished(self.__layer_drawing_context)

    def _try_update_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint], canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> bool:
        # layout self, but not the children. layout for children goes to thread.
        self._canvas_item_composition._update_self_layout(canvas_origin, canvas_size)
        self.__trigger_layout()
        return True

    def _try_needs_layout(self, canvas_item: AbstractCanvasItem) -> bool:
        # defer the child layout to the render thread
        self.__trigger_layout()
        return True

    def _sync_repaint(self) -> None:
        # block the caller until the currently queued repaint future (if any) completes
        done_event = threading.Event()
        with self.__layer_thread_condition:
            if self.__repaint_one_future:
                # Python 3.9: Optional[concurrent.futures.Future[Any]]
                def repaint_done(future: typing.Any) -> None:
                    done_event.set()
                self.__repaint_one_future.add_done_callback(repaint_done)
            else:
                done_event.set()
        done_event.wait()

    # Python 3.9: Optional[concurrent.futures.Future[Any]]
    def __repaint_done(self, future: typing.Any) -> None:
        # done-callback for the repaint future; requeue if more work arrived meanwhile
        with self.__layer_thread_condition:
            self.__repaint_one_future = None
            if self.__needs_layout or self.__needs_repaint:
                self.__queue_repaint()

    def __queue_repaint(self) -> None:
        # submit a single repaint job; at most one future is outstanding at a time
        with self.__layer_thread_condition:
            if not self.__cancel and not self.__repaint_one_future:
                self.__repaint_one_future = LayerLayoutRenderTrait._executor.submit(self.__repaint_layer)
                self.__repaint_one_future.add_done_callback(self.__repaint_done)

    def _try_updated(self) -> bool:
        with self.__layer_thread_condition:
            self.__needs_repaint = True
            if not self._layer_thread_suppress:
                self.__queue_repaint()
        # normally, this method would mark a pending update and forward the update to the container;
        # however with the layer, since drawing occurs on a thread, this must occur after the thread
        # is finished. if the thread is suppressed (typically during testing), use the regular flow.
        if self._layer_thread_suppress:
            # pass through updates in the thread is suppressed, so that updates actually occur.
            return False
        return True

    def _try_repaint_template(self, drawing_context: DrawingContext.DrawingContext, immediate: bool) -> bool:
        if immediate:
            canvas_size = self._canvas_item_composition.canvas_size
            if canvas_size:
                self._canvas_item_composition.repaint_immediate(drawing_context, canvas_size)
        else:
            # wrap the cached layer drawing in begin/end layer markers so the
            # front end can track this layer by id and seed.
            with self.__layer_lock:
                layer_drawing_context = self.__layer_drawing_context
                layer_seed = self.__layer_seed
            canvas_size = self._canvas_item_composition.canvas_size
            if canvas_size:
                drawing_context.begin_layer(self.__layer_id, layer_seed, 0, 0, *tuple(canvas_size))
                if layer_drawing_context:
                    drawing_context.add(layer_drawing_context)
                drawing_context.end_layer(self.__layer_id, layer_seed, 0, 0, *tuple(canvas_size))
        return True

    def _try_repaint_if_needed(self, drawing_context: DrawingContext.DrawingContext, *, immediate: bool = False) -> bool:
        # If the render behavior is a layer, it will have its own cached drawing context. Use it.
        self._canvas_item_composition._repaint_template(drawing_context, immediate)
        return True

    def layout_immediate(self, canvas_size: Geometry.IntSize, force: bool = True) -> None:
        # used for testing
        orphan = len(self.__prepare_canvas_items) == 0
        if orphan:
            self._canvas_item_composition._inserted(None)
        if force or self.__needs_layout:
            self.__needs_layout = False
            # temporarily suppress the render thread while laying out synchronously
            layer_thread_suppress, self._layer_thread_suppress = self._layer_thread_suppress, True
            for canvas_item in copy.copy(self.__prepare_canvas_items):
                canvas_item.prepare_render()
            self._canvas_item_composition._prepare_render()
            self._canvas_item_composition._update_self_layout(Geometry.IntPoint(), canvas_size, immediate=True)
            self._canvas_item_composition._update_child_layouts(canvas_size, immediate=True)
            self._layer_thread_suppress = layer_thread_suppress
        if orphan:
            self._canvas_item_composition._removed(None)

    def _try_repaint_immediate(self, drawing_context: DrawingContext.DrawingContext, canvas_size: Geometry.IntSize) -> bool:
        # synchronous layout + repaint with the render thread suppressed
        orphan = len(self.__prepare_canvas_items) == 0
        if orphan:
            self._canvas_item_composition._inserted(None)
        layer_thread_suppress, self._layer_thread_suppress = self._layer_thread_suppress, True
        self._layer_thread_suppress = True
        for canvas_item in copy.copy(self.__prepare_canvas_items):
            canvas_item.prepare_render()
        self._canvas_item_composition._update_self_layout(Geometry.IntPoint(), canvas_size, immediate=True)
        self._canvas_item_composition._update_child_layouts(canvas_size, immediate=True)
        self._canvas_item_composition._repaint_children(drawing_context, immediate=True)
        self._canvas_item_composition._repaint(drawing_context)
        self._layer_thread_suppress = layer_thread_suppress
        if orphan:
            self._canvas_item_composition._removed(None)
        return True

    def __repaint_layer(self) -> None:
        # executed on the thread pool: consume the pending layout/repaint flags,
        # perform the work, then publish the new cached drawing context.
        with self.__layer_thread_condition:
            needs_layout = self.__needs_layout
            needs_repaint = self.__needs_repaint
            self.__needs_layout = False
            self.__needs_repaint = False
        if not self.__cancel and (needs_repaint or needs_layout):
            if self._canvas_item_composition._has_layout:
                try:
                    for canvas_item in copy.copy(self.__prepare_canvas_items):
                        canvas_item.prepare_render()
                    self._canvas_item_composition._prepare_render()
                    # layout or repaint that occurs during prepare render should be handled
                    # but not trigger another repaint after this one.
                    with self.__layer_thread_condition:
                        needs_layout = needs_layout or self.__needs_layout
                        self.__needs_layout = False
                        self.__needs_repaint = False
                    if needs_layout:
                        assert self._canvas_item_composition.canvas_size is not None
                        self._canvas_item_composition._update_child_layouts(
                            self._canvas_item_composition.canvas_size)
                    drawing_context = DrawingContext.DrawingContext()
                    self._canvas_item_composition._repaint_children(drawing_context)
                    self._canvas_item_composition._repaint(drawing_context)
                    with self.__layer_lock:
                        self.__layer_seed += 1
                        self.__layer_drawing_context = drawing_context
                    if not self.__cancel:
                        self._canvas_item_composition._repaint_finished(self.__layer_drawing_context)
                except Exception as e:
                    import traceback
                    logging.debug("CanvasItem Render Error: %s", e)
                    traceback.print_exc()
                    traceback.print_stack()

    def __trigger_layout(self) -> None:
        # mark layout as pending and poke the render thread (unless suppressed)
        with self.__layer_thread_condition:
            self.__needs_layout = True
            if not self._layer_thread_suppress:
                self.__queue_repaint()
class LayerCanvasItem(CanvasItemComposition):
    """A composite canvas item that does layout and repainting in a thread."""

    def __init__(self) -> None:
        # install the threaded layout/render trait for this composition
        super().__init__(LayerLayoutRenderTrait(self))

    def _container_layout_changed(self) -> None:
        # override. a layer needs to redraw in the user interface.
        self._redraw_container()
class ScrollAreaCanvasItem(AbstractCanvasItem):
    """
    A scroll area canvas item with content.

    The content property holds the content of the scroll area.

    This scroll area controls the canvas_origin of the content, but not the
    size. When the scroll area is resized, update_layout will be called on
    the content, during which the content is free to adjust its canvas size.
    When the call to update_layout returns, this scroll area will adjust
    the canvas origin separately.

    The content canvas_rect property describes the position that the content
    is drawn within the scroll area. This means that content items must
    already have a layout when they're added to this scroll area.

    The content canvas_origin will typically be negative if the content
    canvas_size is larger than the scroll area canvas size.

    The content canvas_origin will typically be positive (or zero) if the
    content canvas_size is smaller than the scroll area canvas size.
    """

    def __init__(self, content: typing.Optional[AbstractCanvasItem] = None) -> None:
        super().__init__()
        # the content canvas item; assigned through the content property
        self.__content: typing.Optional[AbstractCanvasItem] = None
        if content:
            self.content = content
        # when True, the content layout is resized to match this scroll area
        self.auto_resize_contents = False
        # when True, keep the content origin within the scrollable range
        self._constrain_position = True
        # fired whenever the content layout/origin changes (used by scroll bars)
        self.content_updated_event = Event.Event()

    def close(self) -> None:
        """Close the content, then this item."""
        content = self.__content
        self.__content = None
        if content:
            content.close()
        super().close()

    @property
    def content(self) -> typing.Optional[AbstractCanvasItem]:
        """ Return the content of the scroll area. """
        return self.__content

    @content.setter
    def content(self, content: AbstractCanvasItem) -> None:
        """ Set the content of the scroll area. """
        # remove the old content
        if self.__content:
            self.__content.container = None
            self.__content.on_layout_updated = None
        # add the new content
        self.__content = content
        content.container = typing.cast(CanvasItemComposition, self)  # argh
        content.on_layout_updated = self.__content_layout_updated
        self.update()

    @property
    def visible_rect(self) -> Geometry.IntRect:
        """Return the currently visible rect, expressed in content coordinates."""
        content = self.__content
        if content:
            content_canvas_origin = content.canvas_origin
            canvas_size = self.canvas_size
            if content_canvas_origin and canvas_size:
                # negate the content origin to translate into content coordinates
                return Geometry.IntRect(origin=-content_canvas_origin, size=canvas_size)
        return Geometry.IntRect(origin=Geometry.IntPoint(), size=Geometry.IntSize())

    def update_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint],
                      canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> None:
        """Override from abstract canvas item.

        After setting the canvas origin and canvas size, like the abstract canvas item,
        update the layout of the content if it has no assigned layout yet. Whether it has
        an assigned layout is determined by whether the canvas origin and canvas size are
        None or not.
        """
        self._set_canvas_origin(canvas_origin)
        self._set_canvas_size(canvas_size)
        content = self.__content
        if content:
            # note: canvas_origin/canvas_size are rebound to the content's values here
            canvas_origin = content.canvas_origin
            canvas_size = content.canvas_size
            if canvas_origin is None or canvas_size is None:
                # if content has no assigned layout, update its layout relative to this object.
                # it will get a 0,0 origin but the same size as this scroll area.
                content.update_layout(Geometry.IntPoint(), self.canvas_size, immediate=immediate)
            elif self.auto_resize_contents:
                # if content has no assigned layout, update its layout relative to this object.
                # it will get a 0,0 origin but the same size as this scroll area.
                content.update_layout(canvas_origin, self.canvas_size, immediate=immediate)
            # validate the content origin. this is used for the scroll bar canvas item to ensure that the content is
            # consistent with the scroll bar.
            self.__content_layout_updated(canvas_origin, canvas_size, immediate=immediate)
        # NOTE: super is never called for this implementation
        # call on_layout_updated, just like the super implementation.
        if callable(self.on_layout_updated):
            self.on_layout_updated(self.canvas_origin, self.canvas_size, immediate)
        self._has_layout = self.canvas_origin is not None and self.canvas_size is not None

    def __content_layout_updated(self, canvas_origin: typing.Optional[Geometry.IntPoint],
                                 canvas_size: typing.Optional[Geometry.IntSize], immediate: bool = False) -> None:
        # whenever the content layout changes, this method gets called.
        # adjust the canvas_origin of the content if necessary. pass the canvas_origin, canvas_size of the content.
        # this method is used in the scroll bar canvas item to ensure that the content stays within view and
        # consistent with the scroll bar when the scroll area gets a new layout.
        if self._constrain_position and canvas_origin is not None and canvas_size is not None and self.canvas_origin is not None and self.canvas_size is not None:
            # when the scroll area content layout changes, this method will get called.
            # ensure that the content matches the scroll position.
            visible_size = self.canvas_size
            content = self.__content
            if content:
                content_size = content.canvas_size
                if content_size:
                    # clamp the origin so the content cannot be scrolled past its edges
                    scroll_range_h = max(content_size.width - visible_size.width, 0)
                    scroll_range_v = max(content_size.height - visible_size.height, 0)
                    canvas_origin = Geometry.IntPoint(x=canvas_origin.x, y=max(min(canvas_origin.y, 0), -scroll_range_v))
                    canvas_origin = Geometry.IntPoint(x=max(min(canvas_origin.x, 0), -scroll_range_h), y=canvas_origin.y)
                    content._set_canvas_origin(canvas_origin)
        # notify listeners (e.g. scroll bars) that the content may have moved
        self.content_updated_event.fire()

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Clip to this scroll area, then paint the visible part of the content."""
        super()._repaint(drawing_context)
        with drawing_context.saver():
            canvas_origin = self.canvas_origin
            canvas_size = self.canvas_size
            if canvas_origin and canvas_size:
                drawing_context.clip_rect(canvas_origin.x, canvas_origin.y, canvas_size.width, canvas_size.height)
                content = self.__content
                if content:
                    content_canvas_origin = content.canvas_origin
                    if content_canvas_origin:
                        drawing_context.translate(content_canvas_origin.x, content_canvas_origin.y)
                        visible_rect = Geometry.IntRect(origin=-content_canvas_origin, size=canvas_size)
                        content._repaint_visible(drawing_context, visible_rect)

    def canvas_items_at_point(self, x: int, y: int) -> typing.List[AbstractCanvasItem]:
        """Return the canvas items under x, y, including content items, back to front."""
        canvas_items: typing.List[AbstractCanvasItem] = []
        point = Geometry.IntPoint(x=x, y=y)
        content = self.__content
        if content and content.canvas_rect and content.canvas_rect.contains_point(point):
            content_canvas_origin = content.canvas_origin
            if content_canvas_origin:
                # translate the point into content coordinates before recursing
                canvas_point = point - content_canvas_origin
                canvas_items.extend(content.canvas_items_at_point(canvas_point.x, canvas_point.y))
        canvas_items.extend(super().canvas_items_at_point(x, y))
        return canvas_items

    def wheel_changed(self, x: int, y: int, dx: int, dy: int, is_horizontal: bool) -> bool:
        """Forward wheel events to the content in scroll-area-relative coordinates."""
        canvas_origin = self.canvas_origin
        if canvas_origin:
            x -= canvas_origin.x
            y -= canvas_origin.y
            content = self.__content
            if content:
                return content.wheel_changed(x, y, dx, dy, is_horizontal)
        return False

    def pan_gesture(self, dx: int, dy: int) -> bool:
        """Forward pan gestures to the content."""
        content = self.__content
        if content:
            return content.pan_gesture(dx, dy)
        return False
class SplitterCanvasItem(CanvasItemComposition):
    """A composition of canvas items separated by draggable splitter bars.

    orientation ("vertical" or "horizontal") describes the axis along which
    the children are stacked. A Sizing object is kept per child; the splits
    property exposes the children's relative sizes as fractions of the whole.

    Fix: __hit_test previously fell through to "horizontal" when the pointer
    was away from any splitter bar, which made the non-split cursor branch in
    mouse_position_changed unreachable (the split cursor stayed active over
    the entire splitter). It now returns "none" away from the bars.
    """

    def __init__(self, orientation: typing.Optional[str] = None) -> None:
        super().__init__()
        self.orientation = orientation if orientation else "vertical"
        self.wants_mouse_events = True
        # guards __sizings / __actual_sizings / __shadow_canvas_items
        self.__lock = threading.RLock()
        self.__sizings: typing.List[Sizing] = []
        self.__shadow_canvas_items: typing.List[AbstractCanvasItem] = []
        self.__actual_sizings: typing.List[Sizing] = []
        self.__tracking = False
        # optional callbacks fired around interactive split changes
        self.on_splits_will_change: typing.Optional[typing.Callable[[], None]] = None
        self.on_splits_changed: typing.Optional[typing.Callable[[], None]] = None

    @classmethod
    def __calculate_layout(self, orientation: str, canvas_size: Geometry.IntSize, sizings: typing.Sequence[Sizing]) -> ConstraintResultType:
        # solve the 1-d constraint layout along the orientation axis
        if orientation == "horizontal":
            content_origin = 0
            content_size = canvas_size.height
            constraints = [sizing.get_height_constraint(content_size) for sizing in sizings]
        else:
            content_origin = 0
            content_size = canvas_size.width
            constraints = [sizing.get_width_constraint(content_size) for sizing in sizings]
        return constraint_solve(content_origin, content_size, constraints)

    @property
    def splits(self) -> typing.Sequence[float]:
        """ Return the canvas item splits, which represent the relative size of each child. """
        if self.canvas_size:
            canvas_size = self.canvas_size
        else:
            # fallback size used before this item has a layout
            canvas_size = Geometry.IntSize(w=640, h=480)
        if self.orientation == "horizontal":
            content_size = canvas_size.height
        else:
            content_size = canvas_size.width
        with self.__lock:
            sizings = copy.deepcopy(self.__sizings)
        _, sizes = SplitterCanvasItem.__calculate_layout(self.orientation, canvas_size, sizings)
        return [float(size) / content_size for size in sizes]

    @splits.setter
    def splits(self, splits: typing.Sequence[float]) -> None:
        with self.__lock:
            sizings = copy.deepcopy(self.__sizings)
        assert len(splits) == len(sizings)
        for split, sizing in zip(splits, sizings):
            if self.orientation == "horizontal":
                sizing._preferred_height = split
            else:
                sizing._preferred_width = split
        with self.__lock:
            self.__sizings = sizings
        self.refresh_layout()

    def _insert_canvas_item_direct(self, before_index: int, canvas_item: AbstractCanvasItem,
                                   pos: typing.Optional[Geometry.IntPoint] = None) -> None:
        # bypass sizing bookkeeping; insert directly into the composition
        super().insert_canvas_item(before_index, canvas_item)

    def insert_canvas_item(self, before_index: int, canvas_item: AbstractCanvasItem,
                           sizing: typing.Optional[typing.Any] = None) -> AbstractCanvasItem:
        """Insert a child, creating/adjusting its sizing along the split axis."""
        sizing = copy.copy(sizing) if sizing else Sizing()
        if self.orientation == "horizontal":
            sizing._preferred_height = None
            if sizing._minimum_height is None:
                sizing._minimum_height = 0.1
        else:
            sizing._preferred_width = None
            if sizing._minimum_width is None:
                sizing._minimum_width = 0.1
        with self.__lock:
            self.__sizings.insert(before_index, sizing)
        return super().insert_canvas_item(before_index, canvas_item)

    def remove_canvas_item(self, canvas_item: AbstractCanvasItem) -> None:
        """Remove a child together with its associated sizing."""
        with self.__lock:
            del self.__sizings[self.canvas_items.index(canvas_item)]
        super().remove_canvas_item(canvas_item)

    def update_layout(self, canvas_origin: typing.Optional[Geometry.IntPoint],
                      canvas_size: typing.Optional[Geometry.IntSize], *, immediate: bool = False) -> None:
        """
        wrap the updates in container layout changes to avoid a waterfall of
        change messages. this is specific to splitter for now, but it's a general
        behavior that should eventually wrap all update layout calls.

        canvas items that cache their drawing bitmap (layers) need to know as quickly as possible
        that their layout has changed to a new size to avoid partially updated situations where
        their bitmaps overlap and a newer bitmap gets overwritten by a older overlapping bitmap,
        resulting in drawing anomaly. request a repaint for each canvas item at its new size here.

        this can also be tested by doing a 1x2 split; then 5x4 on the bottom; adding some images
        to the bottom; resizing the 1x2 split; then undo/redo. it helps to run on a slower machine.
        """
        self._begin_container_layout_changed()
        try:
            with self.__lock:
                canvas_items = copy.copy(self.canvas_items)
                sizings = copy.deepcopy(self.__sizings)
            assert len(canvas_items) == len(sizings)
            if canvas_size:
                origins, sizes = SplitterCanvasItem.__calculate_layout(self.orientation, canvas_size, sizings)
                if self.orientation == "horizontal":
                    for canvas_item, (origin, size) in zip(canvas_items, zip(origins, sizes)):
                        canvas_item_origin = Geometry.IntPoint(y=origin, x=0)  # origin within the splitter
                        canvas_item_size = Geometry.IntSize(height=size, width=canvas_size.width)
                        canvas_item.update_layout(canvas_item_origin, canvas_item_size, immediate=immediate)
                        assert canvas_item._has_layout
                    for sizing, size in zip(sizings, sizes):
                        sizing._preferred_height = size
                else:
                    for canvas_item, (origin, size) in zip(canvas_items, zip(origins, sizes)):
                        canvas_item_origin = Geometry.IntPoint(y=0, x=origin)  # origin within the splitter
                        canvas_item_size = Geometry.IntSize(height=canvas_size.height, width=size)
                        canvas_item.update_layout(canvas_item_origin, canvas_item_size, immediate=immediate)
                        assert canvas_item._has_layout
                    for sizing, size in zip(sizings, sizes):
                        sizing._preferred_width = size
                with self.__lock:
                    self.__actual_sizings = sizings
                    self.__shadow_canvas_items = canvas_items
            # instead of calling the canvas item composition, call the one for abstract canvas item.
            self._update_self_layout(canvas_origin, canvas_size, immediate=immediate)
            self._has_layout = self.canvas_origin is not None and self.canvas_size is not None
            # the next update is required because the children will trigger updates; but the updates
            # might not go all the way up the chain if this splitter has no layout. by now, it will
            # have a layout, so force an update.
            self.update()
        finally:
            self._finish_container_layout_changed()

    def canvas_items_at_point(self, x: int, y: int) -> typing.List[AbstractCanvasItem]:
        """Return items under the point; the splitter itself when near a bar."""
        assert self.canvas_origin is not None and self.canvas_size is not None
        with self.__lock:
            canvas_items = copy.copy(self.__shadow_canvas_items)
            sizings = copy.deepcopy(self.__actual_sizings)
        origins, _ = SplitterCanvasItem.__calculate_layout(self.orientation, self.canvas_size, sizings)
        if self.orientation == "horizontal":
            for origin in origins[1:]:  # don't check the '0' origin
                if abs(y - origin) < 6:
                    return [self]
        else:
            for origin in origins[1:]:  # don't check the '0' origin
                if abs(x - origin) < 6:
                    return [self]
        return self._canvas_items_at_point(canvas_items, x, y)

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Draw the splitter bars on top of the child content."""
        super()._repaint(drawing_context)
        assert self.canvas_origin is not None and self.canvas_size is not None
        with self.__lock:
            sizings = copy.deepcopy(self.__actual_sizings)
        origins, _ = SplitterCanvasItem.__calculate_layout(self.orientation, self.canvas_size, sizings)
        with drawing_context.saver():
            drawing_context.begin_path()
            for origin in origins[1:]:  # don't paint the '0' origin
                canvas_bounds = self.canvas_bounds
                if canvas_bounds:
                    if self.orientation == "horizontal":
                        drawing_context.move_to(canvas_bounds.left, origin)
                        drawing_context.line_to(canvas_bounds.right, origin)
                    else:
                        drawing_context.move_to(origin, canvas_bounds.top)
                        drawing_context.line_to(origin, canvas_bounds.bottom)
            drawing_context.line_width = 0.5
            drawing_context.stroke_style = "#666"
            drawing_context.stroke()

    def __hit_test(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> str:
        # return the bar orientation when the point is within 6 pixels of a
        # splitter bar, or "none" otherwise; used to pick the cursor shape.
        with self.__lock:
            sizings = copy.deepcopy(self.__actual_sizings)
        canvas_size = self.canvas_size
        if canvas_size:
            origins, _ = SplitterCanvasItem.__calculate_layout(self.orientation, canvas_size, sizings)
            if self.orientation == "horizontal":
                for index, origin in enumerate(origins[1:]):  # don't check the '0' origin
                    if abs(y - origin) < 6:
                        return "horizontal"
            else:
                for index, origin in enumerate(origins[1:]):  # don't check the '0' origin
                    if abs(x - origin) < 6:
                        return "vertical"
        # away from any bar: report no hit so the cursor can be reset.
        # (previously returned "horizontal", leaving the split cursor stuck on.)
        return "none"

    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Begin a splitter drag when pressing within 6 pixels of a bar."""
        assert self.canvas_origin is not None and self.canvas_size is not None
        with self.__lock:
            sizings = copy.deepcopy(self.__actual_sizings)
        origins, _ = SplitterCanvasItem.__calculate_layout(self.orientation, self.canvas_size, sizings)
        if self.orientation == "horizontal":
            for index, origin in enumerate(origins[1:]):  # don't check the '0' origin
                if abs(y - origin) < 6:
                    # remember the drag start and the two sizings it adjusts
                    self.__tracking = True
                    self.__tracking_start_pos = Geometry.IntPoint(y=y, x=x)
                    self.__tracking_start_adjust = y - origin
                    self.__tracking_start_index = index
                    self.__tracking_start_preferred = int(sizings[index].preferred_height or 0)
                    self.__tracking_start_preferred_next = int(sizings[index + 1].preferred_height or 0)
                    if callable(self.on_splits_will_change):
                        self.on_splits_will_change()
                    return True
        else:
            for index, origin in enumerate(origins[1:]):  # don't check the '0' origin
                if abs(x - origin) < 6:
                    self.__tracking = True
                    self.__tracking_start_pos = Geometry.IntPoint(y=y, x=x)
                    self.__tracking_start_adjust = x - origin
                    self.__tracking_start_index = index
                    self.__tracking_start_preferred = int(sizings[index].preferred_width or 0)
                    self.__tracking_start_preferred_next = int(sizings[index + 1].preferred_width or 0)
                    if callable(self.on_splits_will_change):
                        self.on_splits_will_change()
                    return True
        return super().mouse_pressed(x, y, modifiers)

    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """End a splitter drag and notify listeners of the new splits."""
        self.__tracking = False
        if callable(self.on_splits_changed):
            self.on_splits_changed()
        return True

    def mouse_position_changed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Drag a splitter bar (with snapping), or update the cursor shape."""
        if self.__tracking:
            with self.__lock:
                old_sizings = copy.deepcopy(self.__sizings)
                temp_sizings = copy.deepcopy(self.__actual_sizings)
            tracking_start_preferred_next = self.__tracking_start_preferred_next or 0
            tracking_start_preferred = self.__tracking_start_preferred or 0
            snaps: typing.List[int] = list()
            canvas_bounds = self.canvas_bounds
            if canvas_bounds:
                if self.orientation == "horizontal":
                    offset = y - self.__tracking_start_pos.y
                    if not modifiers.shift:
                        # snap to the midpoint and to 1/3, 2/3 of the container
                        snaps.append((tracking_start_preferred_next - tracking_start_preferred) // 2)
                        snaps.append(canvas_bounds.height // 3 - self.__tracking_start_pos.y - self.__tracking_start_adjust)
                        snaps.append(2 * canvas_bounds.height // 3 - self.__tracking_start_pos.y - self.__tracking_start_adjust)
                        for snap in snaps:
                            if abs(offset - snap) < 12:
                                offset = snap
                                break
                    temp_sizings[self.__tracking_start_index]._preferred_height = tracking_start_preferred + offset
                    temp_sizings[self.__tracking_start_index + 1]._preferred_height = tracking_start_preferred_next - offset
                else:
                    offset = x - self.__tracking_start_pos.x
                    if not modifiers.shift:
                        snaps.append((tracking_start_preferred_next - tracking_start_preferred) // 2)
                        snaps.append(canvas_bounds.width // 3 - self.__tracking_start_pos.x - self.__tracking_start_adjust)
                        snaps.append(2 * canvas_bounds.width // 3 - self.__tracking_start_pos.x - self.__tracking_start_adjust)
                        for snap in snaps:
                            if abs(offset - snap) < 12:
                                offset = snap
                                break
                    temp_sizings[self.__tracking_start_index]._preferred_width = tracking_start_preferred + offset
                    temp_sizings[self.__tracking_start_index + 1]._preferred_width = tracking_start_preferred_next - offset
                # fix the size of all children except for the two in question
                for index, sizing in enumerate(temp_sizings):
                    if index != self.__tracking_start_index and index != self.__tracking_start_index + 1:
                        if self.orientation == "horizontal":
                            sizing._set_fixed_height(sizing.preferred_height)
                        else:
                            sizing._set_fixed_width(sizing.preferred_width)
                # update the layout
                with self.__lock:
                    self.__sizings = temp_sizings
                self.refresh_layout()
                self.update_layout(self.canvas_origin, self.canvas_size)
                # restore the freedom of the others
                new_sizings = list()
                for index, (old_sizing, temp_sizing) in enumerate(zip(old_sizings, temp_sizings)):
                    sizing = Sizing()
                    sizing._copy_from(old_sizing)
                    if index == self.__tracking_start_index or index == self.__tracking_start_index + 1:
                        if self.orientation == "horizontal":
                            sizing._preferred_height = temp_sizing.preferred_height
                        else:
                            sizing._preferred_width = temp_sizing.preferred_width
                    new_sizings.append(sizing)
                with self.__lock:
                    self.__sizings = new_sizings
                # update once more with restored sizings. addresses issue nionswift/605
                self.refresh_layout()
            return True
        else:
            control = self.__hit_test(x, y, modifiers)
            if control == "horizontal":
                self.cursor_shape = "split_vertical"
            elif control == "vertical":
                self.cursor_shape = "split_horizontal"
            else:
                self.cursor_shape = None
            return super().mouse_position_changed(x, y, modifiers)
class SliderCanvasItem(AbstractCanvasItem, Observable.Observable):
    """Slider."""

    # geometry constants, in pixels
    thumb_width = 8
    thumb_height = 16
    bar_offset = 1
    bar_height = 4

    def __init__(self) -> None:
        super().__init__()
        self.wants_mouse_events = True
        self.__tracking = False
        self.__tracking_start = Geometry.IntPoint()
        self.__tracking_value = 0.0
        self.update_sizing(self.sizing.with_fixed_height(20))
        # value_stream publishes the current value; value_change_stream brackets
        # interactive drags with begin/end.
        self.value_stream = Stream.ValueStream[float]().add_ref()
        self.value_change_stream = Stream.ValueChangeStream(self.value_stream).add_ref()

    def close(self) -> None:
        """Release the stream references, then close."""
        self.value_change_stream.remove_ref()
        self.value_change_stream = typing.cast(typing.Any, None)
        self.value_stream.remove_ref()
        self.value_stream = typing.cast(typing.Any, None)
        super().close()

    @property
    def value(self) -> float:
        """Return the current value; 0.0 when the stream has no value."""
        return self.value_stream.value or 0.0

    @value.setter
    def value(self, value: float) -> None:
        # clamp to [0.0, 1.0]; repaint and notify observers only on change
        if self.value != value:
            self.value_stream.value = max(0.0, min(1.0, value))
            self.update()
            self.notify_property_changed("value")

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Paint the track (bar) and then the thumb on top."""
        thumb_rect = self.__get_thumb_rect()
        bar_rect = self.__get_bar_rect()
        with drawing_context.saver():
            drawing_context.begin_path()
            drawing_context.rect(bar_rect.left, bar_rect.top, bar_rect.width, bar_rect.height)
            drawing_context.fill_style = "#CCC"
            drawing_context.fill()
            drawing_context.stroke_style = "#888"
            drawing_context.stroke()
            drawing_context.begin_path()
            drawing_context.rect(thumb_rect.left, thumb_rect.top, thumb_rect.width, thumb_rect.height)
            drawing_context.fill_style = "#007AD8"
            drawing_context.fill()

    def __get_bar_rect(self) -> Geometry.FloatRect:
        # the track rect, vertically centered and inset so the thumb fits at both ends
        canvas_size = self.canvas_size
        if canvas_size:
            thumb_width = self.thumb_width
            bar_offset = self.bar_offset
            bar_width = canvas_size.width - thumb_width - bar_offset * 2
            bar_height = self.bar_height
            return Geometry.FloatRect.from_tlhw(canvas_size.height / 2 - bar_height / 2, bar_offset + thumb_width / 2, bar_height, bar_width)
        return Geometry.FloatRect.empty_rect()

    def __get_thumb_rect(self) -> Geometry.IntRect:
        # the thumb rect positioned along the track proportionally to the value
        canvas_size = self.canvas_size
        if canvas_size:
            thumb_width = self.thumb_width
            thumb_height = self.thumb_height
            bar_offset = self.bar_offset
            bar_width = canvas_size.width - thumb_width - bar_offset * 2
            # use tracking value to avoid thumb jumping around while dragging, which occurs when value gets integerized and set.
            value = self.value if not self.__tracking else self.__tracking_value
            return Geometry.FloatRect.from_tlhw(canvas_size.height / 2 - thumb_height / 2, value * bar_width + bar_offset, thumb_height, thumb_width).to_int_rect()
        return Geometry.IntRect.empty_rect()

    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Start a thumb drag, or page the value when pressing on the track."""
        thumb_rect = self.__get_thumb_rect()
        pos = Geometry.IntPoint(x=x, y=y)
        # allow a 2-pixel grab margin around the thumb
        if thumb_rect.inset(-2, -2).contains_point(pos):
            self.__tracking = True
            self.__tracking_start = pos
            self.__tracking_value = self.value
            self.value_change_stream.begin()
            self.update()
            return True
        elif x < thumb_rect.left:
            self.__adjust_thumb(-1)
            return True
        elif x > thumb_rect.right:
            self.__adjust_thumb(1)
            return True
        return super().mouse_pressed(x, y, modifiers)

    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Finish a thumb drag, closing the value-change bracket."""
        if self.__tracking:
            self.__tracking = False
            self.value_change_stream.end()
            self.update()
            return True
        return super().mouse_released(x, y, modifiers)

    def mouse_position_changed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """While dragging, map the mouse x position onto the value range."""
        if self.__tracking:
            pos = Geometry.FloatPoint(x=x, y=y)
            bar_rect = self.__get_bar_rect()
            value = (pos.x - bar_rect.left) / bar_rect.width
            self.__tracking_value = max(0.0, min(1.0, value))
            self.value = value  # setter clamps to [0.0, 1.0]
        return super().mouse_position_changed(x, y, modifiers)

    def __adjust_thumb(self, amount: float) -> None:
        # page the value by 10% per step, bracketed as one value change
        self.value_change_stream.begin()
        self.value = max(0.0, min(1.0, self.value + amount * 0.1))
        self.value_change_stream.end()
# (position, length) pair; used to describe a scroll bar thumb within its track
PositionLength = collections.namedtuple("PositionLength", ["position", "length"])
class ScrollBarCanvasItem(AbstractCanvasItem):
    """ A scroll bar for a scroll area. """

    def __init__(self, scroll_area_canvas_item: ScrollAreaCanvasItem, orientation: typing.Optional[Orientation] = None) -> None:
        """Create a scroll bar tracking *scroll_area_canvas_item*; orientation defaults to vertical."""
        super().__init__()
        orientation = orientation if orientation is not None else Orientation.Vertical
        self.wants_mouse_events = True
        self.__scroll_area_canvas_item = scroll_area_canvas_item
        # repaint whenever the tracked scroll area's content changes
        self.__scroll_area_canvas_item_content_updated_listener = self.__scroll_area_canvas_item.content_updated_event.listen(self.update)
        self.__tracking = False
        self.__orientation = orientation
        # fixed 16px cross-axis size, depending on orientation
        if self.__orientation == Orientation.Vertical:
            self.update_sizing(self.sizing.with_fixed_width(16))
        else:
            self.update_sizing(self.sizing.with_fixed_height(16))

    def close(self) -> None:
        """Release the content-updated listener and close the canvas item."""
        self.__scroll_area_canvas_item_content_updated_listener.close()
        self.__scroll_area_canvas_item_content_updated_listener = typing.cast(typing.Any, None)
        super().close()

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Paint the scroll bar track, thumb (if visible), and edge lines."""
        # canvas size, thumb rect
        canvas_size = self.canvas_size
        thumb_rect = self.thumb_rect
        if canvas_size:
            # draw it
            with drawing_context.saver():
                # draw the border of the scroll bar
                drawing_context.begin_path()
                drawing_context.rect(0, 0, canvas_size.width, canvas_size.height)
                # gradient runs across the bar (perpendicular to scroll direction)
                if self.__orientation == Orientation.Vertical:
                    gradient = drawing_context.create_linear_gradient(canvas_size.width, canvas_size.height, 0, 0, canvas_size.width, 0)
                else:
                    gradient = drawing_context.create_linear_gradient(canvas_size.width, canvas_size.height, 0, 0, 0, canvas_size.height)
                gradient.add_color_stop(0.0, "#F2F2F2")
                gradient.add_color_stop(0.35, "#FDFDFD")
                gradient.add_color_stop(0.65, "#FDFDFD")
                gradient.add_color_stop(1.0, "#F2F2F2")
                drawing_context.fill_style = gradient
                drawing_context.fill()
                # draw the thumb, if any
                if thumb_rect.height > 0 and thumb_rect.width > 0:
                    with drawing_context.saver():
                        drawing_context.begin_path()
                        # thumb is drawn as a thick round-capped line inset 6px from the ends
                        if self.__orientation == Orientation.Vertical:
                            drawing_context.move_to(thumb_rect.width - 8, thumb_rect.top + 6)
                            drawing_context.line_to(thumb_rect.width - 8, thumb_rect.bottom - 6)
                        else:
                            drawing_context.move_to(thumb_rect.left + 6, thumb_rect.height - 8)
                            drawing_context.line_to(thumb_rect.right - 6, thumb_rect.height - 8)
                        drawing_context.line_width = 8.0
                        drawing_context.line_cap = "round"
                        # darker while the user is dragging the thumb
                        drawing_context.stroke_style = "#888" if self.__tracking else "#CCC"
                        drawing_context.stroke()
                # draw inside edge
                drawing_context.begin_path()
                drawing_context.move_to(0, 0)
                if self.__orientation == Orientation.Vertical:
                    drawing_context.line_to(0, canvas_size.height)
                else:
                    drawing_context.line_to(canvas_size.width, 0)
                drawing_context.line_width = 0.5
                drawing_context.stroke_style = "#E3E3E3"
                drawing_context.stroke()
                # draw outside
                drawing_context.begin_path()
                if self.__orientation == Orientation.Vertical:
                    drawing_context.move_to(canvas_size.width, 0)
                else:
                    drawing_context.move_to(0, canvas_size.height)
                drawing_context.line_to(canvas_size.width, canvas_size.height)
                drawing_context.line_width = 0.5
                drawing_context.stroke_style = "#999999"
                drawing_context.stroke()

    def get_thumb_position_and_length(self, canvas_length: int, visible_length: int, content_length: int, content_offset: int) -> PositionLength:
        """
        Return the thumb position and length as a tuple of ints.

        The canvas_length is the size of the canvas of the scroll bar.

        The visible_length is the size of the visible area of the scroll area.

        The content_length is the size of the content of the scroll area.

        The content_offset is the position of the content within the scroll area. It
        will always be negative or zero.
        """
        # the scroll_range defines the maximum negative value of the content_offset.
        scroll_range = max(content_length - visible_length, 0)
        # content_offset should be negative, but not more negative than the scroll_range.
        content_offset = max(-scroll_range, min(0, content_offset))
        # assert content_offset <= 0 and content_offset >= -scroll_range
        # the length of the thumb is the visible_length multiplied by the ratio of
        # visible_length to the content_length. however, a minimum height is enforced
        # so that the user can always grab it. if the thumb is invisible (the content_length
        # is less than or equal to the visible_length) then the thumb will have a length of zero.
        if content_length > visible_length:
            thumb_length = int(canvas_length * (float(visible_length) / content_length))
            thumb_length = max(thumb_length, 32)
            # the position of the thumb is the content_offset over the content_length multiplied by
            # the free range of the thumb which is the canvas_length minus the thumb_length.
            thumb_position = int((canvas_length - thumb_length) * (float(-content_offset) / scroll_range))
        else:
            thumb_length = 0
            thumb_position = 0
        return PositionLength(thumb_position, thumb_length)

    @property
    def thumb_rect(self) -> Geometry.IntRect:
        """Return the thumb rect for the current canvas size, or an empty rect."""
        # return the thumb rect for the given canvas_size
        canvas_size = self.canvas_size
        if canvas_size:
            # index 0 selects the y/height axis (vertical), index 1 the x/width axis (horizontal)
            index = 0 if self.__orientation == Orientation.Vertical else 1
            scroll_area_canvas_size = self.__scroll_area_canvas_item.canvas_size
            scroll_area_content = self.__scroll_area_canvas_item.content
            if scroll_area_content and scroll_area_canvas_size:
                visible_length = scroll_area_canvas_size[index]
                scroll_area_rect = scroll_area_content.canvas_rect
                if scroll_area_rect:
                    content_length = scroll_area_rect.size[index]
                    content_offset = scroll_area_rect.origin[index]
                    thumb_position, thumb_length = self.get_thumb_position_and_length(canvas_size[index], visible_length, content_length, content_offset)
                    if self.__orientation == Orientation.Vertical:
                        thumb_origin = Geometry.IntPoint(x=0, y=thumb_position)
                        thumb_size = Geometry.IntSize(width=canvas_size.width, height=thumb_length)
                    else:
                        thumb_origin = Geometry.IntPoint(x=thumb_position, y=0)
                        thumb_size = Geometry.IntSize(width=thumb_length, height=canvas_size.height)
                    return Geometry.IntRect(origin=thumb_origin, size=thumb_size)
        return Geometry.IntRect.empty_rect()

    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Begin thumb tracking on a thumb hit, or page the scroll area toward the click."""
        thumb_rect = self.thumb_rect
        pos = Geometry.IntPoint(x=x, y=y)
        if thumb_rect.contains_point(pos):
            self.__tracking = True
            self.__tracking_start = pos
            scroll_area_content = self.__scroll_area_canvas_item.content
            # remember the content origin at press time; dragging is relative to it
            self.__tracking_content_offset = scroll_area_content.canvas_origin if scroll_area_content else Geometry.IntPoint()
            self.update()
            return True
        elif self.__orientation == Orientation.Vertical and y < thumb_rect.top:
            self.__adjust_thumb(-1)
            return True
        elif self.__orientation == Orientation.Vertical and y > thumb_rect.bottom:
            self.__adjust_thumb(1)
            return True
        elif self.__orientation != Orientation.Vertical and x < thumb_rect.left:
            self.__adjust_thumb(-1)
            return True
        elif self.__orientation != Orientation.Vertical and x > thumb_rect.right:
            self.__adjust_thumb(1)
            return True
        return super().mouse_pressed(x, y, modifiers)

    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """End thumb tracking (if any) and repaint."""
        self.__tracking = False
        self.update()
        return super().mouse_released(x, y, modifiers)

    def __adjust_thumb(self, amount: float) -> None:
        """Scroll the content up/down (or left/right) by one visible screen worth times *amount*."""
        # adjust the position up or down one visible screen worth
        index = 0 if self.__orientation == Orientation.Vertical else 1
        scroll_area_rect = self.__scroll_area_canvas_item.canvas_rect
        if scroll_area_rect:
            visible_length = scroll_area_rect.size[index]
            content = self.__scroll_area_canvas_item.content
            if content:
                content_canvas_origin = content.canvas_origin
                if content_canvas_origin:
                    if self.__orientation == Orientation.Vertical:
                        new_content_offset = Geometry.IntPoint(y=round(content_canvas_origin[0] - visible_length * amount), x=content_canvas_origin[1])
                    else:
                        new_content_offset = Geometry.IntPoint(y=content_canvas_origin[0], x=round(content_canvas_origin[1] - visible_length * amount))
                    content.update_layout(new_content_offset, content.canvas_size)
                    content.update()

    def adjust_content_offset(self, canvas_length: int, visible_length: int, content_length: int, content_offset: int, mouse_offset: int) -> int:
        """
        Return the adjusted content offset.

        The canvas_length is the size of the canvas of the scroll bar.

        The visible_length is the size of the visible area of the scroll area.

        The content_length is the size of the content of the scroll area.

        The content_offset is the position of the content within the scroll area. It
        will always be negative or zero.

        The mouse_offset is the offset of the mouse.
        """
        scroll_range = max(content_length - visible_length, 0)
        _, thumb_length = self.get_thumb_position_and_length(canvas_length, visible_length, content_length, content_offset)
        # translate the mouse movement into content movement via the thumb's free range
        offset_rel = int(scroll_range * float(mouse_offset) / (canvas_length - thumb_length))
        return max(min(content_offset - offset_rel, 0), -scroll_range)

    def mouse_position_changed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """While tracking, move the scroll area content to follow the thumb drag."""
        if self.__tracking:
            pos = Geometry.IntPoint(x=x, y=y)
            canvas_size = self.canvas_size
            scroll_area_canvas_size = self.__scroll_area_canvas_item.canvas_size
            if canvas_size and scroll_area_canvas_size:
                scroll_area_content = self.__scroll_area_canvas_item.content
                if scroll_area_content:
                    tracking_content_offset = self.__tracking_content_offset
                    scroll_area_content_canvas_size = scroll_area_content.canvas_size
                    if tracking_content_offset and scroll_area_content_canvas_size:
                        if self.__orientation == Orientation.Vertical:
                            mouse_offset_v = pos.y - self.__tracking_start.y
                            visible_height = scroll_area_canvas_size[0]
                            content_height = scroll_area_content_canvas_size[0]
                            new_content_offset_v = self.adjust_content_offset(canvas_size[0], visible_height, content_height, tracking_content_offset[0], mouse_offset_v)
                            new_content_offset = Geometry.IntPoint(x=tracking_content_offset[1], y=new_content_offset_v)
                        else:
                            mouse_offset_h = pos.x - self.__tracking_start.x
                            visible_width = scroll_area_canvas_size[1]
                            content_width = scroll_area_content_canvas_size[1]
                            new_content_offset_h = self.adjust_content_offset(canvas_size[1], visible_width, content_width, tracking_content_offset[1], mouse_offset_h)
                            new_content_offset = Geometry.IntPoint(x=new_content_offset_h, y=tracking_content_offset[0])
                        # move the content directly (no re-layout) and repaint both items
                        scroll_area_content._set_canvas_origin(new_content_offset)
                        scroll_area_content.update()
                        self.update()
        return super().mouse_position_changed(x, y, modifiers)
class RootLayoutRenderTrait(CompositionLayoutRenderTrait):
    """Layout/render trait that paints each root-opaque canvas item into its own canvas-widget section."""

    # class-wide counter used to allocate unique section ids
    next_section_id = 0

    def __init__(self, canvas_item_composition: CanvasItemComposition) -> None:
        super().__init__(canvas_item_composition)
        self.__needs_repaint = False
        self.__section_ids_lock = threading.RLock()
        # maps each root-opaque canvas item to its canvas-widget section id
        self.__section_map: typing.Dict[AbstractCanvasItem, int] = dict()

    def close(self) -> None:
        """Remove all canvas-widget sections and close the trait."""
        with self.__section_ids_lock:
            # swap the map out under the lock; release the sections afterwards
            section_map = self.__section_map
            self.__section_map = dict()
        for section_id in section_map.values():
            canvas_widget = self._canvas_item_composition.canvas_widget
            if canvas_widget:
                canvas_widget.remove_section(section_id)
        super().close()

    @property
    def is_layer_container(self) -> bool:
        # this trait owns rendering for its composition
        return True

    def _try_needs_layout(self, canvas_item: AbstractCanvasItem) -> bool:
        """Handle a layout request for *canvas_item*; always reports handled (True)."""
        if self._canvas_item_composition.canvas_size:
            # if this is a normal canvas item, tell it's container to layout again.
            # otherwise, if this is the root, just layout the root.
            container = canvas_item.container if canvas_item != self._canvas_item_composition else canvas_item
            if container and container.canvas_size:
                container.update_layout(container.canvas_origin, container.canvas_size)
                if container == self._canvas_item_composition:
                    # when the root is resized, be sure to update all of the opaque items since layout
                    # doesn't do it automatically right now.
                    for canvas_item in self._canvas_item_composition.get_root_opaque_canvas_items():
                        canvas_item.update()
        return True

    def _try_update_with_items(self, canvas_items: typing.Optional[typing.Sequence[AbstractCanvasItem]] = None) -> bool:
        """Repaint the given root-opaque canvas items into their widget sections; reports handled (True)."""
        drawing_context = DrawingContext.DrawingContext()
        if self._canvas_item_composition._has_layout and self._canvas_item_composition.canvas_widget and canvas_items:
            for canvas_item in canvas_items:
                if canvas_item.is_root_opaque:
                    self._canvas_item_composition._update_count += 1
                    canvas_size = canvas_item.canvas_size
                    if canvas_size:
                        canvas_rect = Geometry.IntRect(canvas_item.map_to_root_container(Geometry.IntPoint(0, 0)), canvas_size)
                        canvas_item._repaint_template(drawing_context, immediate=False)
                        drawing_context.translate(-canvas_rect.left, -canvas_rect.top)
                        # allocate a section id for this canvas item on first use
                        with self.__section_ids_lock:
                            section_id = self.__section_map.get(canvas_item, None)
                            if not section_id:
                                RootLayoutRenderTrait.next_section_id += 1
                                section_id = RootLayoutRenderTrait.next_section_id
                                self.__section_map[canvas_item] = section_id
                        self._canvas_item_composition.canvas_widget.draw_section(section_id, drawing_context, canvas_rect)
                        # break
        self.__cull_unused_sections()
        return True

    def __cull_unused_sections(self) -> None:
        """Drop widget sections whose canvas items are no longer root-opaque children."""
        canvas_items = self._canvas_item_composition.get_root_opaque_canvas_items()
        with self.__section_ids_lock:
            # rebuild the map keeping only sections for still-present items
            section_map = self.__section_map
            self.__section_map = dict()
            for canvas_item in canvas_items:
                section_id = section_map.pop(canvas_item, None)
                if section_id:
                    self.__section_map[canvas_item] = section_id
        # whatever remains in section_map is unused; remove those sections from the widget
        for section_id in section_map.values():
            canvas_widget = self._canvas_item_composition.canvas_widget
            if canvas_widget:
                canvas_widget.remove_section(section_id)
# Layout-render mode identifiers for RootCanvasItem: "root" selects
# RootLayoutRenderTrait (sectioned repainting into the canvas widget, when
# threaded rendering is enabled); None selects the default layered rendering.
RootLayoutRender = "root"
DefaultLayoutRender: typing.Optional[str] = None
class RootCanvasItem(CanvasItemComposition):
"""A root layer to interface to the widget world.
The root canvas item acts as a bridge between the higher level ui widget and a canvas hierarchy. It connects size
notifications, mouse activity, keyboard activity, focus activity, and drag and drop actions to the canvas item.
The root canvas item provides a canvas_widget property which is the canvas widget associated with this root item.
The root canvas may be focusable or not. There are two focus states that this root canvas item handles: the widget
focus and the canvas item focus. The widget focus comes from the enclosing widget. If this root canvas item has a
widget focus, then it can also have a canvas item focus to specify which specific canvas item is the focus in this
root canvas item's hierarchy.
"""
    def __init__(self, canvas_widget: UserInterface.CanvasWidget, *, layout_render: typing.Optional[str] = DefaultLayoutRender) -> None:
        """Bridge *canvas_widget* events into this canvas item hierarchy.

        layout_render selects the render trait: RootLayoutRender ("root") uses sectioned
        root rendering when threaded rendering is enabled; otherwise layered rendering.
        """
        super().__init__(RootLayoutRenderTrait(self) if layout_render == RootLayoutRender and _threaded_rendering_enabled else LayerLayoutRenderTrait(self))
        self.__canvas_widget = canvas_widget
        # wire every widget callback into the corresponding handler on this item
        self.__canvas_widget.on_size_changed = self.size_changed
        self.__canvas_widget.on_mouse_clicked = self.__mouse_clicked
        self.__canvas_widget.on_mouse_double_clicked = self.__mouse_double_clicked
        self.__canvas_widget.on_mouse_entered = self.__mouse_entered
        self.__canvas_widget.on_mouse_exited = self.__mouse_exited
        self.__canvas_widget.on_mouse_pressed = self.__mouse_pressed
        self.__canvas_widget.on_mouse_released = self.__mouse_released
        self.__canvas_widget.on_mouse_position_changed = self.__mouse_position_changed
        self.__canvas_widget.on_grabbed_mouse_position_changed = self.__grabbed_mouse_position_changed
        self.__canvas_widget.on_wheel_changed = self.wheel_changed
        self.__canvas_widget.on_context_menu_event = self.__context_menu_event
        self.__canvas_widget.on_key_pressed = self.__key_pressed
        self.__canvas_widget.on_key_released = self.__key_released
        self.__canvas_widget.on_focus_changed = self.__focus_changed
        self.__canvas_widget.on_drag_enter = self.__drag_enter
        self.__canvas_widget.on_drag_leave = self.__drag_leave
        self.__canvas_widget.on_drag_move = self.__drag_move
        self.__canvas_widget.on_drop = self.__drop
        self.__canvas_widget.on_tool_tip = self.handle_tool_tip
        self.__canvas_widget.on_pan_gesture = self.pan_gesture
        self.__canvas_widget.on_dispatch_any = self.__dispatch_any
        self.__canvas_widget.on_can_dispatch_any = self.__can_dispatch_any
        self.__canvas_widget.on_get_menu_item_state = self.__get_menu_item_state
        setattr(self.__canvas_widget, "_root_canvas_item", weakref.ref(self))  # for debugging
        self.__drawing_context_updated = False
        # counts nested UI interactions (mouse press/key press/contexts)
        self.__interaction_count = 0
        # canvas-item focus state within this hierarchy
        self.__focused_item: typing.Optional[AbstractCanvasItem] = None
        self.__last_focused_item: typing.Optional[AbstractCanvasItem] = None
        self.__mouse_canvas_item: typing.Optional[AbstractCanvasItem] = None  # not None when the mouse is pressed
        self.__mouse_tracking = False
        self.__mouse_tracking_canvas_item: typing.Optional[AbstractCanvasItem] = None
        self.__drag_tracking = False
        self.__drag_tracking_canvas_item: typing.Optional[AbstractCanvasItem] = None
        self.__grab_canvas_item: typing.Optional[MouseTrackingCanvasItem.TrackingCanvasItem] = None
        self._set_canvas_origin(Geometry.IntPoint())
    def close(self) -> None:
        """Stop rendering, disconnect all widget callbacks, and release references."""
        # shut down the repaint thread first
        self._stop_render_behavior()  # call first so that it doesn't use canvas widget
        self.__mouse_tracking_canvas_item = None
        self.__drag_tracking_canvas_item = None
        self.__grab_canvas_item = None
        self.__focused_item = None
        self.__last_focused_item = None
        # clear every callback wired up in __init__
        self.__canvas_widget.on_size_changed = None
        self.__canvas_widget.on_mouse_clicked = None
        self.__canvas_widget.on_mouse_double_clicked = None
        self.__canvas_widget.on_mouse_entered = None
        self.__canvas_widget.on_mouse_exited = None
        self.__canvas_widget.on_mouse_pressed = None
        self.__canvas_widget.on_mouse_released = None
        self.__canvas_widget.on_mouse_position_changed = None
        self.__canvas_widget.on_grabbed_mouse_position_changed = None
        self.__canvas_widget.on_wheel_changed = None
        self.__canvas_widget.on_context_menu_event = None
        self.__canvas_widget.on_key_pressed = None
        self.__canvas_widget.on_key_released = None
        self.__canvas_widget.on_focus_changed = None
        self.__canvas_widget.on_drag_enter = None
        self.__canvas_widget.on_drag_leave = None
        self.__canvas_widget.on_drag_move = None
        self.__canvas_widget.on_drop = None
        self.__canvas_widget.on_tool_tip = None
        self.__canvas_widget.on_pan_gesture = None
        super().close()
        # culling will require the canvas widget; clear it here (after close) so that it is available during close.
        self.__canvas_widget = typing.cast(typing.Any, None)
    def _repaint_finished(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Push the finished drawing context to the canvas widget for display."""
        self.__canvas_widget.draw(drawing_context)
    def refresh_layout(self) -> None:
        """Request a fresh layout of the entire hierarchy starting at this root."""
        self._needs_layout(self)
    @property
    def root_container(self) -> typing.Optional[RootCanvasItem]:
        """Return the root container, which is this item itself."""
        return self
    @property
    def canvas_widget(self) -> UserInterface.CanvasWidget:
        """ Return the canvas widget. """
        return self.__canvas_widget
    def map_to_global(self, p: Geometry.IntPoint) -> Geometry.IntPoint:
        """Map point *p* from this canvas item's coordinates to global (screen) coordinates."""
        return self.__canvas_widget.map_to_global(p)
    @property
    def is_ui_interaction_active(self) -> bool:
        """Return whether any UI interaction (mouse, key, context menu) is in progress."""
        return self.__interaction_count > 0
def _adjust_ui_interaction(self, value: int) -> None:
self.__interaction_count += value
    class UIInteractionContext:
        """Context manager marking a UI interaction on the root canvas item.

        Increments the root's interaction count on enter and decrements it on exit.
        """

        def __init__(self, root_canvas_item: RootCanvasItem) -> None:
            self.__root_canvas_item = root_canvas_item

        def close(self) -> None:
            self.__root_canvas_item._adjust_ui_interaction(-1)

        def __enter__(self) -> RootCanvasItem.UIInteractionContext:
            self.__root_canvas_item._adjust_ui_interaction(1)
            return self

        def __exit__(self, exception_type: typing.Optional[typing.Type[BaseException]],
                     value: typing.Optional[BaseException], traceback: typing.Optional[types.TracebackType]) -> typing.Optional[bool]:
            self.close()
            return None
    def _ui_interaction(self) -> contextlib.AbstractContextManager[RootCanvasItem.UIInteractionContext]:
        """Return a context manager tracking a UI interaction on this root."""
        return RootCanvasItem.UIInteractionContext(self)
    @property
    def focusable(self) -> bool:
        """ Return whether the canvas widget is focusable. """
        # focus policy is delegated entirely to the underlying widget
        return self.canvas_widget.focusable

    @focusable.setter
    def focusable(self, focusable: bool) -> None:
        """ Set whether the canvas widget is focusable. """
        self.canvas_widget.focusable = focusable
    def size_changed(self, width: int, height: int) -> None:
        """ Called when size changes. """
        # logging.debug("{} {} x {}".format(id(self), width, height))
        # ignore degenerate sizes (widget not yet realized)
        if width > 0 and height > 0:
            self._set_canvas_origin(Geometry.IntPoint())
            self._set_canvas_size(Geometry.IntSize(height=height, width=width))
            self._has_layout = self.canvas_origin is not None and self.canvas_size is not None
            self.refresh_layout()
    @property
    def focused_item(self) -> typing.Optional[AbstractCanvasItem]:
        """
        Return the canvas focused item. May return None.

        The focused item is either this item itself or one of its
        children.
        """
        return self.__focused_item
    def _set_focused_item(self, focused_item: typing.Optional[AbstractCanvasItem], p: typing.Optional[Geometry.IntPoint] = None, modifiers: typing.Optional[UserInterface.KeyboardModifiers] = None) -> None:
        """ Set the canvas focused item. This will also update the focused property of both old item (if any) and new item (if any). """
        if not modifiers or not modifiers.any_modifier:
            if focused_item != self.__focused_item:
                if self.__focused_item:
                    self.__focused_item._set_focused(False)
                self.__focused_item = focused_item
                if self.__focused_item:
                    self.__focused_item._set_focused(True)
            # remember the last focused item so focus can be restored later
            if self.__focused_item:
                self.__last_focused_item = self.__focused_item
        elif focused_item:
            # with a modifier held, give the item secondary focus instead of moving primary focus
            focused_item.adjust_secondary_focus(p or Geometry.IntPoint(), modifiers)
    def __focus_changed(self, focused: bool) -> None:
        """ Called when widget focus changes. """
        # gaining widget focus restores the last focused canvas item; losing it clears focus
        if focused and not self.focused_item:
            self._set_focused_item(self.__last_focused_item)
        elif not focused and self.focused_item:
            self._set_focused_item(None)
    def _request_root_focus(self, focused_item: typing.Optional[AbstractCanvasItem], p: typing.Optional[Geometry.IntPoint], modifiers: typing.Optional[UserInterface.KeyboardModifiers]) -> None:
        """Requests that the root widget gets focus.

        This focused is different from the focus within the canvas system. This is
        the external focus in the widget system.

        If the canvas widget is already focused, this simply sets the focused item
        to be the requested one. Otherwise, the widget has to request focus. When
        it receives focus, a __focus_changed from the widget which will restore the
        last focused item to be the new focused canvas item.
        """
        if self.__canvas_widget.focused:
            self._set_focused_item(focused_item, p, modifiers)
        else:
            self._set_focused_item(None, p, modifiers)
            self.__last_focused_item = focused_item
            self.__canvas_widget.focused = True  # this will trigger focus changed to set the focus
def wheel_changed(self, x: int, y: int, dx: int, dy: int, is_horizontal: bool) -> bool:
# always give the mouse canvas item priority (for tracking outside bounds)
canvas_items = self.canvas_items_at_point(x, y)
for canvas_item in reversed(canvas_items):
if canvas_item != self:
canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), canvas_item)
if canvas_item.wheel_changed(canvas_item_point.x, canvas_item_point.y, dx, dy, is_horizontal):
return True
return False
    def handle_tool_tip(self, x: int, y: int, gx: int, gy: int) -> bool:
        """Offer a tool tip event at local (x, y) / global (gx, gy) to items under the point, deepest first."""
        canvas_items = self.canvas_items_at_point(x, y)
        for canvas_item in reversed(canvas_items):
            if canvas_item != self:
                canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), canvas_item)
                if canvas_item.handle_tool_tip(canvas_item_point.x, canvas_item_point.y, gx, gy):
                    return True
        return False
def __dispatch_any(self, method: str, *args: typing.Any, **kwargs: typing.Any) -> bool:
focused_item = self.focused_item
if focused_item:
return focused_item._dispatch_any(method, *args, **kwargs)
return False
    def __can_dispatch_any(self, method: str) -> bool:
        """Return whether the focused item (if any) can handle a dynamic dispatch of *method*."""
        focused_item = self.focused_item
        if focused_item:
            return focused_item._can_dispatch_any(method)
        return False
    def __get_menu_item_state(self, command_id: str) -> typing.Optional[UserInterface.MenuItemState]:
        """Return the focused item's menu item state for *command_id*, or None."""
        focused_item = self.focused_item
        if focused_item:
            menu_item_state = focused_item._get_menu_item_state(command_id)
            if menu_item_state:
                return menu_item_state
        return None
    def _cursor_shape_changed(self, item: AbstractCanvasItem) -> None:
        """Update the widget cursor when the item currently under the mouse changes its cursor shape."""
        if item == self.__mouse_tracking_canvas_item and self.__mouse_tracking_canvas_item:
            self.__canvas_widget.set_cursor_shape(self.__mouse_tracking_canvas_item.cursor_shape)
    def _restore_cursor_shape(self) -> None:
        """Reset the widget cursor to the default shape."""
        # if self.__mouse_tracking_canvas_item:
        #     self.__canvas_widget.set_cursor_shape(self.__mouse_tracking_canvas_item.cursor_shape)
        # else:
        self.__canvas_widget.set_cursor_shape(None)
    def __mouse_entered(self) -> None:
        """Widget callback: the mouse entered the widget; begin mouse tracking."""
        self.__mouse_tracking = True
    def __mouse_exited(self) -> None:
        """Widget callback: the mouse left the widget; end tracking and clear cursor/tool tip."""
        if self.__mouse_tracking_canvas_item:
            self.__mouse_tracking_canvas_item.mouse_exited()
        self.__mouse_tracking = False
        self.__mouse_tracking_canvas_item = None
        self.__canvas_widget.set_cursor_shape(None)
        self.__canvas_widget.tool_tip = None
    def __mouse_canvas_item_at_point(self, x: int, y: int) -> typing.Optional[AbstractCanvasItem]:
        """Return the canvas item that should receive mouse events at (x, y).

        While the mouse is pressed, the original item keeps receiving events
        even if the mouse moves off it.
        """
        if self.__mouse_canvas_item:
            return self.__mouse_canvas_item
        canvas_items = self.canvas_items_at_point(x, y)
        for canvas_item in canvas_items:
            if canvas_item.wants_mouse_events:
                return canvas_item
        return None
    def __request_focus(self, canvas_item: AbstractCanvasItem, p: Geometry.IntPoint, modifiers: UserInterface.KeyboardModifiers) -> None:
        """Walk up from *canvas_item* to the nearest focusable ancestor and request focus for it."""
        canvas_item_: typing.Optional[AbstractCanvasItem] = canvas_item
        while canvas_item_:
            if canvas_item_.focusable:
                canvas_item_._request_focus(p, modifiers)
                break
            canvas_item_ = canvas_item_.container
    def __mouse_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Widget callback: deliver a click to the mouse canvas item at (x, y)."""
        with self._ui_interaction():
            canvas_item = self.__mouse_canvas_item_at_point(x, y)
            if canvas_item:
                canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), canvas_item)
                return canvas_item.mouse_clicked(canvas_item_point.x, canvas_item_point.y, modifiers)
            return False
    def __mouse_double_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Widget callback: deliver a double click; also requests focus for the target item."""
        with self._ui_interaction():
            canvas_item = self.__mouse_canvas_item_at_point(x, y)
            if canvas_item:
                self.__request_focus(canvas_item, Geometry.IntPoint(x=x, y=y), modifiers)
                canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), canvas_item)
                return canvas_item.mouse_double_clicked(canvas_item_point.x, canvas_item_point.y, modifiers)
            return False
    def __mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Widget callback: press the mouse; begins the press/release interaction pair."""
        self._adjust_ui_interaction(1)
        # ensure tracking state reflects the current position before pressing
        self.__mouse_position_changed(x, y, modifiers)
        if not self.__mouse_tracking_canvas_item:
            self.__mouse_tracking_canvas_item = self.__mouse_canvas_item_at_point(x, y)
            if self.__mouse_tracking_canvas_item:
                self.__mouse_tracking_canvas_item.mouse_entered()
                self.__canvas_widget.set_cursor_shape(self.__mouse_tracking_canvas_item.cursor_shape)
                self.__canvas_widget.tool_tip = self.__mouse_tracking_canvas_item.tool_tip
        if self.__mouse_tracking_canvas_item:
            # the pressed item captures subsequent mouse events until release
            self.__mouse_canvas_item = self.__mouse_tracking_canvas_item
            canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), self.__mouse_canvas_item)
            # focus is actually requested at release time (see __mouse_released)
            self.__request_focus_canvas_item = self.__mouse_canvas_item
            return self.__mouse_canvas_item.mouse_pressed(canvas_item_point.x, canvas_item_point.y, modifiers)
        return False
    def __mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        """Widget callback: release the mouse; completes the press/release interaction pair."""
        result = False
        if self.__mouse_canvas_item:
            # deferred focus request from __mouse_pressed, unless bypassed
            if self.__request_focus_canvas_item:
                self.__request_focus(self.__request_focus_canvas_item, Geometry.IntPoint(x=x, y=y), modifiers)
                self.__request_focus_canvas_item = typing.cast(typing.Any, None)
            canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), self.__mouse_canvas_item)
            result = self.__mouse_canvas_item.mouse_released(canvas_item_point.x, canvas_item_point.y, modifiers)
            self.__mouse_canvas_item = None
            # re-evaluate tracking now that the capture has ended
            self.__mouse_position_changed(x, y, modifiers)
        self._adjust_ui_interaction(-1)
        return result
    def bypass_request_focus(self) -> None:
        """Cancel the deferred focus request so the next mouse release does not change focus."""
        self.__request_focus_canvas_item = typing.cast(typing.Any, None)
    def __mouse_position_changed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> None:
        """Widget callback: track the mouse, maintaining enter/exit, cursor, and tool tip state."""
        if not self.__mouse_tracking:
            # handle case where mouse is suddenly within this canvas item but it never entered. this can happen when
            # the user activates the application.
            # NOTE(review): this calls the public (inherited) mouse_entered(), not
            # self.__mouse_entered(), so __mouse_tracking is not set True here — confirm intended.
            self.mouse_entered()
        if self.__mouse_tracking and not self.__mouse_tracking_canvas_item:
            # find the existing canvas item that is or wants to track the mouse. if it's new, call entered and update
            # the cursor.
            self.__mouse_tracking_canvas_item = self.__mouse_canvas_item_at_point(x, y)
            if self.__mouse_tracking_canvas_item:
                self.__mouse_tracking_canvas_item.mouse_entered()
                self.__canvas_widget.set_cursor_shape(self.__mouse_tracking_canvas_item.cursor_shape)
                self.__canvas_widget.tool_tip = self.__mouse_tracking_canvas_item.tool_tip
        new_mouse_canvas_item = self.__mouse_canvas_item_at_point(x, y)
        if self.__mouse_tracking_canvas_item != new_mouse_canvas_item:
            # if the mouse tracking canvas item changes, exit the old one and enter the new one.
            if self.__mouse_tracking_canvas_item:
                # there may be a case where the mouse has moved outside the canvas item and the canvas
                # item has also been closed. for instance, context menu item which closes the canvas item.
                # so double check whether the mouse tracking canvas item is still in the hierarchy by checking
                # its container. only call mouse existed if the item is still in the hierarchy.
                if self.__mouse_tracking_canvas_item.container:
                    self.__mouse_tracking_canvas_item.mouse_exited()
                self.__canvas_widget.set_cursor_shape(None)
                self.__canvas_widget.tool_tip = None
            self.__mouse_tracking_canvas_item = new_mouse_canvas_item
            if self.__mouse_tracking_canvas_item:
                self.__mouse_tracking_canvas_item.mouse_entered()
                self.__canvas_widget.set_cursor_shape(self.__mouse_tracking_canvas_item.cursor_shape)
                self.__canvas_widget.tool_tip = self.__mouse_tracking_canvas_item.tool_tip
        # finally, send out the actual position changed message to the (possibly new) current mouse tracking canvas
        # item. also make note of the last time the cursor changed for tool tip tracking.
        if self.__mouse_tracking_canvas_item:
            canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), self.__mouse_tracking_canvas_item)
            self.__mouse_tracking_canvas_item.mouse_position_changed(canvas_item_point.x, canvas_item_point.y, modifiers)
    def __grabbed_mouse_position_changed(self, dx: int, dy: int, modifiers: UserInterface.KeyboardModifiers) -> None:
        """Widget callback: forward relative mouse movement to the item that grabbed the mouse."""
        if self.__grab_canvas_item:
            self.__grab_canvas_item.grabbed_mouse_position_changed(dx, dy, modifiers)
    def __context_menu_event(self, x: int, y: int, gx: int, gy: int) -> bool:
        """Widget callback: offer a context menu event (local x, y; global gx, gy) to items under the point."""
        with self._ui_interaction():
            canvas_items = self.canvas_items_at_point(x, y)
            for canvas_item in canvas_items:
                canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), canvas_item)
                if canvas_item.context_menu_event(canvas_item_point.x, canvas_item_point.y, gx, gy):
                    return True
            return False
    def __key_pressed(self, key: UserInterface.Key) -> bool:
        """Widget callback: deliver a key press to the focused item; interaction ends in __key_released."""
        self._adjust_ui_interaction(1)
        if self.focused_item:
            return self.focused_item.key_pressed(key)
        return False
    def __key_released(self, key: UserInterface.Key) -> bool:
        """Widget callback: deliver a key release to the focused item; closes the key interaction."""
        result = False
        if self.focused_item:
            result = self.focused_item.key_released(key)
        self._adjust_ui_interaction(-1)
        return result
    def __drag_enter(self, mime_data: UserInterface.MimeData) -> str:
        """Widget callback: a drag entered the widget; begin drag tracking and accept it."""
        self.__drag_tracking = True
        return "accept"
    def __drag_leave(self) -> str:
        """Widget callback: the drag left the widget; notify the tracked item and reset state."""
        if self.__drag_tracking_canvas_item:
            self.__drag_tracking_canvas_item.drag_leave()
        self.__drag_tracking = False
        self.__drag_tracking_canvas_item = None
        return "accept"
def __drag_canvas_item_at_point(self, x: int, y: int, mime_data: UserInterface.MimeData) -> typing.Optional[AbstractCanvasItem]:
canvas_items = self.canvas_items_at_point(x, y)
for canvas_item in canvas_items:
if canvas_item.wants_drag_event(mime_data, x, y):
return canvas_item
return None
    def __drag_move(self, mime_data: UserInterface.MimeData, x: int, y: int) -> str:
        """Track the drag at (x, y) and return the target's response ("ignore" if none).

        Resolves which canvas item is under the drag, sending drag_enter /
        drag_leave as the target changes, then forwards the move to the
        current target in its own coordinates.
        """
        response = "ignore"
        # first move after __drag_enter: resolve the initial target and enter it
        if self.__drag_tracking and not self.__drag_tracking_canvas_item:
            self.__drag_tracking_canvas_item = self.__drag_canvas_item_at_point(x, y, mime_data)
            if self.__drag_tracking_canvas_item:
                self.__drag_tracking_canvas_item.drag_enter(mime_data)
        # when the drag moved over a different item, leave the old one and enter the new one
        new_drag_canvas_item = self.__drag_canvas_item_at_point(x, y, mime_data)
        if self.__drag_tracking_canvas_item != new_drag_canvas_item:
            if self.__drag_tracking_canvas_item:
                self.__drag_tracking_canvas_item.drag_leave()
            self.__drag_tracking_canvas_item = new_drag_canvas_item
            if self.__drag_tracking_canvas_item:
                self.__drag_tracking_canvas_item.drag_enter(mime_data)
        # forward the move to the current target in its own coordinate system
        if self.__drag_tracking_canvas_item:
            canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), self.__drag_tracking_canvas_item)
            response = self.__drag_tracking_canvas_item.drag_move(mime_data, canvas_item_point.x, canvas_item_point.y)
        return response
    def __drop(self, mime_data: UserInterface.MimeData, x: int, y: int) -> str:
        """Deliver the drop to the current drag target and end drag tracking.

        Returns the target's response string, or "ignore" when there is no target.
        """
        with self._ui_interaction():
            response = "ignore"
            if self.__drag_tracking_canvas_item:
                canvas_item_point = self.map_to_canvas_item(Geometry.IntPoint(y=y, x=x), self.__drag_tracking_canvas_item)
                response = self.__drag_tracking_canvas_item.drop(mime_data, canvas_item_point.x, canvas_item_point.y)
            self.__drag_leave()
            return response
    def drag(self, mime_data: UserInterface.MimeData, thumbnail: typing.Optional[DrawingContext.RGBA32Type] = None,
             hot_spot_x: typing.Optional[int] = None, hot_spot_y: typing.Optional[int] = None,
             drag_finished_fn: typing.Optional[typing.Callable[[str], None]] = None) -> None:
        """Start a drag operation on the underlying canvas widget.

        thumbnail: optional RGBA image shown under the cursor; hot_spot_x/y: its
        offset. drag_finished_fn is forwarded to the widget — presumably called
        with the resulting action string when the drag ends; confirm against
        the UserInterface implementation.
        """
        self.__canvas_widget.drag(mime_data, thumbnail, hot_spot_x, hot_spot_y, drag_finished_fn)
    def grab_gesture(self, gesture_type: str) -> None:
        """ Grab gesture """
        # bump the interaction count for the duration of the gesture grab
        self._adjust_ui_interaction(1)
        self.__canvas_widget.grab_gesture(gesture_type)
    def release_gesture(self, gesture_type: str) -> None:
        """ Ungrab gesture """
        # balances the _adjust_ui_interaction(1) made in grab_gesture
        self.__canvas_widget.release_gesture(gesture_type)
        self._adjust_ui_interaction(-1)
    def grab_mouse(self, grabbed_canvas_item: MouseTrackingCanvasItem.TrackingCanvasItem, gx: int, gy: int) -> None:
        # Grab the mouse at global point (gx, gy) and route subsequent
        # grabbed-position events to grabbed_canvas_item (see
        # __grabbed_mouse_position_changed). Balanced by release_mouse.
        self._adjust_ui_interaction(1)
        self.__canvas_widget.grab_mouse(gx, gy)
        self.__grab_canvas_item = grabbed_canvas_item
    def release_mouse(self) -> None:
        # End a mouse grab started by grab_mouse: release the widget grab,
        # restore the cursor, clear the grab target, and balance the
        # interaction count.
        self.__canvas_widget.release_mouse()
        self._restore_cursor_shape()
        self.__grab_canvas_item = None
        self._adjust_ui_interaction(-1)
    def show_tool_tip_text(self, text: str, gx: int, gy: int) -> None:
        # display tool tip text at the global position (gx, gy)
        self.__canvas_widget.show_tool_tip_text(text, gx, gy)
class BackgroundCanvasItem(AbstractCanvasItem):
    """Canvas item that fills its bounds with a solid background color."""

    def __init__(self, background_color: typing.Optional[str] = None) -> None:
        super().__init__()
        # fall back to a neutral gray when no color is supplied
        self.background_color = background_color or "#888"

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Fill the entire canvas with the background color."""
        canvas_size = self.canvas_size
        if canvas_size:
            height, width = canvas_size[0], canvas_size[1]
            with drawing_context.saver():
                drawing_context.begin_path()
                drawing_context.rect(0, 0, width, height)
                drawing_context.fill_style = self.background_color
                drawing_context.fill()
class CellCanvasItem(AbstractCanvasItem):
    """ Canvas item to draw and respond to user events for a cell.

    A cell must implement the following interface:
        event: update_event() - fired when the canvas item needs an update
        method: paint_cell(drawing_context, rect, style) - called to draw the cell

    The style parameter passed to paint_cell is a list with zero or one strings from each of the aspects below:
        disabled (default is enabled)
        checked, partial (default is unchecked)
        hover, active (default is none)
    """

    def __init__(self, cell: typing.Optional[Widgets.CellLike] = None) -> None:
        super().__init__()
        self.__enabled = True
        self.__check_state = "unchecked"
        self.__mouse_inside = False
        self.__mouse_pressed = False
        self.__cell = None
        # listener on the cell's update_event; rebuilt whenever the cell changes
        self.__cell_update_event_listener: typing.Optional[Event.EventListener] = None
        self.cell = cell  # property setter hooks up the update listener
        self.style: typing.Set[str] = set()

    def close(self) -> None:
        # setting cell to None detaches the update-event listener
        self.cell = None
        super().close()

    @property
    def enabled(self) -> bool:
        return self.__enabled

    @enabled.setter
    def enabled(self, value: bool) -> None:
        if self.__enabled != value:
            self.__enabled = value
            self.__update_style()

    @property
    def check_state(self) -> str:
        return self.__check_state

    @check_state.setter
    def check_state(self, value: str) -> None:
        assert value in ["checked", "unchecked", "partial"]
        if self.__check_state != value:
            self.__check_state = value
            self.__update_style()

    @property
    def checked(self) -> bool:
        return self.check_state == "checked"

    @checked.setter
    def checked(self, value: bool) -> None:
        self.check_state = "checked" if value else "unchecked"

    @property
    def _mouse_inside(self) -> bool:
        return self.__mouse_inside

    @_mouse_inside.setter
    def _mouse_inside(self, value: bool) -> None:
        self.__mouse_inside = value
        self.__update_style()

    @property
    def _mouse_pressed(self) -> bool:
        return self.__mouse_pressed

    @_mouse_pressed.setter
    def _mouse_pressed(self, value: bool) -> None:
        self.__mouse_pressed = value
        self.__update_style()

    def __update_style(self) -> None:
        # Recompute the style set from enabled/check/mouse state; request a
        # repaint only when the effective style actually changed.
        old_style = copy.copy(self.style)
        # enabled state
        self.style.discard('disabled')
        if not self.enabled:
            self.style.add('disabled')
        # checked state -- note the "partial" check state adds no style flag here
        self.style.discard('checked')
        if self.check_state == "checked":
            self.style.add('checked')
        # hover state
        self.style.discard('hover')
        self.style.discard('active')
        if self._mouse_inside and self._mouse_pressed:
            self.style.add('active')
        elif self.__mouse_inside:
            self.style.add('hover')
        if self.style != old_style:
            self.update()

    @property
    def cell(self) -> typing.Optional[Widgets.CellLike]:
        return self.__cell

    @cell.setter
    def cell(self, new_cell: typing.Optional[Widgets.CellLike]) -> None:
        # swap the cell, moving the update-event listener to the new cell
        if self.__cell_update_event_listener:
            self.__cell_update_event_listener.close()
            self.__cell_update_event_listener = None
        self.__cell = new_cell
        if self.__cell:
            self.__cell_update_event_listener = self.__cell.update_event.listen(self.update)

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        # delegate painting of the full bounds to the cell, if any
        rect = self.canvas_bounds
        if self.__cell and rect is not None:
            with drawing_context.saver():
                self.__cell.paint_cell(drawing_context, rect, self.style)
class TwistDownCell:
    """Cell that draws a twist-down (disclosure) arrow.

    Draws a downward-pointing triangle when style contains "checked" (open)
    and a rightward-pointing triangle otherwise (closed), with a translucent
    overlay for disabled/active/hover states.
    """

    def __init__(self) -> None:
        super().__init__()
        self.update_event = Event.Event()

    def paint_cell(self, drawing_context: DrawingContext.DrawingContext, rect: Geometry.IntRect, style: typing.Set[str]) -> None:
        # disabled (default is enabled)
        # checked, partial (default is unchecked)
        # hover, active (default is none)
        if "checked" in style:
            # open: downward-pointing triangle centered in rect
            drawing_context.begin_path()
            drawing_context.move_to(rect.center.x, rect.center.y + 4)
            drawing_context.line_to(rect.center.x + 4.5, rect.center.y - 4)
            drawing_context.line_to(rect.center.x - 4.5, rect.center.y - 4)
            drawing_context.close_path()
        else:
            # closed: rightward-pointing triangle
            drawing_context.begin_path()
            drawing_context.move_to(rect.center.x + 4, rect.center.y)
            drawing_context.line_to(rect.center.x - 4, rect.center.y + 4.5)
            drawing_context.line_to(rect.center.x - 4, rect.center.y - 4.5)
            drawing_context.close_path()
        # pick a translucent overlay for the interaction state
        overlay_color = None
        if "disabled" in style:
            overlay_color = "rgba(255, 255, 255, 0.5)"
        else:
            if "active" in style:
                overlay_color = "rgba(128, 128, 128, 0.5)"
            elif "hover" in style:
                overlay_color = "rgba(128, 128, 128, 0.1)"
        drawing_context.fill_style = "#444"
        drawing_context.fill()
        drawing_context.stroke_style = "#444"
        drawing_context.stroke()
        # wash the whole cell rect with the overlay color, if any
        if overlay_color:
            rect_args = rect.left, rect.top, rect.width, rect.height
            drawing_context.begin_path()
            drawing_context.rect(*rect_args)
            drawing_context.fill_style = overlay_color
            drawing_context.fill()
class TwistDownCanvasItem(CellCanvasItem):
    """A clickable twist-down (disclosure triangle) canvas item.

    Clients set on_button_clicked to be notified of clicks; the inherited
    checked state selects the open/closed triangle drawn by TwistDownCell.
    """

    def __init__(self) -> None:
        super().__init__()
        self.cell = TwistDownCell()
        self.wants_mouse_events = True
        self.on_button_clicked: typing.Optional[typing.Callable[[], None]] = None

    def close(self) -> None:
        # release the callback to break potential reference cycles
        self.on_button_clicked = None
        super().close()

    def mouse_entered(self) -> bool:
        self._mouse_inside = True
        return True

    def mouse_exited(self) -> bool:
        self._mouse_inside = False
        return True

    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self._mouse_pressed = True
        return True

    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self._mouse_pressed = False
        return True

    def mouse_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        # fire the callback only when enabled; the click is always consumed
        if self.enabled and callable(self.on_button_clicked):
            self.on_button_clicked()
        return True
class BitmapCell:
    """Cell that draws either a packed RGBA bitmap or raw grayscale data.

    Exactly one of the two data forms is active at a time: setting the RGBA
    bitmap clears the raw data and vice versa. Optional background and border
    colors frame the image; style flags add checked/hover/active/disabled
    treatments.
    """

    def __init__(self, rgba_bitmap_data: typing.Optional[DrawingContext.RGBA32Type] = None,
                 background_color: typing.Optional[str] = None, border_color: typing.Optional[str] = None) -> None:
        super().__init__()
        self.__rgba_bitmap_data = rgba_bitmap_data
        # raw-data form: grayscale values + display limits + optional color map
        self.__data: typing.Optional[DrawingContext.GrayscaleF32Type] = None
        self.__display_limits: typing.Optional[typing.Tuple[float, float]] = None
        self.__color_map_data: typing.Optional[DrawingContext.RGBA32Type] = None
        self.__background_color = background_color
        self.__border_color = border_color
        self.update_event = Event.Event()

    def set_rgba_bitmap_data(self, rgba_bitmap_data: typing.Optional[DrawingContext.RGBA32Type], trigger_update: bool = True) -> None:
        """Set packed RGBA data, clearing any raw data; fires update_event unless trigger_update is False."""
        self.__rgba_bitmap_data = rgba_bitmap_data
        self.__data = None
        self.__display_limits = None
        self.__color_map_data = None
        if trigger_update:
            self.update_event.fire()

    def set_data(self, data: typing.Optional[DrawingContext.GrayscaleF32Type],
                 display_limits: typing.Optional[typing.Tuple[float, float]],
                 color_map_data: typing.Optional[DrawingContext.RGBA32Type], trigger_update: bool = True) -> None:
        """Set raw grayscale data with display limits and optional color map, clearing any RGBA bitmap."""
        self.__rgba_bitmap_data = None
        self.__data = data
        self.__display_limits = display_limits
        self.__color_map_data = color_map_data
        if trigger_update:
            self.update_event.fire()

    @property
    def data(self) -> typing.Optional[DrawingContext.GrayscaleF32Type]:
        return self.__data

    @property
    def rgba_bitmap_data(self) -> typing.Optional[DrawingContext.RGBA32Type]:
        return self.__rgba_bitmap_data

    @rgba_bitmap_data.setter
    def rgba_bitmap_data(self, value: typing.Optional[DrawingContext.RGBA32Type]) -> None:
        self.set_rgba_bitmap_data(value, trigger_update=True)

    @property
    def background_color(self) -> typing.Optional[str]:
        return self.__background_color

    @background_color.setter
    def background_color(self, background_color: typing.Optional[str]) -> None:
        self.__background_color = background_color
        self.update_event.fire()

    @property
    def border_color(self) -> typing.Optional[str]:
        return self.__border_color

    @border_color.setter
    def border_color(self, border_color: typing.Optional[str]) -> None:
        self.__border_color = border_color
        self.update_event.fire()

    def paint_cell(self, drawing_context: DrawingContext.DrawingContext, rect: Geometry.IntRect, style: typing.Set[str]) -> None:
        """Draw background, image (bitmap or raw data), style overlay, and border into rect."""
        # set up the defaults
        background_color = self.__background_color
        border_color = self.__border_color
        overlay_color = None
        # configure based on style
        if "disabled" in style:
            overlay_color = "rgba(255, 255, 255, 0.5)"
            if "checked" in style:
                background_color = "rgb(64, 64, 64)"
        else:
            if "checked" in style:
                background_color = "rgb(192, 192, 192)"
            if "active" in style:
                overlay_color = "rgba(128, 128, 128, 0.5)"
            elif "hover" in style:
                overlay_color = "rgba(128, 128, 128, 0.1)"
        rect_args = rect.left, rect.top, rect.width, rect.height
        bitmap_data = self.rgba_bitmap_data
        raw_data = self.__data
        # draw the background
        if background_color:
            drawing_context.begin_path()
            drawing_context.rect(*rect_args)
            drawing_context.fill_style = background_color
            drawing_context.fill()
        # draw the bitmap
        if bitmap_data is not None:
            image_size = typing.cast(Geometry.IntSizeTuple, bitmap_data.shape)
            if image_size[0] > 0 and image_size[1] > 0:
                # scale the image to fit rect while preserving aspect ratio
                display_rect = Geometry.fit_to_size(rect, image_size)
                display_height = display_rect.height
                display_width = display_rect.width
                if display_rect and display_width > 0 and display_height > 0:
                    display_top = display_rect.top
                    display_left = display_rect.left
                    drawing_context.draw_image(bitmap_data, display_left, display_top, display_width, display_height)
        # draw the raw data (same fitting logic, but through draw_data with limits/color map)
        if raw_data is not None:
            image_size = typing.cast(Geometry.IntSizeTuple, raw_data.shape)
            if image_size[0] > 0 and image_size[1] > 0:
                display_rect = Geometry.fit_to_size(rect, image_size)
                display_height = display_rect.height
                display_width = display_rect.width
                if display_rect and display_width > 0 and display_height > 0:
                    display_top = display_rect.top
                    display_left = display_rect.left
                    # fall back to (0.0, 0.0) limits when none were supplied
                    display_limits = self.__display_limits or (0.0, 0.0)
                    color_map_data = self.__color_map_data
                    drawing_context.draw_data(raw_data, display_left, display_top, display_width, display_height, display_limits[0], display_limits[1], color_map_data)
        # draw the overlay style
        if overlay_color:
            drawing_context.begin_path()
            drawing_context.rect(*rect_args)
            drawing_context.fill_style = overlay_color
            drawing_context.fill()
        # draw the border
        if border_color:
            drawing_context.begin_path()
            drawing_context.rect(*rect_args)
            drawing_context.stroke_style = border_color
            drawing_context.stroke()
class BitmapCanvasItem(CellCanvasItem):
    """ Canvas item to draw rgba bitmap in bgra uint32 ndarray format. """

    # All state and painting is delegated to a wrapped BitmapCell; this class
    # only re-exposes the cell's API as canvas-item properties/methods.

    def __init__(self, rgba_bitmap_data: typing.Optional[DrawingContext.RGBA32Type] = None,
                 background_color: typing.Optional[str] = None, border_color: typing.Optional[str] = None) -> None:
        super().__init__()
        self.__bitmap_cell = BitmapCell(rgba_bitmap_data, background_color, border_color)
        self.cell = self.__bitmap_cell

    def set_rgba_bitmap_data(self, rgba_bitmap_data: typing.Optional[DrawingContext.RGBA32Type],
                             trigger_update: bool = True) -> None:
        # delegate; see BitmapCell.set_rgba_bitmap_data
        self.__bitmap_cell.set_rgba_bitmap_data(rgba_bitmap_data, trigger_update)

    def set_data(self, data: typing.Optional[DrawingContext.GrayscaleF32Type],
                 display_limits: typing.Optional[typing.Tuple[float, float]],
                 color_map_data: typing.Optional[DrawingContext.RGBA32Type], trigger_update: bool = True) -> None:
        # delegate; see BitmapCell.set_data
        self.__bitmap_cell.set_data(data, display_limits, color_map_data, trigger_update)

    @property
    def data(self) -> typing.Optional[DrawingContext.GrayscaleF32Type]:
        return self.__bitmap_cell.data

    @property
    def rgba_bitmap_data(self) -> typing.Optional[DrawingContext.RGBA32Type]:
        return self.__bitmap_cell.rgba_bitmap_data

    @rgba_bitmap_data.setter
    def rgba_bitmap_data(self, rgb_bitmap_data: typing.Optional[DrawingContext.RGBA32Type]) -> None:
        self.__bitmap_cell.rgba_bitmap_data = rgb_bitmap_data

    @property
    def background_color(self) -> typing.Optional[str]:
        return self.__bitmap_cell.background_color

    @background_color.setter
    def background_color(self, background_color: typing.Optional[str]) -> None:
        self.__bitmap_cell.background_color = background_color

    @property
    def border_color(self) -> typing.Optional[str]:
        return self.__bitmap_cell.border_color

    @border_color.setter
    def border_color(self, border_color: typing.Optional[str]) -> None:
        self.__bitmap_cell.border_color = border_color
class BitmapButtonCanvasItem(BitmapCanvasItem):
    """ Canvas item button to draw rgba bitmap in bgra uint32 ndarray format. """

    def __init__(self, rgba_bitmap_data: typing.Optional[DrawingContext.RGBA32Type] = None,
                 background_color: typing.Optional[str] = None, border_color: typing.Optional[str] = None) -> None:
        super().__init__(rgba_bitmap_data, background_color, border_color)
        self.wants_mouse_events = True
        # called on a completed click while enabled
        self.on_button_clicked: typing.Optional[typing.Callable[[], None]] = None

    def close(self) -> None:
        # release the callback to break potential reference cycles
        self.on_button_clicked = None
        super().close()

    def mouse_entered(self) -> bool:
        self._mouse_inside = True
        return True

    def mouse_exited(self) -> bool:
        self._mouse_inside = False
        return True

    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self._mouse_pressed = True
        return True

    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self._mouse_pressed = False
        return True

    def mouse_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        # fire the callback only when enabled; the click is always consumed
        if self.enabled and callable(self.on_button_clicked):
            self.on_button_clicked()
        return True
class StaticTextCanvasItem(AbstractCanvasItem):
    """Canvas item that draws a single centered line of text.

    The drawn color switches between text_color and text_disabled_color
    according to enabled. Each property setter triggers a repaint only when
    the value actually changes.
    """

    def __init__(self, text: typing.Optional[str] = None) -> None:
        super().__init__()
        self.__text = text if text is not None else str()
        self.__text_color = "#000"
        self.__text_disabled_color = "#888"
        self.__enabled = True
        self.__font = "12px"

    @property
    def text(self) -> str:
        return self.__text

    @text.setter
    def text(self, text: typing.Optional[str]) -> None:
        # None is normalized to the empty string
        text = text if text is not None else str()
        if self.__text != text:
            self.__text = text
            self.update()

    @property
    def enabled(self) -> bool:
        return self.__enabled

    @enabled.setter
    def enabled(self, value: bool) -> None:
        if self.__enabled != value:
            self.__enabled = value
            self.update()

    @property
    def text_color(self) -> str:
        return self.__text_color

    @text_color.setter
    def text_color(self, value: str) -> None:
        if self.__text_color != value:
            self.__text_color = value
            self.update()

    @property
    def text_disabled_color(self) -> str:
        return self.__text_disabled_color

    @text_disabled_color.setter
    def text_disabled_color(self, value: str) -> None:
        if self.__text_disabled_color != value:
            self.__text_disabled_color = value
            self.update()

    @property
    def font(self) -> str:
        return self.__font

    @font.setter
    def font(self, value: str) -> None:
        if self.__font != value:
            self.__font = value
            self.update()

    def size_to_content(self, get_font_metrics_fn: typing.Callable[[str, str], UserInterface.FontMetrics],
                        horizontal_padding: typing.Optional[int] = None,
                        vertical_padding: typing.Optional[int] = None) -> None:
        """ Size the canvas item to the text content. """
        # default padding is 4px on each side
        if horizontal_padding is None:
            horizontal_padding = 4
        if vertical_padding is None:
            vertical_padding = 4
        font_metrics = get_font_metrics_fn(self.__font, self.__text)
        new_sizing = self.copy_sizing()
        new_sizing._set_fixed_width(font_metrics.width + 2 * horizontal_padding)
        new_sizing._set_fixed_height(font_metrics.height + 2 * vertical_padding)
        self.update_sizing(new_sizing)

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        # draw the text centered in the canvas; +1 nudges the baseline down slightly
        canvas_bounds = self.canvas_bounds
        if canvas_bounds:
            canvas_bounds_center = canvas_bounds.center
            with drawing_context.saver():
                drawing_context.font = self.__font
                drawing_context.text_align = 'center'
                drawing_context.text_baseline = 'middle'
                drawing_context.fill_style = self.__text_color if self.__enabled else self.__text_disabled_color
                drawing_context.fill_text(self.__text, canvas_bounds_center.x, canvas_bounds_center.y + 1)
class TextButtonCanvasItem(StaticTextCanvasItem):
    """A clickable text button with an optional rounded-rect border.

    Clients set on_button_clicked to be notified of clicks. Hover/pressed
    states are drawn as translucent fills behind the text.
    """

    def __init__(self, text: typing.Optional[str] = None) -> None:
        super().__init__(text)
        self.wants_mouse_events = True
        self.__border_enabled = True
        self.__mouse_inside = False
        self.__mouse_pressed = False
        self.on_button_clicked: typing.Optional[typing.Callable[[], None]] = None

    def close(self) -> None:
        # release the callback to break potential reference cycles
        self.on_button_clicked = None
        super().close()

    @property
    def border_enabled(self) -> bool:
        return self.__border_enabled

    @border_enabled.setter
    def border_enabled(self, value: bool) -> None:
        if self.__border_enabled != value:
            self.__border_enabled = value
            self.update()

    def mouse_entered(self) -> bool:
        self.__mouse_inside = True
        self.update()
        return True

    def mouse_exited(self) -> bool:
        self.__mouse_inside = False
        self.update()
        return True

    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self.__mouse_pressed = True
        self.update()
        return True

    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self.__mouse_pressed = False
        self.update()
        return True

    def mouse_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        # fire the callback only when enabled; the click is always consumed
        if self.enabled:
            if callable(self.on_button_clicked):
                self.on_button_clicked()
        return True

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        # draw the rounded-rect background/border, then let the base class draw the text
        canvas_size = self.canvas_size
        if canvas_size:
            with drawing_context.saver():
                drawing_context.begin_path()
                # drawing_context.rect(0, 0, canvas_size.width, canvas_size.height)
                drawing_context.round_rect(1.0, 1.0, canvas_size.width - 2.0, canvas_size.height - 2.0, 4)
                if self.enabled and self.__mouse_inside and self.__mouse_pressed:
                    drawing_context.fill_style = "rgba(128, 128, 128, 0.5)"
                    drawing_context.fill()
                elif self.enabled and self.__mouse_inside:
                    drawing_context.fill_style = "rgba(128, 128, 128, 0.1)"
                    drawing_context.fill()
                if self.border_enabled:
                    drawing_context.stroke_style = "#000"
                    drawing_context.line_width = 1.0
                    drawing_context.stroke()
        super()._repaint(drawing_context)
class CheckBoxCanvasItem(AbstractCanvasItem):
    """A check box with a text label.

    Supports two-state (checked/unchecked) and, when tristate is True, a
    third "partial" state. Clicking toggles between checked and unchecked
    and fires on_checked_changed / on_check_state_changed.
    """

    def __init__(self, text: typing.Optional[str] = None) -> None:
        super().__init__()
        self.wants_mouse_events = True
        self.__enabled = True
        self.__mouse_inside = False
        self.__mouse_pressed = False
        self.__check_state = "unchecked"
        self.__tristate = False
        self.__text = text if text is not None else str()
        self.__text_color = "#000"
        self.__text_disabled_color = "#888"
        self.__font = "12px"
        # callbacks fired from _toggle_checked
        self.on_checked_changed: typing.Optional[typing.Callable[[bool], None]] = None
        self.on_check_state_changed: typing.Optional[typing.Callable[[str], None]] = None

    def close(self) -> None:
        # release callbacks to break potential reference cycles
        self.on_checked_changed = None
        self.on_check_state_changed = None
        super().close()

    @property
    def enabled(self) -> bool:
        return self.__enabled

    @enabled.setter
    def enabled(self, value: bool) -> None:
        self.__enabled = value
        self.update()

    @property
    def tristate(self) -> bool:
        return self.__tristate

    @tristate.setter
    def tristate(self, value: bool) -> None:
        self.__tristate = value
        # leaving tristate mode collapses a "partial" state to unchecked
        if not self.__tristate:
            self.checked = self.check_state == "checked"
        self.update()

    @property
    def check_state(self) -> str:
        return self.__check_state

    @check_state.setter
    def check_state(self, value: str) -> None:
        # invalid values fall back to "unchecked"; "partial" is only accepted in tristate mode
        if self.tristate and value not in ("unchecked", "checked", "partial"):
            value = "unchecked"
        elif not self.tristate and value not in ("unchecked", "checked"):
            value = "unchecked"
        self.__check_state = value
        self.update()

    @property
    def checked(self) -> bool:
        return self.check_state == "checked"

    @checked.setter
    def checked(self, value: bool) -> None:
        self.check_state = "checked" if value else "unchecked"

    @property
    def text(self) -> str:
        return self.__text

    @text.setter
    def text(self, text: typing.Optional[str]) -> None:
        # None is normalized to the empty string
        text = text if text is not None else str()
        if self.__text != text:
            self.__text = text
            self.update()

    @property
    def text_color(self) -> str:
        return self.__text_color

    @text_color.setter
    def text_color(self, value: str) -> None:
        if self.__text_color != value:
            self.__text_color = value
            self.update()

    @property
    def text_disabled_color(self) -> str:
        return self.__text_disabled_color

    @text_disabled_color.setter
    def text_disabled_color(self, value: str) -> None:
        if self.__text_disabled_color != value:
            self.__text_disabled_color = value
            self.update()

    @property
    def font(self) -> str:
        return self.__font

    @font.setter
    def font(self, value: str) -> None:
        if self.__font != value:
            self.__font = value
            self.update()

    def mouse_entered(self) -> bool:
        self.__mouse_inside = True
        self.update()
        return True

    def mouse_exited(self) -> bool:
        self.__mouse_inside = False
        self.update()
        return True

    def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self.__mouse_pressed = True
        self.update()
        return True

    def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self.__mouse_pressed = False
        self.update()
        return True

    def mouse_clicked(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
        self._toggle_checked()
        return True

    def _toggle_checked(self) -> None:
        # Toggle between checked/unchecked (a "partial" state becomes checked)
        # and notify both callbacks; no-op when disabled.
        if self.enabled:
            if self.check_state == "checked":
                self.check_state = "unchecked"
            else:
                self.check_state = "checked"
            if callable(self.on_checked_changed):
                self.on_checked_changed(self.check_state == "checked")
            if callable(self.on_check_state_changed):
                self.on_check_state_changed(self.check_state)

    @property
    def _mouse_inside(self) -> bool:
        return self.__mouse_inside

    @property
    def _mouse_pressed(self) -> bool:
        return self.__mouse_pressed

    def size_to_content(self, get_font_metrics_fn: typing.Callable[[str, str], UserInterface.FontMetrics]) -> None:
        """ Size the canvas item to the text content. """
        horizontal_padding = 4
        vertical_padding = 3
        font_metrics = get_font_metrics_fn(self.__font, self.__text)
        new_sizing = self.copy_sizing()
        # 14 is the check-box size, 4 the gap between box and text
        new_sizing._set_fixed_width(font_metrics.width + 2 * horizontal_padding + 14 + 4)
        new_sizing._set_fixed_height(font_metrics.height + 2 * vertical_padding)
        self.update_sizing(new_sizing)

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Draw the 14px rounded box, the check/partial mark, and the label text."""
        canvas_size = self.canvas_size
        if canvas_size:
            with drawing_context.saver():
                drawing_context.begin_path()
                tx = 4 + 14 + 4  # text x: padding + box width + gap
                cx = 4 + 7  # box center x
                cy = canvas_size.height * 0.5
                size = 14
                size_half = 7
                drawing_context.round_rect(4, cy - size_half, size, size, 4.0)
                if self.check_state in ("checked", "partial"):
                    drawing_context.fill_style = "#FFF"
                    drawing_context.fill()
                if self.enabled and self.__mouse_inside and self.__mouse_pressed:
                    drawing_context.fill_style = "rgba(128, 128, 128, 0.5)"
                    drawing_context.fill()
                elif self.enabled and self.__mouse_inside:
                    drawing_context.fill_style = "rgba(128, 128, 128, 0.1)"
                    drawing_context.fill()
                drawing_context.stroke_style = "#000"
                drawing_context.line_width = 1.0
                drawing_context.stroke()
                if self.check_state == "checked":
                    # check mark
                    drawing_context.begin_path()
                    drawing_context.move_to(cx - 3, cy - 2)
                    drawing_context.line_to(cx + 0, cy + 2)
                    drawing_context.line_to(cx + 8, cy - 9)
                    drawing_context.stroke_style = "#000"
                    drawing_context.line_width = 2.0
                    drawing_context.stroke()
                elif self.check_state == "partial":
                    # horizontal dash for the partial state
                    drawing_context.begin_path()
                    drawing_context.move_to(cx - 5, cy)
                    drawing_context.line_to(cx + 5, cy)
                    drawing_context.stroke_style = "#000"
                    drawing_context.line_width = 2.0
                    drawing_context.stroke()
                drawing_context.font = self.__font
                drawing_context.text_align = 'left'
                drawing_context.text_baseline = 'middle'
                drawing_context.fill_style = self.__text_color if self.__enabled else self.__text_disabled_color
                drawing_context.fill_text(self.__text, tx, cy + 1)
        super()._repaint(drawing_context)
class EmptyCanvasItem(AbstractCanvasItem):
    """ Canvas item to act as a placeholder (spacer or stretch). """

    # intentionally draws nothing; participates only in layout

    def __init__(self) -> None:
        super().__init__()
class RadioButtonGroup:
    """Coordinate a set of BitmapButtonCanvasItems so exactly one is checked.

    Clicking any button makes its index current, updates every button's
    checked flag, and fires on_current_index_changed with the new index.
    """

    def __init__(self, buttons: typing.Sequence[BitmapButtonCanvasItem]) -> None:
        self.__buttons = copy.copy(buttons)
        self.__current_index = 0
        self.on_current_index_changed: typing.Optional[typing.Callable[[int], None]] = None
        self.__sync_checked_states()
        for button_index, button in enumerate(self.__buttons):
            def set_current_index(button_index: int) -> None:
                # make the clicked button current and notify the client
                self.__current_index = button_index
                self.__sync_checked_states()
                if callable(self.on_current_index_changed):
                    self.on_current_index_changed(self.__current_index)
            # bind this button's index explicitly to avoid late-binding capture
            button.on_button_clicked = functools.partial(set_current_index, button_index)

    def __sync_checked_states(self) -> None:
        # keep each button's checked flag in sync with the current index
        for button_index, button in enumerate(self.__buttons):
            button.checked = button_index == self.__current_index

    def close(self) -> None:
        # detach click handlers and the client callback
        for button in self.__buttons:
            button.on_button_clicked = None
        self.on_current_index_changed = None

    @property
    def current_index(self) -> int:
        return self.__current_index

    @current_index.setter
    def current_index(self, value: int) -> None:
        self.__current_index = value
        self.__sync_checked_states()
class DrawCanvasItem(AbstractCanvasItem):
    """Canvas item that delegates its painting to a supplied drawing function.

    drawing_fn: a callable taking (drawing_context, canvas_size) that performs
    all drawing for this item.
    """

    def __init__(self, drawing_fn: typing.Callable[[DrawingContext.DrawingContext, Geometry.IntSize], None]) -> None:
        super().__init__()
        self.__drawing_fn = drawing_fn

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        size = self.canvas_size
        if size:
            self.__drawing_fn(drawing_context, size)
        super()._repaint(drawing_context)
class DividerCanvasItem(AbstractCanvasItem):
    """A thin divider line, 2px thick.

    orientation: "vertical" (the default) or "horizontal".
    color: CSS color string for the line (default "#CCC").
    """

    def __init__(self, *, orientation: typing.Optional[str] = None, color: typing.Optional[str] = None):
        super().__init__()
        self.__orientation = orientation or "vertical"
        # Bug fix: the sizing branch previously tested the raw orientation
        # argument, so the default (None -> "vertical") fell into the
        # horizontal branch and got a fixed *height* instead of a fixed
        # width. Test the resolved orientation instead.
        if self.__orientation == "vertical":
            self.update_sizing(self.sizing.with_fixed_width(2))
        else:
            self.update_sizing(self.sizing.with_fixed_height(2))
        self.__color = color or "#CCC"

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        # draw a 1px line centered in the 2px-thick dimension
        canvas_size = self.canvas_size
        if canvas_size:
            with drawing_context.saver():
                if self.__orientation == "vertical":
                    drawing_context.move_to(1, 0)
                    drawing_context.line_to(1, canvas_size.height)
                else:
                    drawing_context.move_to(0, 1)
                    drawing_context.line_to(canvas_size.width, 1)
                drawing_context.stroke_style = self.__color
                drawing_context.stroke()
        super()._repaint(drawing_context)
class ProgressBarCanvasItem(AbstractCanvasItem):
    """A horizontal progress bar; progress is a fraction in [0.0, 1.0]."""

    def __init__(self) -> None:
        super().__init__()
        self.__enabled = True
        self.__progress = 0.0  # 0.0 to 1.0
        self.update_sizing(self.sizing.with_fixed_height(4))

    @property
    def enabled(self) -> bool:
        return self.__enabled

    @enabled.setter
    def enabled(self, value: bool) -> None:
        # NOTE(review): enabled is stored and triggers a repaint, but _repaint
        # below does not use it — the bar draws identically either way; confirm
        # whether a disabled treatment was intended.
        self.__enabled = value
        self.update()

    @property
    def progress(self) -> float:
        return self.__progress

    @progress.setter
    def progress(self, value: float) -> None:
        # clamp into [0.0, 1.0]
        self.__progress = min(max(value, 0.0), 1.0)
        self.update()

    def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
        """Draw the gray track, the filled portion, and a percentage label when it fits."""
        canvas_bounds = self.canvas_bounds
        if canvas_bounds:
            canvas_size = canvas_bounds.size
            canvas_bounds_center = canvas_bounds.center
            with drawing_context.saver():
                # track
                drawing_context.begin_path()
                drawing_context.rect(0, 0, canvas_size.width, canvas_size.height)
                drawing_context.close_path()
                drawing_context.stroke_style = "#CCC"
                drawing_context.fill_style = "#CCC"
                drawing_context.fill()
                drawing_context.stroke()
                # filled portion (skipped when narrower than one pixel)
                if canvas_size.width * self.progress >= 1:
                    drawing_context.begin_path()
                    drawing_context.rect(0, 0, canvas_size.width * self.progress, canvas_size.height)
                    drawing_context.close_path()
                    drawing_context.stroke_style = "#6AB"
                    drawing_context.fill_style = "#6AB"
                    drawing_context.fill()
                    drawing_context.stroke()
                # percentage label, only when the bar is tall and full enough to hold it
                if canvas_size.height >= 16 and canvas_size.width * self.progress >= 50:  # TODO: use font metrics to find length of text
                    progress_text = str(round(self.progress * 100)) + "%"
                    drawing_context.begin_path()
                    drawing_context.font = "12px sans-serif"
                    drawing_context.text_align = 'center'
                    drawing_context.text_baseline = 'middle'
                    drawing_context.fill_style = "#fff"
                    drawing_context.line_width = 2
                    # offset places the label just inside the right edge of the filled bar
                    drawing_context.fill_text(progress_text, (canvas_size.width - 6) * self.progress - 19, canvas_bounds_center.y + 1)
                    drawing_context.fill()
                    drawing_context.close_path()
        super()._repaint(drawing_context)
class TimestampCanvasItem(AbstractCanvasItem):
    """Canvas item that emits a timestamp marker into the drawing context.

    Setting timestamp does not trigger a repaint (the update() call is
    intentionally commented out); the timestamp is emitted on the next paint.
    """

    def __init__(self) -> None:
        super().__init__()
        self.__timestamp: typing.Optional[datetime.datetime] = None

    @property
    def timestamp(self) -> typing.Optional[datetime.datetime]:
        return self.__timestamp

    @timestamp.setter
    def timestamp(self, value: typing.Optional[datetime.datetime]) -> None:
        self.__timestamp = value
        # self.update()

    def _repaint_if_needed(self, drawing_context: DrawingContext.DrawingContext, *, immediate: bool = False) -> None:
        # NOTE(review): overrides _repaint_if_needed but delegates to
        # super()._repaint (not super()._repaint_if_needed) — presumably to
        # bypass layer caching so the timestamp is always emitted; confirm.
        if self.__timestamp:
            drawing_context.timestamp(self.__timestamp.isoformat())
        super()._repaint(drawing_context)
def load_rgba_data_from_bytes(b: typing.ByteString, format: typing.Optional[str] = None) -> typing.Optional[DrawingContext.RGBA32Type]:
    """Decode encoded image bytes into a 2D array of packed 32-bit RGBA pixels.

    Args:
        b: encoded image bytes in any format imageio can decode.
        format: optional explicit format hint forwarded to imageio.

    Returns:
        A 2D uint32 array of packed RGBA pixels, or None if decoding
        produced nothing.
    """
    image_rgba = None
    image_argb = imageio.imread(b, format)
    if image_argb is not None:
        # the channel-swap below requires a 4-channel image; the original
        # code raised IndexError for grayscale or alpha-less (RGB) input
        if image_argb.ndim == 2:
            # grayscale: replicate the single channel into R, G, B
            image_argb = numpy.stack([image_argb] * 3, axis=-1)
        if image_argb.shape[-1] == 3:
            # no alpha channel: treat the image as fully opaque
            alpha = numpy.full(image_argb.shape[:-1] + (1,), 255, dtype=image_argb.dtype)
            image_argb = numpy.concatenate([image_argb, alpha], axis=-1)
        # swap the B and R channels (decoded order -> RGBA byte order)
        image_rgba = numpy.zeros_like(image_argb)
        image_rgba[:, :, 0] = image_argb[:, :, 2]
        image_rgba[:, :, 1] = image_argb[:, :, 1]
        image_rgba[:, :, 2] = image_argb[:, :, 0]
        image_rgba[:, :, 3] = image_argb[:, :, 3]
        # pack the four uint8 channels of each pixel into one uint32
        image_rgba = image_rgba.view(numpy.uint32).reshape(image_rgba.shape[:-1])
    return image_rgba
|
[
"nion.utils.Geometry.fit_to_aspect_ratio",
"typing.cast",
"nion.utils.Geometry.IntPoint",
"weakref.ref",
"nion.utils.Geometry.FloatPoint",
"nion.utils.Geometry.IntRect",
"numpy.zeros_like",
"traceback.print_exc",
"threading.Condition",
"nion.ui.UserInterface.MenuItemState",
"threading.Event",
"traceback.print_stack",
"nion.utils.Geometry.IntRect.empty_rect",
"threading.current_thread",
"nion.utils.Geometry.fit_to_size",
"nion.utils.Geometry.IntPoint.make",
"nion.utils.Geometry.FloatRect.from_tlhw",
"functools.partial",
"copy.deepcopy",
"nion.utils.Event.Event",
"threading.RLock",
"imageio.imread",
"nion.utils.Geometry.Margins",
"nion.utils.Geometry.IntSize.make",
"logging.debug",
"nion.utils.Geometry.IntSize",
"copy.copy",
"nion.utils.Stream.ValueChangeStream",
"collections.namedtuple",
"nion.utils.Geometry.FloatRect.empty_rect",
"nion.ui.DrawingContext.DrawingContext",
"warnings.warn"
] |
[((137650, 137714), 'collections.namedtuple', 'collections.namedtuple', (['"""PositionLength"""', "['position', 'length']"], {}), "('PositionLength', ['position', 'length'])\n", (137672, 137714), False, 'import collections\n'), ((215555, 215580), 'imageio.imread', 'imageio.imread', (['b', 'format'], {}), '(b, format)\n', (215569, 215580), False, 'import imageio\n'), ((12936, 12955), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (12949, 12955), False, 'import copy\n'), ((13428, 13447), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (13441, 13447), False, 'import copy\n'), ((13907, 13926), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (13920, 13926), False, 'import copy\n'), ((14394, 14413), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (14407, 14413), False, 'import copy\n'), ((14872, 14891), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (14885, 14891), False, 'import copy\n'), ((15337, 15356), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (15350, 15356), False, 'import copy\n'), ((15822, 15841), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (15835, 15841), False, 'import copy\n'), ((16300, 16319), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (16313, 16319), False, 'import copy\n'), ((16765, 16784), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (16778, 16784), False, 'import copy\n'), ((17133, 17152), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (17146, 17152), False, 'import copy\n'), ((18064, 18083), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (18077, 18083), False, 'import copy\n'), ((18374, 18393), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (18387, 18393), False, 'import copy\n'), ((18781, 18800), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (18794, 18800), False, 'import copy\n'), ((19178, 19197), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), 
'(self)\n', (19191, 19197), False, 'import copy\n'), ((19344, 19371), 'nion.utils.Geometry.IntSize.make', 'Geometry.IntSize.make', (['size'], {}), '(size)\n', (19365, 19371), False, 'from nion.utils import Geometry\n'), ((19548, 19567), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (19561, 19567), False, 'import copy\n'), ((28112, 28138), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (28136, 28138), False, 'import threading\n'), ((40577, 40605), 'copy.deepcopy', 'copy.deepcopy', (['self.__sizing'], {}), '(self.__sizing)\n', (40590, 40605), False, 'import copy\n'), ((40881, 40907), 'copy.deepcopy', 'copy.deepcopy', (['self.sizing'], {}), '(self.sizing)\n', (40894, 40907), False, 'import copy\n'), ((55596, 55662), 'nion.utils.Geometry.IntRect', 'Geometry.IntRect', ([], {'origin': 'canvas_item_origin', 'size': 'canvas_item_size'}), '(origin=canvas_item_origin, size=canvas_item_size)\n', (55612, 55662), False, 'from nion.utils import Geometry\n'), ((80156, 80173), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (80171, 80173), False, 'import threading\n'), ((80429, 80458), 'typing.cast', 'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (80440, 80458), False, 'import typing\n'), ((83374, 83404), 'copy.copy', 'copy.copy', (['self.__canvas_items'], {}), '(self.__canvas_items)\n', (83383, 83404), False, 'import copy\n'), ((95631, 95658), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (95648, 95658), False, 'from nion.utils import Geometry\n'), ((97704, 97721), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (97719, 97721), False, 'import threading\n'), ((98183, 98204), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (98202, 98204), False, 'import threading\n'), ((100347, 100364), 'threading.Event', 'threading.Event', ([], {}), '()\n', (100362, 100364), False, 'import threading\n'), ((104718, 104756), 'copy.copy', 'copy.copy', 
(['self.__prepare_canvas_items'], {}), '(self.__prepare_canvas_items)\n', (104727, 104756), False, 'import copy\n'), ((109335, 109348), 'nion.utils.Event.Event', 'Event.Event', ([], {}), '()\n', (109346, 109348), False, 'from nion.utils import Event\n'), ((110056, 110096), 'typing.cast', 'typing.cast', (['CanvasItemComposition', 'self'], {}), '(CanvasItemComposition, self)\n', (110067, 110096), False, 'import typing\n'), ((115344, 115371), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (115361, 115371), False, 'from nion.utils import Geometry\n'), ((116687, 116704), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (116702, 116704), False, 'import threading\n'), ((133132, 133151), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (133149, 133151), False, 'from nion.utils import Geometry\n'), ((133516, 133545), 'typing.cast', 'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (133527, 133545), False, 'import typing\n'), ((133613, 133642), 'typing.cast', 'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (133624, 133642), False, 'import typing\n'), ((135204, 135235), 'nion.utils.Geometry.FloatRect.empty_rect', 'Geometry.FloatRect.empty_rect', ([], {}), '()\n', (135233, 135235), False, 'from nion.utils import Geometry\n'), ((135943, 135972), 'nion.utils.Geometry.IntRect.empty_rect', 'Geometry.IntRect.empty_rect', ([], {}), '()\n', (135970, 135972), False, 'from nion.utils import Geometry\n'), ((136130, 136157), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (136147, 136157), False, 'from nion.utils import Geometry\n'), ((138737, 138766), 'typing.cast', 'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (138748, 138766), False, 'import typing\n'), ((145345, 145374), 'nion.utils.Geometry.IntRect.empty_rect', 'Geometry.IntRect.empty_rect', ([], {}), '()\n', (145372, 145374), False, 'from 
nion.utils import Geometry\n'), ((145524, 145551), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (145541, 145551), False, 'from nion.utils import Geometry\n'), ((151190, 151207), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (151205, 151207), False, 'import threading\n'), ((152812, 152843), 'nion.ui.DrawingContext.DrawingContext', 'DrawingContext.DrawingContext', ([], {}), '()\n', (152841, 152843), False, 'from nion.ui import DrawingContext\n'), ((160146, 160175), 'typing.cast', 'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (160157, 160175), False, 'import typing\n'), ((171714, 171743), 'typing.cast', 'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (171725, 171743), False, 'import typing\n'), ((182634, 182655), 'copy.copy', 'copy.copy', (['self.style'], {}), '(self.style)\n', (182643, 182655), False, 'import copy\n'), ((184126, 184139), 'nion.utils.Event.Event', 'Event.Event', ([], {}), '()\n', (184137, 184139), False, 'from nion.utils import Event\n'), ((187551, 187564), 'nion.utils.Event.Event', 'Event.Event', ([], {}), '()\n', (187562, 187564), False, 'from nion.utils import Event\n'), ((209281, 209299), 'copy.copy', 'copy.copy', (['buttons'], {}), '(buttons)\n', (209290, 209299), False, 'import copy\n'), ((215633, 215661), 'numpy.zeros_like', 'numpy.zeros_like', (['image_argb'], {}), '(image_argb)\n', (215649, 215661), False, 'import numpy\n'), ((28479, 28505), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (28503, 28505), False, 'import threading\n'), ((28536, 28590), 'warnings.warn', 'warnings.warn', (['"""CanvasItem closed on different thread"""'], {}), "('CanvasItem closed on different thread')\n", (28549, 28590), False, 'import warnings\n'), ((28632, 28655), 'traceback.print_stack', 'traceback.print_stack', ([], {}), '()\n', (28653, 28655), False, 'import traceback\n'), ((29276, 29310), 'nion.utils.Geometry.IntSize.make', 
'Geometry.IntSize.make', (['canvas_size'], {}), '(canvas_size)\n', (29297, 29310), False, 'from nion.utils import Geometry\n'), ((29841, 29878), 'nion.utils.Geometry.IntPoint.make', 'Geometry.IntPoint.make', (['canvas_origin'], {}), '(canvas_origin)\n', (29863, 29878), False, 'from nion.utils import Geometry\n'), ((30677, 30719), 'nion.utils.Geometry.IntRect', 'Geometry.IntRect', (['(0, 0)', 'self.canvas_size'], {}), '((0, 0), self.canvas_size)\n', (30693, 30719), False, 'from nion.utils import Geometry\n'), ((30974, 31028), 'nion.utils.Geometry.IntRect', 'Geometry.IntRect', (['self.canvas_origin', 'self.canvas_size'], {}), '(self.canvas_origin, self.canvas_size)\n', (30990, 31028), False, 'from nion.utils import Geometry\n'), ((35799, 35818), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (35816, 35818), False, 'from nion.utils import Geometry\n'), ((35867, 35886), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (35884, 35886), False, 'from nion.utils import Geometry\n'), ((43711, 43742), 'nion.ui.DrawingContext.DrawingContext', 'DrawingContext.DrawingContext', ([], {}), '()\n', (43740, 43742), False, 'from nion.ui import DrawingContext\n'), ((44494, 44513), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (44511, 44513), False, 'from nion.utils import Geometry\n'), ((51088, 51156), 'nion.ui.UserInterface.MenuItemState', 'UserInterface.MenuItemState', ([], {'title': 'None', 'enabled': '(True)', 'checked': '(False)'}), '(title=None, enabled=True, checked=False)\n', (51115, 51156), False, 'from nion.ui import UserInterface\n'), ((53469, 53497), 'nion.utils.Geometry.Margins', 'Geometry.Margins', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (53485, 53497), False, 'from nion.utils import Geometry\n'), ((79210, 79229), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (79227, 79229), False, 'from nion.utils import Geometry\n'), ((80986, 81015), 'typing.cast', 
'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (80997, 81015), False, 'import typing\n'), ((90802, 90832), 'copy.copy', 'copy.copy', (['self.__canvas_items'], {}), '(self.__canvas_items)\n', (90811, 90832), False, 'import copy\n'), ((103805, 103843), 'copy.copy', 'copy.copy', (['self.__prepare_canvas_items'], {}), '(self.__prepare_canvas_items)\n', (103814, 103843), False, 'import copy\n'), ((104857, 104876), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (104874, 104876), False, 'from nion.utils import Geometry\n'), ((117980, 118010), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {'w': '(640)', 'h': '(480)'}), '(w=640, h=480)\n', (117996, 118010), False, 'from nion.utils import Geometry\n'), ((118211, 118240), 'copy.deepcopy', 'copy.deepcopy', (['self.__sizings'], {}), '(self.__sizings)\n', (118224, 118240), False, 'import copy\n'), ((118532, 118561), 'copy.deepcopy', 'copy.deepcopy', (['self.__sizings'], {}), '(self.__sizings)\n', (118545, 118561), False, 'import copy\n'), ((119358, 119375), 'copy.copy', 'copy.copy', (['sizing'], {}), '(sizing)\n', (119367, 119375), False, 'import copy\n'), ((123787, 123824), 'copy.copy', 'copy.copy', (['self.__shadow_canvas_items'], {}), '(self.__shadow_canvas_items)\n', (123796, 123824), False, 'import copy\n'), ((123847, 123883), 'copy.deepcopy', 'copy.deepcopy', (['self.__actual_sizings'], {}), '(self.__actual_sizings)\n', (123860, 123883), False, 'import copy\n'), ((124646, 124682), 'copy.deepcopy', 'copy.deepcopy', (['self.__actual_sizings'], {}), '(self.__actual_sizings)\n', (124659, 124682), False, 'import copy\n'), ((125683, 125719), 'copy.deepcopy', 'copy.deepcopy', (['self.__actual_sizings'], {}), '(self.__actual_sizings)\n', (125696, 125719), False, 'import copy\n'), ((126562, 126598), 'copy.deepcopy', 'copy.deepcopy', (['self.__actual_sizings'], {}), '(self.__actual_sizings)\n', (126575, 126598), False, 'import copy\n'), ((135066, 135193), 
'nion.utils.Geometry.FloatRect.from_tlhw', 'Geometry.FloatRect.from_tlhw', (['(canvas_size.height / 2 - bar_height / 2)', '(bar_offset + thumb_width / 2)', 'bar_height', 'bar_width'], {}), '(canvas_size.height / 2 - bar_height / 2, \n bar_offset + thumb_width / 2, bar_height, bar_width)\n', (135094, 135193), False, 'from nion.utils import Geometry\n'), ((137138, 137167), 'nion.utils.Geometry.FloatPoint', 'Geometry.FloatPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (137157, 137167), False, 'from nion.utils import Geometry\n'), ((148956, 148983), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (148973, 148983), False, 'from nion.utils import Geometry\n'), ((157763, 157780), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (157774, 157780), False, 'import weakref\n'), ((158525, 158544), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (158542, 158544), False, 'from nion.utils import Geometry\n'), ((190678, 190731), 'typing.cast', 'typing.cast', (['Geometry.IntSizeTuple', 'bitmap_data.shape'], {}), '(Geometry.IntSizeTuple, bitmap_data.shape)\n', (190689, 190731), False, 'import typing\n'), ((191320, 191370), 'typing.cast', 'typing.cast', (['Geometry.IntSizeTuple', 'raw_data.shape'], {}), '(Geometry.IntSizeTuple, raw_data.shape)\n', (191331, 191370), False, 'import typing\n'), ((210005, 210052), 'functools.partial', 'functools.partial', (['current_index_changed', 'index'], {}), '(current_index_changed, index)\n', (210022, 210052), False, 'import functools\n'), ((35903, 35928), 'nion.utils.Geometry.IntPoint.make', 'Geometry.IntPoint.make', (['p'], {}), '(p)\n', (35925, 35928), False, 'from nion.utils import Geometry\n'), ((46737, 46764), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (46754, 46764), False, 'from nion.utils import Geometry\n'), ((50970, 51027), 'typing.cast', 'typing.cast', (['UserInterface.MenuItemState', 
'menu_item_state'], {}), '(UserInterface.MenuItemState, menu_item_state)\n', (50981, 51027), False, 'import typing\n'), ((56961, 57022), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x_positions[index]', 'y': 'y_positions[index]'}), '(x=x_positions[index], y=y_positions[index])\n', (56978, 57022), False, 'from nion.utils import Geometry\n'), ((57058, 57118), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {'width': 'widths[index]', 'height': 'heights[index]'}), '(width=widths[index], height=heights[index])\n', (57074, 57118), False, 'from nion.utils import Geometry\n'), ((85417, 85454), 'nion.utils.Geometry.IntPoint.make', 'Geometry.IntPoint.make', (['canvas_origin'], {}), '(canvas_origin)\n', (85439, 85454), False, 'from nion.utils import Geometry\n'), ((85486, 85520), 'nion.utils.Geometry.IntSize.make', 'Geometry.IntSize.make', (['canvas_size'], {}), '(canvas_size)\n', (85507, 85520), False, 'from nion.utils import Geometry\n'), ((85971, 86005), 'nion.utils.Geometry.IntSize.make', 'Geometry.IntSize.make', (['canvas_size'], {}), '(canvas_size)\n', (85992, 86005), False, 'from nion.utils import Geometry\n'), ((92475, 92494), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (92492, 92494), False, 'from nion.utils import Geometry\n'), ((96185, 96242), 'typing.cast', 'typing.cast', (['Geometry.IntPoint', 'canvas_item.canvas_origin'], {}), '(Geometry.IntPoint, canvas_item.canvas_origin)\n', (96196, 96242), False, 'import typing\n'), ((104012, 104031), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (104029, 104031), False, 'from nion.utils import Geometry\n'), ((110487, 110552), 'nion.utils.Geometry.IntRect', 'Geometry.IntRect', ([], {'origin': '(-content_canvas_origin)', 'size': 'canvas_size'}), '(origin=-content_canvas_origin, size=canvas_size)\n', (110503, 110552), False, 'from nion.utils import Geometry\n'), ((110592, 110611), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), 
'()\n', (110609, 110611), False, 'from nion.utils import Geometry\n'), ((110618, 110636), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {}), '()\n', (110634, 110636), False, 'from nion.utils import Geometry\n'), ((121247, 121275), 'copy.copy', 'copy.copy', (['self.canvas_items'], {}), '(self.canvas_items)\n', (121256, 121275), False, 'import copy\n'), ((121302, 121331), 'copy.deepcopy', 'copy.deepcopy', (['self.__sizings'], {}), '(self.__sizings)\n', (121315, 121331), False, 'import copy\n'), ((128664, 128693), 'copy.deepcopy', 'copy.deepcopy', (['self.__sizings'], {}), '(self.__sizings)\n', (128677, 128693), False, 'import copy\n'), ((128725, 128761), 'copy.deepcopy', 'copy.deepcopy', (['self.__actual_sizings'], {}), '(self.__actual_sizings)\n', (128738, 128761), False, 'import copy\n'), ((133351, 133394), 'nion.utils.Stream.ValueChangeStream', 'Stream.ValueChangeStream', (['self.value_stream'], {}), '(self.value_stream)\n', (133375, 133394), False, 'from nion.utils import Stream\n'), ((145850, 145869), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (145867, 145869), False, 'from nion.utils import Geometry\n'), ((162405, 162424), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (162422, 162424), False, 'from nion.utils import Geometry\n'), ((162460, 162504), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {'height': 'height', 'width': 'width'}), '(height=height, width=width)\n', (162476, 162504), False, 'from nion.utils import Geometry\n'), ((170575, 170602), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (170592, 170602), False, 'from nion.utils import Geometry\n'), ((171206, 171235), 'typing.cast', 'typing.cast', (['typing.Any', 'None'], {}), '(typing.Any, None)\n', (171217, 171235), False, 'import typing\n'), ((171292, 171319), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (171309, 171319), False, 
'from nion.utils import Geometry\n'), ((174348, 174375), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (174365, 174375), False, 'from nion.utils import Geometry\n'), ((177379, 177406), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (177396, 177406), False, 'from nion.utils import Geometry\n'), ((190819, 190857), 'nion.utils.Geometry.fit_to_size', 'Geometry.fit_to_size', (['rect', 'image_size'], {}), '(rect, image_size)\n', (190839, 190857), False, 'from nion.utils import Geometry\n'), ((191458, 191496), 'nion.utils.Geometry.fit_to_size', 'Geometry.fit_to_size', (['rect', 'image_size'], {}), '(rect, image_size)\n', (191478, 191496), False, 'from nion.utils import Geometry\n'), ((51932, 51958), 'nion.utils.Geometry.IntPoint.make', 'Geometry.IntPoint.make', (['p1'], {}), '(p1)\n', (51954, 51958), False, 'from nion.utils import Geometry\n'), ((51977, 52003), 'nion.utils.Geometry.IntPoint.make', 'Geometry.IntPoint.make', (['p2'], {}), '(p2)\n', (51999, 52003), False, 'from nion.utils import Geometry\n'), ((55845, 55915), 'nion.utils.Geometry.fit_to_aspect_ratio', 'Geometry.fit_to_aspect_ratio', (['rect', 'layout_sizing.minimum_aspect_ratio'], {}), '(rect, layout_sizing.minimum_aspect_ratio)\n', (55873, 55915), False, 'from nion.utils import Geometry\n'), ((86041, 86060), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (86058, 86060), False, 'from nion.utils import Geometry\n'), ((105750, 105788), 'copy.copy', 'copy.copy', (['self.__prepare_canvas_items'], {}), '(self.__prepare_canvas_items)\n', (105759, 105788), False, 'import copy\n'), ((106617, 106648), 'nion.ui.DrawingContext.DrawingContext', 'DrawingContext.DrawingContext', ([], {}), '()\n', (106646, 106648), False, 'from nion.ui import DrawingContext\n'), ((111690, 111709), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (111707, 111709), False, 'from nion.utils import 
Geometry\n'), ((126966, 126993), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (126983, 126993), False, 'from nion.utils import Geometry\n'), ((127691, 127718), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (127708, 127718), False, 'from nion.utils import Geometry\n'), ((135783, 135918), 'nion.utils.Geometry.FloatRect.from_tlhw', 'Geometry.FloatRect.from_tlhw', (['(canvas_size.height / 2 - thumb_height / 2)', '(value * bar_width + bar_offset)', 'thumb_height', 'thumb_width'], {}), '(canvas_size.height / 2 - thumb_height / 2, \n value * bar_width + bar_offset, thumb_height, thumb_width)\n', (135811, 135918), False, 'from nion.utils import Geometry\n'), ((145275, 145329), 'nion.utils.Geometry.IntRect', 'Geometry.IntRect', ([], {'origin': 'thumb_origin', 'size': 'thumb_size'}), '(origin=thumb_origin, size=thumb_size)\n', (145291, 145329), False, 'from nion.utils import Geometry\n'), ((165552, 165579), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (165569, 165579), False, 'from nion.utils import Geometry\n'), ((166035, 166062), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (166052, 166062), False, 'from nion.utils import Geometry\n'), ((169026, 169053), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (169043, 169053), False, 'from nion.utils import Geometry\n'), ((169483, 169510), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (169500, 169510), False, 'from nion.utils import Geometry\n'), ((169583, 169610), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (169600, 169610), False, 'from nion.utils import Geometry\n'), ((171115, 171142), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'x', 'y': 'y'}), '(x=x, 
y=y)\n', (171132, 171142), False, 'from nion.utils import Geometry\n'), ((175056, 175083), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (175073, 175083), False, 'from nion.utils import Geometry\n'), ((177844, 177871), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (177861, 177871), False, 'from nion.utils import Geometry\n'), ((56064, 56134), 'nion.utils.Geometry.fit_to_aspect_ratio', 'Geometry.fit_to_aspect_ratio', (['rect', 'layout_sizing.maximum_aspect_ratio'], {}), '(rect, layout_sizing.maximum_aspect_ratio)\n', (56092, 56134), False, 'from nion.utils import Geometry\n'), ((107212, 107259), 'logging.debug', 'logging.debug', (['"""CanvasItem Render Error: %s"""', 'e'], {}), "('CanvasItem Render Error: %s', e)\n", (107225, 107259), False, 'import logging\n'), ((107280, 107301), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (107299, 107301), False, 'import traceback\n'), ((107322, 107345), 'traceback.print_stack', 'traceback.print_stack', ([], {}), '()\n', (107343, 107345), False, 'import traceback\n'), ((115034, 115099), 'nion.utils.Geometry.IntRect', 'Geometry.IntRect', ([], {'origin': '(-content_canvas_origin)', 'size': 'canvas_size'}), '(origin=-content_canvas_origin, size=canvas_size)\n', (115050, 115099), False, 'from nion.utils import Geometry\n'), ((121717, 121749), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': 'origin', 'x': '(0)'}), '(y=origin, x=0)\n', (121734, 121749), False, 'from nion.utils import Geometry\n'), ((121823, 121877), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {'height': 'size', 'width': 'canvas_size.width'}), '(height=size, width=canvas_size.width)\n', (121839, 121877), False, 'from nion.utils import Geometry\n'), ((122321, 122353), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'y': '(0)', 'x': 'origin'}), '(y=0, x=origin)\n', (122338, 122353), False, 'from nion.utils import 
Geometry\n'), ((122427, 122482), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {'height': 'canvas_size.height', 'width': 'size'}), '(height=canvas_size.height, width=size)\n', (122443, 122482), False, 'from nion.utils import Geometry\n'), ((144900, 144940), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': '(0)', 'y': 'thumb_position'}), '(x=0, y=thumb_position)\n', (144917, 144940), False, 'from nion.utils import Geometry\n'), ((144978, 145040), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {'width': 'canvas_size.width', 'height': 'thumb_length'}), '(width=canvas_size.width, height=thumb_length)\n', (144994, 145040), False, 'from nion.utils import Geometry\n'), ((145106, 145146), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'thumb_position', 'y': '(0)'}), '(x=thumb_position, y=0)\n', (145123, 145146), False, 'from nion.utils import Geometry\n'), ((145184, 145247), 'nion.utils.Geometry.IntSize', 'Geometry.IntSize', ([], {'width': 'thumb_length', 'height': 'canvas_size.height'}), '(width=thumb_length, height=canvas_size.height)\n', (145200, 145247), False, 'from nion.utils import Geometry\n'), ((163811, 163830), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {}), '()\n', (163828, 163830), False, 'from nion.utils import Geometry\n'), ((56231, 56303), 'nion.utils.Geometry.fit_to_aspect_ratio', 'Geometry.fit_to_aspect_ratio', (['rect', 'layout_sizing.preferred_aspect_ratio'], {}), '(rect, layout_sizing.preferred_aspect_ratio)\n', (56259, 56303), False, 'from nion.utils import Geometry\n'), ((150047, 150118), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'tracking_content_offset[1]', 'y': 'new_content_offset_v'}), '(x=tracking_content_offset[1], y=new_content_offset_v)\n', (150064, 150118), False, 'from nion.utils import Geometry\n'), ((150593, 150664), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', ([], {'x': 'new_content_offset_h', 'y': 'tracking_content_offset[0]'}), 
'(x=new_content_offset_h, y=tracking_content_offset[0])\n', (150610, 150664), False, 'from nion.utils import Geometry\n'), ((153307, 153330), 'nion.utils.Geometry.IntPoint', 'Geometry.IntPoint', (['(0)', '(0)'], {}), '(0, 0)\n', (153324, 153330), False, 'from nion.utils import Geometry\n')]
|
import torch
import numpy as np
from torchwi.utils.ctensor import ca2rt, rt2ca
class FreqL2Loss(torch.autograd.Function):
    """L2 misfit for frequency-domain data stored as real-valued tensors.

    Tensors of shape (nrhs, 2*nx) carry the real and imaginary parts of a
    complex field; rt2ca/ca2rt convert between this real layout and complex
    numpy arrays (exact layout defined by those helpers).
    """

    @staticmethod
    def forward(ctx, frd, true):
        """Return the scalar loss 0.5 * sum(|frd - true|^2) over the complex residual."""
        # resid: (nrhs, 2*nx) 2 for real and imaginary
        resid = frd - true
        resid_c = rt2ca(resid)
        l2 = np.real(0.5*np.sum(resid_c*np.conjugate(resid_c)))
        ctx.save_for_backward(resid)
        return torch.tensor(l2)

    @staticmethod
    def backward(ctx, grad_output):
        """Gradient of the loss w.r.t. frd: conj(resid), scaled by grad_output.

        The previous implementation ignored grad_output, which is only
        correct when this loss is the terminal scalar (grad_output == 1);
        scaling keeps the chain rule valid when the loss is composed further.
        """
        resid, = ctx.saved_tensors
        grad_input = grad_output * ca2rt(np.conjugate(rt2ca(resid)))
        return grad_input, None
|
[
"numpy.conjugate",
"torchwi.utils.ctensor.rt2ca",
"torch.tensor"
] |
[((275, 287), 'torchwi.utils.ctensor.rt2ca', 'rt2ca', (['resid'], {}), '(resid)\n', (280, 287), False, 'from torchwi.utils.ctensor import ca2rt, rt2ca\n'), ((404, 420), 'torch.tensor', 'torch.tensor', (['l2'], {}), '(l2)\n', (416, 420), False, 'import torch\n'), ((551, 563), 'torchwi.utils.ctensor.rt2ca', 'rt2ca', (['resid'], {}), '(resid)\n', (556, 563), False, 'from torchwi.utils.ctensor import ca2rt, rt2ca\n'), ((328, 349), 'numpy.conjugate', 'np.conjugate', (['resid_c'], {}), '(resid_c)\n', (340, 349), True, 'import numpy as np\n')]
|
""" Preprocess the ISBI data set.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, JHU/APL"
__license__ = "Apache 2.0"
import argparse, os.path
import numpy as np
from scipy.stats.mstats import mquantiles
import scipy.io
import emlib
def _parse_slices(expr):
    """Evaluate a slice-list expression such as 'range(10)' or '[0, 2, 4]'.

    Uses eval with an empty __builtins__ and only `range` exposed, so an
    arbitrary command-line string cannot execute other code (replaces the
    bare eval the original flagged with an XXX).
    """
    return eval(expr, {"__builtins__": {}}, {"range": range})


def get_args():
    """Command line parameters for the 'deploy' procedure.

    You will probably want to override the train/valid/test split
    to better suit your problem of interest...

    Returns:
        argparse.Namespace with the slice arguments already evaluated to
        python objects (range or list).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('-X', dest='dataFileName', type=str, required=True,
                        help='EM data file')
    parser.add_argument('-Y', dest='labelsFileName', type=str, required=True,
                        help='Ground truth labels for X')
    parser.add_argument('--train-slices', dest='trainSlices',
                        type=str, default='range(10)',
                        help='which slices to use for training')
    parser.add_argument('--valid-slices', dest='validSlices',
                        type=str, default='range(10,20)',
                        help='which slices to use for validation')
    parser.add_argument('--test-slices', dest='testSlices',
                        type=str, default='range(20,30)',
                        help='which slices to use for test')
    parser.add_argument('--brightness-quantile', dest='brightQuant',
                        type=float, default=0.97,
                        help='top quantile for non-membrane pixels.')
    parser.add_argument('--out-dir', dest='outDir',
                        type=str, default='./',
                        help='output directory')

    args = parser.parse_args()

    # validate via argparse (the original assert statements vanish under
    # `python -O`); parser.error reports usage and exits
    if not (0 < args.brightQuant <= 1.0):
        parser.error('--brightness-quantile must be in (0, 1]')

    # map strings to python objects without exposing builtins to eval
    args.trainSlices = _parse_slices(args.trainSlices)
    args.validSlices = _parse_slices(args.validSlices)
    args.testSlices = _parse_slices(args.testSlices)

    return args
if __name__ == "__main__":
    args = get_args();
    #outDir = os.path.split(args.dataFileName)[0]
    # make sure the output directory exists before writing anything
    if not os.path.isdir(args.outDir):
        os.mkdir(args.outDir)
    # load the raw EM volume and its ground-truth labels as uint8 cubes
    X = emlib.load_cube(args.dataFileName, np.uint8)
    Y = emlib.load_cube(args.labelsFileName, np.uint8)
    # remap Y labels from ISBI convention to membrane-vs-non-membrane
    Y[Y==0] = 1;    # membrane
    Y[Y==255] = 0;  # non-membrane
    # change type of Y so can use -1 as a value (marks pixels to omit below)
    Y = Y.astype(np.int8)
    # split along the slice axis per the command-line slice lists
    Xtrain = X[args.trainSlices,:,:]; Ytrain = Y[args.trainSlices,:,:]
    Xvalid = X[args.validSlices,:,:]; Yvalid = Y[args.validSlices,:,:]
    Xtest = X[args.testSlices,:,:]; Ytest = Y[args.testSlices,:,:]
    # brightness thresholding: label pixels brighter than the chosen quantile
    # as -1 so they are excluded downstream.
    # NOTE(review): the quantile is computed over pixels labeled 1, which the
    # remap above calls "membrane", yet the CLI help says "non-membrane
    # pixels" — verify which is intended.
    thresh = mquantiles(np.concatenate((Xtrain[Ytrain==1], Xvalid[Yvalid==1])), args.brightQuant)
    # NOTE(review): np.size(X) is already a scalar, so np.prod is a no-op here
    pctOmitted = 100.0*np.sum(X > thresh) / np.prod(np.size(X))
    print('[preprocess]: percent of pixels omitted by brightness filter: %0.2f' % pctOmitted)
    Ytrain[Xtrain > thresh] = -1
    Yvalid[Xvalid > thresh] = -1
    Ytest[Xtest > thresh] = -1
    # save results (numpy format)
    np.save(os.path.join(args.outDir, 'Xtrain.npy'), Xtrain)
    np.save(os.path.join(args.outDir, 'Ytrain.npy'), Ytrain)
    np.save(os.path.join(args.outDir, 'Xvalid.npy'), Xvalid)
    np.save(os.path.join(args.outDir, 'Yvalid.npy'), Yvalid)
    if Xtest.size > 0:
        np.save(os.path.join(args.outDir, 'Xtest.npy'), Xtest)
        np.save(os.path.join(args.outDir, 'Ytest.npy'), Ytest)
    # also a matlab version
    scipy.io.savemat(os.path.join(args.outDir, 'Xtrain.mat'), {'Xtrain' : Xtrain})
    scipy.io.savemat(os.path.join(args.outDir, 'Ytrain.mat'), {'Ytrain' : Ytrain})
    scipy.io.savemat(os.path.join(args.outDir, 'Xvalid.mat'), {'Xvalid' : Xvalid})
    scipy.io.savemat(os.path.join(args.outDir, 'Yvalid.mat'), {'Yvalid' : Yvalid})
    if Xtest.size > 0:
        scipy.io.savemat(os.path.join(args.outDir, 'Xtest.mat'), {'Xtest' : Xtest})
        scipy.io.savemat(os.path.join(args.outDir, 'Ytest.mat'), {'Ytest' : Ytest})
    print('[preprocess]: done!')
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[
"numpy.size",
"numpy.sum",
"argparse.ArgumentParser",
"emlib.load_cube",
"numpy.concatenate"
] |
[((462, 487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (485, 487), False, 'import argparse, os.path\n'), ((1935, 1979), 'emlib.load_cube', 'emlib.load_cube', (['args.dataFileName', 'np.uint8'], {}), '(args.dataFileName, np.uint8)\n', (1950, 1979), False, 'import emlib\n'), ((1988, 2034), 'emlib.load_cube', 'emlib.load_cube', (['args.labelsFileName', 'np.uint8'], {}), '(args.labelsFileName, np.uint8)\n', (2003, 2034), False, 'import emlib\n'), ((2523, 2581), 'numpy.concatenate', 'np.concatenate', (['(Xtrain[Ytrain == 1], Xvalid[Yvalid == 1])'], {}), '((Xtrain[Ytrain == 1], Xvalid[Yvalid == 1]))\n', (2537, 2581), True, 'import numpy as np\n'), ((2620, 2638), 'numpy.sum', 'np.sum', (['(X > thresh)'], {}), '(X > thresh)\n', (2626, 2638), True, 'import numpy as np\n'), ((2649, 2659), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (2656, 2659), True, 'import numpy as np\n')]
|
import numpy
import math
#from .. import utilities
class phase_space(object):
    """Delay embedding of a scalar time series.

    Each row of ``matrix`` is an m-dimensional delay vector taken from the
    input sequence with lag ``tau``; ``eps`` is the recurrence threshold
    carried along for downstream recurrence computations.  Instances hash
    and compare by identity so they can safely key result caches.
    """
    def __init__(self, xs, tau=1, m=2, eps=.001):
        self.tau, self.m, self.eps = tau, m, eps
        rows = int(len(xs) - m * tau + tau)      # number of delay vectors
        window = 1 + int(m * tau - tau)          # span of one delay vector
        self.matrix = numpy.empty([rows, m], dtype=float)
        for row in range(rows):
            self.matrix[row, :] = xs[row:row + window:tau]
        self.recurrence_matrix = None
        return None

    def __repr__(self):
        return "phase_space()"

    def __str__(self):
        return "{} with shape {} and (tau, m, eps) = ({}, {}, {})".format(
            type(self.matrix), self.matrix.shape, self.tau, self.m, self.eps)

    def __hash__(self):
        # Identity hash: distinct embeddings never collide in caches.
        return id(self)

    def __eq__(self, other):
        return id(self) == id(other)
def _Theta(x, y, eps):
"""Theta tmp
Args:
x:
y:
eps:
Returns:
int: 0 or 1.
"""
sm = 0
for k in range(len(x)):
sm += (x[k]-y[k])**2
if sm > eps:
return 0
return 1
_recurrence_matrix_cache = dict()

def recurrence_matrix(xps, yps=None, joint=False):
    """Computes cross-reccurence matrix when two inputs are given and self-reccurence otherwise.

    Results are memoized in ``_recurrence_matrix_cache`` keyed on the input
    objects (which hash by identity).  Note the buffer is allocated with an
    integer dtype, so each stored pairwise norm is truncated to an int; for
    the self-recurrence case only the lower triangle is filled.

    Args:
        xps (numpy.array): Phase_space object(s).
        yps (numpy.array, optional): Phase_space object for cross reccurence. Defaults to none.
        joint (bool, optional): Should joint reccurence be calculated? Defaults to False.

    Returns:
        numpy.array : A 2D numpy matrix.
    """
    cross = bool(yps)
    if not cross:
        yps = xps
    key = (xps, yps, joint)
    if key in _recurrence_matrix_cache:
        return _recurrence_matrix_cache[key]
    if (xps.matrix.shape, xps.tau, xps.m, xps.eps) != (yps.matrix.shape, yps.tau, yps.m, yps.eps):
        print("Error: Input phase spaces have different parameters.")
        return
    if joint:
        # Joint recurrence: elementwise product of the two self-recurrences.
        return numpy.multiply(recurrence_matrix(xps), recurrence_matrix(yps))
    n = xps.matrix.shape[0]
    dist = numpy.full([n, n], 0)
    for row in range(n):
        limit = n if cross else row + 1
        for col in range(limit):
            #dist[row][col] = _Theta( xps.matrix[row], yps.matrix[col], xps.eps)
            dist[row][col] = numpy.linalg.norm(xps.matrix[row] - yps.matrix[col])
    _recurrence_matrix_cache[key] = dist
    return _recurrence_matrix_cache[key]
def cross_recurrence_matrix(xps, yps):
    """Cross-recurrence matrix of two phase spaces.

    Thin convenience wrapper over :func:`recurrence_matrix` with both
    inputs supplied.

    Args:
        xps (numpy.array): first phase space.
        yps (numpy.array): second phase space.

    Returns:
        numpy.array : A 2D numpy array.
    """
    return recurrence_matrix(xps, yps)
def joint_recurrence_matrix(xps, yps):
    """Joint recurrence matrix of two phase spaces.

    Thin convenience wrapper over :func:`recurrence_matrix` with
    ``joint=True``.

    Args:
        xps (numpy.array): first phase space.
        yps (numpy.array): second phase space.

    Returns:
        numpy.array : A 2D numpy array.
    """
    return recurrence_matrix(xps, yps, joint=True)
def recurrence_rate( AA ):
    """Computes reccurence-rate from reccurence matrix.

    Entry ``k`` of the result is the average density of recurrence points in
    every k-by-k window slid along the main diagonal, computed incrementally.

    Fix: the module's ``utilities`` import is commented out, so the previous
    ``utilities.is_lower_triangular(AA)`` call raised ``NameError``; the
    lower-triangularity test is now done directly with numpy.

    Args:
        AA (numpy.array): A reccurence matrix.

    Returns:
        numpy.array : A numpy array (index 0 is always 0).
    """
    AA = numpy.asarray(AA)
    isLower = numpy.array_equal(AA, numpy.tril(AA))
    N = AA.shape[0]
    ans = numpy.zeros( N, dtype=float )
    for k in range(1,N):
        # Sum of the first k x k window, then slide along the diagonal,
        # updating the sum incrementally instead of re-summing each window.
        tmp = numpy.sum(AA[:k,:k])
        ans[k] += tmp
        for i in range(1, N-k):
            if isLower:
                # Lower-triangular input: only the new bottom row enters and
                # the old left column leaves.
                tmp += numpy.sum(AA[i+k-1,i:i+k]) - numpy.sum(AA[i-1:i-1+k,i-1])
            else:
                tmp += numpy.sum( AA[i+k-1, i:i+k] ) \
                       + numpy.sum( AA[i:i+k-1, i+k-1] ) \
                       - numpy.sum( AA[i-1:i-1+k, i-1] ) \
                       - numpy.sum( AA[i-1, i:i-1+k] )
            ans[k] += tmp
        # Normalize by window count and area (half-area for lower-triangular).
        ans[k] /= 0.5*(N-k)*k**2 if isLower else (N-k)*k**2
    return ans
_measures_cache = dict()
def determinism( AA ):
"""Calculates percentage of recurrence points which form diagonal lines.
Args:
AA (numpy.array): A reccurence matrix.
Returns:
float: The determinism.
"""
if (id(AA),"determinism") in _measures_cache:
return _measures_cache[id(AA),"determinism"]
isLower = utilities.is_lower_triangular(AA)
N = AA.shape[0]
H = dict()
for key in range(N):
H[key] = 0
def lower_DET(x):
for i in range(1, N):
isPrev = False
count = 0
for j in range(i, N):
#search for consective lines in AA[idx1,idx1-idx]
if x[j, j-i]:
if isPrev:
count += 1
else:
count = 1
isPrev = True
elif isPrev:
isPrev = False
H[count] += 1 if count > 1 else 0
count = 0
H[count] += 1 if count>1 else 0
return
lower_DET(AA)
if not isLower:
lower_DET(numpy.transpose(AA))
num, avg, max_L = 0, 0, 0
for key, val in H.items():
max_L = key if val else max_L
num += key*val
avg += val
dem = numpy.sum(AA)
ENTR = 0
if avg:
for key, val in H.items():
p = val/avg
ENTR -= p*math.log(p) if p else 0
PRED = num/avg
else:
ENTR = None
PRED = 0
DIV = 1/max_L if max_L else float('inf')
_measures_cache[id(AA),"determinism"] = num/dem
_measures_cache[id(AA),"pred"] = PRED
_measures_cache[id(AA),"divergence"] = DIV
_measures_cache[id(AA),"entropy"] = ENTR
return _measures_cache[id(AA),"determinism"]
def divergence(AA):
    """Divergence of the recurrence matrix.

    The value (1 / longest diagonal line length, ``inf`` when there is
    none) is computed and cached by :func:`determinism` as a side effect.

    Args:
        AA (numpy.array): A numpy array.

    Returns:
        numpy.array: The answer.
    """
    cache_key = (id(AA), "divergence")
    if cache_key not in _measures_cache:
        determinism(AA)  # populates the cache entry as a side effect
    return _measures_cache[cache_key]
def entropy(AA):
    """Diagonal-line-length entropy of the recurrence matrix.

    The value is computed and cached by :func:`determinism` as a side
    effect (``None`` when no diagonal lines exist).

    Args:
        AA (numpy.array): A numpy array.

    Returns:
        numpy.array: The answer.
    """
    cache_key = (id(AA), "entropy")
    if cache_key not in _measures_cache:
        determinism(AA)  # populates the cache entry as a side effect
    return _measures_cache[cache_key]
def pred(AA):
    """Average diagonal-line length ('pred') of the recurrence matrix.

    The value is computed and cached by :func:`determinism` as a side
    effect (0 when no diagonal lines exist).

    Args:
        AA (numpy.array): A numpy array.

    Returns:
        numpy.array: The answer.
    """
    cache_key = (id(AA), "pred")
    if cache_key not in _measures_cache:
        determinism(AA)  # populates the cache entry as a side effect
    return _measures_cache[cache_key]
def trend( AA, longterm=False ):
    """Calculate the TREND of a given 1d numpy array.

    Least-squares slope proxy of the (mean-centered) series against a
    centered index, over either the first half (medium range) or all but
    the last point (long range).

    Fix: the previous body referenced an undefined name ``R`` (the
    parameter is ``AA``) and used ``np`` although this module imports
    ``numpy`` — every call raised ``NameError``.

    Args:
        AA (numpy.array(float)): A 1D array (e.g. output of recurrence_rate).
        longterm (bool, optional): Should long-term trend be calculate? Defaults to False.

    Returns:
        float: The medium or long range trend (Med or Long).
    """
    R = numpy.asarray(AA)
    N = R.shape[0]
    R_med = R[:N//2] - numpy.mean(R[:N//2])
    R_long = R[:-1] - numpy.mean(R[:-1])
    coef = numpy.array([i - N//4 + 1 for i in range(N//2)])
    Med = numpy.dot(coef, R_med)/numpy.dot(coef, coef)
    coef = numpy.array([i - N//2 + 1 for i in range(N-1)])
    Long = numpy.dot(coef, R_long)/numpy.dot(coef, coef)
    return Long if longterm else Med
def laminarity( AA ): #+ Trapping
    """Laminarity. Calculates percentage of recurrence points which form verticle lines.

    This function calculates Trapping as a side effect and caches both
    values in ``_measures_cache`` keyed by ``id(AA)``.

    Fix: the module's ``utilities`` import is commented out, so the previous
    ``utilities.is_lower_triangular(AA)`` call raised ``NameError``; the
    lower-triangularity test is now done directly with numpy.

    Args:
        AA (numpy.array(float)): A 2D matrix.

    Returns:
        float: The laminarity
    """
    N = AA.shape[0]
    # H[L] counts maximal vertical segments of length L (only L > 1 kept).
    H = dict()
    for key in range(N):
        H[key] = 0
    #Lower Lam
    for j in range(N):
        isPrev, count = False, 0
        for i in range(j+1, N):
            #search for consecutive lines in M[i, j]
            if AA[i, j]:
                if isPrev:
                    count += 1
                else:
                    isPrev, count = True, 1
            elif isPrev:
                H[count] += 1 if count > 1 else 0
                isPrev, count = False, 0
        H[count] += 1 if count > 1 else 0
    #Upper Lam
    if not numpy.array_equal(AA, numpy.tril(AA)):
        for j in range(N):
            isPrev, count = False, 0
            for i in range(j):
                #search for consecutive lines in M[idx1, idx]
                if AA[i,j]:
                    if isPrev:
                        count += 1
                    else:
                        isPrev, count = True, 1
                elif isPrev:
                    H[count] += 1 if count > 1 else 0
                    isPrev, count = False, 0
            H[count] += 1 if count > 1 else 0
    num, avg= 0, 0
    for key, val in H.items():
        avg += val
        num += key*val
    dem = num + numpy.sum(AA)
    LAMI = num/dem
    TRAP = num/avg if avg else 0
    _measures_cache[id(AA),"laminarity"] = LAMI
    _measures_cache[id(AA),"trapping"] = TRAP
    return _measures_cache[id(AA),"laminarity"]
def trapping( AA ):
    """Trapping (average vertical-line length).

    The value is computed by :func:`laminarity` as a side effect and
    cached in ``_measures_cache``.

    Fix: the previous body did ``return laminarity(AA)`` on a cache miss,
    i.e. it returned the laminarity value instead of the trapping value —
    inconsistent with the sibling accessors (divergence/entropy/pred),
    which call the producer and then read the cache.

    Args:
        AA (numpy.array(float)): A 2D matrix.

    Returns:
        float: The trapping
    """
    if (id(AA),"trapping") not in _measures_cache:
        laminarity(AA)  # populates the 'trapping' cache entry
    return _measures_cache[id(AA),"trapping"]
|
[
"numpy.full",
"numpy.sum",
"numpy.empty",
"numpy.zeros",
"numpy.transpose",
"numpy.linalg.norm",
"math.log"
] |
[((2196, 2217), 'numpy.full', 'numpy.full', (['[N, N]', '(0)'], {}), '([N, N], 0)\n', (2206, 2217), False, 'import numpy\n'), ((3346, 3373), 'numpy.zeros', 'numpy.zeros', (['N'], {'dtype': 'float'}), '(N, dtype=float)\n', (3357, 3373), False, 'import numpy\n'), ((5302, 5315), 'numpy.sum', 'numpy.sum', (['AA'], {}), '(AA)\n', (5311, 5315), False, 'import numpy\n'), ((279, 311), 'numpy.empty', 'numpy.empty', (['[N, m]'], {'dtype': 'float'}), '([N, m], dtype=float)\n', (290, 311), False, 'import numpy\n'), ((3417, 3438), 'numpy.sum', 'numpy.sum', (['AA[:k, :k]'], {}), '(AA[:k, :k])\n', (3426, 3438), False, 'import numpy\n'), ((8917, 8930), 'numpy.sum', 'numpy.sum', (['AA'], {}), '(AA)\n', (8926, 8930), False, 'import numpy\n'), ((2361, 2393), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(AA[i] - BB[j])'], {}), '(AA[i] - BB[j])\n', (2378, 2393), False, 'import numpy\n'), ((5124, 5143), 'numpy.transpose', 'numpy.transpose', (['AA'], {}), '(AA)\n', (5139, 5143), False, 'import numpy\n'), ((3548, 3581), 'numpy.sum', 'numpy.sum', (['AA[i + k - 1, i:i + k]'], {}), '(AA[i + k - 1, i:i + k])\n', (3557, 3581), False, 'import numpy\n'), ((3577, 3614), 'numpy.sum', 'numpy.sum', (['AA[i - 1:i - 1 + k, i - 1]'], {}), '(AA[i - 1:i - 1 + k, i - 1])\n', (3586, 3614), False, 'import numpy\n'), ((3822, 3855), 'numpy.sum', 'numpy.sum', (['AA[i - 1, i:i - 1 + k]'], {}), '(AA[i - 1, i:i - 1 + k])\n', (3831, 3855), False, 'import numpy\n'), ((5422, 5433), 'math.log', 'math.log', (['p'], {}), '(p)\n', (5430, 5433), False, 'import math\n'), ((3763, 3800), 'numpy.sum', 'numpy.sum', (['AA[i - 1:i - 1 + k, i - 1]'], {}), '(AA[i - 1:i - 1 + k, i - 1])\n', (3772, 3800), False, 'import numpy\n'), ((3647, 3680), 'numpy.sum', 'numpy.sum', (['AA[i + k - 1, i:i + k]'], {}), '(AA[i + k - 1, i:i + k])\n', (3656, 3680), False, 'import numpy\n'), ((3704, 3741), 'numpy.sum', 'numpy.sum', (['AA[i:i + k - 1, i + k - 1]'], {}), '(AA[i:i + k - 1, i + k - 1])\n', (3713, 3741), False, 'import numpy\n')]
|
"""ProbsMeasurer's module."""
import numpy as np
from mlscratch.tensor import Tensor
from .measurer import Measurer
class ProbsMeasurer(Measurer[float]):
    """Computes how many samples were evaluated correctly by
    getting the most probable label/index in the probability array."""

    def measure(
            self,
            result: Tensor,
            expected: Tensor) -> float:
        # Accuracy: fraction of rows whose most probable index matches
        # the expected most probable index.
        batch_size = result.shape[0]
        predicted = np.argmax(result, axis=-1)
        truth = np.argmax(expected, axis=-1)
        correct = np.sum(predicted == truth)
        return correct / batch_size
|
[
"numpy.sum",
"numpy.argmax"
] |
[((459, 485), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(-1)'}), '(result, axis=-1)\n', (468, 485), True, 'import numpy as np\n'), ((517, 545), 'numpy.argmax', 'np.argmax', (['expected'], {'axis': '(-1)'}), '(expected, axis=-1)\n', (526, 545), True, 'import numpy as np\n'), ((564, 614), 'numpy.sum', 'np.sum', (['(result_max_indices == expected_max_indices)'], {}), '(result_max_indices == expected_max_indices)\n', (570, 614), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
class Stats(object):
    '''
    Produces stats given a schedule.

    Builds per-game features (rolling last-n-game averages, back-to-back
    flags, game counts) from a schedule DataFrame with one row per game.

    Fix: replaces pandas APIs removed in modern releases —
    ``DataFrame.append`` (removed in 2.0) with ``pd.concat``, and
    ``set_value`` / ``.ix`` (removed in 1.0) with ``.loc`` / ``.at``.
    '''

    def __init__(self, games, agg_method, date_col, h_col, a_col, outcome_col, seg_vars = []):
        '''
        :param games: schedule DataFrame, one row per game
        :param agg_method: aggregation label (stored for callers; unused here)
        :param date_col: name of the game-date column
        :param h_col: name of the home-team id column
        :param a_col: name of the away-team id column
        :param outcome_col: name of the outcome/target column
        :param seg_vars: extra columns copied through to the stats frame
        '''
        # NOTE: the mutable [] default is kept for interface compatibility;
        # seg_vars is only iterated, never mutated.
        self.games = games
        self.agg_method = agg_method
        self.date_col = date_col
        self.h_col = h_col
        self.a_col = a_col
        self.outcome_col = outcome_col
        self.seg_vars = seg_vars

    # Inputs: number of past games, team id, date of current game
    # Output: list of most recent n games (within the last 30 days)
    def get_last_n_games(self, n, team_id, curr_dt):
        # Filter to get past games only
        games = self.games[self.games[self.date_col] < curr_dt]
        # Filters to get past home and away games
        a_games = games[games[self.a_col] == team_id]
        h_games = games[games[self.h_col] == team_id]
        # DataFrame.append was removed in pandas 2.0; concat preserves the
        # same away-then-home row order.
        all_games = pd.concat([a_games, h_games])
        all_games['temp_days'] = [(pd.to_datetime(curr_dt) - pd.to_datetime(x)).days
                                  for x in all_games[self.date_col]]
        all_games = all_games[all_games['temp_days'] <= 30]
        all_games = all_games.drop('temp_days', axis=1)
        all_games = all_games.sort_values(by=self.date_col, ascending=False)
        n_games = all_games.head(n)
        return n_games

    def get_avg(self, games, col, team_id, opp):
        '''Average of `col` over `games` for `team_id`; opp=1 averages the
        opponents' side instead. Returns -1 when `games` is empty.'''
        h_games = games[games[self.h_col] == team_id]
        a_games = games[games[self.a_col] == team_id]
        if opp == 0:
            a_col = 'A_' + col
            h_col = 'H_' + col
        else:
            a_col = 'H_' + col
            h_col = 'A_' + col
        h_sum = np.sum(h_games[h_col])
        a_sum = np.sum(a_games[a_col])
        if len(games) == 0:
            return -1  # sentinel for "no data"
        avg = (h_sum + a_sum) * 1.0 / (len(games))
        return avg

    def back_to_back(self, games, curr_dt):
        '''1 if the latest game in `games` was exactly one day before curr_dt.'''
        if len(games) == 0:
            return 0
        latest_game = games.sort_values(by=self.date_col, ascending=False).head(1).reset_index(drop=True)
        # .ix was removed in pandas 1.0; .at is the scalar accessor here.
        latest_date = latest_game.at[0, self.date_col]
        if (pd.to_datetime(curr_dt) - pd.to_datetime(latest_date)).days == 1:
            return 1
        return 0

    def get_lastn_stats(self, n):
        '''Build the per-game feature frame: last-n-game averages for every
        H_-prefixed stat column, back-to-back flags, game counts and
        pass-through seg_vars.'''
        stats = pd.DataFrame()
        # H_-prefixed stat columns are the same for every game row; hoist.
        poss_cols = self.search_for_cols('H_', self.games.columns.values)
        for index, game in self.games.iterrows():
            # set_value was removed in pandas 1.0; .loc enlarges in place.
            stats.loc[index, self.outcome_col] = game[self.outcome_col]
            a_team = game[self.a_col]
            a_games = self.get_last_n_games(n, a_team, game[self.date_col])
            h_team = game[self.h_col]
            h_games = self.get_last_n_games(n, h_team, game[self.date_col])
            for col in poss_cols:
                base_col = col[2:]
                stats.loc[index, 'H_' + base_col + '_' + str(n)] = self.get_avg(h_games, base_col, h_team, 0)
                stats.loc[index, 'H_O_' + base_col + '_' + str(n)] = self.get_avg(h_games, base_col, h_team, 1)
                stats.loc[index, 'A_' + base_col + '_' + str(n)] = self.get_avg(a_games, base_col, a_team, 0)
                stats.loc[index, 'A_O_' + base_col + '_' + str(n)] = self.get_avg(a_games, base_col, a_team, 1)
            stats.loc[index, 'H_BTB'] = self.back_to_back(h_games, game[self.date_col])
            stats.loc[index, 'A_BTB'] = self.back_to_back(a_games, game[self.date_col])
            stats.loc[index, 'H_' + str(n) + '_games'] = len(h_games)
            stats.loc[index, 'A_' + str(n) + '_games'] = len(a_games)
            for col in self.seg_vars:
                stats.loc[index, col] = game[col]
        return stats

    def search_for_cols(self, pfx, cols):
        '''Return the columns starting with `pfx`, excluding team id columns.'''
        new_cols = []
        pfx_len = len(pfx)
        for col in cols:
            if col[0:pfx_len] == pfx:
                # exclude the team id columns themselves
                if col != self.h_col:
                    if col != self.a_col:
                        new_cols.append(col)
        return new_cols

    def get_correl(self, stats):
        '''Correlation of every feature column with the outcome column.'''
        cor = pd.DataFrame()
        for col in stats.columns.values:
            if col != self.outcome_col:
                cor.loc[col, 'Correlation'] = np.corrcoef(x=stats[col], y=stats[self.outcome_col])[0, 1]
        return cor
|
[
"pandas.DataFrame",
"numpy.corrcoef",
"pandas.to_datetime",
"numpy.sum"
] |
[((1583, 1605), 'numpy.sum', 'np.sum', (['h_games[h_col]'], {}), '(h_games[h_col])\n', (1589, 1605), True, 'import numpy as np\n'), ((1622, 1644), 'numpy.sum', 'np.sum', (['a_games[a_col]'], {}), '(a_games[a_col])\n', (1628, 1644), True, 'import numpy as np\n'), ((2182, 2196), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2194, 2196), True, 'import pandas as pd\n'), ((4044, 4058), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4056, 4058), True, 'import pandas as pd\n'), ((915, 938), 'pandas.to_datetime', 'pd.to_datetime', (['curr_dt'], {}), '(curr_dt)\n', (929, 938), True, 'import pandas as pd\n'), ((941, 958), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {}), '(x)\n', (955, 958), True, 'import pandas as pd\n'), ((2027, 2050), 'pandas.to_datetime', 'pd.to_datetime', (['curr_dt'], {}), '(curr_dt)\n', (2041, 2050), True, 'import pandas as pd\n'), ((2053, 2080), 'pandas.to_datetime', 'pd.to_datetime', (['latest_date'], {}), '(latest_date)\n', (2067, 2080), True, 'import pandas as pd\n'), ((4190, 4242), 'numpy.corrcoef', 'np.corrcoef', ([], {'x': 'stats[col]', 'y': 'stats[self.outcome_col]'}), '(x=stats[col], y=stats[self.outcome_col])\n', (4201, 4242), True, 'import numpy as np\n')]
|
'''
Based on:
Gravity Turn Maneuver with direct multiple shooting using CVodes
(c) <NAME>
https://mintoc.de/index.php/Gravity_Turn_Maneuver_(Casadi)
https://github.com/zegkljan/kos-stuff/tree/master/non-kos-tools/gturn
----------------------------------------------------------------
'''
import sys
from pathlib import Path
import casadi as cs
import numpy as np
import pandas as pd
from rocket_input import read_rocket_config
# noinspection PyPep8Naming
def compute_gravity_turn(m0, m1, g0, r0, Isp0, Isp1, Fmax, cd, A, H, rho, h_obj,
                         v_obj, q_obj, N=300, vel_eps=1e-3):
    '''
    Computes gravity turn profile by direct multiple shooting (CVODES
    integrator inside an IPOPT NLP), minimizing the fuel fraction spent.

    :params:
        m0: wet (launch) mass (kg or ton)
        m1: dry mass (kg or ton)
        g0: gravitational acceleration at zero altitude (m * s^-2 or km * s^-2)
        r0: "orbit" radius at zero altitude (body radius) (m or km)
        Isp0: specific impulse of the engine(s) at zero altitude (s)
        Isp1: specific impulse of the engine(s) in vacuum (s)
        Fmax: maximum thrust of the engine(s) (N or MN)
        cd: drag coefficient
        A: reference area of the vehicle (m^2)
        H: scale height of the atmosphere (m or km)
        rho: density of the atmosphere at zero altitude (kg * m^-3)
        h_obj: target altitude (m or km)
        v_obj: target velocity (m * s^-1 or km * s^-1)
        q_obj: target angle to vertical (rad)
        N: number of shooting interval
        vel_eps: initial velocity (must be nonzero, e.g. a very small number)
            (m * s^-1 or km * s^-1)
    :returns:
        a dictionary with results (time/mass/vel/alt/control/hor_angle/
        ver_angle arrays), or None when IPOPT reports invalid numbers
    '''
    # Create symbolic variables.
    # State vector x = [m, v, q, h, d]: mass, speed, angle to vertical,
    # altitude, downrange angle (see the usages of x[0]..x[4] below).
    x = cs.SX.sym('[m, v, q, h, d]')  # Vehicle state
    u = cs.SX.sym('u')  # Vehicle controls (throttle fraction, 0..1)
    T = cs.SX.sym('T')  # Time horizon (s) — optimized as a parameter
    # Introduce symbolic expressions for important composite terms
    Fthrust = Fmax * u
    Fdrag = 0.5 * A * cd * rho * cs.exp(-x[3] / H) * x[1] ** 2  # exponential atmosphere
    r = x[3] + r0
    g = g0 * (r0 / r) ** 2  # inverse-square gravity
    vhor = x[1] * cs.sin(x[2])
    vver = x[1] * cs.cos(x[2])
    Isp = Isp1 + (Isp0 - Isp1) * cs.exp(-x[3] / H)  # Isp blends with altitude
    # Build symbolic expressions for ODE right hand side
    mdot = -(Fthrust / (Isp * g0))
    vdot = (Fthrust - Fdrag) / x[0] - g * cs.cos(x[2])
    hdot = vver
    ddot = vhor / r
    qdot = g * cs.sin(x[2]) / x[1] - ddot
    # Build the DAE function.
    # Time is normalized to [0, 1]: dynamics are scaled by T, so each of the
    # N integrator steps of length 1/N covers T/N seconds of real time.
    ode = [mdot, vdot, qdot, hdot, ddot]
    quad = u
    dae = {'x': x, 'p': cs.vertcat(u, T), 'ode': T * cs.vertcat(*ode), 'quad': T * quad}
    I = cs.integrator(
        'I', 'cvodes', dae,
        {'t0': 0.0, 'tf': 1.0 / N, 'nonlinear_solver_iteration': 'functional'}
    )
    # Specify upper and lower bounds as well as initial values for DAE
    # parameters, states and controls
    p_min = [0.0]
    p_max = [600.0]
    p_init = [300.0]
    u_min = [0.0]
    u_max = [1.0]
    u_init = [0.5]
    x0_min = [m0, vel_eps, 0.0, 0.0, 0.0]
    x0_max = [m0, vel_eps, 0.5 * cs.pi, 0.0, 0.0]
    x0_init = [m0, vel_eps, 0.05 * cs.pi, 0.0, 0.0]
    xf_min = [m1, v_obj, q_obj, h_obj, 0.0]
    xf_max = [m0, v_obj, q_obj, h_obj, cs.inf]
    xf_init = [m1, v_obj, q_obj, h_obj, 0.0]
    x_min = [m1, vel_eps, 0.0, 0.0, 0.0]
    x_max = [m0, cs.inf, cs.pi, cs.inf, cs.inf]
    x_init = [0.5 * (m0 + m1), 0.5 * v_obj, 0.5 * q_obj, 0.5 * h_obj, 0.0]
    # Useful variable block sizes
    npars = 1  # Number of parameters
    nx = x.size1()  # Number of states
    nu = u.size1()  # Number of controls
    ns = nx + nu  # Number of variables per shooting interval
    # Introduce symbolic variables and disassemble them into blocks:
    # decision vector V = [T, x_0, u_0, x_1, u_1, ..., u_{N-1}, x_N]
    V = cs.MX.sym('X', N * ns + nx + npars)
    P = V[0]
    X = [V[(npars + i * ns):(npars + i * ns + nx)] for i in range(0, N + 1)]
    U = [V[(npars + i * ns + nx):(npars + (i + 1) * ns)] for i in range(0, N)]
    # Nonlinear constraints and Lagrange objective
    G = []
    F = 0.0
    # Build DMS structure: continuity constraints between shooting intervals
    # plus a linearly interpolated initial guess for each interval's state.
    x0 = p_init + x0_init
    for i in range(0, N):
        Y = I(x0=X[i], p=cs.vertcat(U[i], P))
        G += [Y['xf'] - X[i + 1]]
        F = F + Y['qf']
        frac = float(i + 1) / N
        # NOTE(review): the comprehension's `i` shadows the loop `i` only
        # inside the comprehension (Python 3 scoping) — the loop is unaffected.
        x0 = x0 + u_init + [x0_init[i] + frac * (xf_init[i] - x0_init[i])
                            for i in range(0, nx)]
    # Lower and upper bounds for solver
    lbg = 0.0
    ubg = 0.0
    lbx = p_min + x0_min + u_min + (N - 1) * (x_min + u_min) + xf_min
    ubx = p_max + x0_max + u_max + (N - 1) * (x_max + u_max) + xf_max
    # Solve the problem using IPOPT; objective is the fuel fraction burned.
    nlp = {'x': V, 'f': (m0 - X[-1][0]) / (m0 - m1), 'g': cs.vertcat(*G)}
    S = cs.nlpsol(
        'S', 'ipopt', nlp, {'ipopt': {'tol': 1e-4, 'print_level': 5, 'max_iter': 500}}
    )
    r = S(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
    print('RESULT: {}'.format(S.stats()['return_status']))
    if S.stats()['return_status'] in {'Invalid_Number_Detected'}:
        return None
    # Extract state sequences and parameters from result
    # (strided slices pick each state component out of the flat vector V).
    x = r['x']
    f = r['f']
    T = float(x[0])
    t = np.linspace(0, T, N + 1)
    m = np.array(x[npars::ns]).squeeze()
    v = np.array(x[npars + 1::ns]).squeeze()
    q = np.array(x[npars + 2::ns]).squeeze()
    h = np.array(x[npars + 3::ns]).squeeze()
    d = np.array(x[npars + 4::ns]).squeeze()
    u = np.concatenate((np.array(x[npars + nx::ns]).squeeze(), [0.0]))
    return {
        'time': t,
        'mass': m,
        'vel': v,
        'alt': h,
        'control': u,
        'hor_angle': d,
        'ver_angle': q
    }
def main(config_file):
    """Read the rocket configuration, run the gravity-turn optimization and
    write the resulting trajectory table to an Excel file."""
    config = read_rocket_config(config_file)
    rocket_params, environment_params, model_params, io_params = config

    # Vehicle parameters
    m0 = rocket_params.fuel_mass + rocket_params.dry_mass  # Launch mass (kg or ton)
    m1 = rocket_params.dry_mass         # Dry mass (kg or ton)
    Isp0 = rocket_params.motor_isp0     # Specific impulse at zero altitude (s)
    Isp1 = rocket_params.motor_isp1     # Specific impulse at vacuum (s)
    A = rocket_params.rocket_area       # Reference area (m^2)
    Fmax = rocket_params.max_thrust     # Maximum thrust (N or MN)
    vel_eps = rocket_params.vel         # Initial velocity (m/s or km/s)

    # Environmental parameters
    g0 = environment_params.gravity            # Gravity at altitude zero
    r0 = environment_params.radius             # Radius at altitude zero
    cd = environment_params.drag_coefficient   # Drag coefficient
    H = environment_params.scale_height        # Scale height
    rho = environment_params.density           # Density at altitude zero

    # Model and target orbit parameters
    N = model_params.N                         # Number of shooting intervals
    h_obj = model_params.h_obj                 # Target altitude
    v_obj = model_params.v_obj                 # Target velocity
    q_obj = model_params.q_obj / 180 * cs.pi   # Target angle (deg -> rad)
    model_file = model_params.model_file       # Output spreadsheet path

    result = compute_gravity_turn(
        m0, m1, g0, r0, Isp0, Isp1, Fmax,
        cd, A, H, rho, h_obj,
        v_obj, q_obj, N=N, vel_eps=vel_eps
    )
    result_df = pd.DataFrame(result)
    result_df.to_excel(model_file, index=False)
    print(result_df.head())
if __name__ == '__main__':
    # Use the single CLI argument as the config path; the 'None' placeholder
    # simply fails the is_file() check below when no argument is given.
    arg = sys.argv[1] if len(sys.argv) == 2 else 'None'
    config_file_name = Path(arg)
    if not config_file_name.is_file():
        print(f'incorrect config file: {config_file_name}')
        exit()
    main(config_file_name)
|
[
"pandas.DataFrame",
"casadi.nlpsol",
"casadi.SX.sym",
"casadi.exp",
"casadi.integrator",
"casadi.cos",
"casadi.sin",
"rocket_input.read_rocket_config",
"casadi.vertcat",
"pathlib.Path",
"numpy.array",
"casadi.MX.sym",
"numpy.linspace"
] |
[((1656, 1684), 'casadi.SX.sym', 'cs.SX.sym', (['"""[m, v, q, h, d]"""'], {}), "('[m, v, q, h, d]')\n", (1665, 1684), True, 'import casadi as cs\n'), ((1710, 1724), 'casadi.SX.sym', 'cs.SX.sym', (['"""u"""'], {}), "('u')\n", (1719, 1724), True, 'import casadi as cs\n'), ((1753, 1767), 'casadi.SX.sym', 'cs.SX.sym', (['"""T"""'], {}), "('T')\n", (1762, 1767), True, 'import casadi as cs\n'), ((2507, 2616), 'casadi.integrator', 'cs.integrator', (['"""I"""', '"""cvodes"""', 'dae', "{'t0': 0.0, 'tf': 1.0 / N, 'nonlinear_solver_iteration': 'functional'}"], {}), "('I', 'cvodes', dae, {'t0': 0.0, 'tf': 1.0 / N,\n 'nonlinear_solver_iteration': 'functional'})\n", (2520, 2616), True, 'import casadi as cs\n'), ((3601, 3636), 'casadi.MX.sym', 'cs.MX.sym', (['"""X"""', '(N * ns + nx + npars)'], {}), "('X', N * ns + nx + npars)\n", (3610, 3636), True, 'import casadi as cs\n'), ((4550, 4645), 'casadi.nlpsol', 'cs.nlpsol', (['"""S"""', '"""ipopt"""', 'nlp', "{'ipopt': {'tol': 0.0001, 'print_level': 5, 'max_iter': 500}}"], {}), "('S', 'ipopt', nlp, {'ipopt': {'tol': 0.0001, 'print_level': 5,\n 'max_iter': 500}})\n", (4559, 4645), True, 'import casadi as cs\n'), ((4968, 4992), 'numpy.linspace', 'np.linspace', (['(0)', 'T', '(N + 1)'], {}), '(0, T, N + 1)\n', (4979, 4992), True, 'import numpy as np\n'), ((5572, 5603), 'rocket_input.read_rocket_config', 'read_rocket_config', (['config_file'], {}), '(config_file)\n', (5590, 5603), False, 'from rocket_input import read_rocket_config\n'), ((7219, 7239), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (7231, 7239), True, 'import pandas as pd\n'), ((7465, 7487), 'pathlib.Path', 'Path', (['config_file_name'], {}), '(config_file_name)\n', (7469, 7487), False, 'from pathlib import Path\n'), ((2005, 2017), 'casadi.sin', 'cs.sin', (['x[2]'], {}), '(x[2])\n', (2011, 2017), True, 'import casadi as cs\n'), ((2036, 2048), 'casadi.cos', 'cs.cos', (['x[2]'], {}), '(x[2])\n', (2042, 2048), True, 'import casadi as cs\n'), ((2434, 
2450), 'casadi.vertcat', 'cs.vertcat', (['u', 'T'], {}), '(u, T)\n', (2444, 2450), True, 'import casadi as cs\n'), ((4526, 4540), 'casadi.vertcat', 'cs.vertcat', (['*G'], {}), '(*G)\n', (4536, 4540), True, 'import casadi as cs\n'), ((1912, 1929), 'casadi.exp', 'cs.exp', (['(-x[3] / H)'], {}), '(-x[3] / H)\n', (1918, 1929), True, 'import casadi as cs\n'), ((2082, 2099), 'casadi.exp', 'cs.exp', (['(-x[3] / H)'], {}), '(-x[3] / H)\n', (2088, 2099), True, 'import casadi as cs\n'), ((2235, 2247), 'casadi.cos', 'cs.cos', (['x[2]'], {}), '(x[2])\n', (2241, 2247), True, 'import casadi as cs\n'), ((2463, 2479), 'casadi.vertcat', 'cs.vertcat', (['*ode'], {}), '(*ode)\n', (2473, 2479), True, 'import casadi as cs\n'), ((5001, 5023), 'numpy.array', 'np.array', (['x[npars::ns]'], {}), '(x[npars::ns])\n', (5009, 5023), True, 'import numpy as np\n'), ((5042, 5068), 'numpy.array', 'np.array', (['x[npars + 1::ns]'], {}), '(x[npars + 1::ns])\n', (5050, 5068), True, 'import numpy as np\n'), ((5087, 5113), 'numpy.array', 'np.array', (['x[npars + 2::ns]'], {}), '(x[npars + 2::ns])\n', (5095, 5113), True, 'import numpy as np\n'), ((5132, 5158), 'numpy.array', 'np.array', (['x[npars + 3::ns]'], {}), '(x[npars + 3::ns])\n', (5140, 5158), True, 'import numpy as np\n'), ((5177, 5203), 'numpy.array', 'np.array', (['x[npars + 4::ns]'], {}), '(x[npars + 4::ns])\n', (5185, 5203), True, 'import numpy as np\n'), ((2299, 2311), 'casadi.sin', 'cs.sin', (['x[2]'], {}), '(x[2])\n', (2305, 2311), True, 'import casadi as cs\n'), ((3985, 4004), 'casadi.vertcat', 'cs.vertcat', (['U[i]', 'P'], {}), '(U[i], P)\n', (3995, 4004), True, 'import casadi as cs\n'), ((5238, 5265), 'numpy.array', 'np.array', (['x[npars + nx::ns]'], {}), '(x[npars + nx::ns])\n', (5246, 5265), True, 'import numpy as np\n')]
|
# Recognise Faces using some classification algorithm - like Logistic, KNN, SVM etc.
# 1. load the training data (numpy arrays of all the persons)
# x- values are stored in the numpy arrays
# y-values we need to assign for each person
# 2. Read a video stream using opencv
# 3. extract faces out of it
# 4. use knn to find the prediction of face (int)
# 5. map the predicted id to name of the user
# 6. Display the predictions on the screen - bounding box and name
import cv2
import numpy as np
import os
from datetime import datetime
import time
########## KNN CODE ############
def distance(v1, v2):
    """Euclidean distance between two equal-length numpy vectors."""
    diff = v1 - v2
    return np.sqrt((diff ** 2).sum())
def markAttendence(name):
    """Record *name* as present in present.csv with a timestamp.

    Fix: the original read only the first line with ``readline()`` and then
    iterated over that string character-by-character, so the parsed name
    list contained single characters and every name was re-appended on each
    call.  Now every line is read and its first comma-separated field is
    taken as the student name.
    """
    with open('present.csv', 'r+') as f:
        lines = f.readlines()
        print(lines)
        # First CSV field of each non-empty line is a student name.
        nameList = [line.split(',')[0].strip() for line in lines if line.strip()]
        if name not in nameList:
            now = datetime.now()
            dtString = now.strftime('%H:%M:%S')
            f.writelines(f'\nthe present students are : \n{name},{dtString}')
def maarkattndnce(namees):
    # Write the list of absent students to absent.csv.
    # NOTE(review): relies on the module-level globals `total_student_in_class`
    # and `class_total_present`, which are only assigned inside the capture
    # loop at the bottom of this script — calling this before the loop has run
    # raises NameError.
    # NOTE(review): opening with 'r+' requires absent.csv to already exist;
    # presumably it is pre-created like present.csv — confirm.
    with open('absent.csv', 'r+') as f:
        absstuds = []
        # Collect every enrolled student who is not in the present list.
        for nam in total_student_in_class:
            if nam not in class_total_present:
                entry = nam.split(',')
                absstuds.append(entry[0])
        # Append the whole absentee list only when `namees` is not already in it.
        if namees not in absstuds:
            f.writelines(f'\nabsent students are : \n{absstuds}')
def knn(train, test, k=5):
    """Classify *test* by majority vote among its k nearest rows of *train*.

    Each training row is laid out as [features..., label]; the last column
    is the class label.  Returns the most frequent label among the k
    nearest neighbours (Euclidean distance).
    """
    dists = []
    for row_idx in range(train.shape[0]):
        features = train[row_idx, :-1]
        label = train[row_idx, -1]
        # Euclidean distance to the query point (inlined helper).
        diff = features - test
        d = np.sqrt((diff ** 2).sum())
        dists.append([d, label])
    # Keep only the k closest neighbours.
    nearest = sorted(dists, key=lambda pair: pair[0])[:k]
    top_labels = np.array(nearest)[:, -1]
    # Majority vote over the neighbour labels.
    values, counts = np.unique(top_labels, return_counts=True)
    return values[np.argmax(counts)]
################################
# --- Script entry: live face-recognition attendance via webcam + KNN ---
# Init Camera
cap = cv2.VideoCapture(0)
# Face Detection
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
skip = 0
dataset_path = "C:/Users/Samarth/Desktop/knn/data/"
face_data = []
number = []
labels = []
class_id = 0  # Labels for the given file
names = {}  # Mapping btw id - name
# NOTE(review): `skip`, `number` and `appn` (below) are assigned but never
# read anywhere in this file.
# Data Preparation: every .npy file in dataset_path is one person's face data.
for fx in os.listdir(dataset_path):
    if fx.endswith('.npy'):
        # Create a mapping btw class_id and name
        names[class_id] = fx[:-4]
        print("Loaded " + fx)
        data_item = np.load(dataset_path + fx)
        face_data.append(data_item)
        # Create Labels for the class
        target = class_id * np.ones((data_item.shape[0],))
        class_id += 1
        labels.append(target)
# Stack all faces and labels into one [features..., label] training matrix.
face_dataset = np.concatenate(face_data, axis=0)
face_labels = np.concatenate(labels, axis=0).reshape((-1, 1))
print(face_dataset.shape)
print(face_labels.shape)
trainset = np.concatenate((face_dataset, face_labels), axis=1)
print(trainset.shape)
# Testing
attn = []
appn = []
# Main capture loop: classify every detected face in each webcam frame.
while True:
    ret, frame = cap.read()
    if ret == False:
        continue
    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
    if (len(faces) == 0):
        continue
    for face in faces:
        x, y, w, h = face
        # Get the face ROI
        offset = 10
        face_section = frame[y - offset:y + h + offset, x - offset:x + w + offset]
        face_section = cv2.resize(face_section, (100, 100))
        # Predicted Label (out)
        out = knn(trainset, face_section.flatten())
        # Display on the screen the name and rectangle around it
        pred_name = names[int(out)]
        cv2.putText(frame, pred_name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
        # Only handle each recognised person once; `continue` skips the
        # attendance bookkeeping (and imshow) for already-seen names.
        if pred_name not in attn:
            attn.append(pred_name)
        else:
            continue
        markAttendence(pred_name)
        cv2.imshow("Faces", frame)
        # NOTE(review): the roster is rebuilt from the data directory on
        # every newly recognised face; this also defines the globals that
        # maarkattndnce() reads.
        path = "C:/Users/Samarth/Desktop/knn/data/"
        images = []  # LIST CONTAINING ALL THE IMAGES
        className = []  # LIST CONTAINING ALL THE CORRESPONDING CLASS Names
        myList = os.listdir(path)
        for cl in myList:
            curImg = cv2.imread(f'{path}/{cl}')
            images.append(curImg)
            className.append(os.path.splitext(cl)[0])
        total_student_in_class = list(className)  # the total students in this class
        print(total_student_in_class)
        class_total_present = list(attn)
        #print(attn)
        res_list = []
        for i in total_student_in_class:
            if i not in class_total_present:
                res_list.append(i)
                print(res_list)
                maarkattndnce(i)
        # ai = tuple(total_student_in_class) #name of all the students as a tuple
        #print(ai)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.resize",
"numpy.load",
"cv2.putText",
"numpy.argmax",
"cv2.waitKey",
"numpy.unique",
"cv2.imshow",
"numpy.ones",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.imread",
"numpy.array",
"os.path.splitext",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows",
"datetime.datetime.now",
"os.listdir",
"numpy.concatenate"
] |
[((2298, 2317), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2314, 2317), False, 'import cv2\n'), ((2354, 2410), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt.xml"""'], {}), "('haarcascade_frontalface_alt.xml')\n", (2375, 2410), False, 'import cv2\n'), ((2635, 2659), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (2645, 2659), False, 'import os\n'), ((3066, 3099), 'numpy.concatenate', 'np.concatenate', (['face_data'], {'axis': '(0)'}), '(face_data, axis=0)\n', (3080, 3099), True, 'import numpy as np\n'), ((3232, 3283), 'numpy.concatenate', 'np.concatenate', (['(face_dataset, face_labels)'], {'axis': '(1)'}), '((face_dataset, face_labels), axis=1)\n', (3246, 3283), True, 'import numpy as np\n'), ((5246, 5269), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5267, 5269), False, 'import cv2\n'), ((2083, 2120), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (2092, 2120), True, 'import numpy as np\n'), ((2184, 2204), 'numpy.argmax', 'np.argmax', (['output[1]'], {}), '(output[1])\n', (2193, 2204), True, 'import numpy as np\n'), ((4303, 4329), 'cv2.imshow', 'cv2.imshow', (['"""Faces"""', 'frame'], {}), "('Faces', frame)\n", (4313, 4329), False, 'import cv2\n'), ((4519, 4535), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4529, 4535), False, 'import os\n'), ((2010, 2022), 'numpy.array', 'np.array', (['dk'], {}), '(dk)\n', (2018, 2022), True, 'import numpy as np\n'), ((2827, 2853), 'numpy.load', 'np.load', (['(dataset_path + fx)'], {}), '(dataset_path + fx)\n', (2834, 2853), True, 'import numpy as np\n'), ((3115, 3145), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (3129, 3145), True, 'import numpy as np\n'), ((3741, 3777), 'cv2.resize', 'cv2.resize', (['face_section', '(100, 100)'], {}), '(face_section, (100, 100))\n', (3751, 3777), False, 'import cv2\n'), 
((3980, 4085), 'cv2.putText', 'cv2.putText', (['frame', 'pred_name', '(x, y - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(frame, pred_name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 0, 0), 2, cv2.LINE_AA)\n', (3991, 4085), False, 'import cv2\n'), ((4090, 4152), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)\n', (4103, 4152), False, 'import cv2\n'), ((4579, 4605), 'cv2.imread', 'cv2.imread', (['f"""{path}/{cl}"""'], {}), "(f'{path}/{cl}')\n", (4589, 4605), False, 'import cv2\n'), ((5166, 5180), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5177, 5180), False, 'import cv2\n'), ((1069, 1083), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1081, 1083), False, 'from datetime import datetime\n'), ((2961, 2991), 'numpy.ones', 'np.ones', (['(data_item.shape[0],)'], {}), '((data_item.shape[0],))\n', (2968, 2991), True, 'import numpy as np\n'), ((4663, 4683), 'os.path.splitext', 'os.path.splitext', (['cl'], {}), '(cl)\n', (4679, 4683), False, 'import os\n')]
|
#!/usr/bin/env python
"""
PyTorch datasets and data augmenters
"""
###########
# Imports #
###########
import cv2
import numpy as np
import os
import random
import torch
from PIL import Image, ImageFilter
from torch.utils import data
from torchvision import transforms
#############
# Functions #
#############
def to_pil(tensor):
    '''Converts a tensor to a PIL image.

    Thin wrapper around torchvision's functional conversion.
    '''
    image = transforms.functional.to_pil_image(tensor)
    return image
def to_tensor(pic):
    '''Converts a PIL image to a tensor.

    Thin wrapper around torchvision's functional conversion.
    '''
    tensor = transforms.functional.to_tensor(pic)
    return tensor
def to_mask(shape, polygons):
    '''Builds a mask based on polygon annotations.

    Rasterizes every polygon (filled, value 255) onto a zeroed uint8
    canvas of the given shape and returns it as a PIL image.
    '''
    canvas = np.zeros(shape, dtype=np.uint8)
    outlines = [np.array(polygon, dtype=int) for polygon in polygons]
    # thickness=-1 fills the polygons instead of drawing their borders.
    cv2.drawContours(canvas, outlines, -1, color=255, thickness=-1)
    return Image.fromarray(canvas)
def to_contours(mask):
    '''Converts a mask into OpenCV contours.

    Only external contours are reported, with redundant points compressed
    (cv2.CHAIN_APPROX_SIMPLE).
    '''
    array = np.array(mask)
    found, _hierarchy = cv2.findContours(array, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return found
def clusterize(polygons, size):
    '''Clusterize polygons.

    Maps each polygon to every square grid cell (of side `size`) that its
    bounding box overlaps. Keys are the cells' top-left corners
    `(x * size, y * size)`; values are lists of the original polygons.
    '''
    grid = {}
    for poly in polygons:
        pts = np.array(poly).astype(int)
        # Grid-cell range covered by the polygon's bounding box.
        x_lo = np.amin(pts[:, 0]) // size
        x_hi = np.amax(pts[:, 0]) // size
        y_lo = np.amin(pts[:, 1]) // size
        y_hi = np.amax(pts[:, 1]) // size
        for gx in range(x_lo, x_hi + 1):
            for gy in range(y_lo, y_hi + 1):
                cell = (gx * size, gy * size)
                grid.setdefault(cell, []).append(poly)
    return grid
###########
# Classes #
###########
class VIADataset(data.IterableDataset):
	'''Iterable VIA dataset.

	Streams (image, mask) pairs built from VGG Image Annotator (VIA)
	polygon annotations. Depending on `size`/`full`, iteration yields
	either whole images or square crops of side `size`.

	:param via: dict mapping image file names to lists of polygons
	:param path: directory prepended to each file name
	:param size: side of the square crops; None yields whole images
	:param shuffle: randomize image (and crop) order
	:param shift: maximum random offset (px) applied to each crop origin
	:param full: yield every non-overlapping crop of each image
	:param alt: number of extra random crops yielded per image
	'''
	def __init__(self, via, path='./', size=256, shuffle=False, shift=0, full=False, alt=0):
		self.via = {}
		self.masks = {}
		self.clusters = {}
		self.size = size
		for key, polygons in via.items():
			imagename = os.path.join(path, key)
			# Silently skip annotations whose image file is missing on disk.
			if os.path.exists(imagename):
				image = Image.open(imagename)
				self.via[imagename] = polygons
				# Rasterize the polygons once; crops reuse this mask.
				self.masks[imagename] = to_mask((image.height, image.width), polygons)
				if self.size is not None:
					# Pre-compute which grid cells contain annotations.
					self.clusters[imagename] = clusterize(polygons, self.size)
		self.shuffle = shuffle # random order
		self.shift = shift # random shift
		self.full = full # all sub-images
		self.alt = alt # alternate
	def __len__(self):
		# Number of items one full iteration yields (see __iter__).
		if self.size is None:
			return len(self.via)
		elif self.full:
			s = 0
			for imagename in self.via:
				image = Image.open(imagename)
				s += (image.width // self.size) * (image.height // self.size)
			return s
		else:
			# One crop per annotated cell, plus `alt` random crops per cell.
			return sum(map(len, self.clusters.values())) * (1 + self.alt)
	def __iter__(self):
		images = random.sample(
			self.via.keys(),
			len(self.via)
		) if self.shuffle else self.via.keys()
		for imagename in images:
			image = Image.open(imagename).convert('RGB')
			mask = self.masks[imagename]
			if self.size is None:
				# Whole image and its full mask.
				yield image, mask
			elif self.full:
				# Exhaustive non-overlapping tiling of the image.
				for left in np.arange(0, image.width, self.size):
					for upper in np.arange(0, image.height, self.size):
						box = (left, upper, left + self.size, upper + self.size)
						yield image.crop(box), mask.crop(box)
			else:
				# One crop per annotated grid cell.
				clusters = list(self.clusters[imagename].keys())
				if self.shuffle:
					random.shuffle(clusters)
				for left, upper in clusters:
					# Shift
					if self.shift > 0:
						left += random.randint(-self.shift, self.shift)
						upper += random.randint(-self.shift, self.shift)
					# Out of bounds
					left = min(left, image.width - self.size)
					upper = min(upper, image.height - self.size)
					box = (left, upper, left + self.size, upper + self.size)
					yield image.crop(box), mask.crop(box)
				# Alternate with random images
				for _ in range(self.alt):
					left = random.randrange(image.width - self.size)
					upper = random.randrange(image.height - self.size)
					box = (left, upper, left + self.size, upper + self.size)
					yield image.crop(box), mask.crop(box)
class RandomChoice(data.IterableDataset):
    '''Apply a randomly picked transformation to each pair (input, target).

    :param dataset: iterable of (input, target) pairs
    :param transforms: callables, one of which is drawn per pair
    :param input_only: if True, the target is passed through untouched
    '''
    def __init__(self, dataset, transforms, input_only=False):
        super().__init__()
        self.dataset = dataset
        self.transforms = transforms
        self.input_only = input_only

    def __len__(self):
        return len(self.dataset)

    def __iter__(self):
        for x, y in self.dataset:
            # One transform is drawn per pair so input and target stay aligned.
            transform = random.choice(self.transforms)
            out_x = transform(x)
            out_y = y if self.input_only else transform(y)
            yield out_x, out_y
class ColorJitter(RandomChoice):
    '''Color jitter.

    Randomly perturbs brightness/contrast/saturation/hue of the input
    image only; the target mask is left untouched.
    '''
    def __init__(self, dataset, brightness=0.25, contrast=0.33, saturation=0.33, hue=0):
        jitter = transforms.ColorJitter(brightness, contrast, saturation, hue)
        super().__init__(
            dataset=dataset,
            transforms=[jitter],
            input_only=True
        )
class RandomFilter(RandomChoice):
    '''Random image filter.

    Applies one randomly chosen PIL filter (or none) to the input image
    only; the target is left untouched.
    '''
    def __init__(self, dataset):
        kernels = [
            ImageFilter.BLUR,
            ImageFilter.DETAIL,
            ImageFilter.EDGE_ENHANCE,
            ImageFilter.SMOOTH,
            ImageFilter.SHARPEN,
        ]
        # Identity first, then one entry per kernel; bind `k` as a default
        # argument to avoid the late-binding closure pitfall.
        choices = [lambda x: x]
        choices += [lambda x, k=k: x.filter(k) for k in kernels]
        super().__init__(
            dataset=dataset,
            transforms=choices,
            input_only=True
        )
class RandomTranspose(RandomChoice):
    '''Random image transpose.

    Applies one randomly chosen flip/rotation (or none) to BOTH the input
    and the target, keeping them geometrically aligned.
    '''
    def __init__(self, dataset):
        methods = [
            Image.FLIP_LEFT_RIGHT,
            Image.FLIP_TOP_BOTTOM,
            Image.ROTATE_90,
            Image.ROTATE_180,
            Image.ROTATE_270,
            Image.TRANSPOSE,
        ]
        # Identity first, then one entry per transpose method; bind `m` as
        # a default argument to avoid the late-binding closure pitfall.
        choices = [lambda x: x]
        choices += [lambda x, m=m: x.transpose(m) for m in methods]
        super().__init__(
            dataset=dataset,
            transforms=choices,
            input_only=False
        )
class Scale(RandomChoice):
    '''Scale image.

    Resizes both input and target by a fixed factor (dimensions are
    truncated to integers).
    '''
    def __init__(self, dataset, scale):
        def resize(img):
            new_size = (int(img.width * scale), int(img.height * scale))
            return img.resize(new_size)
        super().__init__(
            dataset=dataset,
            transforms=[resize],
            input_only=False
        )
class ToTensor(RandomChoice):
    '''To Tensor.

    Converts both input and target PIL images to tensors.
    '''
    def __init__(self, dataset):
        super().__init__(
            dataset=dataset,
            transforms=[to_tensor],
            input_only=False
        )
########
# Main #
########
if __name__ == '__main__':
    # Imports
    import argparse
    import json
    import via as VIA
    # Command-line arguments
    parser = argparse.ArgumentParser(description='Format California annotations to the VIA format')
    parser.add_argument('-e', '--ext', default='.tif', help='extension of the images')
    parser.add_argument('-o', '--output', default='../products/json/california.json', help='output VIA file')
    parser.add_argument('-p', '--path', default='../resources/california/', help='path to California resources')
    args = parser.parse_args()
    # Load the raw panel polygons
    with open(os.path.join(args.path, 'SolarArrayPolygons.json'), 'r') as f:
        panels = json.load(f)['polygons']
    # Regroup polygons by image file into the VIA format
    via = {}
    for panel in panels:
        polygon = panel['polygon_vertices_pixels']
        # Skip dots and lines (fewer than four vertices)
        if len(polygon) <= 3:
            continue
        filename = panel['image_name'] + args.ext
        via.setdefault(filename, []).append(polygon)
    # Save
    VIA.dump(via, args.output, path=args.path)
|
[
"torchvision.transforms.functional.to_tensor",
"argparse.ArgumentParser",
"numpy.amin",
"random.shuffle",
"numpy.arange",
"os.path.join",
"via.dump",
"random.randint",
"os.path.exists",
"torchvision.transforms.functional.to_pil_image",
"cv2.drawContours",
"torchvision.transforms.ColorJitter",
"json.load",
"numpy.zeros",
"random.choice",
"PIL.Image.open",
"numpy.amax",
"numpy.array",
"random.randrange",
"PIL.Image.fromarray",
"cv2.findContours"
] |
[((387, 429), 'torchvision.transforms.functional.to_pil_image', 'transforms.functional.to_pil_image', (['tensor'], {}), '(tensor)\n', (421, 429), False, 'from torchvision import transforms\n'), ((501, 537), 'torchvision.transforms.functional.to_tensor', 'transforms.functional.to_tensor', (['pic'], {}), '(pic)\n', (532, 537), False, 'from torchvision import transforms\n'), ((685, 716), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.uint8'}), '(shape, dtype=np.uint8)\n', (693, 716), True, 'import numpy as np\n'), ((718, 779), 'cv2.drawContours', 'cv2.drawContours', (['mask', 'contours', '(-1)'], {'color': '(255)', 'thickness': '(-1)'}), '(mask, contours, -1, color=255, thickness=-1)\n', (734, 779), False, 'import cv2\n'), ((789, 810), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (804, 810), False, 'from PIL import Image, ImageFilter\n'), ((889, 903), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (897, 903), True, 'import numpy as np\n'), ((920, 986), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (936, 986), False, 'import cv2\n'), ((6144, 6235), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Format California annotations to the VIA format"""'}), "(description=\n 'Format California annotations to the VIA format')\n", (6167, 6235), False, 'import argparse\n'), ((7005, 7047), 'via.dump', 'VIA.dump', (['via', 'args.output'], {'path': 'args.path'}), '(via, args.output, path=args.path)\n', (7013, 7047), True, 'import via as VIA\n'), ((634, 656), 'numpy.array', 'np.array', (['p'], {'dtype': 'int'}), '(p, dtype=int)\n', (642, 656), True, 'import numpy as np\n'), ((1158, 1177), 'numpy.amin', 'np.amin', (['temp[:, 0]'], {}), '(temp[:, 0])\n', (1165, 1177), True, 'import numpy as np\n'), ((1195, 1214), 'numpy.amax', 'np.amax', (['temp[:, 0]'], {}), '(temp[:, 0])\n', (1202, 1214), True, 'import 
numpy as np\n'), ((1232, 1251), 'numpy.amin', 'np.amin', (['temp[:, 1]'], {}), '(temp[:, 1])\n', (1239, 1251), True, 'import numpy as np\n'), ((1269, 1288), 'numpy.amax', 'np.amax', (['temp[:, 1]'], {}), '(temp[:, 1])\n', (1276, 1288), True, 'import numpy as np\n'), ((1828, 1851), 'os.path.join', 'os.path.join', (['path', 'key'], {}), '(path, key)\n', (1840, 1851), False, 'import os\n'), ((1859, 1884), 'os.path.exists', 'os.path.exists', (['imagename'], {}), '(imagename)\n', (1873, 1884), False, 'import os\n'), ((4311, 4341), 'random.choice', 'random.choice', (['self.transforms'], {}), '(self.transforms)\n', (4324, 4341), False, 'import random\n'), ((6584, 6634), 'os.path.join', 'os.path.join', (['args.path', '"""SolarArrayPolygons.json"""'], {}), "(args.path, 'SolarArrayPolygons.json')\n", (6596, 6634), False, 'import os\n'), ((6658, 6670), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6667, 6670), False, 'import json\n'), ((1118, 1135), 'numpy.array', 'np.array', (['polygon'], {}), '(polygon)\n', (1126, 1135), True, 'import numpy as np\n'), ((1898, 1919), 'PIL.Image.open', 'Image.open', (['imagename'], {}), '(imagename)\n', (1908, 1919), False, 'from PIL import Image, ImageFilter\n'), ((2406, 2427), 'PIL.Image.open', 'Image.open', (['imagename'], {}), '(imagename)\n', (2416, 2427), False, 'from PIL import Image, ImageFilter\n'), ((2744, 2765), 'PIL.Image.open', 'Image.open', (['imagename'], {}), '(imagename)\n', (2754, 2765), False, 'from PIL import Image, ImageFilter\n'), ((2896, 2932), 'numpy.arange', 'np.arange', (['(0)', 'image.width', 'self.size'], {}), '(0, image.width, self.size)\n', (2905, 2932), True, 'import numpy as np\n'), ((4600, 4661), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['brightness', 'contrast', 'saturation', 'hue'], {}), '(brightness, contrast, saturation, hue)\n', (4622, 4661), False, 'from torchvision import transforms\n'), ((2952, 2989), 'numpy.arange', 'np.arange', (['(0)', 'image.height', 'self.size'], {}), 
'(0, image.height, self.size)\n', (2961, 2989), True, 'import numpy as np\n'), ((3187, 3211), 'random.shuffle', 'random.shuffle', (['clusters'], {}), '(clusters)\n', (3201, 3211), False, 'import random\n'), ((3297, 3336), 'random.randint', 'random.randint', (['(-self.shift)', 'self.shift'], {}), '(-self.shift, self.shift)\n', (3311, 3336), False, 'import random\n'), ((3352, 3391), 'random.randint', 'random.randint', (['(-self.shift)', 'self.shift'], {}), '(-self.shift, self.shift)\n', (3366, 3391), False, 'import random\n'), ((3699, 3740), 'random.randrange', 'random.randrange', (['(image.width - self.size)'], {}), '(image.width - self.size)\n', (3715, 3740), False, 'import random\n'), ((3755, 3797), 'random.randrange', 'random.randrange', (['(image.height - self.size)'], {}), '(image.height - self.size)\n', (3771, 3797), False, 'import random\n')]
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Module for creating and defining Quil programs.
"""
import warnings
from itertools import count
from math import pi
import numpy as np
from six import string_types
from pyquil._parser.PyQuilListener import run_parser
from pyquil.kraus import _check_kraus_ops, _create_kraus_pragmas
from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit
from .gates import MEASURE, STANDARD_GATES, H
from .quilbase import (DefGate, Gate, Measurement, Pragma, AbstractInstruction, Qubit,
Jump, Label, JumpConditional, JumpTarget, JumpUnless, JumpWhen, Addr)
class Program(object):
    """An ordered sequence of Quil instructions plus the gate definitions they use."""

    def __init__(self, *instructions):
        self._defined_gates = []
        # Implementation note: the key difference between the private _instructions and the public instructions
        # property below is that the private _instructions list may contain placeholder values
        self._instructions = []
        # Performance optimization: as stated above _instructions may contain placeholder values so the program must
        # first be synthesized. _synthesized_instructions is simply a cache on the result of the _synthesize() method.
        # It is marked as None whenever new instructions are added.
        self._synthesized_instructions = None
        self.inst(*instructions)

    @property
    def defined_gates(self):
        """
        A list of defined gates on the program.
        """
        return self._defined_gates

    @property
    def instructions(self):
        """
        Fill in any placeholders and return a list of quil AbstractInstructions.
        """
        if self._synthesized_instructions is None:
            self._synthesized_instructions = self._synthesize()
        return self._synthesized_instructions

    def inst(self, *instructions):
        """
        Mutates the Program object by appending new instructions.

        This function accepts a number of different valid forms, e.g.

            >>> p = Program()
            >>> p.inst(H(0)) # A single instruction
            >>> p.inst(H(0), H(1)) # Multiple instructions
            >>> p.inst([H(0), H(1)]) # A list of instructions
            >>> p.inst(("H", 1)) # A tuple representing an instruction
            >>> p.inst("H 0") # A string representing an instruction
            >>> q = Program()
            >>> p.inst(q) # Another program

        It can also be chained:
            >>> p = Program()
            >>> p.inst(H(0)).inst(H(1))

        :param instructions: A list of Instruction objects, e.g. Gates
        :return: self for method chaining
        """
        for instruction in instructions:
            if isinstance(instruction, list):
                self.inst(*instruction)
            elif isinstance(instruction, tuple):
                if len(instruction) == 0:
                    raise ValueError("tuple should have at least one element")
                elif len(instruction) == 1:
                    self.inst(instruction[0])
                else:
                    op = instruction[0]
                    if op == "MEASURE":
                        if len(instruction) == 2:
                            self.measure(instruction[1])
                        else:
                            self.measure(instruction[1], instruction[2])
                    else:
                        params = []
                        possible_params = instruction[1]
                        rest = instruction[2:]
                        if isinstance(possible_params, list):
                            params = possible_params
                        else:
                            rest = [possible_params] + list(rest)
                        self.gate(op, params, rest)
            elif isinstance(instruction, string_types):
                self.inst(run_parser(instruction.strip()))
            elif isinstance(instruction, Program):
                if id(self) == id(instruction):
                    raise ValueError("Nesting a program inside itself is not supported")
                for defgate in instruction._defined_gates:
                    self.inst(defgate)
                for instr in instruction._instructions:
                    self.inst(instr)
            # Implementation note: these two base cases are the only ones which modify the program
            elif isinstance(instruction, DefGate):
                defined_gate_names = [gate.name for gate in self._defined_gates]
                if instruction.name in defined_gate_names:
                    warnings.warn("Gate {} has already been defined in this program".format(instruction.name))
                self._defined_gates.append(instruction)
            elif isinstance(instruction, AbstractInstruction):
                self._instructions.append(instruction)
                # Invalidate the synthesized-instruction cache.
                self._synthesized_instructions = None
            else:
                raise TypeError("Invalid instruction: {}".format(instruction))
        return self

    def gate(self, name, params, qubits):
        """
        Add a gate to the program.

        .. note::
            The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the the bits **are ordered in reverse** by the qubit index,
            i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
            See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.

        :param string name: The name of the gate.
        :param list params: Parameters to send to the gate.
        :param list qubits: Qubits that the gate operates on.
        :return: The Program instance
        :rtype: Program
        """
        return self.inst(Gate(name, params, [unpack_qubit(q) for q in qubits]))

    def defgate(self, name, matrix, parameters=None):
        """
        Define a new static gate.

        .. note::
            The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the the bits **are ordered in reverse** by the qubit index,
            i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
            See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.

        :param string name: The name of the gate.
        :param array-like matrix: List of lists or Numpy 2d array.
        :param list parameters: list of parameters that are used in this gate
        :return: The Program instance.
        :rtype: Program
        """
        return self.inst(DefGate(name, matrix, parameters))

    def define_noisy_gate(self, name, qubit_indices, kraus_ops):
        """
        Overload a static ideal gate with a noisy one defined in terms of a Kraus map.

        .. note::
            The matrix elements along each axis are ordered by bitstring. For two qubits the order
            is ``00, 01, 10, 11``, where the the bits **are ordered in reverse** by the qubit index,
            i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
            See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.

        :param str name: The name of the gate.
        :param tuple|list qubit_indices: The qubits it acts on.
        :param tuple|list kraus_ops: The Kraus operators.
        :return: The Program instance
        :rtype: Program
        """
        kraus_ops = [np.asarray(k, dtype=np.complex128) for k in kraus_ops]
        _check_kraus_ops(len(qubit_indices), kraus_ops)
        return self.inst(_create_kraus_pragmas(name, tuple(qubit_indices), kraus_ops))

    def no_noise(self):
        """
        Prevent a noisy gate definition from being applied to the immediately following Gate
        instruction.

        :return: Program
        """
        return self.inst(Pragma("NO-NOISE"))

    def measure(self, qubit_index, classical_reg=None):
        """
        Measures a qubit at qubit_index and puts the result in classical_reg

        :param int qubit_index: The address of the qubit to measure.
        :param int classical_reg: The address of the classical bit to store the result.
        :returns: The Quil Program with the appropriate measure instruction appended, e.g.
                  MEASURE 0 [1]
        :rtype: Program
        """
        return self.inst(MEASURE(qubit_index, classical_reg))

    def measure_all(self, *qubit_reg_pairs):
        """
        Measures many qubits into their specified classical bits, in the order
        they were entered.

        :param Tuple qubit_reg_pairs: Tuples of qubit indices paired with classical bits.
        :return: The Quil Program with the appropriate measure instructions appended, e.g.

        .. code::

            MEASURE 0 [1]
            MEASURE 1 [2]
            MEASURE 2 [3]

        :rtype: Program
        """
        for qubit_index, classical_reg in qubit_reg_pairs:
            self.inst(MEASURE(qubit_index, classical_reg))
        return self

    def while_do(self, classical_reg, q_program):
        """
        While a classical register at index classical_reg is 1, loop q_program

        Equivalent to the following construction:

        .. code::

            WHILE [c]:
               instr...
            =>
              LABEL @START
              JUMP-UNLESS @END [c]
              instr...
              JUMP @START
              LABEL @END

        :param int classical_reg: The classical register to check
        :param Program q_program: The Quil program to loop.
        :return: The Quil Program with the loop instructions added.
        :rtype: Program
        """
        label_start = LabelPlaceholder("START")
        label_end = LabelPlaceholder("END")
        self.inst(JumpTarget(label_start))
        self.inst(JumpUnless(target=label_end, condition=Addr(classical_reg)))
        self.inst(q_program)
        self.inst(Jump(label_start))
        self.inst(JumpTarget(label_end))
        return self

    def if_then(self, classical_reg, if_program, else_program=None):
        """
        If the classical register at index classical reg is 1, run if_program, else run
        else_program.

        Equivalent to the following construction:

        .. code::

            IF [c]:
               instrA...
            ELSE:
               instrB...
            =>
              JUMP-WHEN @THEN [c]
              instrB...
              JUMP @END
              LABEL @THEN
              instrA...
              LABEL @END

        :param int classical_reg: The classical register to check as the condition
        :param Program if_program: A Quil program to execute if classical_reg is 1
        :param Program else_program: A Quil program to execute if classical_reg is 0. This
            argument is optional and defaults to an empty Program.
        :returns: The Quil Program with the branching instructions added.
        :rtype: Program
        """
        else_program = else_program if else_program is not None else Program()
        label_then = LabelPlaceholder("THEN")
        label_end = LabelPlaceholder("END")
        self.inst(JumpWhen(target=label_then, condition=Addr(classical_reg)))
        self.inst(else_program)
        self.inst(Jump(label_end))
        self.inst(JumpTarget(label_then))
        self.inst(if_program)
        self.inst(JumpTarget(label_end))
        return self

    def alloc(self):
        """
        Get a new qubit.

        :return: A qubit.
        :rtype: Qubit
        """
        return QubitPlaceholder()

    def out(self):
        """
        Converts the Quil program to a readable string.

        :return: String form of a program
        :rtype: string
        """
        s = ""
        for dg in self._defined_gates:
            s += dg.out()
            s += "\n"
        for instr in self.instructions:
            s += instr.out() + "\n"
        return s

    def get_qubits(self):
        """
        Returns all of the qubit indices used in this program, including gate applications and
        allocated qubits. e.g.

            >>> p = Program()
            >>> p.inst(("H", 1))
            >>> p.get_qubits()
            {1}
            >>> q = p.alloc()
            >>> p.inst(H(q))
            >>> len(p.get_qubits())
            2

        :return: A set of all the qubit indices used in this program
        :rtype: set
        """
        qubits = set()
        for instr in self.instructions:
            if isinstance(instr, Gate):
                qubits |= {q.index for q in instr.qubits}
            elif isinstance(instr, Measurement):
                qubits.add(instr.qubit.index)
        return qubits

    def is_protoquil(self):
        """
        Protoquil programs may only contain gates, no classical instructions and no jumps.

        :return: True if the Program is Protoquil, False otherwise
        """
        for instr in self._instructions:
            if not isinstance(instr, Gate):
                return False
        return True

    def pop(self):
        """
        Pops off the last instruction.

        :return: The instruction that was popped.
        :rtype: tuple
        """
        res = self._instructions.pop()
        self._synthesized_instructions = None
        return res

    def dagger(self, inv_dict=None, suffix="-INV"):
        """
        Creates the conjugate transpose of the Quil program. The program must not
        contain any irreversible actions (measurement, control flow, qubit allocation).

        :param dict inv_dict: An optional mapping from defined gate names to the names of
            their pre-defined inverses. Gates not in the mapping get an auto-defined
            inverse named ``gate.name + suffix``.
        :param str suffix: The suffix appended to auto-defined inverse gate names.
        :return: The Quil program's inverse
        :rtype: Program
        """
        if not self.is_protoquil():
            raise ValueError("Program must be valid Protoquil")

        daggered = Program()

        for gate in self._defined_gates:
            if inv_dict is None or gate.name not in inv_dict:
                daggered.defgate(gate.name + suffix, gate.matrix.T.conj())

        for gate in reversed(self._instructions):
            if gate.name in STANDARD_GATES:
                if gate.name == "S":
                    daggered.inst(STANDARD_GATES["PHASE"](-pi / 2, *gate.qubits))
                elif gate.name == "T":
                    # T equals RZ(pi / 4) up to global phase, so its dagger is RZ(-pi / 4).
                    # (RZ(+pi / 4) would reproduce T itself, not its inverse.)
                    daggered.inst(STANDARD_GATES["RZ"](-pi / 4, *gate.qubits))
                elif gate.name == "ISWAP":
                    # ISWAP equals PSWAP(pi / 2), so its dagger is PSWAP(-pi / 2).
                    daggered.inst(STANDARD_GATES["PSWAP"](-pi / 2, *gate.qubits))
                else:
                    # Parametric standard gates invert by negating their parameters.
                    negated_params = list(map(lambda x: -1 * x, gate.params))
                    daggered.inst(STANDARD_GATES[gate.name](*(negated_params + gate.qubits)))
            else:
                if inv_dict is None or gate.name not in inv_dict:
                    gate_inv_name = gate.name + suffix
                else:
                    gate_inv_name = inv_dict[gate.name]

                daggered.inst(tuple([gate_inv_name] + gate.qubits))

        return daggered

    def _synthesize(self):
        """
        Takes a program which may contain placeholders and assigns them all defined values.

        For qubit placeholders:
        1. We look through the program to find all the known indexes of qubits and add them to a set
        2. We create a mapping from undefined qubits to their newly assigned index
        3. For every qubit placeholder in the program, if it's not already been assigned then look through the set of
           known indexes and find the lowest available one

        For label placeholders:
        1. Start a counter at 1
        2. For every label placeholder in the program, replace it with a defined label using the counter and increment
           the counter

        :return: List of AbstractInstructions with all placeholders removed
        """
        used_indexes = set()
        for instr in self._instructions:
            if isinstance(instr, Gate):
                for q in instr.qubits:
                    if not isinstance(q, QubitPlaceholder):
                        used_indexes.add(q.index)
            elif isinstance(instr, Measurement):
                if not isinstance(instr.qubit, QubitPlaceholder):
                    used_indexes.add(instr.qubit.index)

        def find_available_index():
            # Just do a linear search.
            for i in count(start=0, step=1):
                if i not in used_indexes:
                    return i

        qubit_mapping = dict()

        def remap_qubit(qubit):
            if not isinstance(qubit, QubitPlaceholder):
                return qubit

            if id(qubit) in qubit_mapping:
                return qubit_mapping[id(qubit)]
            else:
                available_index = find_available_index()
                used_indexes.add(available_index)
                remapped_qubit = Qubit(available_index)
                qubit_mapping[id(qubit)] = remapped_qubit
                return remapped_qubit

        label_mapping = dict()
        label_counter = 1

        def remap_label(placeholder):
            if id(placeholder) in label_mapping:
                return label_mapping[id(placeholder)]
            else:
                label = Label(placeholder.prefix + str(label_counter))
                label_mapping[id(placeholder)] = label
                return label

        result = []
        for instr in self._instructions:
            # Remap qubits on Gate and Measurement instructions
            if isinstance(instr, Gate):
                remapped_qubits = [remap_qubit(q) for q in instr.qubits]
                result.append(Gate(instr.name, instr.params, remapped_qubits))
            elif isinstance(instr, Measurement):
                result.append(Measurement(remap_qubit(instr.qubit), instr.classical_reg))
            # Remap any label placeholders on jump or target instructions
            elif isinstance(instr, Jump) and isinstance(instr.target, LabelPlaceholder):
                result.append(Jump(remap_label(instr.target)))
                label_counter += 1
            elif isinstance(instr, JumpTarget) and isinstance(instr.label, LabelPlaceholder):
                result.append(JumpTarget(remap_label(instr.label)))
                label_counter += 1
            elif isinstance(instr, JumpConditional) and isinstance(instr.target, LabelPlaceholder):
                new_label = remap_label(instr.target)
                if isinstance(instr, JumpWhen):
                    result.append(JumpWhen(new_label, instr.condition))
                elif isinstance(instr, JumpUnless):
                    result.append(JumpUnless(new_label, instr.condition))
                else:
                    raise TypeError("Encountered a JumpConditional that wasn't JumpWhen or JumpUnless: {} {}"
                                    .format(type(instr), instr))
                label_counter += 1
            # Otherwise simply add it to the result
            else:
                result.append(instr)

        return result

    def __add__(self, other):
        """
        Concatenate two programs together, returning a new one.

        :param Program other: Another program or instruction to concatenate to this one.
        :return: A newly concatenated program.
        :rtype: Program
        """
        p = Program()
        p.inst(self)
        p.inst(other)
        return p

    def __getitem__(self, index):
        """
        Allows indexing into the program to get an action.

        :param index: The action at the specified index.
        :return:
        """
        return self.instructions[index]

    def __iter__(self):
        """
        Allow built in iteration through a program's instructions, e.g. [a for a in Program(X(0))]

        :return:
        """
        return self.instructions.__iter__()

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.out() == other.out()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self._instructions)

    def __str__(self):
        return self.out()
def merge_programs(prog_list):
    """
    Merges a list of pyQuil programs into a single one by appending them in sequence

    :param list prog_list: A list of pyquil programs
    :return: a single pyQuil program
    :rtype: Program
    """
    merged = Program()
    # Program.__add__ returns a fresh Program, so this accumulates left-to-right
    # exactly like sum(prog_list, Program()).
    for prog in prog_list:
        merged = merged + prog
    return merged
|
[
"numpy.asarray",
"itertools.count",
"pyquil.quilatom.QubitPlaceholder",
"pyquil.quilatom.LabelPlaceholder",
"pyquil.quilatom.unpack_qubit"
] |
[((10548, 10573), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""START"""'], {}), "('START')\n", (10564, 10573), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((10594, 10617), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""END"""'], {}), "('END')\n", (10610, 10617), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((11926, 11950), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""THEN"""'], {}), "('THEN')\n", (11942, 11950), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((11971, 11994), 'pyquil.quilatom.LabelPlaceholder', 'LabelPlaceholder', (['"""END"""'], {}), "('END')\n", (11987, 11994), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((12408, 12426), 'pyquil.quilatom.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (12424, 12426), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n'), ((8290, 8324), 'numpy.asarray', 'np.asarray', (['k'], {'dtype': 'np.complex128'}), '(k, dtype=np.complex128)\n', (8300, 8324), True, 'import numpy as np\n'), ((17096, 17118), 'itertools.count', 'count', ([], {'start': '(0)', 'step': '(1)'}), '(start=0, step=1)\n', (17101, 17118), False, 'from itertools import count\n'), ((6551, 6566), 'pyquil.quilatom.unpack_qubit', 'unpack_qubit', (['q'], {}), '(q)\n', (6563, 6566), False, 'from pyquil.quilatom import LabelPlaceholder, QubitPlaceholder, unpack_qubit\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 13:49:13 2021
@author: Matteo
"""
import numpy as np
import matplotlib.pyplot as plt
import PCI_o_B
from PCI_o_B import CIfile as CI
from PCI_o_B import G2file as g2
from PCI_o_B import SharedFunctions as sf
class DAM(g2.G2):
    def __init__(self,FolderName,CI,nROI,tau):
        # Delegate CI/ROI setup to the base G2 class.
        super().__init__(FolderName,CI,nROI,tau)
        # Number of sub-intervals the CI series is split into (set by DAMCalculation).
        self.n_intervals = 0
        # Per-interval results, one entry per interval, filled by DAMCalculation:
        # lag times, g2 correlation functions and their variances.
        self.tauDAM= []
        self.g2DAM = []
        self.g2varDAM = []
def __str__(self):
#write again this stuff
str_res = '\n|---------------|'
str_res += '\n| CIbead class: '
str_res += '\n|--------------------+--------------------|'
str_res += '\n| filelist : ' + str(self.ROIfilelist)
str_res += '\n| folder : ' + str(self.FolderName)
str_res += '\n| number of ROIs : ' + str(self.nROI)
str_res += '\n| ROIs size : ' + str(self.GetROIsize()) + ' px'
str_res += '\n| lag time : ' + str(self.lag)
str_res += '\n| x for theta(x)= 90° : ' + str(self.Center) + 'px'
str_res += '\n| Radius bead : ' + str(self.Center) +'px'
#str_res += '\n| Window of interest top : ' + str(self.GetWINDOWtop()) + ' px'
str_res += '\n|--------------------+--------------------|'
return str_res
    def DAMCalculation(self,n_intervals):
        """Split the CI time series into n_intervals chunks and compute one g2 per chunk.

        Results are appended to self.g2DAM / self.tauDAM / self.g2varDAM
        (one entry per interval). The base-class scratch fields self.g2,
        self.g2var and self.tau are reused for each chunk.
        """
        self.n_intervals = n_intervals
        # Length (in CI rows) of each sub-interval; integer truncation means the
        # last chunk absorbs any remainder.
        l_intervals = int(len(self.CI[0]) / n_intervals )
        # Starting row of each sub-interval.
        time_list = []
        for i in range(n_intervals):
            time_list.append(i*l_intervals)
        #calculation of the g2 for each roi for each interval
        for i in range(n_intervals-1):
            super().G2Calculation(time_list[i],time_list[i+1])
            self.g2DAM.append(self.g2)
            self.tauDAM.append(np.asarray(self.tau))
            self.g2varDAM.append(self.g2var)
            # Clear the base-class scratch fields before the next chunk.
            self.g2 = []
            self.g2var = []
            #self.tau = []
        # Last chunk runs from its start to the end of the CI data.
        super().G2Calculation(time_list[-1],len(self.CI[0]))
        self.g2DAM.append(self.g2)
        self.g2varDAM.append(self.g2var)
        self.tauDAM.append(np.asarray(self.tau))
        '''
        for i in range(n_intervals):
            self.tauDAM[i].tolist()
            print(type(self.tauDAM[i]))
            print(len(self.tauDAM[i]))
        '''
        return
def DAMFitSingleDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitSingleDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime1 = []
self.decaytime1err = []
return
def DAMFitStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime1 = []
self.decaytime1err = []
return
def DAMFitDoubleDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitDoubleDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
def DAMFitSingleStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
print(i)
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitSingleStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
def DAMFitDoubleStretchDecaytime(self,variables,plot):
self.decaytime1DAM = []
self.decaytime1errDAM = []
self.decaytime2DAM = []
self.decaytime2errDAM = []
for i in range(self.n_intervals):
self.g2 = []
self.g2var = []
self.tau = []
self.g2 = self.g2DAM[i]
self.g2var = self.g2varDAM[i]
self.tau = self.tauDAM[i]
super().FitDoubleStretchDecaytime(variables,plot=False)
self.decaytime1DAM.append(self.decaytime1)
self.decaytime1errDAM.append(self.decaytime1err)
self.decaytime2DAM.append(self.decaytime2)
self.decaytime2errDAM.append(self.decaytime2err)
self.decaytime1 = []
self.decaytime1err = []
self.decaytime2 = []
self.decaytime2err = []
return
|
[
"numpy.asarray"
] |
[((2301, 2321), 'numpy.asarray', 'np.asarray', (['self.tau'], {}), '(self.tau)\n', (2311, 2321), True, 'import numpy as np\n'), ((1952, 1972), 'numpy.asarray', 'np.asarray', (['self.tau'], {}), '(self.tau)\n', (1962, 1972), True, 'import numpy as np\n')]
|
__author__ = "<NAME>"
import numpy as np
from tensorflow import keras
from sktime_dl.classification._classifier import BaseDeepClassifier
from sktime_dl.networks._lstmfcn import LSTMFCNNetwork
from sktime_dl.utils import check_and_clean_data, \
check_and_clean_validation_data
from sktime_dl.utils import check_is_fitted
from sklearn.utils import check_random_state
class LSTMFCNClassifier(BaseDeepClassifier, LSTMFCNNetwork):
    """
    Implementation of LSTMFCNClassifier from Karim et al (2019). [1]_

    Overview: combines an LSTM arm with a CNN arm.  Optionally uses an
    attention mechanism in the LSTM, which the author indicates provides
    improved performance.

    Parameters
    ----------
    nb_epochs: int, default=1500
        the number of epochs to train the model
    batch_size: int, default=8
        the number of samples per gradient update.
    kernel_sizes: sequence of ints, default=(8, 5, 3)
        specifying the length of the 1D convolution windows
    filter_sizes: sequence of ints, default=(128, 256, 128)
        size of filter for each conv layer
    num_cells: int, default=8
        output dimension for LSTM layer
    dropout: float, default=0.8
        controls dropout rate of LSTM layer
    attention: boolean, default=False
        If True, uses custom attention LSTM layer
    callbacks: list of keras callbacks or None, default=None
        Keras callbacks to use, such as learning rate reduction or saving
        the best model based on validation error.  When None, a
        ReduceLROnPlateau callback is installed at model-build time (the
        default behavior documented by the original implementation).
    random_state: int, default=0
        seed to any needed random actions
    verbose: boolean, default=False
        whether to output extra information
    model_name: string, default="lstmfcn"
        the name of this model for printing and file writing purposes
    model_save_directory: string, default=None
        if not None; location to save the trained keras model in hdf5 format

    Attributes
    ----------
    nb_classes : int
        Number of classes. Extracted from the data.

    References
    ----------
    .. [1] Karim et al., "Multivariate LSTM-FCNs for time series
       classification", Neural Networks 116 (2019) 237-245,
       http://dx.doi.org/10.1016/j.neunet.2019.04.014

    Example
    -------
    from sktime_dl.classification import LSTMFCNClassifier
    from sktime.datasets import load_italy_power_demand
    X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
    X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
    clf = LSTMFCNClassifier()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    """

    def __init__(
            self,
            nb_epochs=1500,
            batch_size=8,
            # BUG FIX: the original used mutable list defaults ([8, 5, 3],
            # [128, 256, 128], []), which are shared across all instances.
            # Immutable tuples / None are backward-compatible replacements
            # (the values are only read, never mutated).
            kernel_sizes=(8, 5, 3),
            filter_sizes=(128, 256, 128),
            num_cells=8,
            dropout=0.8,
            attention=False,
            callbacks=None,
            random_state=0,
            verbose=False,
            model_name="lstmfcn",
            model_save_directory=None,
    ):
        super(LSTMFCNClassifier, self).__init__(
            model_name=model_name, model_save_directory=model_save_directory
        )
        self.verbose = verbose
        self._is_fitted = False
        # calculated in fit()
        self.classes_ = None
        self.nb_classes = -1
        self.input_shape = None
        self.model = None
        self.history = None
        # predefined hyper-parameters
        self.nb_epochs = nb_epochs
        self.batch_size = batch_size
        self.kernel_sizes = kernel_sizes
        self.filter_sizes = filter_sizes
        self.NUM_CELLS = num_cells
        self.dropout = dropout
        self.attention = attention
        self.callbacks = callbacks
        self.random_state = random_state
        # (the original redundantly re-assigned verbose/_is_fitted here)

    def build_model(self, input_shape, nb_classes, **kwargs):
        """
        Construct a compiled, un-trained, keras model that is ready for
        training.

        Parameters
        ----------
        input_shape : tuple
            The shape of the data fed into the input layer
        nb_classes: int
            The number of classes, which shall become the size of the output
            layer

        Returns
        -------
        output : a compiled Keras Model
        """
        input_layers, output_layer = self.build_network(input_shape, **kwargs)

        output_layer = keras.layers.Dense(nb_classes, activation="softmax")(
            output_layer
        )

        model = keras.models.Model(inputs=input_layers, outputs=output_layer)
        model.compile(
            loss="categorical_crossentropy",
            optimizer='adam',
            metrics=["accuracy"],
        )

        # BUG FIX: the original tested ``self.callbacks == None`` while the
        # default was ``[]``, so this branch was dead and the documented
        # default ReduceLROnPlateau callback was never installed.  With a
        # ``None`` default and an identity test, the documented behavior
        # actually happens; passing an explicit list (including ``[]``)
        # still uses exactly the callbacks given.
        if self.callbacks is None:
            reduce_lr = keras.callbacks.ReduceLROnPlateau(
                monitor='loss', factor=0.7, patience=50, min_lr=0.0001)
            self.callbacks = [reduce_lr]

        return model

    def fit(self, X, y, input_checks=True, validation_X=None,
            validation_y=None, **kwargs):
        """
        Fit the classifier on the training set (X, y).

        Parameters
        ----------
        X : a nested pd.Dataframe, or (if input_checks=False) array-like of
            shape = (n_instances, series_length, n_dimensions)
            The training input samples. If a 2D array-like is passed,
            n_dimensions is assumed to be 1.
        y : array-like, shape = [n_instances]
            The training data class labels.
        input_checks : boolean
            whether to check the X and y parameters
        validation_X : a nested pd.Dataframe, or array-like of shape =
            (n_instances, series_length, n_dimensions)
            The validation samples. If a 2D array-like is passed,
            n_dimensions is assumed to be 1.
            Unless strictly defined by the user via callbacks (such as
            EarlyStopping), the presence or state of the validation
            data does not alter training in any way. Predictions at each epoch
            are stored in the model's fit history.
        validation_y : array-like, shape = [n_instances]
            The validation class labels.

        Returns
        -------
        self : object
        """
        self.random_state = check_random_state(self.random_state)

        X = check_and_clean_data(X, y, input_checks=input_checks)

        y_onehot = self.convert_y(y)

        validation_data = \
            check_and_clean_validation_data(validation_X, validation_y,
                                            self.label_encoder,
                                            self.onehot_encoder)

        # ignore the number of instances, X.shape[0]; we only need the shape
        # of each instance
        self.input_shape = X.shape[1:]

        if validation_data is not None:
            validation_data = (
                validation_data[0],
                validation_data[1]
            )

        self.model = self.build_model(self.input_shape, self.nb_classes)

        if self.verbose:
            self.model.summary()

        self.history = self.model.fit(
            X,
            y_onehot,
            batch_size=self.batch_size,
            epochs=self.nb_epochs,
            verbose=self.verbose,
            validation_data=validation_data,
            callbacks=self.callbacks,
        )

        self.save_trained_model()
        self._is_fitted = True

        return self

    def predict_proba(self, X, input_checks=True, **kwargs):
        """
        Find probability estimates for each class for all cases in X.

        Parameters
        ----------
        X : a nested pd.Dataframe, or (if input_checks=False) array-like of
            shape = (n_instances, series_length, n_dimensions)
            The training input samples. If a 2D array-like is passed,
            n_dimensions is assumed to be 1.
        input_checks: boolean
            whether to check the X parameter

        Returns
        -------
        output : array of shape = [n_instances, n_classes] of probabilities
        """
        check_is_fitted(self)

        X = check_and_clean_data(X, input_checks=input_checks)

        probs = self.model.predict(X, **kwargs)

        # a single output column means a binary classifier: expand to
        # [P(class 0), P(class 1)] so the shape matches the general case
        if probs.shape[1] == 1:
            probs = np.hstack([1 - probs, probs])

        return probs
|
[
"sklearn.utils.check_random_state",
"sktime_dl.utils.check_is_fitted",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.hstack",
"tensorflow.keras.models.Model",
"sktime_dl.utils.check_and_clean_data",
"sktime_dl.utils.check_and_clean_validation_data"
] |
[((4872, 4933), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layers', 'outputs': 'output_layer'}), '(inputs=input_layers, outputs=output_layer)\n', (4890, 4933), False, 'from tensorflow import keras\n'), ((6945, 6982), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (6963, 6982), False, 'from sklearn.utils import check_random_state\n'), ((6996, 7049), 'sktime_dl.utils.check_and_clean_data', 'check_and_clean_data', (['X', 'y'], {'input_checks': 'input_checks'}), '(X, y, input_checks=input_checks)\n', (7016, 7049), False, 'from sktime_dl.utils import check_and_clean_data, check_and_clean_validation_data\n'), ((7128, 7233), 'sktime_dl.utils.check_and_clean_validation_data', 'check_and_clean_validation_data', (['validation_X', 'validation_y', 'self.label_encoder', 'self.onehot_encoder'], {}), '(validation_X, validation_y, self.\n label_encoder, self.onehot_encoder)\n', (7159, 7233), False, 'from sktime_dl.utils import check_and_clean_data, check_and_clean_validation_data\n'), ((8753, 8774), 'sktime_dl.utils.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (8768, 8774), False, 'from sktime_dl.utils import check_is_fitted\n'), ((8788, 8838), 'sktime_dl.utils.check_and_clean_data', 'check_and_clean_data', (['X'], {'input_checks': 'input_checks'}), '(X, input_checks=input_checks)\n', (8808, 8838), False, 'from sktime_dl.utils import check_and_clean_data, check_and_clean_validation_data\n'), ((4766, 4818), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['nb_classes'], {'activation': '"""softmax"""'}), "(nb_classes, activation='softmax')\n", (4784, 4818), False, 'from tensorflow import keras\n'), ((5396, 5489), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'keras.callbacks.ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.7)', 'patience': '(50)', 'min_lr': '(0.0001)'}), "(monitor='loss', factor=0.7, patience=50,\n min_lr=0.0001)\n", 
(5429, 5489), False, 'from tensorflow import keras\n'), ((9062, 9091), 'numpy.hstack', 'np.hstack', (['[1 - probs, probs]'], {}), '([1 - probs, probs])\n', (9071, 9091), True, 'import numpy as np\n')]
|
# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Suite for loading OpenAI `Safety Gym <https://openai.com/blog/safety-gym/>`_ environments.
**NOTE**: Mujoco requires separated installation.
(gym >= 0.10, and mujoco>=1.50)
Follow the instructions at:
https://github.com/openai/mujoco-py
Several general facts about the provided benchmark environments:
1. All have distance-based dense rewards (can be customized to be sparse).
2. All have continual goals: after reaching a goal, the goal is reset but the
layout keeps the same until timeout (can be customized to not reset goals).
3. Layouts are randomized before episodes begin
4. Costs are indicator binaries (0 or 1). Every positive cost will be binarized
to 1. Thus the total cost will be 1 if any component cost is positive.
5. level 0 has no constraints; level 1 has some unsafe elements; level 2 has
very dense unsafe elements.
See https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L97
for a complete list of default configurations.
"""
try:
import mujoco_py
import safety_gym
except ImportError:
mujoco_py = None
safety_gym = None
import numpy as np
import copy
import gym
import alf
from alf.environments import suite_gym
from alf.environments.alf_wrappers import NonEpisodicAgent
def is_available():
    """Report whether both optional imports (``mujoco_py``, ``safety_gym``) succeeded."""
    return not (mujoco_py is None or safety_gym is None)
class VisionObservationWrapper(gym.ObservationWrapper):
    """If the observation is a dict and it contains a key 'vision',
    return an uint8 RGB image in [0,255] and a flat vector containing any other
    info."""

    def __init__(self, env):
        super().__init__(env)
        # True only when the wrapped env's observation space is a Dict that
        # contains a 'vision' entry; otherwise this wrapper is a no-op.
        self._vision = False
        if (isinstance(self.observation_space, gym.spaces.Dict)
                and 'vision' in self.observation_space.spaces):
            self._vision = True
            observation_space = {}
            # keep the image sub-space untouched under its original key
            observation_space['vision'] = self.observation_space['vision']
            # total length of all non-vision sub-spaces once flattened
            # (np.prod over each sub-space's shape handles multi-dim entries)
            self.obs_flat_size = sum([
                np.prod(i.shape)
                for (k, i) in self.observation_space.spaces.items()
                if k != 'vision'
            ])
            # all non-vision sensors are concatenated into one 'robot' vector
            observation_space['robot'] = gym.spaces.Box(
                -np.inf, np.inf, (self.obs_flat_size, ), dtype=np.float32)
            self.observation_space = gym.spaces.Dict(observation_space)

    def observation(self, observation):
        """Repack a dict observation into {'vision': image, 'robot': flat vector}.

        Non-dict (or vision-less) observations are returned unchanged.
        """
        if self._vision:
            obs = {"vision": observation["vision"]}
            flat_obs = np.zeros(self.obs_flat_size)
            offset = 0
            # iterate keys in sorted order so the layout of the flat vector
            # is deterministic and matches the space built in __init__
            for k in sorted(observation.keys()):
                if k == 'vision':
                    continue
                k_size = np.prod(observation[k].shape)
                flat_obs[offset:offset + k_size] = observation[k].flat
                offset += k_size
            obs['robot'] = flat_obs
            return obs
        return observation
class CompleteEnvInfo(gym.Wrapper):
    """Always return the complete set of env-info keys so the info dict has a
    fixed shape (whether or not the corresponding event occurred), as required
    by ALF.

    The stock safety gym env only adds a key to the info dict when the
    corresponding event is triggered, see:
    https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L1242
    """

    def __init__(self, env, env_name):
        super().__init__(env)
        # key names are taken from:
        # https://github.com/openai/safety-gym/blob/master/safety_gym/envs/engine.py
        keys = [
            'cost_exception',
            'goal_met',
            'cost'  # this is the summed overall cost
        ]
        if not self._is_level0_env(env_name):
            # level 1 and 2 envs additionally report per-constraint costs
            keys = keys + [
                'cost_vases_contact', 'cost_pillars', 'cost_buttons',
                'cost_gremlins', 'cost_vases_displace', 'cost_vases_velocity',
                'cost_hazards'
            ]
        self._env_info_keys = keys
        self._default_env_info = self._generate_default_env_info()

    def _is_level0_env(self, env_name):
        # level-0 env names look like "...0-v0" (no constraints)
        return "0-v" in env_name

    def _generate_default_env_info(self):
        # 'goal_met' is a boolean flag; every other entry is a float cost
        return {
            key: (False if key == "goal_met" else np.float32(0.))
            for key in self._env_info_keys
        }

    def step(self, action):
        """Take one env step and return the full, fixed-shape info dict:
        default values first, overwritten by whatever the env reported.
        """
        complete_info = copy.copy(self._default_env_info)
        obs, reward, done, step_info = self.env.step(action)
        complete_info.update(step_info)
        return obs, reward, done, complete_info
class VectorReward(gym.Wrapper):
    """This wrapper makes the env return a reward vector of length 2
    (``REWARD_DIMENSION``). The two dimensions are:

    1. distance-improvement reward indicating the delta smaller distances of
       agent<->box and box<->goal for "push" tasks, or agent<->goal for
       "goal"/"button" tasks. When ``sparse_reward`` is True, this entry is
       replaced by a 0/1 success indicator (1 when the goal is met at the
       current step).
    2. negative binary cost where -1 means that at least one constraint has
       been violated at the current time step (constraints vary depending on
       env configurations).

    All rewards are the higher the better.
    """
    # length of the returned reward vector
    REWARD_DIMENSION = 2

    def __init__(self, env, sparse_reward):
        super().__init__(env)
        self._reward_space = gym.spaces.Box(
            low=-float('inf'),
            high=float('inf'),
            shape=[self.REWARD_DIMENSION])
        self._sparse_reward = sparse_reward

    def step(self, action):
        """Take one step through the environment and obtain several rewards.

        Args:
            action (np.array):
        Returns:
            tuple:
            - obs: the wrapped env's observation, passed through unchanged
            - rewards (np.array): a reward vector of length ``REWARD_DIMENSION``.
              See the class docstring for the meaning of each entry.
            - done (bool): whether the episode has ended
            - info (dict): a dict of additional env information
        """
        obs, reward, done, info = self.env.step(action)
        # derive the cost/success signals from ``info``
        cost_reward = -info["cost"]
        success_reward = float(info["goal_met"])
        if self._sparse_reward:
            reward = success_reward
        return obs, np.array([reward, cost_reward],
                             dtype=np.float32), done, info

    @property
    def reward_space(self):
        return self._reward_space
@alf.configurable(blacklist=['env'])
class RGBRenderWrapper(gym.Wrapper):
    """Make a safety gym env renderable/recordable.

    A ``metadata`` field should have been defined in the original safety gym
    env; without it video recording is disabled, see
    https://github.com/openai/gym/blob/master/gym/wrappers/monitoring/video_recorder.py#L41
    The original env also needs a ``camera_id`` when "rgb_array" mode is used,
    which is incompatible with our ``ALFEnvironment`` interfaces, so
    ``render()`` is wrapped here with a customizable camera mode.
    """
    _metadata = {'render.modes': ["rgb_array", "human"]}

    def __init__(self, env, width=800, height=800, camera_mode="fixedfar"):
        """
        Args:
            width (int): the width of rgb image
            height (int): the height of rgb image
            camera_mode (str): one of ('fixednear', 'fixedfar', 'fixedtop',
                'vision', 'track', 'top')
        """
        super().__init__(env)
        # self.metadata first inherits the wrapped env's metadata;
        # merge in the render modes declared above
        self.metadata.update(self._metadata)
        self._width = width
        self._height = height
        self._camera_mode = camera_mode

    def render(self, mode="human"):
        # resolve the configured camera name to the mujoco camera id
        cam_id = self.unwrapped.model.camera_name2id(self._camera_mode)
        kwargs = {"mode": mode, "camera_id": cam_id}
        if self._width is not None:
            kwargs["width"] = self._width
        if self._height is not None:
            kwargs["height"] = self._height
        return self.env.render(**kwargs)
@alf.configurable
class EpisodicWrapper(gym.Wrapper):
    """Terminate the episode as soon as the current goal is met."""

    def __init__(self, env):
        super().__init__(env)

    def step(self, action):
        observation, rew, finished, step_info = self.env.step(action)
        # force episode termination on goal achievement
        if step_info["goal_met"]:
            finished = True
        return observation, rew, finished, step_info

    def reset(self):
        return self.env.reset()
@alf.configurable
def load(environment_name,
         env_id=None,
         discount=1.0,
         max_episode_steps=None,
         unconstrained=False,
         sparse_reward=False,
         episodic=False,
         gym_env_wrappers=(),
         alf_env_wrappers=()):
    """Load the selected safety gym environment and wrap it.

    Note that by default a ``TimeLimit`` wrapper is used to limit episode
    lengths to the default benchmarks defined by the registered environments.

    Args:
        environment_name: Name for the environment to load.
        env_id: A scalar ``Tensor`` of the environment ID of the time step.
        discount: Discount to use for the environment.
        max_episode_steps: If None the ``max_episode_steps`` will be set to
            the default step limit -1 defined in the environment. If 0, no
            ``TimeLimit`` wrapper will be used.
        unconstrained (bool): if True, the suite will be used just as an
            unconstrained environment. The reward will always be scalar
            without including constraints.
        sparse_reward (bool): If True, only give reward when reaching a goal.
        episodic (bool): whether terminate the episode when a goal is
            achieved. Note that if True, both ``EpisodicWrapper`` and
            ``NonEpisodicAgent`` wrapper will be used to simulate an infinite
            horizon even though the success rate is computed on per-goal
            basis. ``EpisodicWrapper`` first returns ``done=True`` to signal
            the end of an episode, and ``NonEpisodicAgent`` replaces
            ``discount=0`` with ``discount=1``.
        gym_env_wrappers: Iterable with references to wrapper classes to use
            directly on the gym environment.
        alf_env_wrappers: Iterable with references to wrapper classes to use
            on the torch environment.

    Returns:
        AlfEnvironment:
    """
    # None of the safety gym tasks is registered with a ``max_episode_steps``
    # argument (so ``gym.wrappers.time_limit.TimeLimit`` is never applied by
    # ``gym.make``); each task manages its own limit through ``env.num_steps``.
    env = gym.make(environment_name)
    # fill all env info with default values
    env = CompleteEnvInfo(env, environment_name)
    # make vector reward unless a plain scalar-reward env was requested
    if not unconstrained:
        env = VectorReward(env, sparse_reward)
    env = RGBRenderWrapper(env)
    if episodic:
        env = EpisodicWrapper(env)
        alf_env_wrappers = alf_env_wrappers + (NonEpisodicAgent, )
    env = VisionObservationWrapper(env)
    # Subtract 1 from the env's own step limit: the underlying env already
    # outputs ``done=True`` when it reaches ``env.num_steps`` (before the
    # ``AlfGymWrapper``), which would otherwise be mis-handled, see
    # https://github.com/openai/safety-gym/blob/f31042f2f9ee61b9034dd6a416955972911544f5/safety_gym/envs/engine.py#L1302
    env_limit = env.num_steps - 1
    if max_episode_steps is None:
        max_episode_steps = env_limit
    max_episode_steps = min(env_limit, max_episode_steps)
    return suite_gym.wrap_env(
        env,
        env_id=env_id,
        discount=discount,
        max_episode_steps=max_episode_steps,
        gym_env_wrappers=gym_env_wrappers,
        alf_env_wrappers=alf_env_wrappers)
|
[
"alf.configurable",
"gym.make",
"numpy.float32",
"copy.copy",
"numpy.zeros",
"numpy.array",
"gym.spaces.Box",
"alf.environments.suite_gym.wrap_env",
"numpy.prod",
"gym.spaces.Dict"
] |
[((7467, 7502), 'alf.configurable', 'alf.configurable', ([], {'blacklist': "['env']"}), "(blacklist=['env'])\n", (7483, 7502), False, 'import alf\n'), ((11739, 11765), 'gym.make', 'gym.make', (['environment_name'], {}), '(environment_name)\n', (11747, 11765), False, 'import gym\n'), ((12665, 12839), 'alf.environments.suite_gym.wrap_env', 'suite_gym.wrap_env', (['env'], {'env_id': 'env_id', 'discount': 'discount', 'max_episode_steps': 'max_episode_steps', 'gym_env_wrappers': 'gym_env_wrappers', 'alf_env_wrappers': 'alf_env_wrappers'}), '(env, env_id=env_id, discount=discount, max_episode_steps\n =max_episode_steps, gym_env_wrappers=gym_env_wrappers, alf_env_wrappers\n =alf_env_wrappers)\n', (12683, 12839), False, 'from alf.environments import suite_gym\n'), ((5349, 5382), 'copy.copy', 'copy.copy', (['self._default_env_info'], {}), '(self._default_env_info)\n', (5358, 5382), False, 'import copy\n'), ((2864, 2936), 'gym.spaces.Box', 'gym.spaces.Box', (['(-np.inf)', 'np.inf', '(self.obs_flat_size,)'], {'dtype': 'np.float32'}), '(-np.inf, np.inf, (self.obs_flat_size,), dtype=np.float32)\n', (2878, 2936), False, 'import gym\n'), ((2992, 3026), 'gym.spaces.Dict', 'gym.spaces.Dict', (['observation_space'], {}), '(observation_space)\n', (3007, 3026), False, 'import gym\n'), ((3169, 3197), 'numpy.zeros', 'np.zeros', (['self.obs_flat_size'], {}), '(self.obs_flat_size)\n', (3177, 3197), True, 'import numpy as np\n'), ((7296, 7345), 'numpy.array', 'np.array', (['[reward, cost_reward]'], {'dtype': 'np.float32'}), '([reward, cost_reward], dtype=np.float32)\n', (7304, 7345), True, 'import numpy as np\n'), ((3358, 3387), 'numpy.prod', 'np.prod', (['observation[k].shape'], {}), '(observation[k].shape)\n', (3365, 3387), True, 'import numpy as np\n'), ((5088, 5103), 'numpy.float32', 'np.float32', (['(0.0)'], {}), '(0.0)\n', (5098, 5103), True, 'import numpy as np\n'), ((2690, 2706), 'numpy.prod', 'np.prod', (['i.shape'], {}), '(i.shape)\n', (2697, 2706), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import keras
import numpy as np
import tensorflow as tf
from absl import app
from data import simulation_pb2
from bin.load_batch import load_batch
from bin.data_visualization import map_id_to_units_race
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d.axes3d import Axes3D
from bin.util import *
from lib.unit_constants import *
from lib.config import REPLAYS_PARSED_DIR, REPLAY_DIR, REPO_DIR, STANDARD_VERSION
def main():
    """Grid-search over Nadam hyper-parameters and plot the training curves."""
    learning_rates = [0.05]
    beta1 = [0.9, 0.7, 0.6, 0.5]
    beta2 = [0.95, 0.85, 0.75, 0.65]
    epsilon = 1e-06
    training_epochs = 50
    all_accs = []
    all_costs = []
    # one training run per (learning_rate, beta1, beta2) combination
    for lr in learning_rates:
        for first_moment in beta1:
            for second_moment in beta2:
                print("Run gradient descent with Learning Rate: %-6s --- Beta1: %-4s --- Beta2: %-5s" % (lr, first_moment, second_moment))
                acc_hist, cost_hist = run_grad_desc(
                    lr, training_epochs, first_moment, second_moment, epsilon)
                all_accs.append(acc_hist)
                all_costs.append(cost_hist)
    create_graphs(all_accs, all_costs, learning_rates, training_epochs,
                  beta1, beta2)
def run_grad_desc(learning_rate, training_epochs, b1, b2, eps):
    """Train a 3-layer softmax classifier (94 -> 94 -> 47 -> 3) with the
    TF1 Nadam optimizer and return the per-epoch test accuracy and cost.

    Args:
        learning_rate (float): Nadam learning rate.
        training_epochs (int): number of full-batch training steps.
        b1, b2 (float): Nadam beta1/beta2 exponential-decay rates.
        eps (float): Nadam epsilon.
    Returns:
        tuple: (trackAcc, trackCost) — np.array of accuracies (in %) and a
        list of cost values, one entry per epoch.
    """
    # Graph Input: 94-dim unit-count feature vector, 3-class one-hot label
    x = tf.placeholder(tf.float32, [None, 94])
    y = tf.placeholder(tf.float32, [None, 3])
    # initialize weights (NOTE(review): despite the original "weight and
    # bias" comment, no bias variables are created)
    W_1 = tf.Variable(tf.truncated_normal([94, 94]))
    W_2 = tf.Variable(tf.truncated_normal([94, 47]))
    W_3 = tf.Variable(tf.truncated_normal([47, 3]))
    # Construct Model: three chained matmuls with no intermediate
    # non-linearity (so effectively one linear map), then softmax
    x_ = tf.matmul(x, W_1)
    x_ = tf.matmul(x_, W_2)
    logits = tf.matmul(x_, W_3)
    pred = tf.nn.softmax(logits)
    # minimize error using cross entropy (computed from logits for stability)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
    optimizer = tf.contrib.opt.NadamOptimizer(learning_rate, b1, b2, eps).minimize(cost)
    # accuracy = fraction of argmax predictions matching argmax labels
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    init = tf.global_variables_initializer()
    trackAcc = []
    trackCost = []
    with tf.Session() as s:
        s.run(init)
        xs_train, xs_test, ys_train, ys_test = load(version=['1_3d'], file_version='multiple')
        # loop to train for specified number of epochs (full batch each time)
        for epoch in range(training_epochs):
            _, c = s.run([optimizer, cost], feed_dict={x: xs_train, y: ys_train})
            acc = s.run(accuracy, feed_dict={x: xs_test, y: ys_test})
            # track accuracy/cost to display in graph when algorithm finished
            trackCost.append(c)
            trackAcc.append(acc*100)
            #print('Epoch:', '%04d' % (epoch+1), "completed with an accuracy of:", "{:.3f}".format(acc), "cost=", "{:.9f}".format(c))
        # evaluate accuracy when all training steps are completed
        print ("Accuracy:", accuracy.eval({x: xs_test, y: ys_test}))
        trackAcc = np.array(trackAcc)
    return trackAcc, trackCost
def create_graphs(trackAcc, trackCost, learning_rate, training_epochs, b1, b2):
    """Plot per-run test-accuracy and cost curves from the hyper-parameter sweep.

    Args:
        trackAcc (list): one accuracy history (length ``training_epochs``)
            per (learning_rate, b1, b2) combination, in sweep order.
        trackCost (list): matching cost histories.
        learning_rate (list): learning rates swept (selects the marker).
        training_epochs (int): length of each history (x-axis).
        b1 (list): beta1 values swept (selects the line color).
        b2 (list): beta2 values swept (inner sweep dimension).
    """
    fig = plt.figure(figsize=plt.figaspect(4.))
    # accuracy subplot (top) and cost subplot (bottom); x-axis is the epoch
    ax = fig.add_subplot(2, 1, 1)
    ax.set_title('Test Accuracy')
    bx = fig.add_subplot(2, 1, 2)
    bx.set_title('Cost by Epoch')
    epochs = np.arange(1, training_epochs + 1)
    sign = ['.', '-', ',', 'o']
    cols = ['b', 'g', 'y', 'r']
    run = 0
    for n, _ in enumerate(learning_rate):
        # marker encodes the learning rate; beyond 4 rates fall back to '^'.
        # (BUG FIX: the original scan loop kept running past a match, so with
        # more than 4 learning rates every run was overwritten to '^'.)
        m = sign[n] if n <= 3 else '^'
        for j, _ in enumerate(b1):
            # color encodes beta1; beyond 4 values fall back to black.
            # BUG FIX: the original wrote ``col == cols[j]+m`` — a no-op
            # comparison instead of an assignment — so ``col`` stayed ''
            # and the intended color/marker format was never applied.
            col = cols[j] + m if j <= 3 else 'k' + m
            for _ in b2:
                ax.plot(epochs, trackAcc[run], col, label=run)
                bx.plot(epochs, trackCost[run], col, label=run)
                run += 1
    plt.show()
# function to load the csv-data and construct the input array as return
# input array is a vector with one entry per possible unit id
# 94 entries 47 per combat party
# Maps a raw replay unit id onto its slot in a team's 47-entry count vector.
# Several ids share one slot (unit variants, per the original id chains);
# id 85 is deliberately absent and therefore ignored.
_UNIT_INDEX = {
    9: 0, 12: 1, 13: 1, 15: 1, 17: 1,
    104: 2, 105: 3, 106: 4, 107: 5, 108: 6, 109: 7, 110: 8, 111: 9,
    112: 10, 114: 11, 126: 12, 129: 13, 289: 14, 499: 15,
    4: 16, 10: 17,
    73: 18, 74: 19, 75: 20, 76: 21, 77: 22, 78: 23, 79: 24, 80: 25,
    82: 26, 83: 27, 84: 28,
    141: 29, 311: 30, 694: 31,
    32: 32, 33: 32, 34: 33, 35: 33,
    45: 34, 48: 35, 49: 36, 50: 37, 51: 38, 52: 39,
    53: 40, 484: 40,
    54: 41, 55: 42, 56: 43, 57: 44,
    268: 45, 692: 46,
}


# function to load the csv-data and construct the input array as return
# input array is a vector with one entry per possible unit id
# 94 entries 47 per combat party
def load(version=STANDARD_VERSION, file_version='single'):
    """Load replay logs and build unit-count input vectors.

    Each sample is a 94-entry vector: 47 unit counts for team A followed
    by 47 for team B.  Labels are the one-hot encoded winner codes.

    version      : game version whose log files / CSV should be read.
    file_version : 'multiple' reads every per-replay log file,
                   'single' reads one summed-up CSV (capped at 250 rows).

    Returns (xs_train, xs_test, ys_train, ys_test) with the last 10 % of
    samples held out as the test split.
    """
    match_arr = []
    # load file(s) depending on desired input and version number
    if file_version == 'multiple':
        for log_file in build_file_array('logs', version):
            match_arr.append(read_csv(log_file))
    if file_version == 'single':
        file_path = os.path.join(REPO_DIR, 'all_csv_from_version_' + version + '.csv')
        match_arr = read_summed_up_csv(file_path, 250)
    # NOTE(review): these vectors are created once and never reset, so unit
    # counts accumulate across matches.  Behavior preserved as-is — confirm
    # whether a per-match reset was intended.
    unit_vector_A = np.zeros(47)
    unit_vector_B = np.zeros(47)
    xs = []
    ys = []
    typeerror = 0
    for match in match_arr:
        try:
            for id in match['team_A']:
                id = int(id.replace("'", ""))
                idx = _UNIT_INDEX.get(id)
                if idx is not None:
                    unit_vector_A[idx] += 1
            for id in match['team_B']:
                id = int(id.replace("'", ""))
                idx = _UNIT_INDEX.get(id)
                if idx is not None:
                    unit_vector_B[idx] += 1
            unit_vector = np.append(unit_vector_A, unit_vector_B)
            xs.append(unit_vector)
            ys.append(int(match['winner_code']))
        except TypeError:
            # malformed id field — report it and skip the whole match
            print(id)
            typeerror += 1
            continue
        except ZeroDivisionError:
            continue
    # one-hot encode the winner codes (values 0, 1 or 2)
    ys = keras.utils.to_categorical(ys, num_classes=3)
    split = int(len(xs)*0.1)
    # # Make train / test split
    xs_train = xs[:-split]
    ys_train = ys[:-split]
    xs_test = xs[-split:]
    ys_test = ys[-split:]
    return xs_train, xs_test, ys_train, ys_test
if __name__ == "__main__":
    # Script entry point: run the module's main routine (defined elsewhere
    # in this file).
    main()
|
[
"tensorflow.nn.softmax",
"matplotlib.pyplot.figaspect",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.cast",
"numpy.arange",
"numpy.array",
"numpy.append",
"tensorflow.contrib.opt.NadamOptimizer",
"os.path.join",
"tensorflow.truncated_normal",
"keras.utils.to_categorical"
] |
[((1941, 1979), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 94]'], {}), '(tf.float32, [None, 94])\n', (1955, 1979), True, 'import tensorflow as tf\n'), ((1988, 2025), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {}), '(tf.float32, [None, 3])\n', (2002, 2025), True, 'import tensorflow as tf\n'), ((2258, 2275), 'tensorflow.matmul', 'tf.matmul', (['x', 'W_1'], {}), '(x, W_1)\n', (2267, 2275), True, 'import tensorflow as tf\n'), ((2285, 2303), 'tensorflow.matmul', 'tf.matmul', (['x_', 'W_2'], {}), '(x_, W_2)\n', (2294, 2303), True, 'import tensorflow as tf\n'), ((2317, 2335), 'tensorflow.matmul', 'tf.matmul', (['x_', 'W_3'], {}), '(x_, W_3)\n', (2326, 2335), True, 'import tensorflow as tf\n'), ((2347, 2368), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2360, 2368), True, 'import tensorflow as tf\n'), ((2782, 2815), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2813, 2815), True, 'import tensorflow as tf\n'), ((4088, 4121), 'numpy.arange', 'np.arange', (['(1)', '(training_epochs + 1)'], {}), '(1, training_epochs + 1)\n', (4097, 4121), True, 'import numpy as np\n'), ((4132, 4165), 'numpy.arange', 'np.arange', (['(1)', '(training_epochs + 1)'], {}), '(1, training_epochs + 1)\n', (4141, 4165), True, 'import numpy as np\n'), ((5055, 5065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5063, 5065), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5924), 'numpy.zeros', 'np.zeros', (['(47)'], {}), '(47)\n', (5920, 5924), True, 'import numpy as np\n'), ((5945, 5957), 'numpy.zeros', 'np.zeros', (['(47)'], {}), '(47)\n', (5953, 5957), True, 'import numpy as np\n'), ((15708, 15753), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['ys'], {'num_classes': '(3)'}), '(ys, num_classes=3)\n', (15734, 15753), False, 'import keras\n'), ((2086, 2115), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[94, 94]'], {}), '([94, 
94])\n', (2105, 2115), True, 'import tensorflow as tf\n'), ((2139, 2168), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[94, 47]'], {}), '([94, 47])\n', (2158, 2168), True, 'import tensorflow as tf\n'), ((2192, 2220), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[47, 3]'], {}), '([47, 3])\n', (2211, 2220), True, 'import tensorflow as tf\n'), ((2456, 2523), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'y'}), '(logits=logits, labels=y)\n', (2498, 2523), True, 'import tensorflow as tf\n'), ((2654, 2672), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (2663, 2672), True, 'import tensorflow as tf\n'), ((2674, 2689), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2683, 2689), True, 'import tensorflow as tf\n'), ((2721, 2760), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2728, 2760), True, 'import tensorflow as tf\n'), ((2867, 2879), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2877, 2879), True, 'import tensorflow as tf\n'), ((3692, 3710), 'numpy.array', 'np.array', (['trackAcc'], {}), '(trackAcc)\n', (3700, 3710), True, 'import numpy as np\n'), ((5770, 5836), 'os.path.join', 'os.path.join', (['REPO_DIR', "('all_csv_from_version_' + version + '.csv')"], {}), "(REPO_DIR, 'all_csv_from_version_' + version + '.csv')\n", (5782, 5836), False, 'import os\n'), ((2542, 2599), 'tensorflow.contrib.opt.NadamOptimizer', 'tf.contrib.opt.NadamOptimizer', (['learning_rate', 'b1', 'b2', 'eps'], {}), '(learning_rate, b1, b2, eps)\n', (2571, 2599), True, 'import tensorflow as tf\n'), ((3882, 3900), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(4.0)'], {}), '(4.0)\n', (3895, 3900), True, 'import matplotlib.pyplot as plt\n'), ((15381, 15420), 'numpy.append', 'np.append', (['unit_vector_A', 'unit_vector_B'], {}), '(unit_vector_A, unit_vector_B)\n', 
(15390, 15420), True, 'import numpy as np\n')]
|
"""
The PIMA Indians dataset obtained from the UCI Machine Learning Repository
The goal is to predict whether or not a given female patient will contract diabetes
based on features such as BMI, age, and number of pregnancies
It is a binary classification problem
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import randint
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
classification_report,
confusion_matrix,
roc_curve,
roc_auc_score,
)
from sklearn.model_selection import (
train_test_split,
cross_val_score,
GridSearchCV,
RandomizedSearchCV,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
plt.style.use("ggplot")

# Load the PIMA Indians diabetes dataset and drop incomplete rows.
_df = pd.read_csv("datasets/diabetes.csv")
df = _df.dropna()
X = df.drop("Outcome", axis=1).values
y = df.Outcome.values
# NOTE(review): sklearn estimators expect a 1-D label array; the (-1, 1)
# column shape triggers a DataConversionWarning.  Kept for compatibility.
y = y.reshape(-1, 1)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.4, random_state=42
)

#### k-nearest neighbours ####
knn = KNeighborsClassifier(n_neighbors=6)
# BUG FIX: was `knn.fit(X_test, y_test)` — training on the evaluation data
# leaks the test set into the model and makes the reported metrics meaningless.
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("k-NN performance")
# must always be (test, prediction)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# the support column gives the number of samples of the true response that lie in that class

#### logistic regression ####
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print("logistic regression performance")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

# predict_proba returns an array with two columns: each column contains the
# probabilities for the respective target values.  We take column 1 —
# the probability of the predicted label being '1'.
y_pred_prob = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
_ = plt.plot([0, 1], [0, 1], "k--")
_ = plt.plot(fpr, tpr)
_ = plt.xlabel("False Positive Rate")
_ = plt.ylabel("True Positive Rate")
_ = plt.title("ROC Curve")
plt.show()
print(f"AUC: {roc_auc_score(y_test, y_pred_prob)}")
# NOTE(review): cv_auc is computed but never reported — confirm intent.
cv_auc = cross_val_score(logreg, X, y, cv=5, scoring="roc_auc")

#### hyperparameter tuning ####
# Setup the hyperparameter grid: regularization strength C
c_space = np.logspace(-5, 8, 15)
param_grid = {"C": c_space}  # hyperparameter to tune and values to test
logreg = LogisticRegression()
logreg_cv = GridSearchCV(
    logreg, param_grid, cv=5
)  # instantiate the GridSearchCV object
logreg_cv.fit(X, y)  # fits in place
print(
    f"""Tuned Logistic Regression Parameters: {logreg_cv.best_params_}
Best score is {logreg_cv.best_score_}"""
)

#### random tuning ####
tree = DecisionTreeClassifier()
param_dist = {
    "max_depth": [3, None],
    "max_features": randint(1, 9),
    "min_samples_leaf": randint(1, 9),
    "criterion": ["gini", "entropy"],
}
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
tree_cv.fit(X, y)
print(
    f"""Tuned Decision Tree Parameters: {tree_cv.best_params_}
Best score is {tree_cv.best_score_}"""
)
|
[
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"numpy.logspace",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.style.use",
"sklearn.model_selection.RandomizedSearchCV",
"matplotlib.pyplot.show",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.ylabel",
"scipy.stats.randint",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xlabel"
] |
[((771, 794), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (784, 794), True, 'import matplotlib.pyplot as plt\n'), ((802, 838), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/diabetes.csv"""'], {}), "('datasets/diabetes.csv')\n", (813, 838), True, 'import pandas as pd\n'), ((999, 1053), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': '(42)'}), '(X, y, test_size=0.4, random_state=42)\n', (1015, 1053), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((1067, 1102), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(6)'}), '(n_neighbors=6)\n', (1087, 1102), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1440, 1460), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1458, 1460), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1964, 1994), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred_prob'], {}), '(y_test, y_pred_prob)\n', (1973, 1994), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((2000, 2031), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (2008, 2031), True, 'import matplotlib.pyplot as plt\n'), ((2036, 2054), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (2044, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2092), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2069, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2129), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2107, 2129), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2156), 'matplotlib.pyplot.title', 
'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (2143, 2156), True, 'import matplotlib.pyplot as plt\n'), ((2157, 2167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2165, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2285), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['logreg', 'X', 'y'], {'cv': '(5)', 'scoring': '"""roc_auc"""'}), "(logreg, X, y, cv=5, scoring='roc_auc')\n", (2246, 2285), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((2361, 2383), 'numpy.logspace', 'np.logspace', (['(-5)', '(8)', '(15)'], {}), '(-5, 8, 15)\n', (2372, 2383), True, 'import numpy as np\n'), ((2466, 2486), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2484, 2486), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2499, 2537), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['logreg', 'param_grid'], {'cv': '(5)'}), '(logreg, param_grid, cv=5)\n', (2511, 2537), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((2774, 2798), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (2796, 2798), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2968, 3010), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['tree', 'param_dist'], {'cv': '(5)'}), '(tree, param_dist, cv=5)\n', (2986, 3010), False, 'from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RandomizedSearchCV\n'), ((1225, 1257), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1241, 1257), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((1265, 1302), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1286, 
1302), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((1569, 1601), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1585, 1601), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((1609, 1646), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1630, 1646), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n'), ((2863, 2876), 'scipy.stats.randint', 'randint', (['(1)', '(9)'], {}), '(1, 9)\n', (2870, 2876), False, 'from scipy.stats import randint\n'), ((2902, 2915), 'scipy.stats.randint', 'randint', (['(1)', '(9)'], {}), '(1, 9)\n', (2909, 2915), False, 'from scipy.stats import randint\n'), ((2183, 2217), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_prob'], {}), '(y_test, y_pred_prob)\n', (2196, 2217), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score\n')]
|
import utils as util
import tensorflow as tf
import numpy as np
def forecast_model(series, time, forecastDays):
    """Train a Conv1D + stacked-LSTM network on a univariate series and
    forecast it.

    series       : 1-D array of observations.
    time         : matching time axis.  Kept for interface compatibility;
                   the computation does not use it.
    forecastDays : number of leading forecast points to return.

    Returns (accuracy, mae, forecast) where mae is the mean absolute error
    over the 365-point test window and accuracy = 100 - mae.
    """
    # Fixed train / validation / test split points (sample indices).
    split_time = 2555
    x_train = series[:split_time]
    split_time_test = 3285
    x_valid = series[split_time:split_time_test]
    x_test = series[split_time_test:]
    window_size = 30
    batch_size = 32
    shuffle_buffer_size = 1000
    # Reset graph state and seed both TF and NumPy for reproducibility.
    tf.keras.backend.clear_session()
    tf.random.set_seed(51)
    np.random.seed(51)
    # NOTE(review): training windows use window_size=60 / batch_size=100
    # while validation uses 30 / 32 — looks intentional but worth confirming.
    train_set = util.windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size)
    valid_set = util.windowed_dataset(x_valid, window_size, batch_size, shuffle_buffer_size)
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv1D(filters=60, kernel_size=5,
                               strides=1, padding="causal",
                               activation="relu",
                               input_shape=[None, 1]),
        tf.keras.layers.LSTM(60, return_sequences=True),
        tf.keras.layers.LSTM(60, return_sequences=True),
        tf.keras.layers.Dense(30, activation="relu"),
        tf.keras.layers.Dense(10, activation="relu"),
        tf.keras.layers.Dense(1),
        # rescale the network output into the data's magnitude range
        tf.keras.layers.Lambda(lambda x: x * 400)
    ])
    optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9)
    model.compile(loss=tf.keras.losses.Huber(),
                  optimizer=optimizer,
                  metrics=["mae"])
    history = model.fit(train_set, validation_data=(valid_set), epochs=5)
    # Forecast over the whole series, then keep the post-training tail.
    rnn_forecast = util.model_forecast(model, series[..., np.newaxis], window_size)
    rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
    mae = tf.keras.metrics.mean_absolute_error(x_test, rnn_forecast[:365]).numpy()
    accuracy = 100 - mae
    return (accuracy, mae, rnn_forecast[:forecastDays])
|
[
"tensorflow.random.set_seed",
"tensorflow.keras.metrics.mean_absolute_error",
"numpy.random.seed",
"utils.windowed_dataset",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.losses.Huber",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Lambda",
"utils.model_forecast"
] |
[((461, 493), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (491, 493), True, 'import tensorflow as tf\n'), ((498, 520), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(51)'], {}), '(51)\n', (516, 520), True, 'import tensorflow as tf\n'), ((525, 543), 'numpy.random.seed', 'np.random.seed', (['(51)'], {}), '(51)\n', (539, 543), True, 'import numpy as np\n'), ((560, 662), 'utils.windowed_dataset', 'util.windowed_dataset', (['x_train'], {'window_size': '(60)', 'batch_size': '(100)', 'shuffle_buffer': 'shuffle_buffer_size'}), '(x_train, window_size=60, batch_size=100,\n shuffle_buffer=shuffle_buffer_size)\n', (581, 662), True, 'import utils as util\n'), ((673, 749), 'utils.windowed_dataset', 'util.windowed_dataset', (['x_valid', 'window_size', 'batch_size', 'shuffle_buffer_size'], {}), '(x_valid, window_size, batch_size, shuffle_buffer_size)\n', (694, 749), True, 'import utils as util\n'), ((1292, 1339), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(1e-05)', 'momentum': '(0.9)'}), '(lr=1e-05, momentum=0.9)\n', (1315, 1339), True, 'import tensorflow as tf\n'), ((1549, 1613), 'utils.model_forecast', 'util.model_forecast', (['model', 'series[..., np.newaxis]', 'window_size'], {}), '(model, series[..., np.newaxis], window_size)\n', (1568, 1613), True, 'import utils as util\n'), ((792, 917), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': '(60)', 'kernel_size': '(5)', 'strides': '(1)', 'padding': '"""causal"""', 'activation': '"""relu"""', 'input_shape': '[None, 1]'}), "(filters=60, kernel_size=5, strides=1, padding=\n 'causal', activation='relu', input_shape=[None, 1])\n", (814, 917), True, 'import tensorflow as tf\n'), ((990, 1037), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['(60)'], {'return_sequences': '(True)'}), '(60, return_sequences=True)\n', (1010, 1037), True, 'import tensorflow as tf\n'), ((1043, 1090), 'tensorflow.keras.layers.LSTM', 
'tf.keras.layers.LSTM', (['(60)'], {'return_sequences': '(True)'}), '(60, return_sequences=True)\n', (1063, 1090), True, 'import tensorflow as tf\n'), ((1096, 1140), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (1117, 1140), True, 'import tensorflow as tf\n'), ((1146, 1190), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (1167, 1190), True, 'import tensorflow as tf\n'), ((1196, 1220), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (1217, 1220), True, 'import tensorflow as tf\n'), ((1226, 1267), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x * 400)'], {}), '(lambda x: x * 400)\n', (1248, 1267), True, 'import tensorflow as tf\n'), ((1362, 1385), 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', ([], {}), '()\n', (1383, 1385), True, 'import tensorflow as tf\n'), ((1690, 1754), 'tensorflow.keras.metrics.mean_absolute_error', 'tf.keras.metrics.mean_absolute_error', (['x_test', 'rnn_forecast[:365]'], {}), '(x_test, rnn_forecast[:365])\n', (1726, 1754), True, 'import tensorflow as tf\n')]
|
# encoding=utf-8
"""
Created on 21:29 2018/11/12
@author: <NAME>
"""
import numpy as np
import scipy.io
import scipy.linalg
import sklearn.metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
def kernel(ker, X1, X2, gamma):
    """Build a kernel matrix over the columns of X1 (and X2, if given).

    :param ker:   kernel name — falsy or 'primal' returns X1 unchanged,
                  'linear' and 'rbf' delegate to sklearn pairwise kernels
    :param X1:    n_feature * n1 data matrix (samples as columns)
    :param X2:    optional second data matrix, same layout, or None
    :param gamma: bandwidth for the rbf kernel (ignored otherwise)
    :return: the kernel matrix, or None for an unrecognized kernel name
    """
    if not ker or ker == 'primal':
        return X1
    if ker == 'linear':
        if X2 is None:
            return sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T)
        return sklearn.metrics.pairwise.linear_kernel(np.asarray(X1).T, np.asarray(X2).T)
    if ker == 'rbf':
        if X2 is None:
            return sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, None, gamma)
        return sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1).T, np.asarray(X2).T, gamma)
    return None
class TCA:
    def __init__(self, kernel_type='primal', dim=30, lamb=1, gamma=1):
        '''
        Init func
        :param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
        :param dim: dimension after transfer
        :param lamb: lambda value in equation
        :param gamma: kernel bandwidth for rbf kernel
        '''
        self.kernel_type = kernel_type
        self.dim = dim
        self.lamb = lamb
        self.gamma = gamma

    def _solve_projection(self, Xs, Xt):
        '''
        Solve the TCA generalized eigenproblem (shared by fit and fit_new,
        which previously duplicated this code verbatim)
        :param Xs: ns * n_feature, source feature
        :param Xt: nt * n_feature, target feature
        :return: (A, K, X, ns) — projection matrix, kernel matrix,
                 stacked/normalized input matrix, number of source samples
        '''
        X = np.hstack((Xs.T, Xt.T))
        X /= np.linalg.norm(X, axis=0)
        m, n = X.shape
        ns, nt = len(Xs), len(Xt)
        # MMD coefficient matrix M and centering matrix H
        e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
        M = e * e.T
        M = M / np.linalg.norm(M, 'fro')
        H = np.eye(n) - 1 / n * np.ones((n, n))
        K = kernel(self.kernel_type, X, None, gamma=self.gamma)
        n_eye = m if self.kernel_type == 'primal' else n
        a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
        w, V = scipy.linalg.eig(a, b)
        # keep eigenvectors of the self.dim smallest eigenvalues
        ind = np.argsort(w)
        A = V[:, ind[:self.dim]]
        return A, K, X, ns

    def fit(self, Xs, Xt):
        '''
        Transform Xs and Xt
        :param Xs: ns * n_feature, source feature
        :param Xt: nt * n_feature, target feature
        :return: Xs_new and Xt_new after TCA
        '''
        A, K, _, ns = self._solve_projection(Xs, Xt)
        Z = np.dot(A.T, K)
        Z /= np.linalg.norm(Z, axis=0)
        Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
        return Xs_new, Xt_new

    def fit_predict(self, Xs, Ys, Xt, Yt):
        '''
        Transform Xs and Xt, then make predictions on target using 1NN
        :param Xs: ns * n_feature, source feature
        :param Ys: ns * 1, source label
        :param Xt: nt * n_feature, target feature
        :param Yt: nt * 1, target label
        :return: Accuracy and predicted_labels on the target domain
        '''
        Xs_new, Xt_new = self.fit(Xs, Xt)
        clf = KNeighborsClassifier(n_neighbors=1)
        clf.fit(Xs_new, Ys.ravel())
        y_pred = clf.predict(Xt_new)
        acc = sklearn.metrics.accuracy_score(Yt, y_pred)
        return acc, y_pred

    def fit_new(self, Xs, Xt, Xt2):
        '''
        Map Xt2 to the latent space created from Xt and Xs
        :param Xs : ns * n_feature, source feature
        :param Xt : nt * n_feature, target feature
        :param Xt2: n_s, n_feature, target feature to be mapped
        :return: Xt2_new, mapped Xt2 with projection created by Xs and Xt
        '''
        # Projection matrix A from Xs and Xt (same computation as fit)
        A, _, X, _ = self._solve_projection(Xs, Xt)
        # Kernel with Xt2 (as columns) against the stacked inputs X
        K = kernel(self.kernel_type, X1=Xt2.T, X2=X, gamma=self.gamma)
        # New target features
        Xt2_new = K @ A
        return Xt2_new

    def fit_predict_new(self, Xt, Xs, Ys, Xt2, Yt2):
        '''
        Transfrom Xt and Xs, get Xs_new
        Transform Xt2 with projection matrix created by Xs and Xt, get Xt2_new
        Make predictions on Xt2_new using classifier trained on Xs_new
        :param Xt: ns * n_feature, target feature
        :param Xs: ns * n_feature, source feature
        :param Ys: ns * 1, source label
        :param Xt2: nt * n_feature, new target feature
        :param Yt2: nt * 1, new target label
        :return: Accuracy and predicted_labels on the target domain
        '''
        Xs_new, _ = self.fit(Xs, Xt)
        Xt2_new = self.fit_new(Xs, Xt, Xt2)
        clf = KNeighborsClassifier(n_neighbors=1)
        clf.fit(Xs_new, Ys.ravel())
        y_pred = clf.predict(Xt2_new)
        acc = sklearn.metrics.accuracy_score(Yt2, y_pred)
        return acc, y_pred
if __name__ == '__main__':
    # Office-Caltech domain-adaptation demo data files.
    domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat']
    # Only the amazon (i=1) -> webcam (j=2) pair is evaluated here.
    for i in [1]:
        for j in [2]:
            if i != j:
                src, tar = 'data/' + domains[i], 'data/' + domains[j]
                src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)
                Xs, Ys, Xt, Yt = src_domain['feas'], src_domain['labels'], tar_domain['feas'], tar_domain['labels']
                # Split target data
                Xt1, Xt2, Yt1, Yt2 = train_test_split(Xt, Yt, train_size=50, stratify=Yt, random_state=42)
                # Create latent space and evaluate using Xs and Xt1
                tca = TCA(kernel_type='linear', dim=30, lamb=1, gamma=1)
                acc1, ypre1 = tca.fit_predict(Xs, Ys, Xt1, Yt1)
                # Project and evaluate Xt2 existing projection matrix and classifier
                acc2, ypre2 = tca.fit_predict_new(Xt1, Xs, Ys, Xt2, Yt2)
                print(f'Accuracy of mapped source and target1 data : {acc1:.3f}') #0.800
                print(f'Accuracy of mapped target2 data : {acc2:.3f}') #0.706
|
[
"numpy.eye",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.linalg.norm",
"numpy.dot",
"numpy.linalg.multi_dot"
] |
[((1536, 1559), 'numpy.hstack', 'np.hstack', (['(Xs.T, Xt.T)'], {}), '((Xs.T, Xt.T))\n', (1545, 1559), True, 'import numpy as np\n'), ((1573, 1598), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1587, 1598), True, 'import numpy as np\n'), ((2127, 2140), 'numpy.argsort', 'np.argsort', (['w'], {}), '(w)\n', (2137, 2140), True, 'import numpy as np\n'), ((2186, 2200), 'numpy.dot', 'np.dot', (['A.T', 'K'], {}), '(A.T, K)\n', (2192, 2200), True, 'import numpy as np\n'), ((2214, 2239), 'numpy.linalg.norm', 'np.linalg.norm', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (2228, 2239), True, 'import numpy as np\n'), ((2772, 2807), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (2792, 2807), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3399, 3422), 'numpy.hstack', 'np.hstack', (['(Xs.T, Xt.T)'], {}), '((Xs.T, Xt.T))\n', (3408, 3422), True, 'import numpy as np\n'), ((3436, 3461), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (3450, 3461), True, 'import numpy as np\n'), ((3990, 4003), 'numpy.argsort', 'np.argsort', (['w'], {}), '(w)\n', (4000, 4003), True, 'import numpy as np\n'), ((4971, 5006), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (4991, 5006), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1771, 1795), 'numpy.linalg.norm', 'np.linalg.norm', (['M', '"""fro"""'], {}), "(M, 'fro')\n", (1785, 1795), True, 'import numpy as np\n'), ((1808, 1817), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (1814, 1817), True, 'import numpy as np\n'), ((2042, 2074), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, H, K.T]'], {}), '([K, H, K.T])\n', (2061, 2074), True, 'import numpy as np\n'), ((3634, 3658), 'numpy.linalg.norm', 'np.linalg.norm', (['M', '"""fro"""'], {}), "(M, 'fro')\n", (3648, 3658), True, 'import numpy as np\n'), 
((3671, 3680), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (3677, 3680), True, 'import numpy as np\n'), ((3905, 3937), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, H, K.T]'], {}), '([K, H, K.T])\n', (3924, 3937), True, 'import numpy as np\n'), ((1828, 1843), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (1835, 1843), True, 'import numpy as np\n'), ((1980, 2012), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, M, K.T]'], {}), '([K, M, K.T])\n', (1999, 2012), True, 'import numpy as np\n'), ((3691, 3706), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (3698, 3706), True, 'import numpy as np\n'), ((3843, 3875), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[K, M, K.T]'], {}), '([K, M, K.T])\n', (3862, 3875), True, 'import numpy as np\n'), ((5709, 5778), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Xt', 'Yt'], {'train_size': '(50)', 'stratify': 'Yt', 'random_state': '(42)'}), '(Xt, Yt, train_size=50, stratify=Yt, random_state=42)\n', (5725, 5778), False, 'from sklearn.model_selection import train_test_split\n'), ((1688, 1704), 'numpy.ones', 'np.ones', (['(ns, 1)'], {}), '((ns, 1))\n', (1695, 1704), True, 'import numpy as np\n'), ((1716, 1732), 'numpy.ones', 'np.ones', (['(nt, 1)'], {}), '((nt, 1))\n', (1723, 1732), True, 'import numpy as np\n'), ((2027, 2040), 'numpy.eye', 'np.eye', (['n_eye'], {}), '(n_eye)\n', (2033, 2040), True, 'import numpy as np\n'), ((3551, 3567), 'numpy.ones', 'np.ones', (['(ns, 1)'], {}), '((ns, 1))\n', (3558, 3567), True, 'import numpy as np\n'), ((3579, 3595), 'numpy.ones', 'np.ones', (['(nt, 1)'], {}), '((nt, 1))\n', (3586, 3595), True, 'import numpy as np\n'), ((3890, 3903), 'numpy.eye', 'np.eye', (['n_eye'], {}), '(n_eye)\n', (3896, 3903), True, 'import numpy as np\n'), ((464, 478), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (474, 478), True, 'import numpy as np\n'), ((482, 496), 'numpy.asarray', 'np.asarray', (['X2'], {}), '(X2)\n', (492, 496), True, 'import 
numpy as np\n'), ((569, 583), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (579, 583), True, 'import numpy as np\n'), ((689, 703), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (699, 703), True, 'import numpy as np\n'), ((707, 721), 'numpy.asarray', 'np.asarray', (['X2'], {}), '(X2)\n', (717, 721), True, 'import numpy as np\n'), ((798, 812), 'numpy.asarray', 'np.asarray', (['X1'], {}), '(X1)\n', (808, 812), True, 'import numpy as np\n')]
|
# coding: utf-8
import numpy as np
import math
from block_average import block_average
def main():
    """Compare block averaging on uncorrelated vs. MC-correlated samples.

    For each requested sample size this generates (1) i.i.d. standard-normal
    samples and (2) correlated samples from a Metropolis random walk on the
    potential ``energy``, runs ``block_average`` on both, and writes the
    resulting tables to ``uncorr_n{N}_blkavg.out`` / ``corr_n{N}_blkavg.out``.
    """
    # Enter details here
    n_samples = [int(2.5e5)]
    # n_samples = [int(5e5),int(1e6),int(2e6),int(4e6)]
    for n_sample in n_samples:
        # Uncorrelated case: independent draws from N(0, 1).
        uncorrelated_samples = np.random.normal(size=n_sample)
        _analyze_and_write(
            uncorrelated_samples, "uncorr_n{}_blkavg.out".format(n_sample)
        )
        # Correlated case: samples from a Metropolis Monte Carlo walk.
        correlated_samples = _mc_walk(n_sample)
        # np.savetxt('correlated-samples.txt',correlated_samples)
        _analyze_and_write(
            correlated_samples, "corr_n{}_blkavg.out".format(n_sample)
        )


def _mc_walk(n_sample):
    """Return ``n_sample`` correlated samples from a Metropolis walk on ``energy``.

    Proposal steps are N(0, 0.05); downhill moves are always accepted,
    uphill moves with probability exp(-dE).
    """
    moves = np.random.normal(0.0, 0.05, size=5 * n_sample)
    series = []
    pos = 0.0
    ener = energy(pos)
    for i in range(n_sample):
        series.append(pos)
        trial_pos = pos + moves[i]
        trial_ener = energy(trial_pos)
        # Short-circuit keeps RNG consumption identical to the classic
        # two-branch form: the uniform is only drawn for uphill moves.
        if trial_ener < ener or math.exp(-(trial_ener - ener)) > np.random.uniform():
            pos = trial_pos
            ener = trial_ener
    return np.asarray(series)


def _analyze_and_write(samples, outfile):
    """Run ``block_average`` on ``samples`` and write the table to ``outfile``.

    The file starts with the overall average/variance, then one row per
    blocking operation: count, mean estimate, variance estimate, error.
    """
    average = np.mean(samples)
    variance = np.var(samples)
    means_est, vars_est, vars_err = block_average(samples)
    with open(outfile, "w") as f:
        f.write(
            "# Average: {:16.4f}, Variance: {:16.4f}\n".format(
                average, variance
            )
        )
        f.write("# N_blk_ops, Mean_est, Var_est, var_err\n")
        for n_blk_ops, (mean_est, var_est, var_err) in enumerate(
            zip(means_est, vars_est, vars_err)
        ):
            f.write(
                "{:10d}{:18.6f}{:16.4e}{:16.4e}\n".format(
                    n_blk_ops, mean_est, var_est, var_err
                )
            )
def energy(x):
    """Harmonic potential used by the Monte Carlo walk."""
    return x * x
if __name__ == "__main__":
    # Run the block-averaging comparison only when executed as a script.
    main()
|
[
"numpy.random.uniform",
"math.exp",
"numpy.asarray",
"block_average.block_average",
"numpy.mean",
"numpy.random.normal",
"numpy.var"
] |
[((324, 355), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_sample'}), '(size=n_sample)\n', (340, 355), True, 'import numpy as np\n'), ((375, 404), 'numpy.mean', 'np.mean', (['uncorrelated_samples'], {}), '(uncorrelated_samples)\n', (382, 404), True, 'import numpy as np\n'), ((424, 452), 'numpy.var', 'np.var', (['uncorrelated_samples'], {}), '(uncorrelated_samples)\n', (430, 452), True, 'import numpy as np\n'), ((543, 578), 'block_average.block_average', 'block_average', (['uncorrelated_samples'], {}), '(uncorrelated_samples)\n', (556, 578), False, 'from block_average import block_average\n'), ((1325, 1371), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.05)'], {'size': '(5 * n_sample)'}), '(0.0, 0.05, size=5 * n_sample)\n', (1341, 1371), True, 'import numpy as np\n'), ((1908, 1926), 'numpy.asarray', 'np.asarray', (['series'], {}), '(series)\n', (1918, 1926), True, 'import numpy as np\n'), ((2012, 2039), 'numpy.mean', 'np.mean', (['correlated_samples'], {}), '(correlated_samples)\n', (2019, 2039), True, 'import numpy as np\n'), ((2059, 2085), 'numpy.var', 'np.var', (['correlated_samples'], {}), '(correlated_samples)\n', (2065, 2085), True, 'import numpy as np\n'), ((2176, 2209), 'block_average.block_average', 'block_average', (['correlated_samples'], {}), '(correlated_samples)\n', (2189, 2209), False, 'from block_average import block_average\n'), ((1726, 1745), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1743, 1745), True, 'import numpy as np\n'), ((1765, 1795), 'math.exp', 'math.exp', (['(-(trial_ener - ener))'], {}), '(-(trial_ener - ener))\n', (1773, 1795), False, 'import math\n')]
|
"""
Code for collecting failure trajectories using Bayesian Optimization
Project : Policy correction using Bayesian Optimization
Description : The file contains functions for computing failure trajectories given RL policy and
safety specifications
"""
import numpy as np
import gym
import GPyOpt
from numpy.random import seed
from eval_policy import display
import gym
from network import FeedForwardActorNN
import torch
import pickle
from numpy import arange
from numpy.random import rand
'''
Bayesian Optimization module for uncovering failure trajectories
Safety Requirement
# Requirement 1: The walker should not fall down in any trajectory
'''
#=============================================Global Variables =================================#
policy = None        # actor network (FeedForwardActorNN), loaded in __main__
env = None           # gym environment, created in __main__
traj_spec_dic = {}   # failing runs: index -> (trajectory, evaluation, seed, (x1, x2, x3))
traj_count = 0       # number of failing trajectories stored so far
index_count = 0      # total number of trajectories evaluated
'''
The function called from within the bayesian optimization module
parameters : bounds containing the sampled variables of the state vector
return : calls specification function and computes and returns the minimum value
'''
def sample_trajectory(sample_1,sample_2,sample_3):
    """Roll out one episode from a perturbed initial state and score it.

    The three samples overwrite components 0, 2 and 3 of the freshly reset
    environment's state; the global ``policy`` is then rolled out via
    ``display``.  Trajectories whose safety evaluation is negative (i.e.
    failures) are recorded in ``traj_spec_dic``.

    Returns the specification evaluation (negative means failure).
    """
    global policy, env, traj_spec_dic,traj_count, index_count
    selected_seed = env.seed(None)
    x1 = sample_1
    x2 = sample_2
    x3 = sample_3
    env.reset()
    env.env.state[0] = x1
    env.env.state[2] = x2
    env.env.state[3] = x3
    obs = torch.Tensor(env.env.state)
    # display returns (episode return, trajectory, iteration count).
    # The dead pre-assignments `iters = 0` / `ep_ret = 0` were removed, and
    # the third result is named n_iters so it no longer shadows builtin iter.
    ep_ret, traj, n_iters = display(obs,policy,env,False)
    additional_data = {'reward':ep_ret}
    # Package the trajectory in the shape the safety specification expects.
    traj = (traj, additional_data)
    specification_evaluation = safet_spec_2(traj)
    index_count = index_count+1
    # Store the set of trajectories with negative evaluation
    if specification_evaluation<0:
        traj_spec_dic[traj_count] = (traj[0],specification_evaluation,selected_seed,(x1,x2,x3))
        traj_count = traj_count + 1
    print(f'specification_evaluation ========== {specification_evaluation}')
    return specification_evaluation
def run_Random():
    """Random search over 1000 initial-state perturbations.

    Component ranges: sample_1 in [0, 2*pi], sample_2 and sample_3 in
    [-1, 1].  Every triple is rolled out through ``sample_trajectory``.
    """
    x1_lo, x1_hi = 0, 2 * np.pi
    x2_lo, x2_hi = -1, 1
    x3_lo, x3_hi = -1, 1
    # Scale uniform draws into each component's range.
    sample_1 = x1_lo + rand(1000) * (x1_hi - x1_lo)
    sample_2 = x2_lo + rand(1000) * (x2_hi - x2_lo)
    sample_3 = x3_lo + rand(1000) * (x3_hi - x3_lo)
    print(f'sample length ========== {len(sample_1)}')
    for s1, s2, s3 in zip(sample_1, sample_2, sample_3):
        sample_trajectory(s1, s2, s3)
        print(f'sample1 =========== {s1} ======== sample2 ==== {s2} ==== sample3 ===== {s3}')
# 1. Find the initial condition such that the pendulum stabilizes to 0
def safet_spec_1(traj, gamma=0.25):
    """Discounted pendulum-stabilisation score for a trajectory.

    Accumulates |arccos(cos_theta)|**2 + |theta_dot|**2 per step, with the
    running total discounted by ``gamma`` each step, and returns the
    negation (more deviation => more negative).
    """
    states = np.array(traj[0]).T
    penalty = 0
    for cos_theta, theta_dot in zip(states[0], states[2]):
        step_cost = np.abs(np.arccos(cos_theta)) ** 2 + np.abs(theta_dot) ** 2
        penalty = step_cost + penalty * gamma
    return -penalty
# 1. Find the initial condition such that the reward is less than 50
def safet_spec_2(traj):
    """Reward-based robustness: negative iff the episode reward is below 50."""
    reward = traj[1]['reward']
    return -(50 - reward)
if __name__ == '__main__':
    env = gym.make('BipedalWalker-v3')
    # NOTE(review): this rebinds the name `seed` (imported from numpy.random)
    # to the int 0 — harmless here, but rename it if numpy seeding is needed.
    seed = 0
    env.seed(seed)
    actor_model = 'Policies/ppo_actor_updatedBipedalWalker-v3.pth'
    # Extract out dimensions of observation and action spaces
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    # Build our policy the same way we build our actor model in PPO
    policy = FeedForwardActorNN(obs_dim, act_dim, False)
    # Load in the actor model saved by the PPO algorithm
    policy.load_state_dict(torch.load(actor_model))
    run_Random()
    print(f'Length trajectory ========== {len(traj_spec_dic)}')
    with open('failure_trajectory_bipedal.data', 'wb') as filehandle1:
        # store the observation data as binary data stream
        pickle.dump(traj_spec_dic, filehandle1)
|
[
"pickle.dump",
"numpy.abs",
"gym.make",
"network.FeedForwardActorNN",
"torch.load",
"eval_policy.display",
"torch.Tensor",
"numpy.array",
"numpy.random.rand",
"numpy.arccos"
] |
[((1450, 1477), 'torch.Tensor', 'torch.Tensor', (['env.env.state'], {}), '(env.env.state)\n', (1462, 1477), False, 'import torch\n'), ((1592, 1624), 'eval_policy.display', 'display', (['obs', 'policy', 'env', '(False)'], {}), '(obs, policy, env, False)\n', (1599, 1624), False, 'from eval_policy import display\n'), ((4072, 4100), 'gym.make', 'gym.make', (['"""BipedalWalker-v3"""'], {}), "('BipedalWalker-v3')\n", (4080, 4100), False, 'import gym\n'), ((4438, 4481), 'network.FeedForwardActorNN', 'FeedForwardActorNN', (['obs_dim', 'act_dim', '(False)'], {}), '(obs_dim, act_dim, False)\n', (4456, 4481), False, 'from network import FeedForwardActorNN\n'), ((4570, 4593), 'torch.load', 'torch.load', (['actor_model'], {}), '(actor_model)\n', (4580, 4593), False, 'import torch\n'), ((4819, 4858), 'pickle.dump', 'pickle.dump', (['traj_spec_dic', 'filehandle1'], {}), '(traj_spec_dic, filehandle1)\n', (4830, 4858), False, 'import pickle\n'), ((2433, 2443), 'numpy.random.rand', 'rand', (['(1000)'], {}), '(1000)\n', (2437, 2443), False, 'from numpy.random import rand\n'), ((2489, 2499), 'numpy.random.rand', 'rand', (['(1000)'], {}), '(1000)\n', (2493, 2499), False, 'from numpy.random import rand\n'), ((2545, 2555), 'numpy.random.rand', 'rand', (['(1000)'], {}), '(1000)\n', (2549, 2555), False, 'from numpy.random import rand\n'), ((3583, 3597), 'numpy.array', 'np.array', (['traj'], {}), '(traj)\n', (3591, 3597), True, 'import numpy as np\n'), ((3621, 3635), 'numpy.array', 'np.array', (['traj'], {}), '(traj)\n', (3629, 3635), True, 'import numpy as np\n'), ((3756, 3766), 'numpy.abs', 'np.abs', (['td'], {}), '(td)\n', (3762, 3766), True, 'import numpy as np\n'), ((3736, 3749), 'numpy.arccos', 'np.arccos', (['ct'], {}), '(ct)\n', (3745, 3749), True, 'import numpy as np\n')]
|
import os
import numpy as np
import time
from multiprocessing import Pool
import psutil
import cv2
import matplotlib.pyplot as plt
import av #for better performance
##############################################################################
#For EPM, please select pionts from the OPEN arm to the CLOSE arm and press y:
# o1
# c3 c4
# o2
#For OFT, please select pionts clockwise from upper left corner and press y:
# UL1 UR2
#
# LL4 LR3
#Press y to confirm remove background.
#For EPM please select the central neutral zone(four points, like OFT) and press y to confirm.
##############################################################################
######################
####Set Parameters####
######################
home = 'yourFolder'                # root working directory (edit before running)
src = home + '/Video'              # input videos
tgt = home + '/Picture'            # warped per-frame JPEGs
rmbg_tgt = home + '/Picture_rmbg'  # background-subtracted frames
logDir = home + '/log'             # per-video tracking logs
isEPM = True # True: elevated plus maze; False: open field test
startT = 60 # skip the first startT seconds of each video
cropLen = 600 # analyse only cropLen seconds (600s = 10min)
imgSize = 500 # side length (pixels) of the warped square image
if isEPM:
    margin = 0.1 #for EPM, keep a margin of 10% image size
else:
    margin = 0.2 #for OFT, keep a margin of 20% image size
useEllipse = False # fit an ellipse to the mouse (OFT); otherwise use the bounding box
refLenth = 100 # real-world arm length of the EPM or side of the OFT (unit of reported speeds)
centerCutOff = 0.5 # define the center zone, for OFT only!
multiThread = psutil.cpu_count(False)  # number of physical CPU cores for the worker pools
video2img = True        # stage 1: extract + warp frames
img2binary = True       # stage 2: background subtraction
useAverFrame = True     # recompute the background by averaging frames (else load cache)
cache = home + '/Cache' # cached average-background .npy files
tracking = True         # stage 3: tracking + log writing
preview = False         # show live tracking overlay windows
windowSize = 5 #window size (frames) for speed smoothing
Filter = 'aver' # position filter for speed: 'aver', 'median' or 'none'
######################
##Function and Class##
######################
def padding(img):
    """Return *img* centred on a square (h+w) x (h+w) black canvas.

    The enlarged canvas guarantees that any rotation of the original image
    stays inside the frame.
    """
    height, width = img.shape[:2]
    side = width + height
    canvas = np.zeros((side, side), dtype=np.uint8)
    top, left = width // 2, height // 2
    canvas[top:top + height, left:left + width] = img
    return canvas
x = 0       # click-parity toggle (0 / 1)
vector = [] # collected click coordinates, consumed by the main script
def mouse_img_cod(event, cod_x, cod_y, flags, param):
    """OpenCV mouse callback: record every left-click position in ``vector``.

    ``x`` toggles between 0 and 1 on each click; the clicked coordinate is
    appended in either case.
    """
    global vector, x
    if event == cv2.EVENT_LBUTTONDOWN:
        x = 1 if x == 0 else 0
        vector.append([cod_x, cod_y])
class ImageCorrection():
    """Perspective-corrects raw frames onto a fixed square canvas.

    ``refPoints`` are the four user-clicked arena reference points; they are
    mapped onto a square target of side ``2 * half_size`` with an
    ``expand``-pixel margin (arm tips for EPM, corners for OFT).
    """
    def __init__(self,refPoints,expand,half_size,EPM,crop=0.7):
        self.refPoints = refPoints        # np.float32 array of 4 clicked points
        self.center = half_size
        self.EPM = EPM
        # Side length of the corner squares blanked out for EPM frames.
        self.crop = int(crop*self.center)
        if EPM:
            # EPM clicks are open-arm ends first, then closed-arm ends:
            # mapped to left, right, top, bottom arm tips of the square.
            self.target = np.float32([[expand,self.center], [2*self.center-expand, self.center], [self.center, expand], [self.center, 2*self.center-expand]])
        else:
            # OFT clicks go clockwise from the upper-left corner.
            self.target = np.float32([[expand,expand], [2*self.center-expand, expand], [2*self.center-expand, 2*self.center-expand], [expand, 2*self.center-expand]])
        self.M = cv2.getPerspectiveTransform(self.refPoints , self.target)
    def __call__(self,img):
        """Warp one frame; for EPM also whiten the four corner squares."""
        img = cv2.warpPerspective(img,self.M,(2*self.center,2*self.center))
        if self.EPM:
            # Corners lie outside the plus-shaped maze and can only add noise.
            img[0:self.crop,0:self.crop] = 255
            img[2*self.center-self.crop:2*self.center,0:self.crop] = 255
            img[2*self.center-self.crop:2*self.center,2*self.center-self.crop:2*self.center] = 255
            img[0:self.crop,2*self.center-self.crop:2*self.center] = 255
        return img
class ExtractAndWarp():
    """Decode one video, perspective-warp its frames and dump them as JPEGs.

    Frames within roughly the first 10 s are averaged into a background
    image saved to ``cache`` as ``.npy``; frames between ``startT`` and
    ``startT + cropLen`` seconds are written to
    ``tgt/<video name>/<1-based index>.jpg``.
    """
    def __init__(self,tgt,cache,startT,cropLen,expand=25,half_size=250,EPM = False,preview=False):
        self.tgt = tgt            # root output folder for extracted frames
        self.cache = cache        # folder for cached background .npy files
        self.startT =startT      # seconds to skip at the start of the video
        self.cropLen = cropLen    # seconds of video to extract
        self.expand =expand      # margin (pixels) passed to ImageCorrection
        self.half_size =half_size
        self.EPM =EPM
        self.preview =preview    # show frames instead of writing them
    def __call__(self,direction):
        """Process one ``(video path, clicked reference points)`` pair."""
        fileAddr,vector = direction
        folder = os.path.join(self.tgt,fileAddr.split('.')[0].split('/')[-1])
        cache = os.path.join(self.cache,fileAddr.split('.')[0].split('/')[-1])+'.npy'
        try:
            os.mkdir(folder)
        except:
            pass
        warper = ImageCorrection(vector,self.expand,self.half_size,self.EPM)
        # OpenCV is used only for metadata (fps / frame count)...
        cap = cv2.VideoCapture(fileAddr)
        fps = cap.get(cv2.CAP_PROP_FPS)
        startAt = int( self.startT * fps) #in seconds
        # Last frame to extract: startT + cropLen seconds, capped at video end.
        length = int(min((self.startT+self.cropLen) * fps,cap.get(cv2.CAP_PROP_FRAME_COUNT)))
        cap.release()
        # ...while PyAV does the actual decoding (better performance).
        container = av.open(fileAddr)
        for i,frame in enumerate(container.decode(video=0)):
            if i < np.ceil(fps*10):
                # Accumulate the first ~10 s of warped frames into a mean image.
                img = frame.to_ndarray(format='rgb24')
                img = warper(img)
                img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)/ np.ceil(fps*10)
                try:
                    # avgImg is unbound on the first pass, so the += raises
                    # and the except clause seeds the accumulator instead.
                    avgImg += img
                except:
                    avgImg = img
            if i >= startAt:
                img = frame.to_ndarray(format='rgb24')
                img = warper(img)
                if self.preview:
                    cv2.imshow("Image",img)
                    k = cv2.waitKey(10)
                    if k ==27: # Esc key code
                        cv2.destroyAllWindows()
                        break
                else:
                    cv2.imwrite(os.path.join(folder,str(i-startAt+1)+'.jpg'), img,[cv2.IMWRITE_JPEG_QUALITY, 100])
            if i >= length:
                break
        np.save(cache,avgImg)
        container.close()
        return True
class frameAverage():
    """Callable that averages one window of grayscale frames from a folder.

    The frame list is split into windows of ``len(imgArray) // nThread + 1``
    frames so the work can be spread over a process pool; each call averages
    one window and returns the partial mean image (np.double).
    """
    def __init__(self,imgArray,dirs,nThread):
        self.imgArray = imgArray                         # list of frame file names
        self.windowSize = len(imgArray) // nThread + 1   # frames per pool task
        self.dirs = dirs                                 # directory containing the frames
    def __call__(self,index):
        """Return the mean of frames ``imgArray[index:index+windowSize]``.

        Returns None if the window is empty.
        """
        maxIndex = min(index+self.windowSize,len(self.imgArray))
        # Explicit initialisation replaces the old `try: avgImg += img
        # except: avgImg = img` trick, which silently swallowed *any*
        # exception (e.g. a failed imread) rather than only the first-pass
        # unbound-name error.
        avgImg = None
        for path in self.imgArray[index:maxIndex]:
            img = cv2.imread(os.path.join(self.dirs,path), cv2.IMREAD_GRAYSCALE).astype(np.double)
            # Divide first so the running sum is already the mean at the end.
            img = img / (maxIndex-index)
            avgImg = img if avgImg is None else avgImg + img
        return avgImg
class rmBackground():
    """Subtract a background image from frames and binarise the animal.

    Designed to be mapped over a process pool: each call handles one window
    of ``windowSize`` frames from ``imgArray`` and writes the results under
    ``tgt/dirs``.
    """
    def __init__(self,imgArray,dirs,src,tgt,background,nThread,threshold=25):
        self.imgArray = imgArray                         # list of frame file names
        self.windowSize = len(imgArray) // nThread + 1   # frames per pool task
        self.dirs = dirs                                 # per-video subdirectory name
        self.background = background                     # averaged background image
        self.tgt = tgt
        self.src = src
        self.threshold =threshold                       # diffs below this become black
    #@timer
    def __call__(self,index):
        """Process frames ``imgArray[index:index+windowSize]`` and write them out."""
        maxIndex = min(index+self.windowSize,len(self.imgArray))
        for path in self.imgArray[index:maxIndex]:
            img = cv2.imread(os.path.join(self.src,self.dirs,path), cv2.IMREAD_GRAYSCALE).astype(np.double)
            img = img - self.background
            # Zero out pixels that barely differ from the background.
            img[np.where(img<self.threshold)] = 0
            img = img.astype(np.uint8)
            img = cv2.medianBlur(img,5)
            # Invert and equalise so the animal ends up dark on a light field.
            img = 255-cv2.equalizeHist(img)
            img = cv2.medianBlur(img,5)
            cv2.imwrite(os.path.join(self.tgt,self.dirs,path), img)
        return True
class logger(object):
    """Tiny append-only logger: echoes to stdout and appends to a per-run file."""
    def __init__(self,logDir):
        self.logDir = logDir  # directory the per-video .log files live in
    def __call__(self,x,fileName):
        """Print ``x`` and append ``str(x)`` plus a newline to <logDir>/<fileName>.log."""
        print(x)
        # Context manager guarantees the handle is closed even if the write
        # fails (the original open/write/close leaked the handle on error).
        with open(os.path.join(self.logDir,fileName+'.log'),'a') as f:
            f.write(str(x)+'\n')
def trackingEPM(img,ori = None,kernal=5,thres = 150,preview=False): #kernel has to be odd
    """Locate the animal as the largest dark connected component.

    ``img`` is a background-subtracted grayscale frame; ``ori`` is the
    original frame, used only for the preview overlay.
    Returns ``(left, right, top, down, centroid)`` of the detected blob.
    """
    result_gray=cv2.medianBlur(img, kernal)
    #result_binary = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,25,50) #use otsu autothreshold method
    ret,result_binary=cv2.threshold(result_gray,thres,255,0)
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(255-result_binary, 4)
    # stats[:,4] is the component area; the largest component is assumed to
    # be the background, so knock it out and take the next largest (animal).
    largest = np.argmax(stats[:,4])
    stats[largest,4] = -1
    largest = np.argmax(stats[:,4])
    left = stats[largest,0]
    top = stats[largest,1]
    right = stats[largest,0]+stats[largest,2]
    down = stats[largest,1]+stats[largest,3]
    center = centroids[largest]
    if preview:
        fit = cv2.rectangle(ori, (left, top), (right, down), (255, 25, 25), 1)
        fit = cv2.circle(fit, np.int32(center),3, (25, 25, 255), 1)
        cv2.imshow("Image",fit)
        k = cv2.waitKey(2)
        if k == 32: # space pauses the preview until another key is pressed
            cv2.waitKey(0)
    return (left,right,top,down,center)
def trackingOFT(img,ori = None,kernal=11,thres = 100,preview=False):
    """Fit an ellipse to the animal's outline in a background-subtracted frame.

    Returns a cv2 ellipse tuple ``((cx, cy), (2a, 2b), angle)``; when the
    fit fails (too few edge points) the sentinel ``[(0,0),(0,0),1000]`` is
    returned instead.
    """
    result_gray=cv2.medianBlur(img, kernal)
    #result_binary = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,25,50)
    ret,result_binary=cv2.threshold(result_gray,thres,255,0) #use otsu autothreshold method
    edge = cv2.Canny(result_binary,10,245)
    y,x=np.nonzero(edge) #change coordination
    edge_list = np.array([[_x,_y] for _x,_y in zip(x,y)]) #list edge-points
    try:
        ellipse = cv2.fitEllipse(edge_list) # fit ellipse and return (x,y) as center,(2a,2b) as radius and angle
    except:
        # fitEllipse needs at least 5 points; fall back to a sentinel value.
        ellipse = [(0,0),(0,0),1000]
    if preview:
        fit=cv2.ellipse(ori, ellipse, (255,25,25),1)
        cv2.imshow("Image",fit)
        cv2.waitKey(10)
    return ellipse
def Identity(x):
    # NOTE(review): despite the name, this returns the LAST element of x,
    # not x itself — confirm intent with callers before relying on it.
    return x[-1]
class Speedometer():
    """Sliding-window speed estimator over a stream of 2-D positions.

    Feed one ``[x, y]`` position per frame to ``update``; once
    ``windowSize + 2`` positions are buffered, it returns the Euclidean
    distance between a smoothed "past" and "current" position (two frames
    apart), otherwise 0.
    """
    def __init__(self,windowSize=5,Filter = 'aver'):
        self.container = []           # rolling buffer of recent positions
        self.windowSize = windowSize
        self.filter = Filter
        # Raise instead of assert: asserts are stripped under `python -O`.
        if self.filter not in ['aver','median','none']:
            raise ValueError("Filter must be one of 'aver', 'median', 'none'")
        self.speed = []               # history of every speed measurement
    def update(self,x):
        """Append position ``x``; return the speed estimate (0 while warming up)."""
        self.container.append(x)
        if len(self.container) == self.windowSize+2:
            # BUG FIX: the original indexed the buffer with the module-level
            # global ``windowSize`` instead of ``self.windowSize``, so any
            # window size other than the global value was silently ignored.
            if self.filter == 'aver':
                pastCord = np.mean(self.container[0:self.windowSize],axis=0)
                curentCord = np.mean(self.container[2:],axis=0)
            elif self.filter == 'median':
                pastCord = np.median(self.container[0:self.windowSize],axis=0)
                curentCord = np.median(self.container[2:],axis=0)
            else:  # 'none': raw positions two frames apart, mid-window
                pastCord = self.container[self.windowSize//2+1]
                curentCord = self.container[self.windowSize//2+3]
            speed = ((pastCord[0]-curentCord[0])**2+(pastCord[1]-curentCord[1])**2)**0.5
            self.speed.append(speed)
            del(self.container[0])
            return speed
        else:
            return 0
    def aver(self):
        """Mean of all recorded speeds; 0 before the first measurement."""
        x = np.mean(self.speed)
        if np.isnan(x):
            return 0
        else:
            return x
######################
####Prepare images####
######################
if video2img:
    # Stage 1: for every video, show a mid-recording frame, let the user
    # click the arena reference points (mouse_img_cod), then extract and
    # perspective-warp all frames via ExtractAndWarp.
    if os.path.isdir(src):
        try:
            os.mkdir(tgt)
        except:
            pass
        try:
            os.mkdir(logDir)
        except:
            pass
        try:
            os.mkdir(cache)
        except:
            pass
    else:
        raise ValueError('No video folder detected!')
    vList = os.listdir(src)
    direction=[]
    for v in vList:
        cap = cv2.VideoCapture(os.path.join(src,v))
        fps = cap.get(cv2.CAP_PROP_FPS)
        startAt = startT * fps
        # Jump to the middle of the analysed span for a representative frame.
        midFrame = int(min(cropLen * fps,cap.get(cv2.CAP_PROP_FRAME_COUNT)-startAt)) // 2
        cap.set(cv2.CAP_PROP_POS_FRAMES,startAt+midFrame)
        _,img = cap.read()
        img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
        #img = padding(img)
        cv2.imshow("Image",img)
        cv2.setMouseCallback("Image", mouse_img_cod)
        k = cv2.waitKey(0)
        if k ==121: # press y to confirm the clicked reference points
            cv2.destroyAllWindows()
            cap.release()
            direction.append((os.path.join(src,v),np.float32(vector)))
            print((os.path.join(src,v),vector))
        # Reset the click buffer for the next video either way.
        vector = []
    print(len(direction))
    extractor = ExtractAndWarp(tgt,cache,startT,cropLen,expand=int(margin*imgSize*0.5),half_size=imgSize//2,EPM=isEPM,preview=False)
    for d in direction:
        extractor(d)
if img2binary:
    # Stage 2: compute (or load) a per-video background image, show it for
    # confirmation, then background-subtract every frame in parallel.
    try:
        os.mkdir(rmbg_tgt)
    except:
        pass
    dirList = os.listdir(tgt)
    for dirs in dirList:
        try:
            os.mkdir(os.path.join(rmbg_tgt,dirs))
        except:
            pass
        frameList = os.listdir(os.path.join(tgt,dirs))
        if useAverFrame:
            # Recompute the background as the median of pooled partial means.
            aver = frameAverage(frameList,os.path.join(tgt,dirs),multiThread)
            with Pool(multiThread) as p:
                averaged=np.array(p.map(aver,range(0,len(frameList),aver.windowSize)))
            averaged = np.median(averaged,axis=0)
        else:
            # Reuse the background cached by ExtractAndWarp.
            averaged = np.load(os.path.join(cache,dirs)+'.npy')
        _averaged = averaged.astype(np.uint8)
        print(dirs)
        cv2.imshow('img',_averaged)
        k = cv2.waitKey(0)
        if k == 121: #121 is y
            cv2.destroyAllWindows()
            rmer = rmBackground(frameList,dirs,tgt,rmbg_tgt,averaged,multiThread)
            with Pool(multiThread) as p:
                p.map(rmer,range(0,len(frameList),rmer.windowSize))
printer = logger(logDir)
if tracking:
    # Stage 3: locate the animal in every background-subtracted frame and
    # write one tab-separated log line per frame via `printer`.
    print('Tracking! Ready? Go!')
    if isEPM:
        vList = os.listdir(src)
        for v in vList:
            speedo = Speedometer(windowSize=windowSize,Filter=Filter)
            cap = cv2.VideoCapture(os.path.join(src,v))
            fps = cap.get(cv2.CAP_PROP_FPS)
            cap.release()
            localtime = time.asctime( time.localtime(time.time()) )
            v = v.split('.')[0]
            printer(localtime,v)
            printer('FPS = ' + str(fps),v)
            vector = []
            frameList = os.listdir(os.path.join(tgt,v))
            # Rebuild the averaged background so the user can click the
            # neutral (central) zone of the maze on it.
            aver = frameAverage(frameList,os.path.join(tgt,v),multiThread)
            with Pool(multiThread) as p:
                averaged=np.array(p.map(aver,range(0,len(frameList),aver.windowSize)))
            averaged = np.median(averaged,axis=0)
            _averaged = averaged.astype(np.uint8)
            cv2.imshow('img',_averaged)
            cv2.setMouseCallback("img", mouse_img_cod)
            k = cv2.waitKey(0)
            if k ==121: # press y
                cv2.destroyAllWindows()
            printer('NeutralZone is:',v)
            printer(vector,v)
            printer('Time\tFrame\tleft\tright\ttop\tdown\tcenter_x\tcenter_y\tisOpen_center\tisOpen_any\tOpenTimeRatio_center\tOpenTimeRatio_any\tCurrentSpeed\tAverageSpeed',v)
            # Bounding box of the clicked neutral zone.
            neutralL = np.min(np.array(vector)[:,0])
            neutralR = np.max(np.array(vector)[:,0])
            neutralT = np.min(np.array(vector)[:,1])
            neutralD = np.max(np.array(vector)[:,1])
            ioc = 0
            # NOTE(review): ioa starts at 1 while ioc starts at 0 — looks
            # asymmetric; confirm whether this offset is intentional.
            ioa = 1
            for i in range(len(frameList)):
                img = cv2.imread(os.path.join(rmbg_tgt,v,str(i+1)+'.jpg'),cv2.IMREAD_GRAYSCALE)
                ori = cv2.imread(os.path.join(tgt,v,str(i+1)+'.jpg'))
                left,right,top,down,(center_x,center_y) = trackingEPM(img,ori,preview=preview)
                # Convert pixel displacement per frame to real-world speed.
                speed = speedo.update([center_x,center_y])*fps*refLenth/(2*imgSize*(1-margin))
                averSpeed = speedo.aver()*fps*refLenth/(2*imgSize*(1-margin))
                if center_x <= neutralL or center_x >= neutralR:
                    isOpen_center = 1
                    ioc += 1
                else:
                    isOpen_center = 0
                if left <= neutralL or right >= neutralR:
                    isOpen_any = 1
                    ioa += 1
                else:
                    isOpen_any = 0
                printer('{:0>10.3f}\t{:0>6.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:.0f}\t{:.0f}\t{:.5f}\t{:.5f}\t{:0>7.3f}\t{:0>7.3f}'.format((i+1)/fps,i+1,left,right,top,down,center_x,center_y,isOpen_center,isOpen_any,ioc/(i+1),ioa/(i+1),speed,averSpeed),v)
    else:
        vList = os.listdir(src)
        for v in vList:
            speedo = Speedometer(windowSize=windowSize,Filter=Filter)
            cap = cv2.VideoCapture(os.path.join(src,v))
            fps = cap.get(cv2.CAP_PROP_FPS)
            cap.release()
            localtime = time.asctime( time.localtime(time.time()) )
            v = v.split('.')[0]
            printer(localtime,v)
            printer('FPS = ' + str(fps),v)
            printer('Time\tFrame\tcenter_x\tcenter_y\ta\tb\tangle\tcenter_distance\tisCenter\tCenterTimeRatio_center\tCurrentSpeed\tAverageSpeed',v)
            ic = 0
            frameList = os.listdir(os.path.join(tgt,v))
            for i in range(len(frameList)):
                img = cv2.imread(os.path.join(rmbg_tgt,v,str(i+1)+'.jpg'),cv2.IMREAD_GRAYSCALE)
                ori = cv2.imread(os.path.join(tgt,v,str(i+1)+'.jpg'))
                if useEllipse:
                    (center_x,center_y),(a,b),angle = trackingOFT(img,ori,preview=preview)
                else:
                    # Bounding-box fallback: reuse the EPM tracker.
                    left,right,top,down,(center_x,center_y)= trackingEPM(img,ori,preview=preview)
                    a = right-left
                    b = down-top
                    angle = 0
                speed = speedo.update([center_x,center_y])*fps*refLenth/(2*imgSize*(1-margin))
                averSpeed = speedo.aver()*fps*refLenth/(2*imgSize*(1-margin))
                # Distance of the animal from the arena centre, real-world units.
                dis_x = abs(center_x-imgSize//2)
                dis_y = abs(center_y-imgSize//2)
                distance = ((dis_x**2+dis_y**2)**0.5)*refLenth/(imgSize*(1-margin))
                if max(dis_x,dis_y) < imgSize*0.5*(1-margin)*centerCutOff:
                    isCenter = 1
                    ic += 1
                else:
                    isCenter = 0
                printer('{:0>10.3f}\t{:0>6.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>7.3f}\t{:0>7.3f}\t{:0>7.3f}\t{:0>7.3f}\t{:.0f}\t{:.5f}\t{:0>7.3f}\t{:0>7.3f}'.format((i+1)/fps,i+1,center_x,center_y,a,b,angle,distance,isCenter,ic/(i+1),speed,averSpeed),v)
|
[
"os.mkdir",
"cv2.medianBlur",
"numpy.argmax",
"cv2.getPerspectiveTransform",
"numpy.isnan",
"cv2.ellipse",
"numpy.mean",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"psutil.cpu_count",
"cv2.warpPerspective",
"cv2.cvtColor",
"cv2.fitEllipse",
"cv2.setMouseCallback",
"numpy.int32",
"cv2.destroyAllWindows",
"cv2.equalizeHist",
"cv2.Canny",
"numpy.save",
"numpy.ceil",
"cv2.waitKey",
"numpy.median",
"cv2.connectedComponentsWithStats",
"multiprocessing.Pool",
"os.listdir",
"os.path.isdir",
"cv2.threshold",
"numpy.float32",
"numpy.zeros",
"time.time",
"numpy.nonzero",
"cv2.VideoCapture",
"numpy.where",
"numpy.array",
"av.open"
] |
[((1349, 1372), 'psutil.cpu_count', 'psutil.cpu_count', (['(False)'], {}), '(False)\n', (1365, 1372), False, 'import psutil\n'), ((1793, 1839), 'numpy.zeros', 'np.zeros', ([], {'shape': '(w + h, w + h)', 'dtype': 'np.uint8'}), '(shape=(w + h, w + h), dtype=np.uint8)\n', (1801, 1839), True, 'import numpy as np\n'), ((7250, 7277), 'cv2.medianBlur', 'cv2.medianBlur', (['img', 'kernal'], {}), '(img, kernal)\n', (7264, 7277), False, 'import cv2\n'), ((7438, 7479), 'cv2.threshold', 'cv2.threshold', (['result_gray', 'thres', '(255)', '(0)'], {}), '(result_gray, thres, 255, 0)\n', (7451, 7479), False, 'import cv2\n'), ((7514, 7570), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['(255 - result_binary)', '(4)'], {}), '(255 - result_binary, 4)\n', (7546, 7570), False, 'import cv2\n'), ((7583, 7605), 'numpy.argmax', 'np.argmax', (['stats[:, 4]'], {}), '(stats[:, 4])\n', (7592, 7605), True, 'import numpy as np\n'), ((7645, 7667), 'numpy.argmax', 'np.argmax', (['stats[:, 4]'], {}), '(stats[:, 4])\n', (7654, 7667), True, 'import numpy as np\n'), ((8241, 8268), 'cv2.medianBlur', 'cv2.medianBlur', (['img', 'kernal'], {}), '(img, kernal)\n', (8255, 8268), False, 'import cv2\n'), ((8398, 8439), 'cv2.threshold', 'cv2.threshold', (['result_gray', 'thres', '(255)', '(0)'], {}), '(result_gray, thres, 255, 0)\n', (8411, 8439), False, 'import cv2\n'), ((8479, 8512), 'cv2.Canny', 'cv2.Canny', (['result_binary', '(10)', '(245)'], {}), '(result_binary, 10, 245)\n', (8488, 8512), False, 'import cv2\n'), ((8519, 8535), 'numpy.nonzero', 'np.nonzero', (['edge'], {}), '(edge)\n', (8529, 8535), True, 'import numpy as np\n'), ((10345, 10363), 'os.path.isdir', 'os.path.isdir', (['src'], {}), '(src)\n', (10358, 10363), False, 'import os\n'), ((10662, 10677), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (10672, 10677), False, 'import os\n'), ((11736, 11751), 'os.listdir', 'os.listdir', (['tgt'], {}), '(tgt)\n', (11746, 11751), False, 'import os\n'), ((2793, 2849), 
'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['self.refPoints', 'self.target'], {}), '(self.refPoints, self.target)\n', (2820, 2849), False, 'import cv2\n'), ((2893, 2961), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'self.M', '(2 * self.center, 2 * self.center)'], {}), '(img, self.M, (2 * self.center, 2 * self.center))\n', (2912, 2961), False, 'import cv2\n'), ((4034, 4060), 'cv2.VideoCapture', 'cv2.VideoCapture', (['fileAddr'], {}), '(fileAddr)\n', (4050, 4060), False, 'import cv2\n'), ((4318, 4335), 'av.open', 'av.open', (['fileAddr'], {}), '(fileAddr)\n', (4325, 4335), False, 'import av\n'), ((5269, 5291), 'numpy.save', 'np.save', (['cache', 'avgImg'], {}), '(cache, avgImg)\n', (5276, 5291), True, 'import numpy as np\n'), ((7875, 7939), 'cv2.rectangle', 'cv2.rectangle', (['ori', '(left, top)', '(right, down)', '(255, 25, 25)', '(1)'], {}), '(ori, (left, top), (right, down), (255, 25, 25), 1)\n', (7888, 7939), False, 'import cv2\n'), ((8017, 8041), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'fit'], {}), "('Image', fit)\n", (8027, 8041), False, 'import cv2\n'), ((8053, 8067), 'cv2.waitKey', 'cv2.waitKey', (['(2)'], {}), '(2)\n', (8064, 8067), False, 'import cv2\n'), ((8662, 8687), 'cv2.fitEllipse', 'cv2.fitEllipse', (['edge_list'], {}), '(edge_list)\n', (8676, 8687), False, 'import cv2\n'), ((8836, 8879), 'cv2.ellipse', 'cv2.ellipse', (['ori', 'ellipse', '(255, 25, 25)', '(1)'], {}), '(ori, ellipse, (255, 25, 25), 1)\n', (8847, 8879), False, 'import cv2\n'), ((8885, 8909), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'fit'], {}), "('Image', fit)\n", (8895, 8909), False, 'import cv2\n'), ((8917, 8932), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (8928, 8932), False, 'import cv2\n'), ((10154, 10173), 'numpy.mean', 'np.mean', (['self.speed'], {}), '(self.speed)\n', (10161, 10173), True, 'import numpy as np\n'), ((10185, 10196), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (10193, 10196), True, 'import numpy as np\n'), 
((11027, 11064), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (11039, 11064), False, 'import cv2\n'), ((11100, 11124), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (11110, 11124), False, 'import cv2\n'), ((11133, 11177), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Image"""', 'mouse_img_cod'], {}), "('Image', mouse_img_cod)\n", (11153, 11177), False, 'import cv2\n'), ((11191, 11205), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (11202, 11205), False, 'import cv2\n'), ((11678, 11696), 'os.mkdir', 'os.mkdir', (['rmbg_tgt'], {}), '(rmbg_tgt)\n', (11686, 11696), False, 'import os\n'), ((12363, 12391), 'cv2.imshow', 'cv2.imshow', (['"""img"""', '_averaged'], {}), "('img', _averaged)\n", (12373, 12391), False, 'import cv2\n'), ((12403, 12417), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (12414, 12417), False, 'import cv2\n'), ((12778, 12793), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (12788, 12793), False, 'import os\n'), ((15447, 15462), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (15457, 15462), False, 'import os\n'), ((2464, 2608), 'numpy.float32', 'np.float32', (['[[expand, self.center], [2 * self.center - expand, self.center], [self.\n center, expand], [self.center, 2 * self.center - expand]]'], {}), '([[expand, self.center], [2 * self.center - expand, self.center],\n [self.center, expand], [self.center, 2 * self.center - expand]])\n', (2474, 2608), True, 'import numpy as np\n'), ((2636, 2801), 'numpy.float32', 'np.float32', (['[[expand, expand], [2 * self.center - expand, expand], [2 * self.center -\n expand, 2 * self.center - expand], [expand, 2 * self.center - expand]]'], {}), '([[expand, expand], [2 * self.center - expand, expand], [2 * self\n .center - expand, 2 * self.center - expand], [expand, 2 * self.center -\n expand]])\n', (2646, 2801), True, 'import numpy as np\n'), ((3893, 3909), 'os.mkdir', 'os.mkdir', (['folder'], 
{}), '(folder)\n', (3901, 3909), False, 'import os\n'), ((6703, 6725), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(5)'], {}), '(img, 5)\n', (6717, 6725), False, 'import cv2\n'), ((6787, 6809), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(5)'], {}), '(img, 5)\n', (6801, 6809), False, 'import cv2\n'), ((7049, 7093), 'os.path.join', 'os.path.join', (['self.logDir', "(fileName + '.log')"], {}), "(self.logDir, fileName + '.log')\n", (7061, 7093), False, 'import os\n'), ((7971, 7987), 'numpy.int32', 'np.int32', (['center'], {}), '(center)\n', (7979, 7987), True, 'import numpy as np\n'), ((8100, 8114), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8111, 8114), False, 'import cv2\n'), ((10390, 10403), 'os.mkdir', 'os.mkdir', (['tgt'], {}), '(tgt)\n', (10398, 10403), False, 'import os\n'), ((10462, 10478), 'os.mkdir', 'os.mkdir', (['logDir'], {}), '(logDir)\n', (10470, 10478), False, 'import os\n'), ((10537, 10552), 'os.mkdir', 'os.mkdir', (['cache'], {}), '(cache)\n', (10545, 10552), False, 'import os\n'), ((10746, 10766), 'os.path.join', 'os.path.join', (['src', 'v'], {}), '(src, v)\n', (10758, 10766), False, 'import os\n'), ((11253, 11276), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (11274, 11276), False, 'import cv2\n'), ((11905, 11928), 'os.path.join', 'os.path.join', (['tgt', 'dirs'], {}), '(tgt, dirs)\n', (11917, 11928), False, 'import os\n'), ((12184, 12211), 'numpy.median', 'np.median', (['averaged'], {'axis': '(0)'}), '(averaged, axis=0)\n', (12193, 12211), True, 'import numpy as np\n'), ((12461, 12484), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12482, 12484), False, 'import cv2\n'), ((13496, 13523), 'numpy.median', 'np.median', (['averaged'], {'axis': '(0)'}), '(averaged, axis=0)\n', (13505, 13523), True, 'import numpy as np\n'), ((13585, 13613), 'cv2.imshow', 'cv2.imshow', (['"""img"""', '_averaged'], {}), "('img', _averaged)\n", (13595, 13613), False, 'import cv2\n'), ((13625, 13667), 
'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""img"""', 'mouse_img_cod'], {}), "('img', mouse_img_cod)\n", (13645, 13667), False, 'import cv2\n'), ((13685, 13699), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (13696, 13699), False, 'import cv2\n'), ((4416, 4433), 'numpy.ceil', 'np.ceil', (['(fps * 10)'], {}), '(fps * 10)\n', (4423, 4433), True, 'import numpy as np\n'), ((6612, 6642), 'numpy.where', 'np.where', (['(img < self.threshold)'], {}), '(img < self.threshold)\n', (6620, 6642), True, 'import numpy as np\n'), ((6747, 6768), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img'], {}), '(img)\n', (6763, 6768), False, 'import cv2\n'), ((6833, 6872), 'os.path.join', 'os.path.join', (['self.tgt', 'self.dirs', 'path'], {}), '(self.tgt, self.dirs, path)\n', (6845, 6872), False, 'import os\n'), ((9411, 9456), 'numpy.mean', 'np.mean', (['self.container[0:windowSize]'], {'axis': '(0)'}), '(self.container[0:windowSize], axis=0)\n', (9418, 9456), True, 'import numpy as np\n'), ((9485, 9520), 'numpy.mean', 'np.mean', (['self.container[2:]'], {'axis': '(0)'}), '(self.container[2:], axis=0)\n', (9492, 9520), True, 'import numpy as np\n'), ((11330, 11350), 'os.path.join', 'os.path.join', (['src', 'v'], {}), '(src, v)\n', (11342, 11350), False, 'import os\n'), ((11350, 11368), 'numpy.float32', 'np.float32', (['vector'], {}), '(vector)\n', (11360, 11368), True, 'import numpy as np\n'), ((11386, 11406), 'os.path.join', 'os.path.join', (['src', 'v'], {}), '(src, v)\n', (11398, 11406), False, 'import os\n'), ((11812, 11840), 'os.path.join', 'os.path.join', (['rmbg_tgt', 'dirs'], {}), '(rmbg_tgt, dirs)\n', (11824, 11840), False, 'import os\n'), ((11996, 12019), 'os.path.join', 'os.path.join', (['tgt', 'dirs'], {}), '(tgt, dirs)\n', (12008, 12019), False, 'import os\n'), ((12049, 12066), 'multiprocessing.Pool', 'Pool', (['multiThread'], {}), '(multiThread)\n', (12053, 12066), False, 'from multiprocessing import Pool\n'), ((12584, 12601), 'multiprocessing.Pool', 'Pool', 
(['multiThread'], {}), '(multiThread)\n', (12588, 12601), False, 'from multiprocessing import Pool\n'), ((12923, 12943), 'os.path.join', 'os.path.join', (['src', 'v'], {}), '(src, v)\n', (12935, 12943), False, 'import os\n'), ((13249, 13269), 'os.path.join', 'os.path.join', (['tgt', 'v'], {}), '(tgt, v)\n', (13261, 13269), False, 'import os\n'), ((13312, 13332), 'os.path.join', 'os.path.join', (['tgt', 'v'], {}), '(tgt, v)\n', (13324, 13332), False, 'import os\n'), ((13362, 13379), 'multiprocessing.Pool', 'Pool', (['multiThread'], {}), '(multiThread)\n', (13366, 13379), False, 'from multiprocessing import Pool\n'), ((13755, 13778), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (13776, 13778), False, 'import cv2\n'), ((15592, 15612), 'os.path.join', 'os.path.join', (['src', 'v'], {}), '(src, v)\n', (15604, 15612), False, 'import os\n'), ((16062, 16082), 'os.path.join', 'os.path.join', (['tgt', 'v'], {}), '(tgt, v)\n', (16074, 16082), False, 'import os\n'), ((4544, 4581), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (4556, 4581), False, 'import cv2\n'), ((4582, 4599), 'numpy.ceil', 'np.ceil', (['(fps * 10)'], {}), '(fps * 10)\n', (4589, 4599), True, 'import numpy as np\n'), ((4881, 4905), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (4891, 4905), False, 'import cv2\n'), ((4930, 4945), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4941, 4945), False, 'import cv2\n'), ((9589, 9636), 'numpy.median', 'np.median', (['self.container[0:windowSize]'], {'axis': '(0)'}), '(self.container[0:windowSize], axis=0)\n', (9598, 9636), True, 'import numpy as np\n'), ((9665, 9702), 'numpy.median', 'np.median', (['self.container[2:]'], {'axis': '(0)'}), '(self.container[2:], axis=0)\n', (9674, 9702), True, 'import numpy as np\n'), ((12256, 12281), 'os.path.join', 'os.path.join', (['cache', 'dirs'], {}), '(cache, dirs)\n', (12268, 12281), False, 'import os\n'), 
((13067, 13078), 'time.time', 'time.time', ([], {}), '()\n', (13076, 13078), False, 'import time\n'), ((14058, 14074), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (14066, 14074), True, 'import numpy as np\n'), ((14111, 14127), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (14119, 14127), True, 'import numpy as np\n'), ((14164, 14180), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (14172, 14180), True, 'import numpy as np\n'), ((14217, 14233), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (14225, 14233), True, 'import numpy as np\n'), ((15736, 15747), 'time.time', 'time.time', ([], {}), '()\n', (15745, 15747), False, 'import time\n'), ((5019, 5042), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5040, 5042), False, 'import cv2\n'), ((5714, 5743), 'os.path.join', 'os.path.join', (['self.dirs', 'path'], {}), '(self.dirs, path)\n', (5726, 5743), False, 'import os\n'), ((6477, 6516), 'os.path.join', 'os.path.join', (['self.src', 'self.dirs', 'path'], {}), '(self.src, self.dirs, path)\n', (6489, 6516), False, 'import os\n')]
|
#!/usr/bin/python3
# Copyright 2017-2018 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import glob
import numpy as np
import pandas as pd
import six
import pytablewriter
from multiprocessing import Pool
def get_lossless_average(path, reference_format):
    """Aggregate per-format lossless results and rank formats.

    Reads every ``<path>/<format>/lossless/*.out`` file (colon-separated
    CSV produced by rd_collect.py), computes size-weighted averages per
    format, derives a Weissman score relative to ``reference_format`` and
    writes a ``.lossless.out`` CSV plus a ``.lossless.md`` markdown table
    into ``path``.
    """
    merged_data = {}
    columns = [
        "format", "avg_bpp", "avg_compression_ratio", "avg_space_saving",
        "wavg_encode_time", "wavg_decode_time"
    ]
    final_data = pd.DataFrame(columns=columns)
    final_data.set_index("format", drop=False, inplace=True)
    # `fmt` (not `format`) so the str.format builtin is not shadowed.
    for fmt in next(os.walk(path))[1]:
        if not glob.glob(path + "/" + fmt + "/lossless/*.out"):
            print("Lossless results files could not be found for format {}.".
                  format(fmt))
            continue
        data_path = path + "/" + fmt + "/lossless/"
        rawdata = [pd.read_csv(f, sep=":")
                   for f in glob.glob(data_path + "/*.out")]
        merged_data[fmt] = pd.concat(rawdata)
        sum_orig_file_size = np.sum(merged_data[fmt]["orig_file_size"])
        sum_compressed_file_size = np.sum(
            merged_data[fmt]["compressed_file_size"])
        sum_pixels = np.sum(merged_data[fmt]["pixels"])
        # Ratios are computed from summed sizes (not means of per-file
        # ratios) so large images weigh proportionally more.
        avg_bpp = sum_compressed_file_size * 8 / sum_pixels
        avg_compression_ratio = sum_orig_file_size / sum_compressed_file_size
        avg_space_saving = 1 - (1 / avg_compression_ratio)
        # Timings are weighted by pixel count for the same reason.
        wavg_encode_time = np.average(
            merged_data[fmt]["encode_time"],
            weights=merged_data[fmt]["pixels"])
        wavg_decode_time = np.average(
            merged_data[fmt]["decode_time"],
            weights=merged_data[fmt]["pixels"])
        final_data.loc[fmt] = [
            fmt, avg_bpp, avg_compression_ratio, avg_space_saving,
            wavg_encode_time, wavg_decode_time
        ]
    # Weissman score: compression ratio divided by log(encode time),
    # normalised so that the reference format scores exactly 1.0.
    final_data = final_data.assign(
        weissman_score=lambda x: x.avg_compression_ratio /
        x.loc[reference_format, "avg_compression_ratio"] *
        np.log(x.loc[reference_format, "wavg_encode_time"] * 1000) /
        np.log(x.wavg_encode_time * 1000))
    final_data.sort_values("weissman_score", ascending=False, inplace=True)
    results_file = path + "/" + os.path.basename(path) + ".lossless.out"
    final_data.to_csv(results_file, sep=":")
    markdown_writer = pytablewriter.MarkdownTableWriter()
    markdown_writer.from_dataframe(final_data)
    markdown_writer.stream = six.StringIO()
    markdown_writer.write_table()
    # Context manager guarantees the handle is closed even if write fails.
    with open(path + "/" + os.path.basename(path) + ".lossless.md",
              "w") as file:
        file.write(markdown_writer.stream.getvalue())
    print(
        "Lossless results file successfully saved to {}.".format(results_file))
def get_lossy_average(args):
    """Aggregate one format's lossy results into per-quality averages.

    args - three-element sequence ``[path, fmt, reference_format]``:
           ``path`` is the subset results directory, ``fmt`` the format
           whose ``<path>/<fmt>/lossy/*.out`` files are aggregated.
           ``reference_format`` is accepted only for signature
           compatibility with the Pool.map call in main(); it is unused.

    Writes ``<path>/<basename>.<fmt>.lossy.out`` with one row per quality
    level: size-derived averages plus pixel-weighted quality metrics.
    """
    # `fmt` (not `format`) so the str.format builtin is not shadowed.
    [path, fmt, reference_format] = args
    if not glob.glob(path + "/" + fmt + "/lossy/*.out"):
        print("Lossy results files could not be found for format {}.".format(
            fmt))
        return
    rawdata = []
    merged_data = []
    final_columns = [
        "quality", "avg_bpp", "avg_compression_ratio", "avg_space_saving",
        "wavg_encode_time", "wavg_decode_time", "wavg_y_ssim_score",
        "wavg_rgb_ssim_score", "wavg_msssim_score", "wavg_psnrhvsm_score",
        "wavg_vmaf_score"
    ]
    final_data = pd.DataFrame(columns=final_columns)
    data_path = path + "/" + fmt + "/lossy/"
    for f in glob.glob(data_path + "*.out"):
        rawdata.append(pd.read_csv(f, sep=":"))
    quality_length = len(rawdata[0].index)
    for i in range(quality_length):
        # Row i of every per-file results file corresponds to the same
        # quality setting; gather those rows into one frame.
        # pd.concat replaces DataFrame.append (removed in pandas 2.0).
        merged_data.insert(i, pd.concat([data.iloc[[i]] for data in rawdata]))
        merged_data[i].sort_values("file_name", ascending=True, inplace=True)
        quality = np.mean(merged_data[i]["quality"])
        sum_orig_file_size = np.sum(merged_data[i]["orig_file_size"])
        sum_compressed_file_size = np.sum(
            merged_data[i]["compressed_file_size"])
        sum_pixels = np.sum(merged_data[i]["pixels"])
        # Ratios come from summed sizes so large images weigh more.
        avg_bpp = sum_compressed_file_size * 8 / sum_pixels
        avg_compression_ratio = sum_orig_file_size / sum_compressed_file_size
        avg_space_saving = 1 - (1 / avg_compression_ratio)
        # All remaining metrics are weighted by pixel count.
        wavg_encode_time = np.average(
            merged_data[i]["encode_time"], weights=merged_data[i]["pixels"])
        wavg_decode_time = np.average(
            merged_data[i]["decode_time"], weights=merged_data[i]["pixels"])
        wavg_y_ssim_score = np.average(
            merged_data[i]["y_ssim_score"], weights=merged_data[i]["pixels"])
        wavg_rgb_ssim_score = np.average(
            merged_data[i]["rgb_ssim_score"], weights=merged_data[i]["pixels"])
        wavg_msssim_score = np.average(
            merged_data[i]["msssim_score"], weights=merged_data[i]["pixels"])
        wavg_psnrhvsm_score = np.average(
            merged_data[i]["psnrhvsm_score"], weights=merged_data[i]["pixels"])
        wavg_vmaf_score = np.average(
            merged_data[i]["vmaf_score"], weights=merged_data[i]["pixels"])
        final_data.loc[i] = [
            quality, avg_bpp, avg_compression_ratio, avg_space_saving,
            wavg_encode_time, wavg_decode_time, wavg_y_ssim_score,
            wavg_rgb_ssim_score, wavg_msssim_score, wavg_psnrhvsm_score,
            wavg_vmaf_score
        ]
    results_file = path + "/" + os.path.basename(
        path) + "." + fmt + ".lossy.out"
    final_data.to_csv(results_file, sep=":", index=False)
    print("Lossy results file for format {} successfully saved to {}.".format(
        fmt, results_file))
def main(argv):
    """Command-line entry point: aggregate rd_collect.py results.

    argv - ``sys.argv``-style list. ``argv[1]`` is the path to a subset's
           results folder; optional ``argv[2]`` names the reference format
           used for the Weissman score (defaults to ``mozjpeg``).
    """
    # Compare version tuples directly; the previous check
    # (`[0] < 3 and [1] < 5`) accepted every 3.x interpreter, even 3.0-3.4.
    if sys.version_info < (3, 5):
        raise Exception("Python 3.5 or a more recent version is required.")
    if len(argv) < 2 or len(argv) > 3:
        print(
            "rd_average.py: Calculate a per format weighted averages of the results files generated by rd_collect.py"
        )
        print(
            "Arg 1: Path to the results of a subset generated by rd_collect.py")
        print("      For ex: rd_average.py \"results/subset1\"")
        print("Arg 2: Reference format with which to compare other formats.")
        print("      Default to mozjpeg")
        return
    results_folder = os.path.normpath(argv[1])
    # Fall back to an empty listing when the folder does not exist so the
    # check below prints a friendly message instead of `next()` raising
    # StopIteration on an exhausted os.walk generator.
    available_formats = next(os.walk(results_folder), (None, [], []))[1]
    # Check is there is actually results files in the path provided
    if (not os.path.isdir(results_folder) or not available_formats
            or not glob.glob(results_folder + "/**/*.out", recursive=True)):
        print(
            "Could not find all results file. Please make sure the path provided is correct."
        )
        return
    try:
        reference_format = argv[2]
    except IndexError:
        reference_format = "mozjpeg"
    if (reference_format not in available_formats or not glob.glob(
            results_folder + "/" + reference_format + "/lossless/*.out")
            or not glob.glob(results_folder + "/" + reference_format +
                              "/lossy/*.out")):
        print(
            "Could not find reference format results files. Please choose a format among {} or check if the reference format results files are present.".
            format(available_formats))
        return
    get_lossless_average(results_folder, reference_format)
    # Fan the per-format lossy aggregation out over a process pool.
    Pool().map(get_lossy_average,
               [(results_folder, fmt, reference_format)
                for fmt in next(os.walk(results_folder))[1]])
# Run the aggregation only when executed as a script (not on import).
if __name__ == "__main__":
    main(sys.argv)
|
[
"pandas.DataFrame",
"numpy.sum",
"numpy.average",
"numpy.log",
"os.path.basename",
"pandas.read_csv",
"os.path.isdir",
"os.walk",
"six.StringIO",
"multiprocessing.Pool",
"numpy.mean",
"os.path.normpath",
"pytablewriter.MarkdownTableWriter",
"glob.glob",
"pandas.concat"
] |
[((1914, 1943), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (1926, 1943), True, 'import pandas as pd\n'), ((3874, 3909), 'pytablewriter.MarkdownTableWriter', 'pytablewriter.MarkdownTableWriter', ([], {}), '()\n', (3907, 3909), False, 'import pytablewriter\n'), ((3986, 4000), 'six.StringIO', 'six.StringIO', ([], {}), '()\n', (3998, 4000), False, 'import six\n'), ((5042, 5077), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'final_columns'}), '(columns=final_columns)\n', (5054, 5077), True, 'import pandas as pd\n'), ((5141, 5171), 'glob.glob', 'glob.glob', (["(data_path + '*.out')"], {}), "(data_path + '*.out')\n", (5150, 5171), False, 'import glob\n'), ((8034, 8059), 'os.path.normpath', 'os.path.normpath', (['argv[1]'], {}), '(argv[1])\n', (8050, 8059), False, 'import os\n'), ((2343, 2374), 'glob.glob', 'glob.glob', (["(data_path + '/*.out')"], {}), "(data_path + '/*.out')\n", (2352, 2374), False, 'import glob\n'), ((2459, 2477), 'pandas.concat', 'pd.concat', (['rawdata'], {}), '(rawdata)\n', (2468, 2477), True, 'import pandas as pd\n'), ((2507, 2552), 'numpy.sum', 'np.sum', (["merged_data[format]['orig_file_size']"], {}), "(merged_data[format]['orig_file_size'])\n", (2513, 2552), True, 'import numpy as np\n'), ((2588, 2639), 'numpy.sum', 'np.sum', (["merged_data[format]['compressed_file_size']"], {}), "(merged_data[format]['compressed_file_size'])\n", (2594, 2639), True, 'import numpy as np\n'), ((2674, 2711), 'numpy.sum', 'np.sum', (["merged_data[format]['pixels']"], {}), "(merged_data[format]['pixels'])\n", (2680, 2711), True, 'import numpy as np\n'), ((2936, 3026), 'numpy.average', 'np.average', (["merged_data[format]['encode_time']"], {'weights': "merged_data[format]['pixels']"}), "(merged_data[format]['encode_time'], weights=merged_data[format][\n 'pixels'])\n", (2946, 3026), True, 'import numpy as np\n'), ((3074, 3164), 'numpy.average', 'np.average', (["merged_data[format]['decode_time']"], {'weights': 
"merged_data[format]['pixels']"}), "(merged_data[format]['decode_time'], weights=merged_data[format][\n 'pixels'])\n", (3084, 3164), True, 'import numpy as np\n'), ((4281, 4328), 'glob.glob', 'glob.glob', (["(path + '/' + format + '/lossy/*.out')"], {}), "(path + '/' + format + '/lossy/*.out')\n", (4290, 4328), False, 'import glob\n'), ((5558, 5592), 'numpy.mean', 'np.mean', (["merged_data[i]['quality']"], {}), "(merged_data[i]['quality'])\n", (5565, 5592), True, 'import numpy as np\n'), ((5622, 5662), 'numpy.sum', 'np.sum', (["merged_data[i]['orig_file_size']"], {}), "(merged_data[i]['orig_file_size'])\n", (5628, 5662), True, 'import numpy as np\n'), ((5698, 5744), 'numpy.sum', 'np.sum', (["merged_data[i]['compressed_file_size']"], {}), "(merged_data[i]['compressed_file_size'])\n", (5704, 5744), True, 'import numpy as np\n'), ((5779, 5811), 'numpy.sum', 'np.sum', (["merged_data[i]['pixels']"], {}), "(merged_data[i]['pixels'])\n", (5785, 5811), True, 'import numpy as np\n'), ((6036, 6111), 'numpy.average', 'np.average', (["merged_data[i]['encode_time']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['encode_time'], weights=merged_data[i]['pixels'])\n", (6046, 6111), True, 'import numpy as np\n'), ((6152, 6227), 'numpy.average', 'np.average', (["merged_data[i]['decode_time']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['decode_time'], weights=merged_data[i]['pixels'])\n", (6162, 6227), True, 'import numpy as np\n'), ((6269, 6345), 'numpy.average', 'np.average', (["merged_data[i]['y_ssim_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['y_ssim_score'], weights=merged_data[i]['pixels'])\n", (6279, 6345), True, 'import numpy as np\n'), ((6389, 6467), 'numpy.average', 'np.average', (["merged_data[i]['rgb_ssim_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['rgb_ssim_score'], weights=merged_data[i]['pixels'])\n", (6399, 6467), True, 'import numpy as np\n'), ((6509, 6585), 'numpy.average', 
'np.average', (["merged_data[i]['msssim_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['msssim_score'], weights=merged_data[i]['pixels'])\n", (6519, 6585), True, 'import numpy as np\n'), ((6629, 6707), 'numpy.average', 'np.average', (["merged_data[i]['psnrhvsm_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['psnrhvsm_score'], weights=merged_data[i]['pixels'])\n", (6639, 6707), True, 'import numpy as np\n'), ((6747, 6821), 'numpy.average', 'np.average', (["merged_data[i]['vmaf_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['vmaf_score'], weights=merged_data[i]['pixels'])\n", (6757, 6821), True, 'import numpy as np\n'), ((2029, 2042), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2036, 2042), False, 'import os\n'), ((2063, 2113), 'glob.glob', 'glob.glob', (["(path + '/' + format + '/lossless/*.out')"], {}), "(path + '/' + format + '/lossless/*.out')\n", (2072, 2113), False, 'import glob\n'), ((3689, 3711), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3705, 3711), False, 'import os\n'), ((5196, 5219), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""":"""'}), "(f, sep=':')\n", (5207, 5219), True, 'import pandas as pd\n'), ((5332, 5361), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (5344, 5361), True, 'import pandas as pd\n'), ((8089, 8112), 'os.walk', 'os.walk', (['results_folder'], {}), '(results_folder)\n', (8096, 8112), False, 'import os\n'), ((8198, 8227), 'os.path.isdir', 'os.path.isdir', (['results_folder'], {}), '(results_folder)\n', (8211, 8227), False, 'import os\n'), ((8272, 8327), 'glob.glob', 'glob.glob', (["(results_folder + '/**/*.out')"], {'recursive': '(True)'}), "(results_folder + '/**/*.out', recursive=True)\n", (8281, 8327), False, 'import glob\n'), ((8627, 8697), 'glob.glob', 'glob.glob', (["(results_folder + '/' + reference_format + '/lossless/*.out')"], {}), "(results_folder + '/' + reference_format + 
'/lossless/*.out')\n", (8636, 8697), False, 'import glob\n'), ((8730, 8797), 'glob.glob', 'glob.glob', (["(results_folder + '/' + reference_format + '/lossy/*.out')"], {}), "(results_folder + '/' + reference_format + '/lossy/*.out')\n", (8739, 8797), False, 'import glob\n'), ((9117, 9123), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (9121, 9123), False, 'from multiprocessing import Pool\n'), ((2403, 2426), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""":"""'}), "(f, sep=':')\n", (2414, 2426), True, 'import pandas as pd\n'), ((3806, 3828), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3822, 3828), False, 'import os\n'), ((3546, 3579), 'numpy.log', 'np.log', (['(x.wavg_encode_time * 1000)'], {}), '(x.wavg_encode_time * 1000)\n', (3552, 3579), True, 'import numpy as np\n'), ((7147, 7169), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (7163, 7169), False, 'import os\n'), ((3485, 3543), 'numpy.log', 'np.log', (["(x.loc[reference_format, 'wavg_encode_time'] * 1000)"], {}), "(x.loc[reference_format, 'wavg_encode_time'] * 1000)\n", (3491, 3543), True, 'import numpy as np\n'), ((9241, 9264), 'os.walk', 'os.walk', (['results_folder'], {}), '(results_folder)\n', (9248, 9264), False, 'import os\n')]
|
import sklearn
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn import tree
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import KFold
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.externals.six import StringIO
import pydot
# In[13]:
# Load the breast cancer dataset into a DataFrame with the target label
# appended as an extra column, then print the columns, a preview and the
# row count.
df = load_breast_cancer()
df = pd.DataFrame(np.c_[df['data'], df['target']],
                  columns= np.append(df['feature_names'], ['target']))
for col in df.columns:
    print(col)
print(df.head())
total_rows=len(df.axes[0])  # number of rows = length of the index axis
print(total_rows)
# Outlier detection and visualization
# In[3]:
histograms = df.hist()  # one histogram per feature column
df.hist("target")  # class balance of the target label
# In[2]:
# Reload as plain arrays and hold out 20% of the samples as a test set.
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size = .2)
# In[3]:
#PCA with scikit learn
# Standardise features using statistics from the training set only, then
# apply the same scaling to the test set.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fit full-dimensional PCA on the training data and project both sets with
# that single fit. (Previously `X_train_pca = pca = PCA().fit(X_train)`
# bound the fitted PCA *estimator* to X_train_pca instead of the projected
# data; safe to fix because X_train_pca is reassigned before first use.)
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
# In[4]:
plot = 1  # flag: any value other than 1 skips the plot below
# plot explained variance
if plot == 1:
    plt.figure()
    # Cumulative explained variance as a function of component count.
    plt.plot(np.cumsum(pca.explained_variance_ratio_))
    plt.xlabel('Number of Components')
    plt.ylabel('Variance (%)') #for each component
    plt.title('Breast Cancer data set Explained Variance')
    plt.savefig('foo.png')
    plt.show()
# In[5]:
print(np.cumsum(pca.explained_variance_ratio_))
# Selecting the amount of principle components
# In[6]:
# 10 features
# Reduce to the first 10 principal components. The projection is fitted on
# the training data only and then applied unchanged to the test data; the
# original code called fit_transform on X_test as well, which re-fitted PCA
# on the test set and put train and test into different (leaky) feature
# spaces, invalidating every downstream test score.
pca = PCA(n_components=10)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
# In[7]:
# baseline linear model
# Logistic regression on the scaled features vs. the PCA projection;
# .score() reports mean accuracy on the respective test set.
reg = LogisticRegression(random_state=0).fit(X_train, y_train)
prediction = reg.predict(X_test)
score = reg.score(X_test,y_test)
print(score)
reg_pca = LogisticRegression(random_state=0).fit(X_train_pca, y_train)
score_pca = reg_pca.score(X_test_pca,y_test)
print(score_pca)
# In[8]:
# Linear probability model: ordinary least squares fitted to the 0/1
# target; .score() here is R^2, not accuracy.
LPM = linear_model.LinearRegression()
LPM = LPM.fit(X_train, y_train)
LPM.coef_  # evaluated but discarded (notebook artifact)
predictionLPM = LPM.predict(X_test)
scoreLPM = LPM.score(X_test, y_test)
print(scoreLPM)
LPMpca = linear_model.LinearRegression()
LPMpca = LPMpca.fit(X_train_pca, y_train)
LPMpca.coef_  # evaluated but discarded (notebook artifact)
predictionLPM = LPMpca.predict(X_test_pca)
scoreLPMpca = LPMpca.score(X_test_pca, y_test)
print(scoreLPMpca)
# In[9]:
#baseline decicision tree
# Fit an unconstrained decision tree on the scaled features, export it via
# Graphviz (.dot and rendered PDF) and report test accuracy.
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
tree.export_graphviz(clf, out_file='tree.dot')
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontree.pdf")
predictionBaseline = clf.predict(X_test)
scoreclf = clf.score(X_test, y_test)  # mean accuracy on the test set
#print(classification_report(y_test,predictionBaseline,target_names=['malignant', 'benign']))
print(scoreclf)
#baseline decicision tree
# Same baseline tree, this time on the 10-component PCA projection.
clfPca = tree.DecisionTreeClassifier()
clfPca = clfPca.fit(X_train_pca, y_train)
tree.export_graphviz(clfPca, out_file='treepca.dot')
dot_data = StringIO()
tree.export_graphviz(clfPca, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontreepca.pdf")
predictionBaselinePca = clfPca.predict(X_test_pca)
scoreclf = clfPca.score(X_test_pca, y_test)  # reuses the name scoreclf
#print(classification_report(y_test,predictionBaselinePca,target_names=['malignant', 'benign']))
print(scoreclf)
# In[18]:
# KNN classifier on original data
knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')
knn.fit(X_train, y_train)
score = knn.score(X_test,y_test)
print(score)
# NOTE: the same estimator object is re-fitted here on the PCA projection;
# the earlier fit on the original features is discarded.
knn.fit(X_train_pca, y_train)
score_pca = knn.score(X_test_pca,y_test)
print(score_pca)
# In[14]:
# Decision tree with Gridsearch
clf = tree.DecisionTreeClassifier()
#create a dictionary of all values we want to test for max_depth
param_grid = {'max_depth': np.arange(1, 50)}
#use gridsearch (10-fold CV) to test all values for max_depth
clf_gscv = GridSearchCV(clf, param_grid, cv=10)
#fit model to data
clf_gscv.fit(X_train_pca, y_train)
#check top performing max_depth value
print(clf_gscv.best_params_)
#check mean score for the top performing value of max_depth
print(clf_gscv.best_score_)
# In[15]:
#KNN with PCA or without PCA and Gridsearch
knn2 = KNeighborsClassifier()
#create a dictionary of all values we want to test for n_neighbors
param_grid = {'n_neighbors': np.arange(1, 50)}
#use gridsearch (5-fold CV) to test all values for n_neighbors
knn_gscv = GridSearchCV(knn2, param_grid, cv=5)
#fit model to data
knn_gscv.fit(X_train_pca, y_train)
#check top performing n_neighbors value
print(knn_gscv.best_params_)
#check mean score for the top performing value of n_neighbors
print(knn_gscv.best_score_)
# In[32]:
## Plot results from gridsearches
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
    """Plot grid-search scores against one hyper-parameter.

    cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
    param_x - name of the grid search parameter to plot on the x axis
    metric - cv_results_ column plotted on the y axis
    """
    results_df = pd.DataFrame(cv_results)
    x_column = 'param_' + param_x
    fig, axis = plt.subplots(1, 1, figsize=(11, 8))
    sns.pointplot(x=x_column, y=metric, data=results_df, ci=95, ax=axis)
    axis.set_title("CV Grid Search Results")
    axis.set_xlabel(param_x)
    axis.set_ylabel(metric)
    return fig
# In[34]:
# Single function to make plot for each Gridsearch
fig = plot_cv_results(knn_gscv.cv_results_, 'n_neighbors')
# In[59]:
#10 fold cross validation with PCA applied
k_fold = KFold(10)
X_pca = pca.fit_transform(X)  # PCA re-fitted on the full data set here
classifiers = []
# Train one decision tree per fold and keep every fitted tree.
for k, (train, test) in enumerate(k_fold.split(X_pca, y)):
    clfk = tree.DecisionTreeClassifier()
    clfk = clfk.fit(X_pca[train], y[train])
    predictionBaseline = clfk.predict(X_pca[test])
    # NOTE(review): prints the tuple ('...%d fold', k) literally; the %d is
    # never substituted. Kept as-is (doc-only pass).
    print ("Classification report for %d fold", k)
    print(classification_report(y[test],predictionBaseline,target_names=['malignant', 'benign']))
    classifiers.append(clfk)
votes = []
# In[60]:
# Construct ensemble based on majority vote
# Each of the 10 fold-trained trees is re-fitted on the training split and
# votes on the test split.
for classifier in classifiers:
    classifier.fit(X_train_pca,y_train)
    votes.append(classifier.predict(X_test_pca))
ensembleVotes = np.zeros((len(y_test),1), dtype=int)
predictionEnsemble = np.zeros((len(y_test),1), dtype=int)
for prediction in votes:
    for idx in range(0,len(prediction)):
        ensembleVotes[idx]+= prediction[idx]
# NOTE(review): `prediction` below is the leftover loop variable from the
# loop above (the last vote vector); this works only because every vote
# vector has the same length as y_test.
# Threshold > 5 means a strict majority (6+) of the 10 trees predicts 1.
for idx in range(0,len(prediction)):
    if ensembleVotes[idx] > 5:
        predictionEnsemble[idx] = 1
print("ensemble")
print(classification_report(y_test,predictionEnsemble,target_names=['malignant', 'benign']))
# In[ ]:
## Regularization
# In[15]:
# Ridge regression
# Grid over the regularisation strength alpha in {0, 10, ..., 90}.
param_grid = {'alpha': np.arange(start=0, stop=100, step=10)}
regridge = linear_model.Ridge()
#use gridsearch to test all values for alpha
reg_gscv = GridSearchCV(regridge, param_grid, cv=10, return_train_score = True)
reg_gscv.fit(X_train_pca, y_train)
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
    """Plot grid-search scores against one hyper-parameter.

    cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
    param_x - name of the grid search parameter to plot on the x axis
    metric - cv_results_ column plotted on the y axis
    """
    frame = pd.DataFrame(cv_results)
    param_column = 'param_' + param_x
    fig, plot_axis = plt.subplots(1, 1, figsize=(11, 8))
    sns.pointplot(x=param_column, y=metric, data=frame, ci=95, ax=plot_axis)
    plot_axis.set_title("CV Grid Search Results")
    plot_axis.set_xlabel(param_x)
    plot_axis.set_ylabel(metric)
    return fig
fig = plot_cv_results(reg_gscv.cv_results_, 'alpha')
# In[19]:
# Logistic regression
# Grid over the inverse regularisation strength C (smaller C = stronger
# L2 penalty); fitted on the scaled (non-PCA) training data.
logitl2 = linear_model.LogisticRegression(penalty='l2', C = 1.0)
param_grid = {'C': np.arange(.1, .9, step = .1)}
reg_gscv = GridSearchCV(logitl2 , param_grid, cv=10, return_train_score = True)
reg_gscv.fit(X_train, y_train)
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
    """Plot grid-search scores against one hyper-parameter.

    cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
    param_x - name of the grid search parameter to plot on the x axis
    metric - cv_results_ column plotted on the y axis
    """
    table = pd.DataFrame(cv_results)
    x_name = 'param_' + param_x
    fig, graph = plt.subplots(1, 1, figsize=(11, 8))
    sns.pointplot(x=x_name, y=metric, data=table, ci=95, ax=graph)
    graph.set_title("CV Grid Search Results")
    graph.set_xlabel(param_x)
    graph.set_ylabel(metric)
    return fig
fig = plot_cv_results(reg_gscv.cv_results_, 'C')
print (reg_gscv.best_score_, reg_gscv.best_params_)
# In[17]:
## decision tree regularization
# Constrain tree depth (1..39) and pick the best by cross-validation;
# n_jobs=4 parallelises the grid search over four workers.
parameters = {'max_depth':range(1,40)}
clf = GridSearchCV(tree.DecisionTreeClassifier(), parameters, n_jobs=4)
clf.fit(X_train_pca, y_train)
tree_model = clf.best_estimator_  # the refitted best-depth tree
print (clf.best_score_, clf.best_params_)
|
[
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"sklearn.externals.six.StringIO",
"numpy.arange",
"pandas.DataFrame",
"seaborn.pointplot",
"numpy.cumsum",
"numpy.append",
"matplotlib.pyplot.subplots",
"sklearn.linear_model.Ridge",
"matplotlib.pyplot.show",
"sklearn.datasets.load_breast_cancer",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.KFold",
"sklearn.tree.export_graphviz",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((998, 1018), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (1016, 1018), False, 'from sklearn.datasets import load_breast_cancer\n'), ((1357, 1392), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (1375, 1392), False, 'from sklearn.datasets import load_breast_cancer\n'), ((1428, 1481), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(0)', 'test_size': '(0.2)'}), '(X, y, random_state=0, test_size=0.2)\n', (1444, 1481), False, 'from sklearn.model_selection import train_test_split\n'), ((1524, 1540), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1538, 1540), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2202, 2222), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(10)'}), '(n_components=10)\n', (2205, 2222), False, 'from sklearn.decomposition import PCA\n'), ((2635, 2666), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (2664, 2666), False, 'from sklearn import linear_model\n'), ((2808, 2839), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (2837, 2839), False, 'from sklearn import linear_model\n'), ((3049, 3078), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (3076, 3078), False, 'from sklearn import tree\n'), ((3111, 3157), 'sklearn.tree.export_graphviz', 'tree.export_graphviz', (['clf'], {'out_file': '"""tree.dot"""'}), "(clf, out_file='tree.dot')\n", (3131, 3157), False, 'from sklearn import tree\n'), ((3171, 3181), 'sklearn.externals.six.StringIO', 'StringIO', ([], {}), '()\n', (3179, 3181), False, 'from sklearn.externals.six import StringIO\n'), ((3183, 3227), 'sklearn.tree.export_graphviz', 'tree.export_graphviz', (['clf'], {'out_file': 'dot_data'}), '(clf, out_file=dot_data)\n', (3203, 3227), False, 'from sklearn import 
tree\n'), ((3550, 3579), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (3577, 3579), False, 'from sklearn import tree\n'), ((3622, 3674), 'sklearn.tree.export_graphviz', 'tree.export_graphviz', (['clfPca'], {'out_file': '"""treepca.dot"""'}), "(clfPca, out_file='treepca.dot')\n", (3642, 3674), False, 'from sklearn import tree\n'), ((3689, 3699), 'sklearn.externals.six.StringIO', 'StringIO', ([], {}), '()\n', (3697, 3699), False, 'from sklearn.externals.six import StringIO\n'), ((3701, 3748), 'sklearn.tree.export_graphviz', 'tree.export_graphviz', (['clfPca'], {'out_file': 'dot_data'}), '(clfPca, out_file=dot_data)\n', (3721, 3748), False, 'from sklearn import tree\n'), ((4112, 4167), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)', 'metric': '"""euclidean"""'}), "(n_neighbors=5, metric='euclidean')\n", (4132, 4167), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4381, 4410), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (4408, 4410), False, 'from sklearn import tree\n'), ((4587, 4623), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'param_grid'], {'cv': '(10)'}), '(clf, param_grid, cv=10)\n', (4599, 4623), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4905, 4927), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4925, 4927), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((5106, 5142), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['knn2', 'param_grid'], {'cv': '(5)'}), '(knn2, param_grid, cv=5)\n', (5118, 5142), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6194, 6203), 'sklearn.model_selection.KFold', 'KFold', (['(10)'], {}), '(10)\n', (6199, 6203), False, 'from sklearn.model_selection import KFold\n'), ((7390, 7410), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {}), '()\n', (7408, 7410), 
False, 'from sklearn import linear_model\n'), ((7474, 7540), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['regridge', 'param_grid'], {'cv': '(10)', 'return_train_score': '(True)'}), '(regridge, param_grid, cv=10, return_train_score=True)\n', (7486, 7540), False, 'from sklearn.model_selection import GridSearchCV\n'), ((8277, 8329), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'penalty': '"""l2"""', 'C': '(1.0)'}), "(penalty='l2', C=1.0)\n", (8308, 8329), False, 'from sklearn import linear_model\n'), ((8394, 8459), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['logitl2', 'param_grid'], {'cv': '(10)', 'return_train_score': '(True)'}), '(logitl2, param_grid, cv=10, return_train_score=True)\n', (8406, 8459), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1800, 1812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1810, 1812), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1906), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Components"""'], {}), "('Number of Components')\n", (1882, 1906), True, 'import matplotlib.pyplot as plt\n'), ((1911, 1937), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Variance (%)"""'], {}), "('Variance (%)')\n", (1921, 1937), True, 'import matplotlib.pyplot as plt\n'), ((1962, 2016), 'matplotlib.pyplot.title', 'plt.title', (['"""Breast Cancer data set Explained Variance"""'], {}), "('Breast Cancer data set Explained Variance')\n", (1971, 2016), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2043), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""foo.png"""'], {}), "('foo.png')\n", (2032, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2056, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2078, 2118), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (2087, 2118), True, 'import numpy as np\n'), 
((4506, 4522), 'numpy.arange', 'np.arange', (['(1)', '(50)'], {}), '(1, 50)\n', (4515, 4522), True, 'import numpy as np\n'), ((5025, 5041), 'numpy.arange', 'np.arange', (['(1)', '(50)'], {}), '(1, 50)\n', (5034, 5041), True, 'import numpy as np\n'), ((5715, 5739), 'pandas.DataFrame', 'pd.DataFrame', (['cv_results'], {}), '(cv_results)\n', (5727, 5739), True, 'import pandas as pd\n'), ((5785, 5820), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(11, 8)'}), '(1, 1, figsize=(11, 8))\n', (5797, 5820), True, 'import matplotlib.pyplot as plt\n'), ((5825, 5888), 'seaborn.pointplot', 'sns.pointplot', ([], {'x': 'col_x', 'y': 'metric', 'data': 'cv_results', 'ci': '(95)', 'ax': 'ax'}), '(x=col_x, y=metric, data=cv_results, ci=95, ax=ax)\n', (5838, 5888), True, 'import seaborn as sns\n'), ((6321, 6350), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (6348, 6350), False, 'from sklearn import tree\n'), ((7166, 7257), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictionEnsemble'], {'target_names': "['malignant', 'benign']"}), "(y_test, predictionEnsemble, target_names=['malignant',\n 'benign'])\n", (7187, 7257), False, 'from sklearn.metrics import classification_report\n'), ((7340, 7377), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(100)', 'step': '(10)'}), '(start=0, stop=100, step=10)\n', (7349, 7377), True, 'import numpy as np\n'), ((7888, 7912), 'pandas.DataFrame', 'pd.DataFrame', (['cv_results'], {}), '(cv_results)\n', (7900, 7912), True, 'import pandas as pd\n'), ((7958, 7993), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(11, 8)'}), '(1, 1, figsize=(11, 8))\n', (7970, 7993), True, 'import matplotlib.pyplot as plt\n'), ((7998, 8061), 'seaborn.pointplot', 'sns.pointplot', ([], {'x': 'col_x', 'y': 'metric', 'data': 'cv_results', 'ci': '(95)', 'ax': 'ax'}), '(x=col_x, y=metric, data=cv_results, ci=95, ax=ax)\n', (8011, 
8061), True, 'import seaborn as sns\n'), ((8352, 8381), 'numpy.arange', 'np.arange', (['(0.1)', '(0.9)'], {'step': '(0.1)'}), '(0.1, 0.9, step=0.1)\n', (8361, 8381), True, 'import numpy as np\n'), ((8805, 8829), 'pandas.DataFrame', 'pd.DataFrame', (['cv_results'], {}), '(cv_results)\n', (8817, 8829), True, 'import pandas as pd\n'), ((8875, 8910), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(11, 8)'}), '(1, 1, figsize=(11, 8))\n', (8887, 8910), True, 'import matplotlib.pyplot as plt\n'), ((8915, 8978), 'seaborn.pointplot', 'sns.pointplot', ([], {'x': 'col_x', 'y': 'metric', 'data': 'cv_results', 'ci': '(95)', 'ax': 'ax'}), '(x=col_x, y=metric, data=cv_results, ci=95, ax=ax)\n', (8928, 8978), True, 'import seaborn as sns\n'), ((9301, 9330), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (9328, 9330), False, 'from sklearn import tree\n'), ((1097, 1139), 'numpy.append', 'np.append', (["df['feature_names']", "['target']"], {}), "(df['feature_names'], ['target'])\n", (1106, 1139), True, 'import numpy as np\n'), ((1628, 1633), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (1631, 1633), False, 'from sklearn.decomposition import PCA\n'), ((1826, 1866), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (1835, 1866), True, 'import numpy as np\n'), ((2346, 2380), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2364, 2380), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2493, 2527), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2511, 2527), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6507, 6600), 'sklearn.metrics.classification_report', 'classification_report', (['y[test]', 'predictionBaseline'], {'target_names': "['malignant', 'benign']"}), "(y[test], 
predictionBaseline, target_names=[\n 'malignant', 'benign'])\n", (6528, 6600), False, 'from sklearn.metrics import classification_report\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
from ..utils.generic_utils import get_uid
class Layer():
"""Abstract base layer class."""
def __init__(self, **kwargs):
self._trainable_weights = []
self._non_trainable_weights = []
self._grads = {} # (name, delta)
self._updates = {}
prefix = self.__class__.__name__.lower()
self.name = prefix + '_' + str(get_uid(prefix))
self.trainable = kwargs.get('trainable', True)
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer."""
output_shape = input_shape
self.output_shape = output_shape
return output_shape
def build(self, input_shape):
output_shape = self.compute_output_shape(input_shape)
return output_shape
def add_weight(self, shape=(), name=None, dtype=None, initializer=None, regularizer=None, constraint=None, trainable=True):
"""
@param shape : (tuple) The shape of the weight.
@param dtype : (dtype) The dtype of the weight.
@param initializer: (string) An Initializer instance.
@param regularizer: (string) A Regularizer instance.
@param trainable : (bool) A boolean, whether the weight should be trained via backprop or not.
@return weight : (ndarray) The created weights variable.
"""
weight = initializer(shape=shape, dtype=dtype)
if trainable:
self._trainable_weights.append(name)
else:
self._non_trainable_weights.append(name)
self._updates[name] = np.expand_dims(weight, axis=0) # shape=(z,x,y)
self._grads[name] = np.zeros_like(weight) # shape=(x,y)
return weight
def update(self, optimizer, batch_size):
if self.trainable and len(self._non_trainable_weights)>0:
self._trainable_weights += self._non_trainable_weights
self._non_trainable_weights = []
elif self.trainable == False and len(self._trainable_weights)>0:
self._non_trainable_weights += self._trainable_weights
self._trainable_weights = []
for name in self._trainable_weights:
weight = self.__dict__.get(name)
regularizer = self.__dict__.get(f"{name}_regularizer")
grad = self._grads[name]/batch_size + regularizer.diff(weight)
new_weight = optimizer.get_updates(
grad=grad,
curt_param=weight,
name=f"{self.name}_{name}"
)
self.__dict__[name] = new_weight # Update.
# self._updates[name] = np.r_[self._updates[name], np.expand_dims(new_weight, axis=0)]
self._grads[name] = np.zeros_like(new_weight)
def get_weights(self):
return []
def set_weights(self, weights):
pass
@property
def weights(self):
return self.get_weights()
|
[
"numpy.zeros_like",
"numpy.expand_dims"
] |
[((1652, 1682), 'numpy.expand_dims', 'np.expand_dims', (['weight'], {'axis': '(0)'}), '(weight, axis=0)\n', (1666, 1682), True, 'import numpy as np\n'), ((1727, 1748), 'numpy.zeros_like', 'np.zeros_like', (['weight'], {}), '(weight)\n', (1740, 1748), True, 'import numpy as np\n'), ((2777, 2802), 'numpy.zeros_like', 'np.zeros_like', (['new_weight'], {}), '(new_weight)\n', (2790, 2802), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 22:27:03 2020
@author: <NAME>
"""
import math
import tqdm
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import utils
from net import DCRNNModel
# import sys
# sys.path.append("./xlwang_version")
# from dcrnn_model import DCRNNModel
"""
Hyperparameters
"""
batch_size = 64
enc_input_dim = 2
dec_input_dim = 1
hidden_dim = 64
output_dim = 1
diffusion_steps = 2
num_nodes = 207
rnn_layers = 2
seq_length = 12
horizon = 12
cl_decay_steps = 2000 # decrease teaching force ratio in global steps
filter_type = "dual_random_walk"
epochs = 100
lr = 0.01
weight_decay = 0.0
epsilon = 1.0e-3
amsgard = True
lr_decay_ratio = 0.1
lr_decay_steps = [20, 30, 40, 50]
max_grad_norm = 5
checkpoints = './checkpoints/dcrnn.pt'
sensor_ids = './data/METR-LA/graph_sensor_ids.txt'
sensor_distance = './data/METR-LA/distances_la_2012.csv'
recording='data/processed/METR-LA'
"""
Dataset
"""
# read sensor IDs
with open(sensor_ids) as f:
sensor_ids = f.read().strip().split(',')
# read sensor distance
distance_df = pd.read_csv(sensor_distance, dtype={'from': 'str', 'to': 'str'})
# build adj matrix based on equation (10)
adj_mx = utils.get_adjacency_matrix(distance_df, sensor_ids)
data = utils.load_dataset(dataset_dir=recording, batch_size=batch_size, test_batch_size=batch_size)
train_data_loader = data['train_loader']
val_data_loader = data['val_loader']
test_data_loader = data['test_loader']
standard_scaler = data['scaler']
"""
Init model
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DCRNNModel(adj_mx,
diffusion_steps,
num_nodes,
batch_size,
enc_input_dim,
dec_input_dim,
hidden_dim,
output_dim,
rnn_layers,
filter_type).to(device)
# model = DCRNNModel(adj_mx,
# batch_size,
# enc_input_dim,
# dec_input_dim,
# diffusion_steps,
# num_nodes,
# rnn_layers,
# hidden_dim,
# horizon,
# output_dim,
# filter_type).to(device)
optimizer = torch.optim.Adam(model.parameters(),
lr=lr, eps=epsilon,
weight_decay=weight_decay,
amsgard=amsgard)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=lr_decay_steps,
gamma=lr_decay_ratio)
"""
DCRNN Training
"""
def compute_mae_loss(y_true, y_predicted, standard_scaler):
y_true = standard_scaler.inverse_transform(y_true)
y_predicted = standard_scaler.inverse_transform(y_predicted)
return utils.masked_mae_loss(y_predicted, y_true, null_val=0.0)
def eval_metrics(y_true_np, y_predicted_np, standard_scaler):
metrics = np.zeros(3)
y_true_np = standard_scaler.inverse_transform(y_true_np)
y_predicted_np = standard_scaler.inverse_transform(y_predicted_np)
mae = utils.masked_mae_np(y_predicted_np, y_true_np, null_val=0.0)
mape = utils.masked_mape_np(y_predicted_np, y_true_np, null_val=0.0)
rmse = utils.masked_rmse_np(y_predicted_np, y_true_np, null_val=0.0)
metrics[0] += mae
metrics[1] += mape
metrics[2] += rmse
return metrics
# some pre-calculated properties
num_train_iteration_per_epoch = math.ceil(data['x_train'].shape[0] / batch_size)
num_val_iteration_per_epoch = math.ceil(data['x_val'].shape[0] / batch_size)
num_test_iteration_per_epoch = math.ceil(data['x_test'].shape[0] / batch_size)
# start training
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Total number of trainable parameters:", params)
print("Initialization complete. Start training... ==>", epochs, "epochs with", num_train_iteration_per_epoch, "batches per epoch.")
for epoch in range(1, epochs + 1):
model.train()
train_iterator = train_data_loader.get_iterator()
val_iterator = val_data_loader.get_iterator()
total_loss = 0.0
total_metrics = np.zeros(3) # Three matrics: MAE, MAPE, RMSE
total_val_metrics = np.zeros(3)
for batch_idx, (x, y) in enumerate(tqdm.tqdm(train_iterator)):
x = torch.FloatTensor(x)
y = torch.FloatTensor(y)
y_true = y[..., :output_dim] # delete time encoding to form as label
# x:[batch, seq_len, nodes, enc_input_dim]
# y:[batch, horizon, nodes, output_dim + 1]
x, y = x.to(device), y.to(device)
optimizer.zero_grad()
# compute teaching force ratio: decrease this gradually to 0
global_steps = (epoch - 1) * num_train_iteration_per_epoch + batch_idx
teaching_force_ratio = cl_decay_steps / (cl_decay_steps + math.exp(global_steps / cl_decay_steps))
# feedforward
y_hat = model(x, y, teaching_force_ratio) # [horizon, batch, nodes*output_dim]
y_hat = torch.transpose(torch.reshape(y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
# back propagation
loss = compute_mae_loss(y_true, y_hat.cpu(), standard_scaler)
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
# training statistics
total_loss += loss.item()
t_metrics = eval_metrics(y_true.numpy(), y_hat.detach().cpu().numpy(), standard_scaler)
total_metrics += t_metrics
# print('Batch_idx {:03d} | TF {:.4f} | Train MAE {:.5f} | Train MAPE {:.5f} | Train RMSE {:.5f}'.format(
# batch_idx, teaching_force_ratio, loss.item(), t_metrics[1], t_metrics[2]))
# validation after each epoch
model.eval()
with torch.no_grad():
for _, (val_x, val_y) in enumerate(tqdm.tqdm(val_iterator)):
val_x = torch.FloatTensor(val_x)
val_y = torch.FloatTensor(val_y)
val_y_true = val_y[..., :output_dim] # delete time encoding to form as label
# val_x:[batch, seq_len, nodes, enc_input_dim]
# val_y:[batch, horizon, nodes, output_dim + 1]
val_x, val_y = val_x.to(device), val_y.to(device)
val_y_hat = model(val_x, val_y, 0)
val_y_hat = torch.transpose(torch.reshape(val_y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
total_val_metrics += eval_metrics(val_y_true.numpy(), val_y_hat.detach().cpu().numpy(), standard_scaler)
# learning rate scheduling
lr_scheduler.step()
# GPU mem usage
gpu_mem_alloc = torch.cuda.max_memory_allocated() / 1000000 if torch.cuda.is_available() else 0
# save model every epoch
torch.save(model.state_dict(), checkpoints)
# logging
val_metrics = (total_val_metrics / num_val_iteration_per_epoch).tolist()
print('Epoch {:03d} | lr {:.6f} |Train loss {:.5f} | Val MAE {:.5f} | Val MAPE {:.5f} | Val RMSE {:.5f}| GPU {:.1f} MiB'.format(
epoch, optimizer.param_groups[0]['lr'], total_loss / num_train_iteration_per_epoch, val_metrics[0], val_metrics[1], val_metrics[2], gpu_mem_alloc))
print("Training complete.")
"""
DCRNN Testing
"""
print("\nmodel testing...")
test_iterator = test_data_loader.get_iterator()
total_test_metrics = np.zeros(3)
model.eval()
with torch.no_grad():
for _, (test_x, test_y) in enumerate(tqdm.tqdm(test_iterator)):
test_x = torch.FloatTensor(test_x)
test_y = torch.FloatTensor(test_y)
test_y_true = test_y[..., :output_dim] # delete time encoding to form as label
# test_x:[batch, seq_len, nodes, enc_input_dim]
# test_y:[batch, horizon, nodes, output_dim + 1]
test_x, test_y = test_x.to(device), test_y.to(device)
test_y_hat = model(test_x, test_y, 0)
test_y_hat = torch.transpose(torch.reshape(test_y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
total_test_metrics += eval_metrics(test_y_true.numpy(), test_y_hat.detach().cpu().numpy(), standard_scaler)
test_metrics = (total_test_metrics / num_test_iteration_per_epoch).tolist()
print('Test MAE {:.5f} | Test MAPE {:.5f} | Test RMSE {:.5f}'.format(test_metrics[0], test_metrics[1], test_metrics[2]))
|
[
"tqdm.tqdm",
"math.exp",
"net.DCRNNModel",
"math.ceil",
"pandas.read_csv",
"utils.get_adjacency_matrix",
"utils.masked_mape_np",
"torch.cuda.max_memory_allocated",
"numpy.zeros",
"torch.FloatTensor",
"utils.load_dataset",
"utils.masked_mae_loss",
"utils.masked_mae_np",
"torch.cuda.is_available",
"torch.reshape",
"torch.no_grad",
"utils.masked_rmse_np",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((1085, 1149), 'pandas.read_csv', 'pd.read_csv', (['sensor_distance'], {'dtype': "{'from': 'str', 'to': 'str'}"}), "(sensor_distance, dtype={'from': 'str', 'to': 'str'})\n", (1096, 1149), True, 'import pandas as pd\n'), ((1202, 1253), 'utils.get_adjacency_matrix', 'utils.get_adjacency_matrix', (['distance_df', 'sensor_ids'], {}), '(distance_df, sensor_ids)\n', (1228, 1253), False, 'import utils\n'), ((1262, 1358), 'utils.load_dataset', 'utils.load_dataset', ([], {'dataset_dir': 'recording', 'batch_size': 'batch_size', 'test_batch_size': 'batch_size'}), '(dataset_dir=recording, batch_size=batch_size,\n test_batch_size=batch_size)\n', (1280, 1358), False, 'import utils\n'), ((2557, 2657), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'lr_decay_steps', 'gamma': 'lr_decay_ratio'}), '(optimizer, milestones=lr_decay_steps,\n gamma=lr_decay_ratio)\n', (2593, 2657), False, 'import torch\n'), ((3627, 3675), 'math.ceil', 'math.ceil', (["(data['x_train'].shape[0] / batch_size)"], {}), "(data['x_train'].shape[0] / batch_size)\n", (3636, 3675), False, 'import math\n'), ((3706, 3752), 'math.ceil', 'math.ceil', (["(data['x_val'].shape[0] / batch_size)"], {}), "(data['x_val'].shape[0] / batch_size)\n", (3715, 3752), False, 'import math\n'), ((3784, 3831), 'math.ceil', 'math.ceil', (["(data['x_test'].shape[0] / batch_size)"], {}), "(data['x_test'].shape[0] / batch_size)\n", (3793, 3831), False, 'import math\n'), ((7715, 7726), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7723, 7726), True, 'import numpy as np\n'), ((2975, 3031), 'utils.masked_mae_loss', 'utils.masked_mae_loss', (['y_predicted', 'y_true'], {'null_val': '(0.0)'}), '(y_predicted, y_true, null_val=0.0)\n', (2996, 3031), False, 'import utils\n'), ((3109, 3120), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3117, 3120), True, 'import numpy as np\n'), ((3263, 3323), 'utils.masked_mae_np', 'utils.masked_mae_np', (['y_predicted_np', 'y_true_np'], 
{'null_val': '(0.0)'}), '(y_predicted_np, y_true_np, null_val=0.0)\n', (3282, 3323), False, 'import utils\n'), ((3335, 3396), 'utils.masked_mape_np', 'utils.masked_mape_np', (['y_predicted_np', 'y_true_np'], {'null_val': '(0.0)'}), '(y_predicted_np, y_true_np, null_val=0.0)\n', (3355, 3396), False, 'import utils\n'), ((3408, 3469), 'utils.masked_rmse_np', 'utils.masked_rmse_np', (['y_predicted_np', 'y_true_np'], {'null_val': '(0.0)'}), '(y_predicted_np, y_true_np, null_val=0.0)\n', (3428, 3469), False, 'import utils\n'), ((4379, 4390), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4387, 4390), True, 'import numpy as np\n'), ((4449, 4460), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4457, 4460), True, 'import numpy as np\n'), ((7745, 7760), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7758, 7760), False, 'import torch\n'), ((1558, 1583), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1581, 1583), False, 'import torch\n'), ((1605, 1746), 'net.DCRNNModel', 'DCRNNModel', (['adj_mx', 'diffusion_steps', 'num_nodes', 'batch_size', 'enc_input_dim', 'dec_input_dim', 'hidden_dim', 'output_dim', 'rnn_layers', 'filter_type'], {}), '(adj_mx, diffusion_steps, num_nodes, batch_size, enc_input_dim,\n dec_input_dim, hidden_dim, output_dim, rnn_layers, filter_type)\n', (1615, 1746), False, 'from net import DCRNNModel\n'), ((4505, 4530), 'tqdm.tqdm', 'tqdm.tqdm', (['train_iterator'], {}), '(train_iterator)\n', (4514, 4530), False, 'import tqdm\n'), ((4554, 4574), 'torch.FloatTensor', 'torch.FloatTensor', (['x'], {}), '(x)\n', (4571, 4574), False, 'import torch\n'), ((4587, 4607), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (4604, 4607), False, 'import torch\n'), ((6135, 6150), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6148, 6150), False, 'import torch\n'), ((7064, 7089), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7087, 7089), False, 'import torch\n'), ((7803, 7827), 
'tqdm.tqdm', 'tqdm.tqdm', (['test_iterator'], {}), '(test_iterator)\n', (7812, 7827), False, 'import tqdm\n'), ((7847, 7872), 'torch.FloatTensor', 'torch.FloatTensor', (['test_x'], {}), '(test_x)\n', (7864, 7872), False, 'import torch\n'), ((7890, 7915), 'torch.FloatTensor', 'torch.FloatTensor', (['test_y'], {}), '(test_y)\n', (7907, 7915), False, 'import torch\n'), ((5286, 5352), 'torch.reshape', 'torch.reshape', (['y_hat', '(horizon, batch_size, num_nodes, output_dim)'], {}), '(y_hat, (horizon, batch_size, num_nodes, output_dim))\n', (5299, 5352), False, 'import torch\n'), ((6195, 6218), 'tqdm.tqdm', 'tqdm.tqdm', (['val_iterator'], {}), '(val_iterator)\n', (6204, 6218), False, 'import tqdm\n'), ((6241, 6265), 'torch.FloatTensor', 'torch.FloatTensor', (['val_x'], {}), '(val_x)\n', (6258, 6265), False, 'import torch\n'), ((6286, 6310), 'torch.FloatTensor', 'torch.FloatTensor', (['val_y'], {}), '(val_y)\n', (6303, 6310), False, 'import torch\n'), ((7017, 7050), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (7048, 7050), False, 'import torch\n'), ((8263, 8334), 'torch.reshape', 'torch.reshape', (['test_y_hat', '(horizon, batch_size, num_nodes, output_dim)'], {}), '(test_y_hat, (horizon, batch_size, num_nodes, output_dim))\n', (8276, 8334), False, 'import torch\n'), ((5094, 5133), 'math.exp', 'math.exp', (['(global_steps / cl_decay_steps)'], {}), '(global_steps / cl_decay_steps)\n', (5102, 5133), False, 'import math\n'), ((6670, 6740), 'torch.reshape', 'torch.reshape', (['val_y_hat', '(horizon, batch_size, num_nodes, output_dim)'], {}), '(val_y_hat, (horizon, batch_size, num_nodes, output_dim))\n', (6683, 6740), False, 'import torch\n')]
|
import numpy as np
from rdkit.DataStructs.cDataStructs import ExplicitBitVect, SparseBitVect
from scipy.sparse import issparse, csr_matrix
from collections import defaultdict
from rdkit import DataStructs
from luna.util.exceptions import (BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError)
from luna.version import __version__
import logging
logger = logging.getLogger()
DEFAULT_FP_LENGTH = 2**32
DEFAULT_FOLDED_FP_LENGTH = 4096
DEFAULT_FP_DTYPE = np.int32
class Fingerprint:
"""A fingerprint that stores indices of "on" bits.
Parameters
----------
indices : array_like of int
Indices of "on" bits.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
unfolded_fp : `Fingerprint` or None
The unfolded version of this fingerprint.
If None, this fingerprint may have not been folded yet.
unfolding_map : dict, optional
A mapping between current indices and indices from the unfolded version of this fingerprint
what makes it possible to trace folded bits back to the original shells (features).
props: dict, optional
Custom properties of the fingerprint, consisting of a string keyword and
some value. It can be used, for instance, to save the ligand name
and parameters used to generate shells (IFP features).
"""
def __init__(self, indices, fp_length=DEFAULT_FP_LENGTH, unfolded_fp=None, unfolding_map=None, props=None):
indices = np.asarray(indices, dtype=np.long)
if np.any(np.logical_or(indices < 0, indices >= fp_length)):
logger.exception("Provided indices are in a different bit scale.")
raise BitsValueError("Provided indices are in a different bit scale.")
self._indices = np.unique(indices)
self._fp_length = fp_length
self._unfolded_fp = unfolded_fp
self._unfolding_map = unfolding_map or {}
self._props = props or {}
self.version = __version__
@classmethod
def from_indices(cls, indices, fp_length=DEFAULT_FP_LENGTH, **kwargs):
"""Initialize from an array of indices.
Parameters
----------
indices : array_like of int
Indices of "on" bits.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices = np.random.randint(0, fp_length, on_bits)
>>> print(indices)
[12 15 21 0 3 27 3 7]
>>> fp = Fingerprint.from_indices(indices, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
"""
return cls(indices, fp_length, **kwargs)
@classmethod
def from_vector(cls, vector, fp_length=None, **kwargs):
"""Initialize from a vector.
Parameters
----------
vector : :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Array of bits.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the ``vector`` shape.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> fp_length = 32
>>> vector = np.random.choice([0, 1], size=(fp_length,), p=[0.8, 0.2])
>>> print(vector)
[0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 1 0 1 1 0 0 0 0 0 0 1 0 0 0 0]
>>> fp = Fingerprint.from_vector(vector)
>>> print(fp.indices)
[ 7 8 13 17 19 20 27]
>>> print(fp.fp_length)
32
"""
if fp_length is None:
try:
fp_length = vector.shape[1]
except IndexError:
fp_length = vector.shape[0]
if issparse(vector):
indices = vector.indices.astype(np.long)
else:
indices = np.asarray(np.where(vector), dtype=np.long).flatten()
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_bit_string(cls, bit_string, fp_length=None, **kwargs):
"""Initialize from a bit string (e.g. '0010100110').
Parameters
----------
bit_string : str
String of 0s and 1s.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the string length.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> fp = Fingerprint.from_bit_string("0010100110000010")
>>> print(fp.indices)
[ 2 4 7 8 14]
>>> print(fp.fp_length)
16
"""
indices = [i for i, char in enumerate(bit_string) if char != '0']
if fp_length is None:
fp_length = len(bit_string)
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_rdkit(cls, rdkit_fp, **kwargs):
"""Initialize from an RDKit fingerprint.
Parameters
----------
rdkit_fp : :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
An existing RDKit fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
"""
if not (isinstance(rdkit_fp, ExplicitBitVect) or isinstance(rdkit_fp, SparseBitVect)):
logger.exception("Invalid fingerprint type. RDKit only accepts a SparseBitVect or ExplicitBitVect object.")
raise TypeError("Invalid fingerprint type. RDKit only accepts a SparseBitVect or ExplicitBitVect object.")
fp_length = rdkit_fp.GetNumBits()
indices = np.asarray(rdkit_fp.GetOnBits(), dtype=np.long)
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_fingerprint(cls, fp, **kwargs):
"""Initialize from an existing fingerprint.
Parameters
----------
fp : `Fingerprint`
An existing fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
"""
if not isinstance(fp, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (cls.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (cls.__class__))
unfolded_fp = fp.__class__.from_fingerprint(fp.unfolded_fp) if fp.unfolded_fp is not None else None
unfolding_map = dict(fp.unfolding_map)
props = dict(fp.props)
return cls.from_indices(fp.indices, fp.fp_length, unfolded_fp=unfolded_fp, unfolding_map=unfolding_map, props=props)
@property
def indices(self):
"""array_like of int, read-only: Indices of "on" bits."""
return self._indices
@property
def bit_count(self):
"""int, read-only: Number of "on" bits."""
return self.indices.shape[0]
@property
def density(self):
"""float, read-only: Proportion of "on" bits in fingerprint."""
return self.bit_count / self.fp_length
@property
def counts(self):
"""dict, read-only: Mapping between each index in ``indices`` to the number of counts, which is always 1 for bit fingerprints."""
return dict([(k, 1) for k in self.indices])
@property
def fp_length(self):
"""int, read-only: The fingerprint length (total number of bits)."""
return self._fp_length
@property
def unfolded_fp(self):
"""`Fingerprint` or None, read-only: The unfolded version of this fingerprint. If None, this fingerprint may have not been folded yet."""
if self._unfolded_fp is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self._unfolded_fp
@property
def unfolded_indices(self):
"""array_like of int, read-only: Indices of "on" bits in the unfolded fingerprint."""
if self._unfolding_map is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self.unfolded_fp.indices
@property
def unfolding_map(self):
"""dict, read-only: The mapping between current indices and indices from the unfolded version of this fingerprint
what makes it possible to trace folded bits back to the original shells (features)."""
if self._unfolding_map is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self._unfolding_map
@property
def props(self):
"""dict, read-only: The custom properties of the fingerprint."""
return self._props
@property
def name(self):
"""str: The property 'name'. If it was not provided, then return an empty string."""
return self.props.get("name", "")
@name.setter
def name(self, name):
self.props["name"] = str(name)
@property
def num_levels(self):
"""int: The property 'num_levels' used to generate this fingerprint \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("num_levels", None)
@num_levels.setter
def num_levels(self, num_levels):
self.props["num_levels"] = str(num_levels)
@property
def radius_step(self):
"""float: The property 'radius_step' used to generate this fingerprint \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("radius_step", None)
@radius_step.setter
def radius_step(self, radius_step):
self.props["radius_step"] = str(radius_step)
@property
def num_shells(self):
"""int: The property 'num_shells' \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("num_shells", None)
@num_shells.setter
def num_shells(self, num_shells):
self.props["num_shells"] = str(num_shells)
def get_prop(self, key):
"""Get value of the property ``key``. If not set, raise KeyError."""
try:
return self.props[key]
except KeyError:
logger.warning("Key '%s' does not exist." % key)
return None
def set_prop(self, key, value):
"""Set value to the property ``key``."""
self.props[key] = value
def get_num_bits(self):
"""Get the fingerprint length (total number of bits)."""
return self.fp_length
def get_num_on_bits(self):
"""Get the number of "on" bits."""
return self.bit_count
def get_num_off_bits(self):
"""Get the number of "off" bits."""
return self.get_num_bits() - self.get_num_on_bits()
def get_bit(self, index):
"""Get the bit/count value at index ``index``.
Raises
------
BitsValueError
If the provided index is in a different bit scale.
"""
if index in self.counts:
return self.counts[index]
elif index >= 0 and index < self.fp_length:
return 0
else:
logger.exception("The provided index is in a different bit scale.")
raise BitsValueError("The provided index is in a different bit scale.")
def get_on_bits(self):
"""Get "on" bits.
Returns
-------
: :class:`numpy.ndarray`
"""
return np.array([k for (k, v) in self.counts.items() if v > 0])
    def to_vector(self, compressed=True, dtype=DEFAULT_FP_DTYPE):
        """Convert this fingerprint to a vector of bits/counts.

        .. warning::
            This function may raise a `MemoryError` exception when using huge indices vectors.
            If you found this issue, you may want to try a different data type
            or apply a folding operation before calling `to_vector`.

        Parameters
        ----------
        compressed : bool
            If True, build a compressed sparse matrix (scipy.sparse.csr_matrix).
        dtype : data-type
            The default value is np.int32.

        Returns
        -------
        : :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
            Vector of bits/counts.
            Return a compressed sparse matrix (`scipy.sparse.csr_matrix`) if ``compressed`` is True.
            Otherwise, return a Numpy array (:class:`numpy.ndarray`)

        Raises
        ------
        BitsValueError
            If some of the fingerprint indices are greater than the fingerprint length.
        MemoryError
            If the operation ran out of memory.
        """
        # One count per "on" bit, aligned with self.indices.
        data = [self.counts[i] for i in self.indices]
        if compressed:
            try:
                # All entries live in row 0 (single-row matrix); one
                # (row, col) pair per "on" bit.
                row = np.zeros(self.bit_count)
                col = self.indices
                vector = csr_matrix((data, (row, col)), shape=(1, self.fp_length), dtype=dtype)
            except ValueError as e:
                logger.exception(e)
                raise BitsValueError("Sparse matrix construction failed. Invalid indices or input data.")
        else:
            try:
                # This function is causing a MemoryError exception when using a 2**32 vector.
                vector = np.zeros(self.fp_length, dtype=dtype)
            except MemoryError as e:
                logger.exception(e)
                raise MemoryError("Huge indices vector detected. An operation ran out of memory. "
                                  "Use a different data type or apply a folding operation.")
            try:
                # Scatter the counts into the dense vector.
                vector[self.indices] = data
            except IndexError as e:
                logger.exception(e)
                raise BitsValueError("Some of the provided indices are greater than the fingerprint length.")
        return vector
def to_bit_vector(self, compressed=True):
"""Convert this fingerprint to a vector of bits.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
If you found this issue, you may want to try a different data type
or apply a folding operation before calling `to_bit_vector`.
Parameters
-------
compressed : bool
If True, build a compressed sparse matrix (scipy.sparse.csr_matrix).
Returns
-------
: :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Vector of bits/counts.
Return a compressed sparse matrix (`scipy.sparse.csr_matrix`) if ``compressed`` is True.
Otherwise, return a Numpy array (:class:`numpy.ndarray`)
Raises
------
BitsValueError
If some of the fingerprint indices are greater than the fingerprint length.
MemoryError
If the operation ran out of memory.
"""
return self.to_vector(compressed=compressed, dtype=np.bool_).astype(np.int8)
def to_bit_string(self):
"""Convert this fingerprint to a string of bits.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
If you found this issue, you may want to try a different data type
or apply a folding operation before calling `to_bit_string`.
Returns
-------
: str
Raises
------
MemoryError
If the operation ran out of memory.
"""
try:
# This function is causing a MemoryError exception when using a 2**32 vector.
bit_vector = self.to_bit_vector(compressed=False).astype(np.int8)
return "".join(map(str, bit_vector))
except MemoryError as e:
logger.exception(e)
raise MemoryError("Huge indices vector detected. An operation ran out of memory. "
"Use a different data type or apply a folding operation.")
def to_rdkit(self, rdkit_fp_cls=None):
"""Convert this fingerprint to an RDKit fingerprint.
.. note::
If the fingerprint length exceeds the maximum RDKit fingerprint length (:math:`2^{31} - 1`),
this fingerprint will be folded to length :math:`2^{31} - 1` before conversion.
Returns
-------
: :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
If ``fp_length`` is less than :math:`1e5`, :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` is used.
Otherwise, :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect` is used.
"""
if rdkit_fp_cls is None:
# Classes to store explicit bit vectors: ExplicitBitVect or SparseBitVect.
# ExplicitBitVect is most useful for situations where the size of the vector is
# relatively small (tens of thousands or smaller).
# For larger vectors, use the _SparseBitVect_ class instead.
if self.fp_length < 1e5:
rdkit_fp_cls = ExplicitBitVect
else:
rdkit_fp_cls = SparseBitVect
# RDKit data structure defines fingerprints as a std:set composed of ints (signed int).
# Since we always have values higher than 0 and since the data structure contains only signed ints,
# then the max length for a RDKit fingerprint is 2^31 - 1.
# C signed int (32 bit) ranges: [-2^31, 2^31-1].
max_rdkit_fp_length = 2**31 - 1
fp_length = self.fp_length
if max_rdkit_fp_length < fp_length:
logger.warning("The current fingerprint will be folded as its size is higher than the maximum "
"size accepted by RDKit, which is 2**31 - 1.")
fp_length = max_rdkit_fp_length
indices = self.indices % max_rdkit_fp_length
rdkit_fp = rdkit_fp_cls(fp_length)
rdkit_fp.SetBitsFromList(indices.tolist())
return rdkit_fp
def fold(self, new_length=DEFAULT_FOLDED_FP_LENGTH):
"""Fold this fingerprint to size ``new_length``.
Parameters
----------
new_length : int
Length of the new fingerprint, ideally multiple of 2. The default value is 4096.
Returns
-------
: `Fingerprint`
Folded `Fingerprint`.
Raises
------
BitsValueError
If the new fingerprint length is not a multiple of 2 or is greater than the existing fingerprint length.
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices = np.random.randint(0, fp_length, on_bits)
>>> print(indices)
[12 15 21 0 3 27 3 7]
>>> fp = Fingerprint.from_indices(indices, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
>>> folded_fp = fp.fold(8)
>>> print(folded_fp.indices)
[0 3 4 5 7]
>>> print(folded_fp.to_vector(compressed=False))
[1 0 0 1 1 1 0 1]
"""
if new_length > self.fp_length:
error_msg = ("The new fingerprint length must be smaller than the existing fingerprint length.")
logger.exception(error_msg)
raise BitsValueError(error_msg)
if not np.log2(self.fp_length / new_length).is_integer():
error_msg = ("It is not possible to fold the current fingerprint into the informed new length. "
"The current length divided by the new one is not a power of 2 number.")
logger.exception(error_msg)
raise BitsValueError(error_msg)
folded_indices = self.indices % new_length
unfolding_map = defaultdict(set)
for k, v in sorted(zip(folded_indices, self.indices)):
unfolding_map[k].add(v)
props = dict(self.props)
if "fp_length" in props:
props["fp_length"] = new_length
new_fp = self.__class__(indices=folded_indices, fp_length=new_length,
unfolded_fp=self, unfolding_map=unfolding_map, props=props)
return new_fp
def unfold(self):
"""Unfold this fingerprint and return its parent fingerprint.
Returns
-------
: `Fingerprint`
"""
return self.unfolded_fp
def union(self, other):
"""Return the union of indices of two fingerprints.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("The informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("The informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.union1d(self.indices, other.indices)
def intersection(self, other):
"""Return the intersection between indices of two fingerprints.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.intersect1d(self.indices, other.indices, assume_unique=True)
def difference(self, other):
"""Return indices in this fingerprint but not in ``other``.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.setdiff1d(self.indices, other.indices, assume_unique=True)
def symmetric_difference(self, other):
"""Return indices in either this fingerprint or ``other`` but not both.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.setxor1d(self.indices, other.indices, assume_unique=True)
def calc_similarity(self, other):
"""Calculates the Tanimoto similarity between this fingeprint and ``other``.
Returns
-------
: float
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> fp1 = Fingerprint.from_bit_string("0010101110000010")
>>> fp2 = Fingerprint.from_bit_string("1010100110010010")
>>> print(fp1.calc_similarity(fp2))
0.625
"""
return DataStructs.FingerprintSimilarity(self.to_rdkit(), other.to_rdkit())
def __repr__(self):
return ("<%s: indices=%s length=%d>" %
(self.__class__, repr(self.indices).replace('\n', '').replace(' ', ''), self.fp_length))
def __eq__(self, other):
if isinstance(other, Fingerprint):
return (self.__class__ == other.__class__
and self.fp_length == other.fp_length
and np.all(np.in1d(self.indices, other.indices, assume_unique=True)))
return False
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def __xor__(self, other):
return self.symmetric_difference(other)
class CountFingerprint(Fingerprint):
    """A fingerprint that stores the number of occurrences of each index.

    Parameters
    ----------
    indices : array_like of int, optional
        Indices of "on" bits. It is optional if ``counts`` is provided.
    counts : dict, optional
        Mapping between each index in ``indices`` to the number of counts.
        If not provided, the default count value of 1 will be used instead.
    fp_length : int
        The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
    unfolded_fp : `Fingerprint` or None
        The unfolded version of this fingerprint.
        If None, this fingerprint may have not been folded yet.
    unfolding_map : dict, optional
        A mapping between current indices and indices from the unfolded version of this fingerprint
        that makes it possible to trace folded bits back to the original shells (features).
    props: dict, optional
        Custom properties of the fingerprint, consisting of a string keyword and
        some value. It can be used, for instance, to save the ligand name
        and parameters used to generate shells (IFP features).
    """

    def __init__(self, indices=None, counts=None, fp_length=DEFAULT_FP_LENGTH,
                 unfolded_fp=None, unfolding_map=None, props=None):
        if indices is None and counts is None:
            logger.exception("Indices or counts must be provided.")
            raise IllegalArgumentError("Indices or counts must be provided.")

        if indices is not None:
            # BUG FIX: np.long was removed in NumPy 1.24; np.int64 provides
            # the same integer semantics for fingerprint indices.
            indices = np.asarray(indices, dtype=np.int64)
            if np.any(np.logical_or(indices < 0, indices >= fp_length)):
                logger.exception("Provided indices are in a different bit scale.")
                raise BitsValueError("Provided indices are in a different bit scale.")

            if counts is None:
                # Tally duplicated indices to build the counting map.
                indices, counts = np.unique(indices, return_counts=True)
                counts = dict(zip(indices, counts))
            else:
                indices = np.unique(indices)
                # 'indices' and the keys of 'counts' must be exactly the same set.
                if not np.all([x in indices for x in counts]):
                    logger.exception("At least one index from 'counts' is not in 'indices'.")
                    raise FingerprintCountsError("At least one index from 'counts' is not in 'indices'.")
                if len(set(indices).symmetric_difference(counts)) > 0:
                    logger.exception("At least one index in 'indices' is not in 'counts'.")
                    raise FingerprintCountsError("At least one index in 'indices' is not in 'counts'.")
        else:
            indices = np.asarray(sorted(counts.keys()), dtype=np.int64)
            if np.any(np.logical_or(indices < 0, indices >= fp_length)):
                logger.exception("Provided indices are in a different bit scale.")
                raise BitsValueError("Provided indices are in a different bit scale.")

        self._counts = counts

        super().__init__(indices, fp_length, unfolded_fp, unfolding_map, props)

    @classmethod
    def from_indices(cls, indices=None, counts=None, fp_length=DEFAULT_FP_LENGTH, **kwargs):
        """Initialize from an array of indices.

        Parameters
        ----------
        indices : array_like of int, optional
            Indices of "on" bits. It is optional if ``counts`` is provided.
        counts : dict, optional
            Mapping between each index in ``indices`` to the number of counts.
            If not provided, the default count value of 1 will be used instead.
        fp_length : int
            The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
        **kwargs : dict, optional
            Extra arguments to `CountFingerprint`. Refer to the documentation for a
            list of all possible arguments.

        Returns
        -------
        : `CountFingerprint`

        Examples
        --------
        >>> from luna.interaction.fp.fingerprint import CountFingerprint
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> on_bits = 8
        >>> fp_length = 32
        >>> indices, counts = np.unique(np.random.randint(0, fp_length, on_bits), return_counts=True)
        >>> counts = dict(zip(indices, counts))
        >>> print(counts)
        {0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
        >>> fp = CountFingerprint.from_indices(indices, counts=counts, fp_length=fp_length)
        >>> print(fp.indices)
        [ 0  3  7 12 15 21 27]
        >>> print(fp.to_vector(compressed=False))
        [1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
        """
        return cls(indices=indices, counts=counts, fp_length=fp_length, **kwargs)

    @classmethod
    def from_counts(cls, counts, fp_length=DEFAULT_FP_LENGTH, **kwargs):
        """Initialize from a counting map.

        Parameters
        ----------
        counts : dict
            Mapping between each index in ``indices`` to the number of counts.
        fp_length : int
            The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
        **kwargs : dict, optional
            Extra arguments to `CountFingerprint`. Refer to the documentation for a
            list of all possible arguments.

        Returns
        -------
        : `CountFingerprint`

        Examples
        --------
        >>> from luna.interaction.fp.fingerprint import CountFingerprint
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> on_bits = 8
        >>> fp_length = 32
        >>> counts = dict(zip(*np.unique(np.random.randint(0, fp_length, on_bits),
        ...                              return_counts=True)))
        >>> print(counts)
        {0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
        >>> fp = CountFingerprint.from_counts(counts=counts, fp_length=fp_length)
        >>> print(fp.indices)
        [ 0  3  7 12 15 21 27]
        >>> print(fp.to_vector(compressed=False))
        [1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
        """
        return cls(counts=counts, fp_length=fp_length, **kwargs)

    @classmethod
    def from_bit_string(cls, bit_string, counts=None, fp_length=None, **kwargs):
        """Initialize from a bit string (e.g. '0010100110').

        Parameters
        ----------
        bit_string : str
            String of 0s and 1s.
        counts : dict, optional
            Mapping between each index in ``indices`` to the number of counts.
            If not provided, the default count value of 1 will be used instead.
        fp_length : int, optional
            The fingerprint length (total number of bits).
            If not provided, the fingerprint length will be defined based on the string length.
        **kwargs : dict, optional
            Extra arguments to `Fingerprint`. Refer to the documentation for a
            list of all possible arguments.

        Returns
        -------
        : `CountFingerprint`

        Examples
        --------
        >>> from luna.interaction.fp.fingerprint import CountFingerprint
        >>> fp = CountFingerprint.from_bit_string("0010100110000010",
        ...                                       counts={2: 5, 4: 1, 7: 3, 8: 1, 14: 2})
        >>> print(fp.indices)
        [ 2  4  7  8 14]
        >>> print(fp.counts)
        {2: 5, 4: 1, 7: 3, 8: 1, 14: 2}
        """
        indices = [i for i, char in enumerate(bit_string) if char != '0']
        if fp_length is None:
            fp_length = len(bit_string)
        return cls.from_indices(indices, counts, fp_length, **kwargs)

    @classmethod
    def from_vector(cls, vector, fp_length=None, **kwargs):
        """Initialize from a vector.

        Parameters
        ----------
        vector : :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
            Array of counts.
        fp_length : int, optional
            The fingerprint length (total number of bits).
            If not provided, the fingerprint length will be defined based on the ``vector`` shape.
        **kwargs : dict, optional
            Extra arguments to `Fingerprint`. Refer to the documentation for a
            list of all possible arguments.

        Returns
        -------
        : `CountFingerprint`

        Examples
        --------
        >>> from luna.interaction.fp.fingerprint import CountFingerprint
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> fp_length = 32
        >>> vector = np.random.choice(5, size=(fp_length,), p=[0.76, 0.1, 0.1, 0.02, 0.02])
        >>> fp = CountFingerprint.from_vector(vector)
        >>> print(fp.indices)
        [ 4  5  7 10 14 15 16 17 18 20 24 28]
        """
        if fp_length is None:
            # A sparse matrix is 2-D (1, n); a dense array is 1-D (n,).
            try:
                fp_length = vector.shape[1]
            except IndexError:
                fp_length = vector.shape[0]

        if issparse(vector):
            # BUG FIX: np.long removed in NumPy 1.24 — use np.int64.
            indices = vector.indices.astype(np.int64)
            counts = vector.data
        else:
            indices = np.asarray(np.where(vector), dtype=np.int64).flatten()
            counts = vector[indices]
        counts = dict(zip(indices, counts))

        return cls.from_indices(indices, counts, fp_length, **kwargs)

    @classmethod
    def from_fingerprint(cls, fp, **kwargs):
        """Initialize from an existing fingerprint.

        Parameters
        ----------
        fp : `Fingerprint`
            An existing fingerprint.
        **kwargs : dict, optional
            Extra arguments to `Fingerprint`. Refer to the documentation for a
            list of all possible arguments.

        Returns
        -------
        : `CountFingerprint`
        """
        if not isinstance(fp, Fingerprint):
            logger.exception("Informed fingerprint is not an instance of %s." % (cls.__class__))
            raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (cls.__class__))

        counts = dict([(i, c) for i, c in fp.counts.items() if c > 0])
        unfolded_fp = fp.__class__.from_fingerprint(fp.unfolded_fp) if fp.unfolded_fp is not None else None
        # BUG FIX: 'unfolding_map' is None when 'fp' was never folded, and
        # dict(None) raises TypeError. Guard against that case.
        unfolding_map = dict(fp.unfolding_map) if fp.unfolding_map is not None else None
        props = dict(fp.props)

        new_fp = cls.from_counts(counts, fp.fp_length, unfolded_fp=unfolded_fp,
                                 unfolding_map=unfolding_map, props=props)
        return new_fp

    @property
    def counts(self):
        """dict, read-only: Mapping between each index in ``indices`` to the number of counts."""
        return self._counts

    def get_count(self, index):
        """Get the count value at index ``index``. Return 0 if index is not in ``counts``."""
        return self.counts.get(index, 0)

    def fold(self, new_length=DEFAULT_FOLDED_FP_LENGTH):
        """Fold this fingerprint to size ``new_length``.

        Parameters
        ----------
        new_length : int
            Length of the new fingerprint, ideally multiple of 2. The default value is 4096.

        Returns
        -------
        : `Fingerprint`
            Folded `Fingerprint`.

        Raises
        ------
        BitsValueError
            If the new fingerprint length is not a multiple of 2 or is greater than the existing fingerprint length.

        Examples
        --------
        >>> from luna.interaction.fp.fingerprint import CountFingerprint
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> on_bits = 8
        >>> fp_length = 32
        >>> indices, counts = np.unique(np.random.randint(0, fp_length, on_bits), return_counts=True)
        >>> counts = dict(zip(indices, counts))
        >>> fp = CountFingerprint.from_indices(indices, counts=counts, fp_length=fp_length)
        >>> folded_fp = fp.fold(8)
        >>> print(folded_fp.indices)
        [0 3 4 5 7]
        >>> print(folded_fp.to_vector(compressed=False))
        [1 0 0 3 1 1 0 2]
        """
        new_fp = super().fold(new_length)
        # Aggregate the counts of every original bit that landed on the same
        # folded bit.
        new_fp._counts = dict([(folded_idx, sum([self.get_count(x) for x in unfolded_set]))
                               for folded_idx, unfolded_set in new_fp.unfolding_map.items()])
        return new_fp

    def __repr__(self):
        return ("<%s: counts={%s} length=%d>" %
                (self.__class__, tuple([(k, v) for k, v in self.counts.items()]), self.fp_length))

    def __eq__(self, other):
        # Equal counts imply identical "on" index sets, so the in1d subset
        # check is safe here (unlike in the base class).
        if isinstance(other, Fingerprint):
            return (self.__class__ == other.__class__
                    and self.counts == other.counts
                    and self.fp_length == other.fp_length
                    and np.all(np.in1d(self.indices, other.indices, assume_unique=True)))
        return False
|
[
"scipy.sparse.issparse",
"collections.defaultdict",
"luna.util.exceptions.IllegalArgumentError",
"luna.util.exceptions.InvalidFingerprintType",
"numpy.unique",
"luna.util.exceptions.BitsValueError",
"numpy.intersect1d",
"numpy.union1d",
"numpy.log2",
"numpy.asarray",
"luna.util.exceptions.FingerprintCountsError",
"scipy.sparse.csr_matrix",
"numpy.all",
"numpy.setdiff1d",
"numpy.setxor1d",
"numpy.zeros",
"numpy.where",
"numpy.logical_or",
"logging.getLogger",
"numpy.in1d"
] |
[((390, 409), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (407, 409), False, 'import logging\n'), ((1543, 1577), 'numpy.asarray', 'np.asarray', (['indices'], {'dtype': 'np.long'}), '(indices, dtype=np.long)\n', (1553, 1577), True, 'import numpy as np\n'), ((1835, 1853), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (1844, 1853), True, 'import numpy as np\n'), ((4669, 4685), 'scipy.sparse.issparse', 'issparse', (['vector'], {}), '(vector)\n', (4677, 4685), False, 'from scipy.sparse import issparse, csr_matrix\n'), ((21523, 21539), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (21534, 21539), False, 'from collections import defaultdict\n'), ((22930, 22969), 'numpy.union1d', 'np.union1d', (['self.indices', 'other.indices'], {}), '(self.indices, other.indices)\n', (22940, 22969), True, 'import numpy as np\n'), ((23773, 23836), 'numpy.intersect1d', 'np.intersect1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (23787, 23836), True, 'import numpy as np\n'), ((24634, 24695), 'numpy.setdiff1d', 'np.setdiff1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (24646, 24695), True, 'import numpy as np\n'), ((25515, 25575), 'numpy.setxor1d', 'np.setxor1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (25526, 25575), True, 'import numpy as np\n'), ((36072, 36088), 'scipy.sparse.issparse', 'issparse', (['vector'], {}), '(vector)\n', (36080, 36088), False, 'from scipy.sparse import issparse, csr_matrix\n'), ((1597, 1645), 'numpy.logical_or', 'np.logical_or', (['(indices < 0)', '(indices >= fp_length)'], {}), '(indices < 0, indices >= fp_length)\n', (1610, 1645), True, 'import numpy as np\n'), ((1745, 1809), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Provided indices are in a different 
bit scale."""'], {}), "('Provided indices are in a different bit scale.')\n", (1759, 1809), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((7691, 7783), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % cls.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' %\n cls.__class__)\n", (7713, 7783), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((21062, 21087), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['error_msg'], {}), '(error_msg)\n', (21076, 21087), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((21420, 21445), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['error_msg'], {}), '(error_msg)\n', (21434, 21445), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((22692, 22790), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('The informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('The informed fingerprint is not an instance of %s.' 
%\n other.__class__)\n", (22714, 22790), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((22854, 22913), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (22868, 22913), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((23539, 23633), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' %\n other.__class__)\n", (23561, 23633), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((23697, 23756), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (23711, 23756), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((24400, 24494), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' 
%\n other.__class__)\n", (24422, 24494), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((24558, 24617), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (24572, 24617), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((25281, 25375), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' %\n other.__class__)\n", (25303, 25375), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((25439, 25498), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (25453, 25498), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((28427, 28486), 'luna.util.exceptions.IllegalArgumentError', 'IllegalArgumentError', (['"""Indices or counts must be provided."""'], {}), "('Indices or counts must be provided.')\n", (28447, 28486), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((28542, 28576), 'numpy.asarray', 'np.asarray', (['indices'], {'dtype': 'np.long'}), '(indices, dtype=np.long)\n', (28552, 28576), True, 'import numpy as np\n'), ((37027, 37119), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % cls.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' 
%\n cls.__class__)\n", (37049, 37119), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((12792, 12857), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""The provided index is in a different bit scale."""'], {}), "('The provided index is in a different bit scale.')\n", (12806, 12857), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((14317, 14341), 'numpy.zeros', 'np.zeros', (['self.bit_count'], {}), '(self.bit_count)\n', (14325, 14341), True, 'import numpy as np\n'), ((14402, 14472), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, (row, col))'], {'shape': '(1, self.fp_length)', 'dtype': 'dtype'}), '((data, (row, col)), shape=(1, self.fp_length), dtype=dtype)\n', (14412, 14472), False, 'from scipy.sparse import issparse, csr_matrix\n'), ((14801, 14838), 'numpy.zeros', 'np.zeros', (['self.fp_length'], {'dtype': 'dtype'}), '(self.fp_length, dtype=dtype)\n', (14809, 14838), True, 'import numpy as np\n'), ((28600, 28648), 'numpy.logical_or', 'np.logical_or', (['(indices < 0)', '(indices >= fp_length)'], {}), '(indices < 0, indices >= fp_length)\n', (28613, 28648), True, 'import numpy as np\n'), ((28756, 28820), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Provided indices are in a different bit scale."""'], {}), "('Provided indices are in a different bit scale.')\n", (28770, 28820), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((28887, 28925), 'numpy.unique', 'np.unique', (['indices'], {'return_counts': '(True)'}), '(indices, return_counts=True)\n', (28896, 28925), True, 'import numpy as np\n'), ((29022, 29040), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (29031, 29040), True, 'import numpy as np\n'), ((29679, 29727), 'numpy.logical_or', 'np.logical_or', (['(indices < 
0)', '(indices >= fp_length)'], {}), '(indices < 0, indices >= fp_length)\n', (29692, 29727), True, 'import numpy as np\n'), ((29835, 29899), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Provided indices are in a different bit scale."""'], {}), "('Provided indices are in a different bit scale.')\n", (29849, 29899), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((14567, 14655), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Sparse matrix construction failed. Invalid indices or input data."""'], {}), "(\n 'Sparse matrix construction failed. Invalid indices or input data.')\n", (14581, 14655), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((15260, 15352), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Some of the provided indices are greater than the fingerprint length."""'], {}), "(\n 'Some of the provided indices are greater than the fingerprint length.')\n", (15274, 15352), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((21104, 21140), 'numpy.log2', 'np.log2', (['(self.fp_length / new_length)'], {}), '(self.fp_length / new_length)\n', (21111, 21140), True, 'import numpy as np\n'), ((26532, 26588), 'numpy.in1d', 'np.in1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (26539, 26588), True, 'import numpy as np\n'), ((29064, 29104), 'numpy.all', 'np.all', (['[(x in indices) for x in counts]'], {}), '([(x in indices) for x in counts])\n', (29070, 29104), True, 'import numpy as np\n'), ((29224, 29303), 'luna.util.exceptions.FingerprintCountsError', 'FingerprintCountsError', (['"""At least one index from \'counts\' is not in \'indices\'."""'], {}), '("At least one index from \'counts\' is not in 
\'indices\'.")\n', (29246, 29303), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((29493, 29570), 'luna.util.exceptions.FingerprintCountsError', 'FingerprintCountsError', (['"""At least one index in \'indices\' is not in \'counts\'."""'], {}), '("At least one index in \'indices\' is not in \'counts\'.")\n', (29515, 29570), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((40028, 40084), 'numpy.in1d', 'np.in1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (40035, 40084), True, 'import numpy as np\n'), ((4787, 4803), 'numpy.where', 'np.where', (['vector'], {}), '(vector)\n', (4795, 4803), True, 'import numpy as np\n'), ((36223, 36239), 'numpy.where', 'np.where', (['vector'], {}), '(vector)\n', (36231, 36239), True, 'import numpy as np\n')]
|
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score, jaccard_score
from model_plus import createDeepLabv3Plus
import sys
print(sys.version, sys.platform, sys.executable)
from trainer_plus import train_model
import datahandler_plus
import argparse
import os
import torch
import numpy
torch.cuda.empty_cache()
"""
Version requirements:
PyTorch Version: 1.2.0
Torchvision Version: 0.4.0a0+6b959ee
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-data_directory", help='Specify the dataset directory path')
parser.add_argument(
"-exp_directory", help='Specify the experiment directory where metrics and model weights shall be stored.')
parser.add_argument("--epochs", default=10, type=int)
parser.add_argument("--batchsize", default=2, type=int)
parser.add_argument("--output_stride", default=8, type=int)
parser.add_argument("--channels", default=4, type=int)
parser.add_argument("--pretrained", default='')
parser.add_argument("--class_weights", nargs='+', default=None)
parser.add_argument("--folder_structure", default='sep', help='sep or single')
args = parser.parse_args()
bpath = args.exp_directory
print('Export Directory: ' + bpath)
data_dir = args.data_directory
print('Data Directory: ' + data_dir)
epochs = args.epochs
print('Epochs: ' + str(epochs))
batchsize = args.batchsize
print('Batch size: ' + str(batchsize))
output_stride = args.output_stride
channels = args.channels
print('Number of classes: ' + str(channels))
class_weights = args.class_weights
print('Class weights: ' + str(class_weights))
folder_structure = args.folder_structure
print('folder structure: ' + folder_structure)
model_path = args.pretrained
print('loading pre-trained model from saved state: ' + model_path)
if not os.path.exists(bpath): # if it doesn't exist already
os.makedirs(bpath)
# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017,
# on the 20 categories that are present in the Pascal VOC dataset.
if model_path != '':
try:
model = torch.load(model_path)
print('LOADED MODEL')
model.train()
except:
print('model path did not load')
model = createDeepLabv3Plus(outputchannels=channels, output_stride=output_stride)
else:
model = createDeepLabv3Plus(outputchannels=channels, output_stride=output_stride)
model.train()
# Specify the loss function
if class_weights == None:
print('class not weighted')
criterion = torch.nn.CrossEntropyLoss()
elif class_weights != None and len(class_weights) == channels:
print('class weighted')
class_weights = numpy.array(class_weights).astype(float)
torch_class_weights = torch.FloatTensor(class_weights).cuda()
criterion = torch.nn.CrossEntropyLoss(weight=torch_class_weights)
else:
print('channels did not allign with class weights - default applied')
print('class not weighted')
criterion = torch.nn.CrossEntropyLoss()
# Specify the optimizer with a lower learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# Specify the evalutation metrics
metrics = {'f1_score': f1_score, 'jaccard_score': jaccard_score}
# Create the dataloader
if folder_structure == 'sep':
dataloaders = datahandler_plus.get_dataloader_sep_folder(data_dir, batch_size=batchsize)
else:
dataloaders = datahandler_plus.get_dataloader_single_folder(data_dir, batch_size=batchsize)
trained_model = train_model(model, criterion, dataloaders,
optimizer, bpath=bpath, metrics=metrics, num_epochs=epochs)
# Save the trained model
# torch.save({'model_state_dict':trained_model.state_dict()},os.path.join(bpath,'weights'))
torch.save(model, os.path.join(bpath, 'weights.pt'))
|
[
"trainer_plus.train_model",
"os.makedirs",
"argparse.ArgumentParser",
"datahandler_plus.get_dataloader_single_folder",
"torch.load",
"os.path.exists",
"model_plus.createDeepLabv3Plus",
"torch.nn.CrossEntropyLoss",
"torch.FloatTensor",
"numpy.array",
"torch.cuda.empty_cache",
"datahandler_plus.get_dataloader_sep_folder",
"os.path.join"
] |
[((312, 336), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (334, 336), False, 'import torch\n'), ((460, 485), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (483, 485), False, 'import argparse\n'), ((3450, 3557), 'trainer_plus.train_model', 'train_model', (['model', 'criterion', 'dataloaders', 'optimizer'], {'bpath': 'bpath', 'metrics': 'metrics', 'num_epochs': 'epochs'}), '(model, criterion, dataloaders, optimizer, bpath=bpath, metrics=\n metrics, num_epochs=epochs)\n', (3461, 3557), False, 'from trainer_plus import train_model\n'), ((1778, 1799), 'os.path.exists', 'os.path.exists', (['bpath'], {}), '(bpath)\n', (1792, 1799), False, 'import os\n'), ((1835, 1853), 'os.makedirs', 'os.makedirs', (['bpath'], {}), '(bpath)\n', (1846, 1853), False, 'import os\n'), ((2296, 2369), 'model_plus.createDeepLabv3Plus', 'createDeepLabv3Plus', ([], {'outputchannels': 'channels', 'output_stride': 'output_stride'}), '(outputchannels=channels, output_stride=output_stride)\n', (2315, 2369), False, 'from model_plus import createDeepLabv3Plus\n'), ((2492, 2519), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2517, 2519), False, 'import torch\n'), ((3252, 3326), 'datahandler_plus.get_dataloader_sep_folder', 'datahandler_plus.get_dataloader_sep_folder', (['data_dir'], {'batch_size': 'batchsize'}), '(data_dir, batch_size=batchsize)\n', (3294, 3326), False, 'import datahandler_plus\n'), ((3351, 3428), 'datahandler_plus.get_dataloader_single_folder', 'datahandler_plus.get_dataloader_single_folder', (['data_dir'], {'batch_size': 'batchsize'}), '(data_dir, batch_size=batchsize)\n', (3396, 3428), False, 'import datahandler_plus\n'), ((3717, 3750), 'os.path.join', 'os.path.join', (['bpath', '"""weights.pt"""'], {}), "(bpath, 'weights.pt')\n", (3729, 3750), False, 'import os\n'), ((2060, 2082), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2070, 2082), False, 'import torch\n'), ((2755, 2808), 
'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'weight': 'torch_class_weights'}), '(weight=torch_class_weights)\n', (2780, 2808), False, 'import torch\n'), ((2937, 2964), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2962, 2964), False, 'import torch\n'), ((2204, 2277), 'model_plus.createDeepLabv3Plus', 'createDeepLabv3Plus', ([], {'outputchannels': 'channels', 'output_stride': 'output_stride'}), '(outputchannels=channels, output_stride=output_stride)\n', (2223, 2277), False, 'from model_plus import createDeepLabv3Plus\n'), ((2632, 2658), 'numpy.array', 'numpy.array', (['class_weights'], {}), '(class_weights)\n', (2643, 2658), False, 'import numpy\n'), ((2699, 2731), 'torch.FloatTensor', 'torch.FloatTensor', (['class_weights'], {}), '(class_weights)\n', (2716, 2731), False, 'import torch\n')]
|
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
Linear algebra functions.
.. seealso::
`numpy.linalg` for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_circulant - Solve a circulant system
solve_triangular - Solve a triangular matrix
solve_toeplitz - Solve a toeplitz matrix
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinv2 - Pseudo-inverse using svd
pinvh - Pseudo-inverse of hermitian matrix
kron - Kronecker product of two arrays
tril - Construct a lower-triangular matrix from a given matrix
triu - Construct an upper-triangular matrix from a given matrix
orthogonal_procrustes - Solve an orthogonal Procrustes problem
LinAlgError
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
qr_update - Rank k QR update
qr_delete - QR downdate on row or column deletion
qr_insert - QR update on row or column insertion
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
ordqz - QZ decomposition of a pair of matrices with reordering
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
solve_lyapunov - Solve the (continous-time) Lyapunov equation
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
dft - Discrete Fourier transform matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
helmert - Helmert matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
tri - Construct a matrix filled with ones at and below a given diagonal
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
`scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
`scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
"""
from __future__ import division, print_function, absolute_import
from .linalg_version import linalg_version as __version__
from .misc import *
from .basic import *
from .decomp import *
from .decomp_lu import *
from .decomp_cholesky import *
from .decomp_qr import *
from ._decomp_qz import *
from .decomp_svd import *
from .decomp_schur import *
from ._decomp_polar import *
from .matfuncs import *
from .blas import *
from .lapack import *
from .special_matrices import *
from ._solvers import *
from ._procrustes import *
from ._decomp_update import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals',
'eigvalsh', 'lstsq', 'cholesky']:
try:
register_func(k, eval(k))
except ValueError:
pass
try:
register_func('pinv', pinv2)
except ValueError:
pass
del k, register_func
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
[
"numpy.dual.register_func",
"numpy.testing.Tester"
] |
[((6396, 6424), 'numpy.dual.register_func', 'register_func', (['"""pinv"""', 'pinv2'], {}), "('pinv', pinv2)\n", (6409, 6424), False, 'from numpy.dual import register_func\n'), ((6523, 6531), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (6529, 6531), False, 'from numpy.testing import Tester\n'), ((6546, 6554), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (6552, 6554), False, 'from numpy.testing import Tester\n')]
|
import os
from PIL import Image
from models.model import model
import argparse
import numpy as np
import tensorflow as tf
import shutil
def create(args):
if args.pre_trained == 'facenet':
from models.Face_recognition import FR_model
FR = FR_model()
Model = tf.keras.models.load_model(args.save_path)
path = args.img_dir + '/'
names = os.listdir(path)
Add = []
Age = []
for idx, i in enumerate(names, 0):
curr_img = Image.open(path + i)
# print(path+i)
curr_img = curr_img.resize((args.img_size, args.img_size))
curr_img = np.asarray(curr_img)
curr_img = curr_img.astype('float64')
curr_img /= 127.5
curr_img = curr_img - 1
X = [curr_img]
X = np.asarray(X)
assert X.shape == (1, args.img_size, args.img_size, 3), 'check input image shape'
X = FR(X)
y = Model(X)
Add.append(i)
Age.append(y)
if (idx + 1) % args.log_step == 0:
print('{} no of images predicted'.format(idx + 1))
os.mkdir('Face-AHQ')
# path = '/content/data/celeba_hq/train/male/'
path = args.img_dir + '/'
for i in range(len(Add)):
ages = os.listdir('Face-AHQ')
age = (int)(Age[i])
add = path + Add[i]
# creates folder
if str(age) not in ages:
os.mkdir('Face-AHQ/{}'.format(age))
dest = 'Face-AHQ/{}/{}.png'.format(age, i)
shutil.move(add, dest)
if (i + 1) % args.log_step == 0:
print('{} no of images saved'.format(i + 1))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--pre_trained', type=str, default = 'facenet', help='pre-trained model to be used')
parser.add_argument('--img_dir', type=str, default = 'data', help='pre-trained model to be used')
parser.add_argument('--img_size', type=int, default = 160, help='size of image to be fed to the model')
parser.add_argument('--log_step', type=int, default = 50, help='number of steps to be taken before logging')
parser.add_argument('--save_path', type=str, default = 'Model_checkpoint',
help = 'path of dir where model is to be saved')
args = parser.parse_args()
create(args)
|
[
"os.mkdir",
"tensorflow.keras.models.load_model",
"argparse.ArgumentParser",
"numpy.asarray",
"models.Face_recognition.FR_model",
"PIL.Image.open",
"shutil.move",
"os.listdir"
] |
[((373, 389), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (383, 389), False, 'import os\n'), ((1063, 1083), 'os.mkdir', 'os.mkdir', (['"""Face-AHQ"""'], {}), "('Face-AHQ')\n", (1071, 1083), False, 'import os\n'), ((1618, 1643), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1641, 1643), False, 'import argparse\n'), ((260, 270), 'models.Face_recognition.FR_model', 'FR_model', ([], {}), '()\n', (268, 270), False, 'from models.Face_recognition import FR_model\n'), ((287, 329), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['args.save_path'], {}), '(args.save_path)\n', (313, 329), True, 'import tensorflow as tf\n'), ((474, 494), 'PIL.Image.open', 'Image.open', (['(path + i)'], {}), '(path + i)\n', (484, 494), False, 'from PIL import Image\n'), ((605, 625), 'numpy.asarray', 'np.asarray', (['curr_img'], {}), '(curr_img)\n', (615, 625), True, 'import numpy as np\n'), ((765, 778), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (775, 778), True, 'import numpy as np\n'), ((1210, 1232), 'os.listdir', 'os.listdir', (['"""Face-AHQ"""'], {}), "('Face-AHQ')\n", (1220, 1232), False, 'import os\n'), ((1454, 1476), 'shutil.move', 'shutil.move', (['add', 'dest'], {}), '(add, dest)\n', (1465, 1476), False, 'import shutil\n')]
|
from __future__ import unicode_literals, print_function, division
from collections import Counter
from nltk.tokenize import TweetTokenizer
import cPickle as cp
import io
import numpy as np
PAD_TOKEN = 0
SOS_TOKEN = 1
EOS_TOKEN = 2
VOCAB_SIZE = 10000
class Lang(object):
def __init__(self, name, lowercase=True, tokenizer=None):
self.name = name
self.word_count = Counter()
self.tokenizer = tokenizer
self.lowercase = lowercase # To lowercase all words encountered
self.embedding_matrix = None
self.PAD_TOK_VEC = None
self.UNK_TOK_VEC = None
def tokenize_sent(self, sentence):
if self.tokenizer is None:
return sentence.split(u' ')
else:
return self.tokenizer.tokenize(sentence)
def add_sentence(self, sentence):
for w in self.tokenize_sent(sentence):
if self.lowercase:
w = w.lower()
self.word_count[w] += 1
def generate_vocab(self):
vocab = self.word_count.most_common(VOCAB_SIZE)
self.word2ix = {"<PAD>": PAD_TOKEN, "<SOS>": SOS_TOKEN, "<EOS>": EOS_TOKEN}
for w, _ in vocab:
self.word2ix[w] = len(self.word2ix)
self.ix2word = {self.word2ix[w]: w for w in self.word2ix}
def add_word(self, word, embedding=None):
assert word not in self.word2ix, "Already present in vocab"
self.word2ix[word] = len(self.word2ix)
self.ix2word[self.word2ix[word]] = word
if self.embedding_matrix is not None:
_, n_embed = self.embedding_matrix.shape
embedding = embedding if embedding is not None else np.random.normal(0, 1, (1, n_embed))
self.embedding_matrix = np.concatenate([self.embedding_matrix, embedding], axis=0)
def __getitem__(self, item):
if type(item) == str or type(item) == unicode:
# Encode the string to be unicode
item = unicode(item)
if self.lowercase:
item = item.lower()
return self.word2ix[item] if item in self.word2ix else len(self.word2ix)
else:
return self.ix2word[item] if item in self.ix2word else u"<UNK>"
def __len__(self):
assert len(self.ix2word) == len(self.word2ix), "Index not built using generate_vocab and add_word"
return len(self.ix2word)
def save_file(self, filename):
cp.dump(self.__dict__, open(filename, 'wb'))
def load_file(self, filename):
self.__dict__ = cp.load(open(filename))
def get_embedding_matrix(self):
if self.embedding_matrix is None:
return None
_embedding_matrix = np.concatenate([self.PAD_TOK_VEC, self.embedding_matrix, self.UNK_TOK_VEC], axis=0)
return _embedding_matrix
def build_vocab(filename, l):
with io.open(filename, encoding='utf-8', mode='r', errors='replace') as f:
for line in f:
line = line.strip().split('\t')
l.add_sentence(line[0])
l.add_sentence(line[1])
l.generate_vocab()
return l
def build_embedding_matrix_from_gensim(l_en, gensim_model, embedding_dim=300):
l_en.PAD_TOK_VEC = np.random.normal(0, 1, (1, embedding_dim))
l_en.UNK_TOK_VEC = np.random.normal(0, 1, (1, embedding_dim))
l_en.embedding_matrix = np.random.normal(0, 1, (len(l_en) - 1, embedding_dim)) # PAD TOKEN ENCODED SEPARATELY
for w in l_en.word2ix:
if l_en.word2ix[w] == PAD_TOKEN:
# PAD TOKEN ENCODED SEPARATELY
continue
if w in gensim_model.wv:
l_en.embedding_matrix[l_en.word2ix[w] - 1] = gensim_model.wv[w]
return l_en
if __name__ == "__main__":
# ROOT_DIR = "/home/bass/DataDir/RTE/"
ROOT_DIR = ""
DATA_FILE = ROOT_DIR + "data/train.txt"
# DATA_FILE ="data/tiny_eng-fra.txt"
l_en = Lang('en', tokenizer=TweetTokenizer())
l_en = build_vocab(DATA_FILE, l_en)
save_file_name = ROOT_DIR + 'data/vocab.pkl'
l_en.save_file(save_file_name)
|
[
"nltk.tokenize.TweetTokenizer",
"numpy.random.normal",
"io.open",
"collections.Counter",
"numpy.concatenate"
] |
[((3179, 3221), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1, embedding_dim)'], {}), '(0, 1, (1, embedding_dim))\n', (3195, 3221), True, 'import numpy as np\n'), ((3245, 3287), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1, embedding_dim)'], {}), '(0, 1, (1, embedding_dim))\n', (3261, 3287), True, 'import numpy as np\n'), ((389, 398), 'collections.Counter', 'Counter', ([], {}), '()\n', (396, 398), False, 'from collections import Counter\n'), ((2668, 2755), 'numpy.concatenate', 'np.concatenate', (['[self.PAD_TOK_VEC, self.embedding_matrix, self.UNK_TOK_VEC]'], {'axis': '(0)'}), '([self.PAD_TOK_VEC, self.embedding_matrix, self.UNK_TOK_VEC],\n axis=0)\n', (2682, 2755), True, 'import numpy as np\n'), ((2826, 2889), 'io.open', 'io.open', (['filename'], {'encoding': '"""utf-8"""', 'mode': '"""r"""', 'errors': '"""replace"""'}), "(filename, encoding='utf-8', mode='r', errors='replace')\n", (2833, 2889), False, 'import io\n'), ((1731, 1789), 'numpy.concatenate', 'np.concatenate', (['[self.embedding_matrix, embedding]'], {'axis': '(0)'}), '([self.embedding_matrix, embedding], axis=0)\n', (1745, 1789), True, 'import numpy as np\n'), ((3868, 3884), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (3882, 3884), False, 'from nltk.tokenize import TweetTokenizer\n'), ((1658, 1694), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1, n_embed)'], {}), '(0, 1, (1, n_embed))\n', (1674, 1694), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
A program that carries out mini batch k-means clustering on Movielens datatset"""
from __future__ import print_function, division, absolute_import, unicode_literals
from decimal import *
#other stuff we need to import
import csv
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics.cluster import v_measure_score
from math import *
def distance(user_id,i):
distance=0
for j in range(len(user_movie_matrix[0])):
if user_movie_matrix[user_id][j] !=0 and user_movie_matrix[i][j]!=0:
distance+=Decimal(pow(Decimal(user_movie_matrix[user_id][j] - user_movie_matrix[i][j]),2))
distance=sqrt(distance)
return distance
#beginning of main program
#read in u1.base
training_file = open('ml-100k/u1.base','r')
rows = training_file.readlines()
training_file.close()
training_data=[]
for row in rows:
list = row.split('\t')
int_list = [int(item) for item in list]
training_data.append(int_list)
#read in u1.test
test_file = open('ml-100k/u1.test','r')
rows = test_file.readlines()
test_file.close()
test_data=[]
for row in rows:
list = row.split('\t')
int_list = [int(item) for item in list]
test_data.append(int_list)
print(len(training_data))
print(len(test_data))
user_ids = [row[0] for row in training_data]
user_ids = set(user_ids)
user_ids = sorted(user_ids)
number_of_users = len(user_ids)
#print(user_ids)
print(number_of_users)
movie_ids = [row[1] for row in training_data]
movie_ids = set(movie_ids)
movie_ids = sorted(movie_ids)
number_of_movies = len(movie_ids)
#print(movie_ids)
print(number_of_movies)
#create a user movie matrix
#pre-processing could be in two ways :
# a. either ignore ratings <= 3 so rating of 4 or 5 = 1 in matrix and <=3 is 0
# b. calculate a mean for each user
# c. or simply give 1 if rated and 0 if not rated
user_movie_matrix = np.zeros((number_of_users,number_of_movies))
#user_movie_matrix.fill(0.001)
for row in training_data:
user_id = user_ids.index(row[0])
movie_id = movie_ids.index(row[1])
user_movie_matrix[user_id,movie_id] = row[2]
#user_movie_matrix[user_id,movie_id] = row[2]
#print(user_movie_matrix[0])
#print(user_movie_matrix[942][1])
#print(user_movie_matrix[942][8])
#Normalizing user-movie matrix
#Additional step
'''for i in range(number_of_users):
tempList = []
tempList = user_movie_matrix[i].tolist()
print('templist')
print(tempList)
minVal = min(tempList)
maxVal = max(tempList)
for j in tempList:
j=Decimal(Decimal(j-minVal)/Decimal(maxVal-minVal))
j=j*5
user_movie_matrix[i] = tempList'''
print(user_movie_matrix)
print(len(user_movie_matrix))
print(len(user_movie_matrix[0]))
#print(user_movie_matrix)
#initialize and carry out clustering
K=50
#km = KMeans(n_clusters = K)
#km.fit(user_movie_matrix)
#km = KMeans(n_clusters = K)
km = MiniBatchKMeans(n_clusters = K)
km.fit(user_movie_matrix)
#labels
labels = km.labels_
print(str(labels))
#find which cluster each user is in
cluster_num_users=np.zeros(K)
#maintain a list of users per cluster
cluster_list_users=[]
for i in range(K):
cluster_list_users.append([])
print(cluster_list_users)
prediction = km.predict(user_movie_matrix)
print('\n--------Which cluster each user is in--------')
print('{:<15}\t{}'.format('User','Cluster'))
for i in range(len(prediction)):
print('{:<15}\t{}'.format(user_ids[i],prediction[i]))
cluster_num_users[prediction[i]]+=1
list_of_users = []
list_of_users = cluster_list_users[prediction[i]]
list_of_users.append(i)
cluster_list_users[prediction[i]]=list_of_users
f=open('cluster_num_users','w')
for i in range(K):
f.write(str(i))
f.write('\t')
f.write(str(cluster_num_users[i]))
f.write('\n')
f.close()
print(cluster_num_users)
print(cluster_list_users)
#Number of users in each cluster
print('\n--------Number of users in a cluster--------')
for i in range(K):
print('{:<15}\t{}'.format(i,cluster_num_users[i]))
print(sum(cluster_num_users))
# ---- Evaluate the clustering by predicting held-out ratings ----
# For every (user, movie, rating) triple in the test set, predict the rating
# as the inverse-distance-weighted average of ratings given to that movie by
# the other users in the predicted user's cluster.  Report hit accuracy
# (|error| <= 1.0) and RMSE.
print('The total distance of the solution found is',sum((km.transform(user_movie_matrix)).min(axis=1)))
print('Number of test data ')
print(len(test_data))
accuracy = 0
root_mean_accuracy = 0
for row in test_data:
    print('Testing for user and movie in test : ' + str(row))
    movie = row[1]
    rating = row[2]
    user = row[0]
    user_id = user_ids.index(user)
    cluster_index = labels[user_id]
    print('Other user ids in this cluster : ')
    print(cluster_num_users[cluster_index])
    other_user_ids_in_same_cluster = cluster_list_users[cluster_index]
    print(other_user_ids_in_same_cluster)
    if movie in movie_ids:
        movie_id = movie_ids.index(movie)
    else:
        # Movie never seen during training -> no column in the matrix,
        # so this row cannot be predicted.
        continue
    # BUG FIX: these accumulators were previously initialised once, outside
    # the test loop, so every prediction leaked weights accumulated from all
    # earlier test rows.  They must restart for each row (the sibling
    # counters below were already per-row, confirming the intent).
    weighted_sum = 0
    sum_of_weights = 0
    number_of_users_who_rated_movie = 0
    sum_total_rating = 0
    for i in other_user_ids_in_same_cluster:
        if user_movie_matrix[i][movie_id] > 0:
            # Weight each neighbour's rating by inverse distance; skip
            # neighbours at (rounded) zero distance to avoid dividing by 0.
            if Decimal(round(distance(user_id, i), 2)) > Decimal(0.0):
                weight = Decimal(1 / (distance(user_id, i)))
                weighted_sum += weight * Decimal(user_movie_matrix[i][movie_id])
                sum_of_weights += Decimal(weight)
            number_of_users_who_rated_movie += 1
            sum_total_rating += user_movie_matrix[i][movie_id]
    print('Predicted Rating for this movie :')
    if number_of_users_who_rated_movie > 0 and sum_of_weights > 0:
        print(weighted_sum)
        print(sum_of_weights)
        rating_predicted = weighted_sum / sum_of_weights
        print(rating_predicted)
        print(rating)
        root_mean_accuracy += Decimal(pow(Decimal(rating_predicted - rating), 2))
        if abs(Decimal(rating_predicted - rating)) <= Decimal(1.0):
            print("HERE")
            accuracy += 1
print(accuracy)
print('% accuracy')
print(accuracy * 100 / len(test_data))
# NOTE(review): rows skipped above (unknown movie / no rated neighbours) still
# count in this denominator, which deflates both metrics — confirm intended.
root_mean_accuracy = root_mean_accuracy / len(test_data)
root_mean_accuracy = sqrt(root_mean_accuracy)
print(root_mean_accuracy)
|
[
"sklearn.cluster.MiniBatchKMeans",
"numpy.zeros"
] |
[((1958, 2003), 'numpy.zeros', 'np.zeros', (['(number_of_users, number_of_movies)'], {}), '((number_of_users, number_of_movies))\n', (1966, 2003), True, 'import numpy as np\n'), ((2991, 3020), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'K'}), '(n_clusters=K)\n', (3006, 3020), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((3160, 3171), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (3168, 3171), True, 'import numpy as np\n')]
|
import json # note: ujson fails this test due to float equality
import copy
import numpy as np
import pytest
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict
@pytest.mark.parametrize(
    "space",
    [
        Discrete(3),
        Discrete(5, start=-2),
        Box(low=0.0, high=np.inf, shape=(2, 2)),
        Tuple([Discrete(5), Discrete(10)]),
        Tuple(
            [
                Discrete(5),
                Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
            ]
        ),
        Tuple((Discrete(5), Discrete(2), Discrete(2))),
        Tuple((Discrete(5), Discrete(2, start=6), Discrete(2, start=-4))),
        MultiDiscrete([2, 2, 100]),
        MultiBinary(10),
        Dict(
            {
                "position": Discrete(5),
                "velocity": Box(
                    low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
                ),
            }
        ),
    ],
)
def test_roundtripping(space):
    """Samples must survive a to_jsonable -> json -> from_jsonable round trip."""
    first = space.sample()
    second = space.sample()
    assert space.contains(first)
    assert space.contains(second)
    # Serialise through real JSON text, then deserialise back into samples.
    wire_format = json.loads(json.dumps(space.to_jsonable([first, second])))
    first_back, second_back = space.from_jsonable(wire_format)
    # Compare in jsonable form so numpy arrays are checked by value.
    for before, after in ((first, first_back), (second, second_back)):
        lhs = space.to_jsonable([before])
        rhs = space.to_jsonable([after])
        assert lhs == rhs, "Expected {} to equal {}".format(lhs, rhs)
@pytest.mark.parametrize(
    "space",
    [
        Discrete(3),
        Discrete(5, start=-2),
        Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
        Box(low=-np.inf, high=np.inf, shape=(1, 3)),
        Tuple([Discrete(5), Discrete(10)]),
        Tuple(
            [
                Discrete(5),
                Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
            ]
        ),
        Tuple((Discrete(5), Discrete(2), Discrete(2))),
        Tuple((Discrete(5), Discrete(2), Discrete(2, start=-6))),
        MultiDiscrete([2, 2, 100]),
        MultiBinary(6),
        Dict(
            {
                "position": Discrete(5),
                "velocity": Box(
                    low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
                ),
            }
        ),
    ],
)
def test_equality(space):
    """A shallow copy of a space must compare equal to the original."""
    duplicate = copy.copy(space)
    assert space == duplicate, "Expected {} to equal {}".format(space, duplicate)
@pytest.mark.parametrize(
    "spaces",
    [
        (Discrete(3), Discrete(4)),
        (Discrete(3), Discrete(3, start=-1)),
        (MultiDiscrete([2, 2, 100]), MultiDiscrete([2, 2, 8])),
        (MultiBinary(8), MultiBinary(7)),
        (
            Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
            Box(low=np.array([-10, 0]), high=np.array([10, 9]), dtype=np.float32),
        ),
        (
            Box(low=-np.inf, high=0.0, shape=(2, 1)),
            Box(low=0.0, high=np.inf, shape=(2, 1)),
        ),
        (Tuple([Discrete(5), Discrete(10)]), Tuple([Discrete(1), Discrete(10)])),
        (
            Tuple([Discrete(5), Discrete(10)]),
            Tuple([Discrete(5, start=7), Discrete(10)]),
        ),
        (Dict({"position": Discrete(5)}), Dict({"position": Discrete(4)})),
        (Dict({"position": Discrete(5)}), Dict({"speed": Discrete(5)})),
    ],
)
def test_inequality(spaces):
    """Structurally different spaces must not compare equal."""
    left, right = spaces
    assert left != right, "Expected {} != {}".format(left, right)
@pytest.mark.parametrize(
    "space",
    [
        Discrete(5),
        Discrete(8, start=-20),
        Box(low=0, high=255, shape=(2,), dtype="uint8"),
        Box(low=-np.inf, high=np.inf, shape=(3, 3)),
        Box(low=1.0, high=np.inf, shape=(3, 3)),
        Box(low=-np.inf, high=2.0, shape=(3, 3)),
    ],
)
def test_sample(space):
    """The empirical sample mean should land near the analytic expectation.

    Tolerance is three standard deviations of the drawn samples.
    """
    space.seed(0)
    trials = 100
    draws = np.array([space.sample() for _ in range(trials)])
    if isinstance(space, Box):
        if space.is_bounded():
            # Fully bounded: mean of a uniform draw is the midpoint.
            target = (space.high + space.low) / 2
        elif space.is_bounded("below"):
            target = 1 + space.low
        elif space.is_bounded("above"):
            target = -1 + space.high
        else:
            target = 0.0
    elif isinstance(space, Discrete):
        target = space.start + space.n / 2
    else:
        raise NotImplementedError
    np.testing.assert_allclose(target, draws.mean(), atol=3.0 * draws.std())
@pytest.mark.parametrize(
    "spaces",
    [
        (Discrete(5), MultiBinary(5)),
        (
            Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
            MultiDiscrete([2, 2, 8]),
        ),
        (
            Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),
            Box(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8),
        ),
        (Dict({"position": Discrete(5)}), Tuple([Discrete(5)])),
        (Dict({"position": Discrete(5)}), Discrete(5)),
        (Tuple((Discrete(5),)), Discrete(5)),
        (
            Box(low=np.array([-np.inf, 0.0]), high=np.array([0.0, np.inf])),
            Box(low=np.array([-np.inf, 1.0]), high=np.array([0.0, np.inf])),
        ),
    ],
)
def test_class_inequality(spaces):
    """Each space equals itself but never the differently-constructed one."""
    first, second = spaces
    assert first == first
    assert second == second
    assert first != second
    assert second != first
@pytest.mark.parametrize(
    "space_fn",
    [
        lambda: Dict(space1="abc"),
        lambda: Dict({"space1": "abc"}),
        lambda: Tuple(["abc"]),
    ],
)
def test_bad_space_calls(space_fn):
    """Building a composite space from non-space members must raise AssertionError."""
    with pytest.raises(AssertionError):
        space_fn()
def test_seed_Dict():
    """Seeding a Dict with a dict of seeds must seed each leaf sub-space."""
    composite = Dict(
        {
            "a": Box(low=0, high=1, shape=(3, 3)),
            "b": Dict(
                {
                    "b_1": Box(low=-100, high=100, shape=(2,)),
                    "b_2": Box(low=-1, high=1, shape=(2,)),
                }
            ),
            "c": Discrete(5),
        }
    )
    composite.seed({"a": 0, "b": {"b_1": 1, "b_2": 2}, "c": 3})
    # Stand-alone copies of each leaf space, seeded with the same values.
    leaf_a = Box(low=0, high=1, shape=(3, 3))
    leaf_a.seed(0)
    leaf_b_1 = Box(low=-100, high=100, shape=(2,))
    leaf_b_1.seed(1)
    leaf_b_2 = Box(low=-1, high=1, shape=(2,))
    leaf_b_2.seed(2)
    leaf_c = Discrete(5)
    leaf_c.seed(3)
    # Every draw from the composite must match the stand-alone draws.
    for _ in range(10):
        drawn = composite.sample()
        assert (drawn["a"] == leaf_a.sample()).all()
        assert (drawn["b"]["b_1"] == leaf_b_1.sample()).all()
        assert (drawn["b"]["b_2"] == leaf_b_2.sample()).all()
        assert drawn["c"] == leaf_c.sample()
def test_box_dtype_check():
    """contains() must reject values whose dtype differs from the space's.

    Related Issues:
    https://github.com/openai/gym/issues/2357
    https://github.com/openai/gym/issues/2298
    """
    scalar_box = Box(0, 2, tuple(), dtype=np.float32)
    # A bare Python float is cast to the space's dtype, so it is contained.
    assert scalar_box.contains(0.5)
    # np.array(0.5) defaults to float64 -> wrong dtype, not contained.
    assert not scalar_box.contains(np.array(0.5))
    assert not scalar_box.contains(np.array(1))
@pytest.mark.parametrize(
    "space",
    [
        Discrete(3),
        Discrete(3, start=-4),
        Box(low=0.0, high=np.inf, shape=(2, 2)),
        Tuple([Discrete(5), Discrete(10)]),
        Tuple(
            [
                Discrete(5),
                Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
            ]
        ),
        Tuple((Discrete(5), Discrete(2), Discrete(2))),
        MultiDiscrete([2, 2, 100]),
        MultiBinary(10),
        Dict(
            {
                "position": Discrete(5),
                "velocity": Box(
                    low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
                ),
            }
        ),
    ],
)
def test_seed_returns_list(space):
    """space.seed() must hand back a non-empty list of ints for any seed arg."""

    def _check_is_int_list(seeds):
        # Helper: the contract is "list with at least one int element".
        assert isinstance(seeds, list)
        assert len(seeds) >= 1
        assert all(isinstance(s, int) for s in seeds)

    _check_is_int_list(space.seed(None))
    _check_is_int_list(space.seed(0))
def convert_sample_hashable(sample):
    """Recursively convert a sample into a tuple-based, hashable value.

    ndarrays become tuples of their elements, lists/tuples become tuples of
    converted members, dicts become tuples of (key, converted value) pairs,
    and anything else is returned unchanged.
    """
    if isinstance(sample, np.ndarray):
        return tuple(sample.tolist())
    if isinstance(sample, (list, tuple)):
        return tuple(map(convert_sample_hashable, sample))
    if isinstance(sample, dict):
        return tuple(
            (key, convert_sample_hashable(value)) for key, value in sample.items()
        )
    return sample
def sample_equal(sample1, sample2):
    """Return True if the two samples match after hashable canonicalisation."""
    return convert_sample_hashable(sample1) == convert_sample_hashable(sample2)
@pytest.mark.parametrize(
    "space",
    [
        Discrete(3),
        Discrete(3, start=-4),
        Box(low=0.0, high=np.inf, shape=(2, 2)),
        Tuple([Discrete(5), Discrete(10)]),
        Tuple(
            [
                Discrete(5),
                Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
            ]
        ),
        Tuple((Discrete(5), Discrete(2), Discrete(2))),
        MultiDiscrete([2, 2, 100]),
        MultiBinary(10),
        Dict(
            {
                "position": Discrete(5),
                "velocity": Box(
                    low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
                ),
            }
        ),
    ],
)
def test_seed_reproducibility(space):
    """Two copies of a space seeded identically must sample identically."""
    twin = copy.deepcopy(space)
    # Randomise both first so a shared pre-existing state can't mask a bug.
    space.seed(None)
    twin.seed(None)
    assert space.seed(0) == twin.seed(0)
    assert sample_equal(space.sample(), twin.sample())
@pytest.mark.parametrize(
    "space",
    [
        Tuple([Discrete(100), Discrete(100)]),
        Tuple([Discrete(5), Discrete(10)]),
        Tuple([Discrete(5), Discrete(5, start=10)]),
        Tuple(
            [
                Discrete(5),
                Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
            ]
        ),
        Tuple((Discrete(5), Discrete(2), Discrete(2))),
        Dict(
            {
                "position": Discrete(5),
                "velocity": Box(
                    low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
                ),
            }
        ),
    ],
)
def test_seed_subspace_incorrelated(space):
    """Seeding a composite space must leave each sub-space with a distinct RNG state."""
    if isinstance(space, Tuple):
        subspaces = space.spaces
    else:
        subspaces = space.spaces.values()
    space.seed(0)
    # If any two states canonicalise to the same value, the set shrinks.
    states = [
        convert_sample_hashable(sub.np_random.bit_generator.state)
        for sub in subspaces
    ]
    assert len(states) == len(set(states))
def test_multidiscrete_as_tuple():
    """Indexing and slicing a MultiDiscrete must yield the expected sub-spaces."""
    # 1D multi-discrete
    flat = MultiDiscrete([3, 4, 5])
    assert flat.shape == (3,)
    assert flat[0] == Discrete(3)
    assert flat[0:1] == MultiDiscrete([3])
    assert flat[0:2] == MultiDiscrete([3, 4])
    # A full slice is an equal but distinct object.
    assert flat[:] == flat and flat[:] is not flat
    assert len(flat) == 3
    # 2D multi-discrete
    grid = MultiDiscrete([[3, 4, 5], [6, 7, 8]])
    assert grid.shape == (2, 3)
    assert grid[0, 1] == Discrete(4)
    assert grid[0] == MultiDiscrete([3, 4, 5])
    assert grid[0:1] == MultiDiscrete([[3, 4, 5]])
    assert grid[0:2, :] == MultiDiscrete([[3, 4, 5], [6, 7, 8]])
    assert grid[:, 0:1] == MultiDiscrete([[3], [6]])
    assert grid[0:2, 0:2] == MultiDiscrete([[3, 4], [6, 7]])
    assert grid[:] == grid and grid[:] is not grid
    assert grid[:, :] == grid and grid[:, :] is not grid
def test_multidiscrete_subspace_reproducibility():
    """Sub-spaces obtained by indexing must share the parent's RNG stream."""
    # 1D multi-discrete: repeated views of the same index/slice sample alike.
    flat = MultiDiscrete([100, 200, 300])
    flat.seed(None)
    assert sample_equal(flat[0].sample(), flat[0].sample())
    assert sample_equal(flat[0:1].sample(), flat[0:1].sample())
    assert sample_equal(flat[0:2].sample(), flat[0:2].sample())
    assert sample_equal(flat[:].sample(), flat[:].sample())
    assert sample_equal(flat[:].sample(), flat.sample())
    # 2D multi-discrete
    grid = MultiDiscrete([[300, 400, 500], [600, 700, 800]])
    grid.seed(None)
    assert sample_equal(grid[0, 1].sample(), grid[0, 1].sample())
    assert sample_equal(grid[0].sample(), grid[0].sample())
    assert sample_equal(grid[0:1].sample(), grid[0:1].sample())
    assert sample_equal(grid[0:2, :].sample(), grid[0:2, :].sample())
    assert sample_equal(grid[:, 0:1].sample(), grid[:, 0:1].sample())
    assert sample_equal(grid[0:2, 0:2].sample(), grid[0:2, 0:2].sample())
    assert sample_equal(grid[:].sample(), grid[:].sample())
    assert sample_equal(grid[:, :].sample(), grid[:, :].sample())
    assert sample_equal(grid[:, :].sample(), grid.sample())
def test_space_legacy_state_pickling():
    """__setstate__ must still accept the pre-refactor attribute layout."""
    legacy_state = {
        "shape": (
            1,
            2,
            3,
        ),
        "dtype": np.int64,
        "np_random": np.random.default_rng(),
        "n": 3,
    }
    space = Discrete(1)
    space.__setstate__(legacy_state)
    # Both the public property and its private backing field must be restored.
    for attr in ("shape", "_shape"):
        assert getattr(space, attr) == legacy_state["shape"]
    for attr in ("np_random", "_np_random"):
        assert getattr(space, attr) == legacy_state["np_random"]
    assert space.n == 3
    assert space.dtype == legacy_state["dtype"]
|
[
"gym.spaces.MultiBinary",
"copy.deepcopy",
"gym.spaces.Discrete",
"copy.copy",
"json.dumps",
"gym.spaces.MultiDiscrete",
"numpy.random.default_rng",
"pytest.raises",
"numpy.array",
"gym.spaces.Box",
"gym.spaces.Tuple",
"gym.spaces.Dict"
] |
[((2604, 2620), 'copy.copy', 'copy.copy', (['space'], {}), '(space)\n', (2613, 2620), False, 'import copy\n'), ((6518, 6550), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(3, 3)'}), '(low=0, high=1, shape=(3, 3))\n', (6521, 6550), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6575, 6610), 'gym.spaces.Box', 'Box', ([], {'low': '(-100)', 'high': '(100)', 'shape': '(2,)'}), '(low=-100, high=100, shape=(2,))\n', (6578, 6610), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6637, 6668), 'gym.spaces.Box', 'Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(2,)'}), '(low=-1, high=1, shape=(2,))\n', (6640, 6668), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6693, 6704), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (6701, 6704), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9745, 9765), 'copy.deepcopy', 'copy.deepcopy', (['space'], {}), '(space)\n', (9758, 9765), False, 'import copy\n'), ((10956, 10980), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (10969, 10980), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11258, 11295), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4, 5], [6, 7, 8]]'], {}), '([[3, 4, 5], [6, 7, 8]])\n', (11271, 11295), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11855, 11885), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[100, 200, 300]'], {}), '([100, 200, 300])\n', (11868, 11885), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((12260, 12309), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[300, 400, 500], [600, 700, 800]]'], {}), '([[300, 400, 500], [600, 700, 800]])\n', (12273, 12309), False, 'from gym.spaces import Tuple, Box, Discrete, 
MultiDiscrete, MultiBinary, Dict\n'), ((13185, 13196), 'gym.spaces.Discrete', 'Discrete', (['(1)'], {}), '(1)\n', (13193, 13196), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1232, 1252), 'json.dumps', 'json.dumps', (['json_rep'], {}), '(json_rep)\n', (1242, 1252), False, 'import json\n'), ((245, 256), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (253, 256), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((266, 287), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(-2)'}), '(5, start=-2)\n', (274, 287), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((297, 336), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 2)'}), '(low=0.0, high=np.inf, shape=(2, 2))\n', (300, 336), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((688, 714), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (701, 714), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((724, 739), 'gym.spaces.MultiBinary', 'MultiBinary', (['(10)'], {}), '(10)\n', (735, 739), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1739, 1750), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (1747, 1750), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1760, 1781), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(-2)'}), '(5, start=-2)\n', (1768, 1781), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1871, 1914), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(1, 3)'}), '(low=-np.inf, high=np.inf, shape=(1, 3))\n', (1874, 1914), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2257, 2283), 
'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (2270, 2283), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2293, 2307), 'gym.spaces.MultiBinary', 'MultiBinary', (['(6)'], {}), '(6)\n', (2304, 2307), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3801, 3812), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3809, 3812), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3822, 3844), 'gym.spaces.Discrete', 'Discrete', (['(8)'], {'start': '(-20)'}), '(8, start=-20)\n', (3830, 3844), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3854, 3901), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(2,)', 'dtype': '"""uint8"""'}), "(low=0, high=255, shape=(2,), dtype='uint8')\n", (3857, 3901), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3911, 3954), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(3, 3)'}), '(low=-np.inf, high=np.inf, shape=(3, 3))\n', (3914, 3954), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3964, 4003), 'gym.spaces.Box', 'Box', ([], {'low': '(1.0)', 'high': 'np.inf', 'shape': '(3, 3)'}), '(low=1.0, high=np.inf, shape=(3, 3))\n', (3967, 4003), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4013, 4053), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': '(2.0)', 'shape': '(3, 3)'}), '(low=-np.inf, high=2.0, shape=(3, 3))\n', (4016, 4053), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5890, 5919), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5903, 5919), False, 'import pytest\n'), ((7521, 7532), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), 
'(3)\n', (7529, 7532), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7542, 7563), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {'start': '(-4)'}), '(3, start=-4)\n', (7550, 7563), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7573, 7612), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 2)'}), '(low=0.0, high=np.inf, shape=(2, 2))\n', (7576, 7612), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7889, 7915), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (7902, 7915), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7925, 7940), 'gym.spaces.MultiBinary', 'MultiBinary', (['(10)'], {}), '(10)\n', (7936, 7940), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9017, 9028), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (9025, 9028), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9038, 9059), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {'start': '(-4)'}), '(3, start=-4)\n', (9046, 9059), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9069, 9108), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 2)'}), '(low=0.0, high=np.inf, shape=(2, 2))\n', (9072, 9108), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9385, 9411), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (9398, 9411), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9421, 9436), 'gym.spaces.MultiBinary', 'MultiBinary', (['(10)'], {}), '(10)\n', (9432, 9436), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11036, 
11047), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (11044, 11047), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11073, 11091), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3]'], {}), '([3])\n', (11086, 11091), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11117, 11138), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3, 4]'], {}), '([3, 4])\n', (11130, 11138), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11356, 11367), 'gym.spaces.Discrete', 'Discrete', (['(4)'], {}), '(4)\n', (11364, 11367), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11391, 11415), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (11404, 11415), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11441, 11467), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4, 5]]'], {}), '([[3, 4, 5]])\n', (11454, 11467), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11496, 11533), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4, 5], [6, 7, 8]]'], {}), '([[3, 4, 5], [6, 7, 8]])\n', (11509, 11533), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11562, 11587), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3], [6]]'], {}), '([[3], [6]])\n', (11575, 11587), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11618, 11649), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4], [6, 7]]'], {}), '([[3, 4], [6, 7]])\n', (11631, 11649), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((13126, 13149), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (13147, 13149), True, 'import numpy as np\n'), ((2756, 2767), 
'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (2764, 2767), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2769, 2780), 'gym.spaces.Discrete', 'Discrete', (['(4)'], {}), '(4)\n', (2777, 2780), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2792, 2803), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (2800, 2803), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2805, 2826), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {'start': '(-1)'}), '(3, start=-1)\n', (2813, 2826), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2838, 2864), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (2851, 2864), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2866, 2890), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 8]'], {}), '([2, 2, 8])\n', (2879, 2890), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2902, 2916), 'gym.spaces.MultiBinary', 'MultiBinary', (['(8)'], {}), '(8)\n', (2913, 2916), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2918, 2932), 'gym.spaces.MultiBinary', 'MultiBinary', (['(7)'], {}), '(7)\n', (2929, 2932), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3145, 3185), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': '(0.0)', 'shape': '(2, 1)'}), '(low=-np.inf, high=0.0, shape=(2, 1))\n', (3148, 3185), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3199, 3238), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 1)'}), '(low=0.0, high=np.inf, shape=(2, 1))\n', (3202, 3238), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, 
Dict\n'), ((4823, 4834), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (4831, 4834), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4836, 4850), 'gym.spaces.MultiBinary', 'MultiBinary', (['(5)'], {}), '(5)\n', (4847, 4850), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4959, 4983), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 8]'], {}), '([2, 2, 8])\n', (4972, 4983), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5018, 5073), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(64, 64, 3)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8)\n', (5021, 5073), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5087, 5142), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(32, 32, 3)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8)\n', (5090, 5142), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5262, 5273), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5270, 5273), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5308, 5319), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5316, 5319), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5743, 5761), 'gym.spaces.Dict', 'Dict', ([], {'space1': '"""abc"""'}), "(space1='abc')\n", (5747, 5761), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5779, 5802), 'gym.spaces.Dict', 'Dict', (["{'space1': 'abc'}"], {}), "({'space1': 'abc'})\n", (5783, 5802), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5820, 5834), 'gym.spaces.Tuple', 'Tuple', (["['abc']"], {}), "(['abc'])\n", (5825, 5834), 
False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6014, 6046), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(3, 3)'}), '(low=0, high=1, shape=(3, 3))\n', (6017, 6046), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6263, 6274), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (6271, 6274), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7408, 7421), 'numpy.array', 'np.array', (['(0.5)'], {}), '(0.5)\n', (7416, 7421), True, 'import numpy as np\n'), ((7453, 7464), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (7461, 7464), True, 'import numpy as np\n'), ((353, 364), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (361, 364), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((366, 378), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (374, 378), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((427, 438), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (435, 438), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((564, 575), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (572, 575), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((577, 588), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (585, 588), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((590, 601), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (598, 601), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((620, 631), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (628, 631), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((633, 653), 'gym.spaces.Discrete', 
'Discrete', (['(2)'], {'start': '(6)'}), '(2, start=6)\n', (641, 653), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((655, 676), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {'start': '(-4)'}), '(2, start=-4)\n', (663, 676), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((797, 808), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (805, 808), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1799, 1817), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (1807, 1817), True, 'import numpy as np\n'), ((1824, 1842), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (1832, 1842), True, 'import numpy as np\n'), ((1931, 1942), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (1939, 1942), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1944, 1956), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (1952, 1956), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2005, 2016), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2013, 2016), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2142, 2153), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2150, 2153), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2155, 2166), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (2163, 2166), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2168, 2179), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (2176, 2179), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2198, 2209), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2206, 2209), False, 'from gym.spaces import Tuple, Box, Discrete, 
MultiDiscrete, MultiBinary, Dict\n'), ((2211, 2222), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (2219, 2222), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2224, 2245), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {'start': '(-6)'}), '(2, start=-6)\n', (2232, 2245), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2365, 2376), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2373, 2376), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7629, 7640), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (7637, 7640), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7642, 7654), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (7650, 7654), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7703, 7714), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (7711, 7714), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7840, 7851), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (7848, 7851), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7853, 7864), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (7861, 7864), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7866, 7877), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (7874, 7877), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7998, 8009), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (8006, 8009), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9125, 9136), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9133, 9136), False, 'from gym.spaces import Tuple, Box, Discrete, 
MultiDiscrete, MultiBinary, Dict\n'), ((9138, 9150), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (9146, 9150), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9199, 9210), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9207, 9210), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9336, 9347), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9344, 9347), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9349, 9360), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (9357, 9360), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9362, 9373), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (9370, 9373), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9494, 9505), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9502, 9505), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9976, 9989), 'gym.spaces.Discrete', 'Discrete', (['(100)'], {}), '(100)\n', (9984, 9989), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9991, 10004), 'gym.spaces.Discrete', 'Discrete', (['(100)'], {}), '(100)\n', (9999, 10004), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10023, 10034), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10031, 10034), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10036, 10048), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (10044, 10048), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10067, 10078), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10075, 10078), False, 'from gym.spaces import Tuple, Box, Discrete, 
MultiDiscrete, MultiBinary, Dict\n'), ((10080, 10101), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(10)'}), '(5, start=10)\n', (10088, 10101), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10150, 10161), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10158, 10161), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10287, 10298), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10295, 10298), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10300, 10311), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (10308, 10311), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10313, 10324), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (10321, 10324), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10384, 10395), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10392, 10395), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2965, 2983), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (2973, 2983), True, 'import numpy as np\n'), ((2990, 3008), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (2998, 3008), True, 'import numpy as np\n'), ((3049, 3067), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (3057, 3067), True, 'import numpy as np\n'), ((3074, 3091), 'numpy.array', 'np.array', (['[10, 9]'], {}), '([10, 9])\n', (3082, 3091), True, 'import numpy as np\n'), ((3267, 3278), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3275, 3278), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3280, 3292), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3288, 3292), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, 
MultiBinary, Dict\n'), ((3303, 3314), 'gym.spaces.Discrete', 'Discrete', (['(1)'], {}), '(1)\n', (3311, 3314), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3316, 3328), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3324, 3328), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3362, 3373), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3370, 3373), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3375, 3387), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3383, 3387), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3410, 3430), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(7)'}), '(5, start=7)\n', (3418, 3430), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3432, 3444), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3440, 3444), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3486, 3497), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3494, 3497), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3519, 3530), 'gym.spaces.Discrete', 'Discrete', (['(4)'], {}), '(4)\n', (3527, 3530), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3562, 3573), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3570, 3573), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3592, 3603), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3600, 3603), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4883, 4901), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (4891, 4901), True, 'import numpy as np\n'), ((4908, 4926), 'numpy.array', 
'np.array', (['[10, 10]'], {}), '([10, 10])\n', (4916, 4926), True, 'import numpy as np\n'), ((5182, 5193), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5190, 5193), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5204, 5215), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5212, 5215), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5247, 5258), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5255, 5258), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5292, 5303), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5300, 5303), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5352, 5376), 'numpy.array', 'np.array', (['[-np.inf, 0.0]'], {}), '([-np.inf, 0.0])\n', (5360, 5376), True, 'import numpy as np\n'), ((5383, 5406), 'numpy.array', 'np.array', (['[0.0, np.inf]'], {}), '([0.0, np.inf])\n', (5391, 5406), True, 'import numpy as np\n'), ((5429, 5453), 'numpy.array', 'np.array', (['[-np.inf, 1.0]'], {}), '([-np.inf, 1.0])\n', (5437, 5453), True, 'import numpy as np\n'), ((5460, 5483), 'numpy.array', 'np.array', (['[0.0, np.inf]'], {}), '([0.0, np.inf])\n', (5468, 5483), True, 'import numpy as np\n'), ((6116, 6151), 'gym.spaces.Box', 'Box', ([], {'low': '(-100)', 'high': '(100)', 'shape': '(2,)'}), '(low=-100, high=100, shape=(2,))\n', (6119, 6151), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6180, 6211), 'gym.spaces.Box', 'Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(2,)'}), '(low=-1, high=1, shape=(2,))\n', (6183, 6211), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((464, 480), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (472, 480), True, 'import numpy as np\n'), ((487, 503), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 
5])\n', (495, 503), True, 'import numpy as np\n'), ((867, 883), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (875, 883), True, 'import numpy as np\n'), ((890, 906), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (898, 906), True, 'import numpy as np\n'), ((2042, 2058), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2050, 2058), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (2073, 2081), True, 'import numpy as np\n'), ((2435, 2451), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2443, 2451), True, 'import numpy as np\n'), ((2458, 2474), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (2466, 2474), True, 'import numpy as np\n'), ((7740, 7756), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (7748, 7756), True, 'import numpy as np\n'), ((7763, 7779), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (7771, 7779), True, 'import numpy as np\n'), ((8068, 8084), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8076, 8084), True, 'import numpy as np\n'), ((8091, 8107), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (8099, 8107), True, 'import numpy as np\n'), ((9236, 9252), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9244, 9252), True, 'import numpy as np\n'), ((9259, 9275), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (9267, 9275), True, 'import numpy as np\n'), ((9564, 9580), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9572, 9580), True, 'import numpy as np\n'), ((9587, 9603), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (9595, 9603), True, 'import numpy as np\n'), ((10187, 10203), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (10195, 10203), True, 'import numpy as np\n'), ((10210, 10226), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (10218, 10226), True, 'import numpy as np\n'), ((10454, 10470), 'numpy.array', 'np.array', 
(['[0, 0]'], {}), '([0, 0])\n', (10462, 10470), True, 'import numpy as np\n'), ((10477, 10493), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (10485, 10493), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
from lib.crf import crf_inference
from lib.CC_labeling_8 import CC_lab
def single_generate_seed_step(params):
    """Grow weak localization cues for ONE sample via seeded region growing.

    Pixels whose winning (image-present) class score clears a threshold are
    grouped into connected components; any component touching an existing
    seed pixel is absorbed into the cue map for that class.

    Parameters
    ----------
    params : 3-tuple of numpy arrays
        tag  : array of shape (1, 1, C) -- image-level class labels
        cue  : array of shape (H, W, C) -- weak cue map (updated in place)
        prob : array of shape (H, W, C) -- softmax score map

    Returns
    -------
    numpy array of shape (1, H, W, C)
        The grown cue map, with a leading batch axis added.
    """
    # th_f,th_b = 0.85,0.99
    fg_thresh, bg_thresh = 0.5, 0.7
    tag, cue, prob = params

    # Restrict scores to the classes actually present in the image tag.
    masked_prob = prob * tag
    # Winning class per pixel, shifted by one so 0 can mean "no class
    # satisfied the similarity criteria".
    winner = np.argmax(masked_prob, axis=2) + 1
    is_fg = (winner > 1).astype(np.uint8)
    # Foreground test: at least one present foreground class exceeds fg_thresh.
    fg_pass = (np.sum((masked_prob[:, :, 1:] > fg_thresh).astype(np.uint8), axis=2) > 0.5).astype(
        np.uint8)
    # Background test: the background score exceeds bg_thresh.
    bg_pass = (np.sum((masked_prob[:, :, 0:1] > bg_thresh).astype(np.uint8), axis=2) > 0.5).astype(
        np.uint8)
    # label_map[y, x] == c+1 iff class c is (1) in the image tag, (2) the
    # per-pixel argmax among tagged classes, and (3) above its threshold;
    # 0 means no class met all three conditions.
    label_map = (fg_pass * is_fg + bg_pass * (1 - is_fg)) * winner

    for c in np.where(tag > 0.5)[2]:  # iterate over classes present in the tag
        region = (label_map == (c + 1)).astype(int)
        components = CC_lab(region)
        # Assigns a connected-component id to every pixel (components.labels
        # is a 2-D list of group indices).
        components.connectedComponentLabel()
        seeded = set()  # component ids that overlap an existing seed for class c
        for (row, col), inside in np.ndenumerate(region):
            if inside == 1 and cue[row, col, c] == 1:
                seeded.add(components.labels[row][col])
            elif inside == 1 and np.sum(cue[row, col, :]) == 1:
                # Pixel already seeded for a different class: exclude it.
                components.labels[row][col] = -1
        for (row, col), comp_id in np.ndenumerate(np.array(components.labels)):
            if comp_id in seeded:
                cue[row, col, c] = 1
    return np.expand_dims(cue, axis=0)
class DSRG():
    """Class for the DSRG (Deep Seeded Region Growing) method.

    Builds a VGG16-style FCN with four dilated (ASPP-like) heads, plus a
    seeded-region-growing op and a dense-CRF smoothing op, both wrapped as
    TensorFlow py_func nodes. Trained with a balanced seeding loss and a
    constrain (KL-to-CRF) loss.
    """
    def __init__(self, config):
        self.config = config
        self.dataset = self.config.get('dataset')
        self.h, self.w = (self.config.get('img_size'), self.config.get('img_size'))
        self.num_classes = self.config.get('num_classes')
        self.batch_size = self.config.get("batch_size")
        self.phase = self.config.get('phase')
        self.img_mean = self.config.get('img_mean')
        self.seed_size = self.config.get('seed_size')
        self.init_model_path = self.config.get('init_model_path', None)
        # Dense-CRF hyperparameters; the train config uses downscaled
        # pairwise std-devs and fewer iterations than the test config.
        self.crf_config_train = {"g_sxy":3/12,"g_compat":3,"bi_sxy":80/12,"bi_srgb":13,"bi_compat":10,"iterations":5}
        self.crf_config_test = {"g_sxy":3,"g_compat":3,"bi_sxy":80,"bi_srgb":13,"bi_compat":10,"iterations":10}
        self.net = {}           # layer name -> tensor
        self.weights = {}       # layer name -> (weights, bias) variables
        self.trainable_list = []
        self.loss = {}
        self.metric = {}
        self.variables={"total":[]}
        # Floor on predicted probabilities, used to avoid log(0).
        self.min_prob = 0.0001
        # Cumulative downsampling factor per layer, relative to the input.
        self.stride = {}
        self.stride["input"] = 1
        # different lr for different variable
        self.lr_1_list = []
        self.lr_2_list = []
        self.lr_10_list = []
        self.lr_20_list = []
        self.pool = self.config.get('pool')  # multiprocessing pool used by the DSRG layer

    def build(self,net_input=None,net_label=None,net_cues=None,net_id=None,phase='train'):
        """Build DSRG model

        Parameters
        ----------
        net_input : Tensor, optional
            Input images in batch, after resizing and normalizing
        net_label : Tensor, optional
            GT segmentation in batch, after resizing
        net_cues : Tensor, optional
            Weak cue labels in batch, after resizing
        net_id : Tensor, optional
            Filenames in batch
        phase : str, optional
            Phase to run DSRG model

        Returns
        -------
        (output) : Tensor
            Final layer of FCN model of DSRG
        """
        # Guard so the graph is only constructed once per instance.
        if "output" not in self.net:
            if phase == 'train':
                with tf.name_scope("placeholder"):
                    self.net["input"] = net_input
                    self.net["label"] = net_label # [None, self.num_classes], int32
                    self.net["cues"] = net_cues # [None,41,41,self.num_classes])
                    self.net["id"] = net_id
                # Dropout is active only during training (rate 0.5).
                self.net["drop_prob"] = tf.Variable(0.5, trainable=False)
                self.net["output"] = self.create_network(phase)
                self.pred()
            elif phase in ['val', 'tuning', 'segtest', 'test']:
                with tf.name_scope("placeholder"):
                    self.net["input"] = net_input
                    # self.net["label"] = net_label # [None, self.num_classes], int32
                    # self.net["cues"] = net_cues # [None,41,41,self.num_classes])
                    self.net["id"] = net_id
                self.net["drop_prob"] = tf.Variable(0.0, trainable=False)
                self.net["output"] = self.create_network(phase)
                self.pred()
            elif phase == 'debug':
                with tf.name_scope("placeholder"):
                    self.net["input"] = net_input
                    self.net["label"] = net_label # [None, self.num_classes], int32
                    self.net["cues"] = net_cues # [None,41,41,self.num_classes])
                    self.net["id"] = net_id
                self.net["drop_prob"] = tf.Variable(0.0, trainable=False)
                self.net["output"] = self.create_network(phase)
                self.pred()
        # Epoch counter variable, updated externally by the training loop
        # (presumably -- not assigned anywhere in this class).
        self.net["epoch"] = tf.Variable(0.0, trainable=False)
        return self.net["output"]

    def create_network(self, phase):
        """Helper function to build DSRG model

        Parameters
        ----------
        phase : str, optional
            Phase to run DSRG model

        Returns
        -------
        (crf) : Tensor
            Final layer of FCN model of DSRG
        """
        if self.init_model_path is not None:
            self.load_init_model()
        with tf.name_scope("vgg") as scope:
            # build block: VGG16 convolutional trunk (conv5 uses dilation 2).
            block = self.build_block("input",["conv1_1","relu1_1","conv1_2","relu1_2","pool1"])
            block = self.build_block(block,["conv2_1","relu2_1","conv2_2","relu2_2","pool2"])
            block = self.build_block(block,["conv3_1","relu3_1","conv3_2","relu3_2","conv3_3","relu3_3","pool3"])
            block = self.build_block(block,["conv4_1","relu4_1","conv4_2","relu4_2","conv4_3","relu4_3","pool4"])
            block = self.build_block(block,["conv5_1","relu5_1","conv5_2","relu5_2","conv5_3","relu5_3","pool5","pool5a"])
            # Four parallel dilated heads (rates 6/12/18/24), summed ASPP-style.
            fc1 = self.build_fc(block,["fc6_1","relu6_1","drop6_1","fc7_1","relu7_1","drop7_1","fc8_1"], dilate_rate=6)
            fc2 = self.build_fc(block,["fc6_2","relu6_2","drop6_2","fc7_2","relu7_2","drop7_2","fc8_2"], dilate_rate=12)
            fc3 = self.build_fc(block,["fc6_3","relu6_3","drop6_3","fc7_3","relu7_3","drop7_3","fc8_3"], dilate_rate=18)
            fc4 = self.build_fc(block,["fc6_4","relu6_4","drop6_4","fc7_4","relu7_4","drop7_4","fc8_4"], dilate_rate=24)
            self.net["fc8"] = self.net[fc1]+self.net[fc2]+self.net[fc3]+self.net[fc4]

            # DSRG: seeded region growing is only needed when cues are fed.
            softmax = self.build_sp_softmax("fc8","fc8-softmax")
            if phase in ['train', 'debug']:
                new_seed = self.build_dsrg_layer("cues","fc8-softmax","new_cues")
            crf = self.build_crf("fc8-softmax", "crf") # new
            return self.net[crf] # NOTE: crf is log-probability

    def build_block(self,last_layer,layer_lists):
        """Build a block of the DSRG model

        Parameters
        ----------
        last_layer : Tensor
            The most recent layer used to build the DSRG model
        layer_lists : list of str
            List of strings of layer names to build inside the current block

        Returns
        -------
        last_layer : Tensor
            The output layer of the current block
        """
        # Layer kind is dispatched on the name prefix; the digit after the
        # prefix ("conv5_x", "pool4"...) selects the dilated / stride-1
        # variants used to keep the output stride fixed after pool3.
        for layer in layer_lists:
            if layer.startswith("conv"):
                if layer[4] != "5":
                    with tf.name_scope(layer) as scope:
                        self.stride[layer] = self.stride[last_layer]
                        weights,bias = self.get_weights_and_bias(layer)
                        self.net[layer] = tf.nn.conv2d( self.net[last_layer], weights, strides = [1,1,1,1], padding="SAME", name="conv")
                        self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
                        last_layer = layer
                if layer[4] == "5":
                    # conv5_* uses atrous convolution (rate 2) instead of striding.
                    with tf.name_scope(layer) as scope:
                        self.stride[layer] = self.stride[last_layer]
                        weights,bias = self.get_weights_and_bias(layer)
                        self.net[layer] = tf.nn.atrous_conv2d( self.net[last_layer], weights, rate=2, padding="SAME", name="conv")
                        self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
                        last_layer = layer
            if layer.startswith("relu"):
                with tf.name_scope(layer) as scope:
                    self.stride[layer] = self.stride[last_layer]
                    self.net[layer] = tf.nn.relu( self.net[last_layer],name="relu")
                    last_layer = layer
            elif layer.startswith("pool5a"):
                # Extra 3x3 stride-1 average pool after pool5.
                with tf.name_scope(layer) as scope:
                    self.stride[layer] = self.stride[last_layer]
                    self.net[layer] = tf.nn.avg_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,1,1,1],padding="SAME",name="pool")
                    last_layer = layer
            elif layer.startswith("pool"):
                if layer[4] not in ["4","5"]:
                    # pool1-pool3 downsample by 2.
                    with tf.name_scope(layer) as scope:
                        self.stride[layer] = 2 * self.stride[last_layer]
                        self.net[layer] = tf.nn.max_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,2,2,1],padding="SAME",name="pool")
                        last_layer = layer
                if layer[4] in ["4","5"]:
                    # pool4/pool5 keep stride 1 so the feature map resolution is preserved.
                    with tf.name_scope(layer) as scope:
                        self.stride[layer] = self.stride[last_layer]
                        self.net[layer] = tf.nn.max_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,1,1,1],padding="SAME",name="pool")
                        last_layer = layer
        return last_layer

    def build_fc(self,last_layer, layer_lists, dilate_rate=12):
        """Build a block of fully-connected layers

        Parameters
        ----------
        last_layer : Tensor
            The most recent layer used to build the DSRG model
        layer_lists : list of str
            List of strings of layer names to build inside the current block
        dilate_rate : int, optional
            Dilation rate for atrous 2D convolutional layers (applied to fc6 only)

        Returns
        -------
        last_layer : Tensor
            The output layer of the current block
        """
        for layer in layer_lists:
            if layer.startswith("fc"):
                with tf.name_scope(layer) as scope:
                    weights,bias = self.get_weights_and_bias(layer)
                    if layer.startswith("fc6"):
                        # fc6 is the dilated 3x3 "fully-connected" conv.
                        self.net[layer] = tf.nn.atrous_conv2d( self.net[last_layer], weights, rate=dilate_rate, padding="SAME", name="conv")
                    else:
                        self.net[layer] = tf.nn.conv2d( self.net[last_layer], weights, strides = [1,1,1,1], padding="SAME", name="conv")
                    self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
                    last_layer = layer
            if layer.startswith("batch_norm"):
                with tf.name_scope(layer) as scope:
                    self.net[layer] = tf.contrib.layers.batch_norm(self.net[last_layer])
                    last_layer = layer
            if layer.startswith("relu"):
                with tf.name_scope(layer) as scope:
                    self.net[layer] = tf.nn.relu( self.net[last_layer])
                    last_layer = layer
            if layer.startswith("drop"):
                with tf.name_scope(layer) as scope:
                    # drop_prob is 0.5 in train and 0.0 in eval (set in build()).
                    self.net[layer] = tf.nn.dropout( self.net[last_layer], keep_prob=1-self.net["drop_prob"])
                    last_layer = layer
        return last_layer

    def build_sp_softmax(self,last_layer,layer):
        """Build a block of a fully-connected layer and softmax

        Numerically stable softmax over the class axis, with probabilities
        floored at ``self.min_prob`` and renormalized.

        Parameters
        ----------
        last_layer : Tensor
            The most recent layer used to build the DSRG model
        layer : str
            Name of the softmax output layer

        Returns
        -------
        layer : str
            Name of the softmax output layer
        """
        # Subtract the per-pixel max before exp for numerical stability.
        preds_max = tf.reduce_max(self.net[last_layer],axis=3,keepdims=True)
        preds_exp = tf.exp(self.net[last_layer] - preds_max)
        self.net[layer] = preds_exp / tf.reduce_sum(preds_exp,axis=3,keepdims=True) + self.min_prob
        self.net[layer] = self.net[layer] / tf.reduce_sum(self.net[layer],axis=3,keepdims=True)
        return layer

    def build_crf(self,featmap_layer,layer):
        """Build a custom dense CRF layer

        Parameters
        ----------
        featemap_layer : str
            Layer name of the feature map inputted to dense CRF layer
        layer : str
            Layer name of the dense CRF layer

        Returns
        -------
        layer : str
            Layer name of the dense CRF layer
        """
        # Undo mean subtraction to recover the original image for the CRF.
        origin_image = self.net["input"] + self.img_mean
        origin_image_zoomed = tf.image.resize_bilinear(origin_image,(self.seed_size, self.seed_size))
        featemap = self.net[featmap_layer]
        featemap_zoomed = tf.image.resize_bilinear(featemap,(self.seed_size, self.seed_size))

        def crf(featemap,image):
            # Runs the (numpy-side) dense CRF per sample; returns log-probs.
            batch_size = featemap.shape[0]
            image = image.astype(np.uint8)
            ret = np.zeros(featemap.shape,dtype=np.float32)
            for i in range(batch_size):
                ret[i,:,:,:] = crf_inference(image[i],self.crf_config_train,self.num_classes,featemap[i],use_log=True)
            # Floor, renormalize, then move to log space.
            ret[ret < self.min_prob] = self.min_prob
            ret /= np.sum(ret,axis=3,keepdims=True)
            ret = np.log(ret)
            return ret.astype(np.float32)

        crf = tf.py_func(crf,[featemap_zoomed,origin_image_zoomed],tf.float32) # shape [N, h, w, C], RGB or BGR doesn't matter
        self.net[layer] = crf
        return layer

    def build_dsrg_layer(self,seed_layer,prob_layer,layer):
        """Build DSRG layer

        Parameters
        ----------
        seed_layer : str
            Layer name of the weak cues
        prob_layer : str
            Layer name of softmax
        layer : str
            Layer name of the DSRG layer

        Returns
        -------
        layer : str
            Layer name of the DSRG layer
        """
        def generate_seed_step(tags,cues,probs):
            # Fan the batch out to the worker pool; each worker grows one
            # sample's cues via single_generate_seed_step, then concatenate.
            tags = np.reshape(tags,[-1,1,1,self.num_classes])
            params_list = []
            for i in range(self.batch_size):
                params_list.append([tags[i],cues[i],probs[i]])
            ret = self.pool.map(single_generate_seed_step, params_list)
            new_cues = ret[0]
            for i in range(1,self.batch_size):
                new_cues = np.concatenate([new_cues,ret[i]],axis=0)
            return new_cues

        self.net[layer] = tf.py_func(generate_seed_step,[self.net["label"],self.net[seed_layer],self.net[prob_layer]],tf.float32)
        return layer

    def load_init_model(self):
        """Load initialized layer weights from a pickled numpy archive."""
        model_path = self.config["init_model_path"]
        # latin1 encoding is required to unpickle Python-2-era weight files.
        self.init_model = np.load(model_path, encoding="latin1", allow_pickle=True).item()

    def get_weights_and_bias(self,layer,shape=None):
        """Load saved weights and biases for saved network

        Creates (or returns cached) TF variables for the given layer. Shapes
        are inferred from the layer name and the running stride table unless
        given explicitly. Also registers the variables in the per-learning-rate
        lists (fc8 gets the 10x/20x rates).

        Parameters
        ----------
        layer : str
            Name of current layer
        shape : list of int (size: 4), optional
            4D shape of the convolutional or fully-connected layer

        Returns
        -------
        weights : Variable
            Saved weights
        bias : Variable
            Saved biases
        """
        # Return cached variables so each layer is only created once.
        if layer in self.weights:
            return self.weights[layer]
        if shape is not None:
            pass
        elif layer.startswith("conv"):
            # Channel counts follow VGG16: 64 * stride, capped at 512; the
            # first conv of each stage still has the previous stage's input depth.
            shape = [3,3,0,0]
            if layer == "conv1_1":
                shape[2] = 3
            else:
                shape[2] = 64 * self.stride[layer]
                if shape[2] > 512: shape[2] = 512
                if layer in ["conv2_1","conv3_1","conv4_1"]: shape[2] = int(shape[2]/2)
            shape[3] = 64 * self.stride[layer]
            if shape[3] > 512: shape[3] = 512
        elif layer.startswith("fc"):
            if layer.startswith("fc6"):
                shape = [3,3,512,1024]
            if layer.startswith("fc7"):
                shape = [1,1,1024,1024]
            if layer.startswith("fc8"):
                shape = [1,1,1024,self.num_classes]
        if self.init_model_path is None:
            # No pretrained model: random normal weights, zero bias.
            init = tf.random_normal_initializer(stddev=0.01)
            weights = tf.get_variable(name="%s_weights" % layer,initializer=init, shape = shape)
            init = tf.constant_initializer(0)
            bias = tf.get_variable(name="%s_bias" % layer,initializer=init, shape = [shape[-1]])
        else:
            # Pretrained model: fc8 (the new classifier head) is re-initialized,
            # everything else is loaded from the init model.
            if layer.startswith("fc8"):
                init = tf.contrib.layers.xavier_initializer(uniform=True)
            else:
                init = tf.constant_initializer(self.init_model[layer]["w"])
            weights = tf.get_variable(name="%s_weights" % layer,initializer=init,shape = shape)
            if layer.startswith("fc8"):
                init = tf.constant_initializer(0)
            else:
                init = tf.constant_initializer(self.init_model[layer]["b"])
            bias = tf.get_variable(name="%s_bias" % layer,initializer=init,shape = [shape[-1]])
        self.weights[layer] = (weights,bias)
        # fc8 trains with 10x/20x learning rates; earlier layers with 1x/2x.
        if layer.startswith("fc8"):
            self.lr_10_list.append(weights)
            self.lr_20_list.append(bias)
        else:
            self.lr_1_list.append(weights)
            self.lr_2_list.append(bias)
        self.trainable_list.append(weights)
        self.trainable_list.append(bias)
        self.variables["total"].append(weights)
        self.variables["total"].append(bias)
        return weights,bias

    def pred(self):
        """Implement final segmentation prediction as argmax of final feature map"""
        if self.h is not None:
            self.net["rescale_output"] = tf.image.resize_bilinear(self.net["output"], (self.h, self.w))
        else:
            # Image size unknown at graph-build time: read it at run time.
            label_size = tf.py_func(lambda x: x.shape[1:3], [self.net["input"]], [tf.int64, tf.int64])
            self.net["rescale_output"] = tf.image.resize_bilinear(self.net["output"], [tf.cast(label_size[0], tf.int32),
                                                                                          tf.cast(label_size[1],
                                                                                                  tf.int32)])
        self.net["pred"] = tf.argmax(self.net["rescale_output"], axis=3)

    def getloss(self):
        """Construct overall loss function

        Returns
        -------
        loss : Tensor
            Output of overall loss function (seeding loss + constrain loss)
        """
        loss = 0
        # for DSRG
        seed_loss = self.get_balanced_seed_loss(self.net["fc8-softmax"],self.net["new_cues"])
        constrain_loss = self.get_constrain_loss(self.net["fc8-softmax"],self.net["crf"])
        self.loss["seed"] = seed_loss
        self.loss["constrain"] = constrain_loss
        loss += seed_loss + constrain_loss
        return loss

    def get_balanced_seed_loss(self,softmax,cues):
        """Balanced seeding loss function

        Cross-entropy over cue pixels only, normalized separately for
        background (channel 0) and foreground (channels 1:) so neither
        dominates regardless of cue counts.

        Parameters
        ----------
        softmax : Tensor
            Final feature map
        cues : Tensor
            Weak cues

        Returns
        -------
        (loss) : Tensor
            Output of balanced seeding loss function (sum of foreground/background losses)
        """
        # 1e-8 guards against division by zero when a sample has no cues.
        count_bg = tf.reduce_sum(cues[:,:,:,0:1],axis=(1,2,3),keepdims=True)
        loss_bg = -tf.reduce_mean(tf.reduce_sum(cues[:,:,:,0:1]*tf.log(softmax[:,:,:,0:1]),axis=(1,2,3),keepdims=True)/(count_bg+1e-8))
        count_fg = tf.reduce_sum(cues[:,:,:,1:],axis=(1,2,3),keepdims=True)
        loss_fg = -tf.reduce_mean(tf.reduce_sum(cues[:,:,:,1:]*tf.log(softmax[:,:,:,1:]),axis=(1,2,3),keepdims=True)/(count_fg+1e-8))
        return loss_bg+loss_fg

    def get_constrain_loss(self,softmax,crf):
        """Constrain loss function

        Mean KL divergence KL(crf || softmax), pulling the network output
        towards the CRF-smoothed distribution.

        Parameters
        ----------
        softmax : Tensor
            Final feature map
        crf : Tensor
            Output of dense CRF (log-probabilities)

        Returns
        -------
        loss : Tensor
            Output of constrain loss function
        """
        probs_smooth = tf.exp(crf)  # crf is in log space (see build_crf)
        loss = tf.reduce_mean(tf.reduce_sum(probs_smooth * tf.log(probs_smooth/(softmax+1e-8)+1e-8), axis=3))
        return loss
|
[
"tensorflow.contrib.layers.xavier_initializer",
"numpy.load",
"tensorflow.reduce_sum",
"numpy.sum",
"numpy.argmax",
"tensorflow.constant_initializer",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.reduce_max",
"lib.CC_labeling_8.CC_lab",
"tensorflow.get_variable",
"tensorflow.nn.relu",
"lib.crf.crf_inference",
"tensorflow.nn.atrous_conv2d",
"tensorflow.cast",
"tensorflow.exp",
"numpy.reshape",
"tensorflow.name_scope",
"tensorflow.nn.bias_add",
"numpy.ndenumerate",
"tensorflow.nn.max_pool",
"tensorflow.random_normal_initializer",
"tensorflow.log",
"numpy.concatenate",
"numpy.log",
"tensorflow.py_func",
"tensorflow.argmax",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.contrib.layers.batch_norm",
"numpy.where",
"numpy.array",
"tensorflow.nn.avg_pool",
"tensorflow.nn.dropout",
"tensorflow.image.resize_bilinear"
] |
[((3106, 3133), 'numpy.expand_dims', 'np.expand_dims', (['cue'], {'axis': '(0)'}), '(cue, axis=0)\n', (3120, 3133), True, 'import numpy as np\n'), ((951, 983), 'numpy.argmax', 'np.argmax', (['existing_prob'], {'axis': '(2)'}), '(existing_prob, axis=2)\n', (960, 983), True, 'import numpy as np\n'), ((2213, 2232), 'numpy.where', 'np.where', (['(tag > 0.5)'], {}), '(tag > 0.5)\n', (2221, 2232), True, 'import numpy as np\n'), ((2372, 2383), 'lib.CC_labeling_8.CC_lab', 'CC_lab', (['mat'], {}), '(mat)\n', (2378, 2383), False, 'from lib.CC_labeling_8 import CC_lab\n'), ((2707, 2726), 'numpy.ndenumerate', 'np.ndenumerate', (['mat'], {}), '(mat)\n', (2721, 2726), True, 'import numpy as np\n'), ((6767, 6800), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (6778, 6800), True, 'import tensorflow as tf\n'), ((14069, 14127), 'tensorflow.reduce_max', 'tf.reduce_max', (['self.net[last_layer]'], {'axis': '(3)', 'keepdims': '(True)'}), '(self.net[last_layer], axis=3, keepdims=True)\n', (14082, 14127), True, 'import tensorflow as tf\n'), ((14146, 14186), 'tensorflow.exp', 'tf.exp', (['(self.net[last_layer] - preds_max)'], {}), '(self.net[last_layer] - preds_max)\n', (14152, 14186), True, 'import tensorflow as tf\n'), ((14894, 14966), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['origin_image', '(self.seed_size, self.seed_size)'], {}), '(origin_image, (self.seed_size, self.seed_size))\n', (14918, 14966), True, 'import tensorflow as tf\n'), ((15035, 15103), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['featemap', '(self.seed_size, self.seed_size)'], {}), '(featemap, (self.seed_size, self.seed_size))\n', (15059, 15103), True, 'import tensorflow as tf\n'), ((15646, 15713), 'tensorflow.py_func', 'tf.py_func', (['crf', '[featemap_zoomed, origin_image_zoomed]', 'tf.float32'], {}), '(crf, [featemap_zoomed, origin_image_zoomed], tf.float32)\n', (15656, 15713), True, 'import tensorflow as 
tf\n'), ((16766, 16877), 'tensorflow.py_func', 'tf.py_func', (['generate_seed_step', "[self.net['label'], self.net[seed_layer], self.net[prob_layer]]", 'tf.float32'], {}), "(generate_seed_step, [self.net['label'], self.net[seed_layer],\n self.net[prob_layer]], tf.float32)\n", (16776, 16877), True, 'import tensorflow as tf\n'), ((20546, 20591), 'tensorflow.argmax', 'tf.argmax', (["self.net['rescale_output']"], {'axis': '(3)'}), "(self.net['rescale_output'], axis=3)\n", (20555, 20591), True, 'import tensorflow as tf\n'), ((21561, 21625), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cues[:, :, :, 0:1]'], {'axis': '(1, 2, 3)', 'keepdims': '(True)'}), '(cues[:, :, :, 0:1], axis=(1, 2, 3), keepdims=True)\n', (21574, 21625), True, 'import tensorflow as tf\n'), ((21775, 21838), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cues[:, :, :, 1:]'], {'axis': '(1, 2, 3)', 'keepdims': '(True)'}), '(cues[:, :, :, 1:], axis=(1, 2, 3), keepdims=True)\n', (21788, 21838), True, 'import tensorflow as tf\n'), ((22362, 22373), 'tensorflow.exp', 'tf.exp', (['crf'], {}), '(crf)\n', (22368, 22373), True, 'import tensorflow as tf\n'), ((2986, 3008), 'numpy.array', 'np.array', (['cclab.labels'], {}), '(cclab.labels)\n', (2994, 3008), True, 'import numpy as np\n'), ((7231, 7251), 'tensorflow.name_scope', 'tf.name_scope', (['"""vgg"""'], {}), "('vgg')\n", (7244, 7251), True, 'import tensorflow as tf\n'), ((14331, 14384), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.net[layer]'], {'axis': '(3)', 'keepdims': '(True)'}), '(self.net[layer], axis=3, keepdims=True)\n', (14344, 14384), True, 'import tensorflow as tf\n'), ((15245, 15287), 'numpy.zeros', 'np.zeros', (['featemap.shape'], {'dtype': 'np.float32'}), '(featemap.shape, dtype=np.float32)\n', (15253, 15287), True, 'import numpy as np\n'), ((15518, 15552), 'numpy.sum', 'np.sum', (['ret'], {'axis': '(3)', 'keepdims': '(True)'}), '(ret, axis=3, keepdims=True)\n', (15524, 15552), True, 'import numpy as np\n'), ((15569, 15580), 'numpy.log', 
'np.log', (['ret'], {}), '(ret)\n', (15575, 15580), True, 'import numpy as np\n'), ((16298, 16344), 'numpy.reshape', 'np.reshape', (['tags', '[-1, 1, 1, self.num_classes]'], {}), '(tags, [-1, 1, 1, self.num_classes])\n', (16308, 16344), True, 'import numpy as np\n'), ((18472, 18513), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (18500, 18513), True, 'import tensorflow as tf\n'), ((18536, 18609), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_weights' % layer)", 'initializer': 'init', 'shape': 'shape'}), "(name='%s_weights' % layer, initializer=init, shape=shape)\n", (18551, 18609), True, 'import tensorflow as tf\n'), ((18630, 18656), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (18653, 18656), True, 'import tensorflow as tf\n'), ((18676, 18752), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_bias' % layer)", 'initializer': 'init', 'shape': '[shape[-1]]'}), "(name='%s_bias' % layer, initializer=init, shape=[shape[-1]])\n", (18691, 18752), True, 'import tensorflow as tf\n'), ((18998, 19071), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_weights' % layer)", 'initializer': 'init', 'shape': 'shape'}), "(name='%s_weights' % layer, initializer=init, shape=shape)\n", (19013, 19071), True, 'import tensorflow as tf\n'), ((19275, 19351), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_bias' % layer)", 'initializer': 'init', 'shape': '[shape[-1]]'}), "(name='%s_bias' % layer, initializer=init, shape=[shape[-1]])\n", (19290, 19351), True, 'import tensorflow as tf\n'), ((20000, 20062), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (["self.net['output']", '(self.h, self.w)'], {}), "(self.net['output'], (self.h, self.w))\n", (20024, 20062), True, 'import tensorflow as tf\n'), ((20102, 20179), 'tensorflow.py_func', 'tf.py_func', (['(lambda x: x.shape[1:3])', 
"[self.net['input']]", '[tf.int64, tf.int64]'], {}), "(lambda x: x.shape[1:3], [self.net['input']], [tf.int64, tf.int64])\n", (20112, 20179), True, 'import tensorflow as tf\n'), ((14225, 14272), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['preds_exp'], {'axis': '(3)', 'keepdims': '(True)'}), '(preds_exp, axis=3, keepdims=True)\n', (14238, 14272), True, 'import tensorflow as tf\n'), ((15358, 15454), 'lib.crf.crf_inference', 'crf_inference', (['image[i]', 'self.crf_config_train', 'self.num_classes', 'featemap[i]'], {'use_log': '(True)'}), '(image[i], self.crf_config_train, self.num_classes, featemap[i\n ], use_log=True)\n', (15371, 15454), False, 'from lib.crf import crf_inference\n'), ((16669, 16711), 'numpy.concatenate', 'np.concatenate', (['[new_cues, ret[i]]'], {'axis': '(0)'}), '([new_cues, ret[i]], axis=0)\n', (16683, 16711), True, 'import numpy as np\n'), ((17038, 17095), 'numpy.load', 'np.load', (['model_path'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(model_path, encoding='latin1', allow_pickle=True)\n", (17045, 17095), True, 'import numpy as np\n'), ((18831, 18881), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(True)'}), '(uniform=True)\n', (18867, 18881), True, 'import tensorflow as tf\n'), ((18923, 18975), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["self.init_model[layer]['w']"], {}), "(self.init_model[layer]['w'])\n", (18946, 18975), True, 'import tensorflow as tf\n'), ((19135, 19161), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (19158, 19161), True, 'import tensorflow as tf\n'), ((19203, 19255), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["self.init_model[layer]['b']"], {}), "(self.init_model[layer]['b'])\n", (19226, 19255), True, 'import tensorflow as tf\n'), ((5191, 5219), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (5204, 5219), True, 'import 
tensorflow as tf\n'), ((5524, 5557), 'tensorflow.Variable', 'tf.Variable', (['(0.5)'], {'trainable': '(False)'}), '(0.5, trainable=False)\n', (5535, 5557), True, 'import tensorflow as tf\n'), ((10339, 10359), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (10352, 10359), True, 'import tensorflow as tf\n'), ((10473, 10518), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.net[last_layer]'], {'name': '"""relu"""'}), "(self.net[last_layer], name='relu')\n", (10483, 10518), True, 'import tensorflow as tf\n'), ((12351, 12371), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (12364, 12371), True, 'import tensorflow as tf\n'), ((12840, 12890), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['self.net[layer]', 'bias'], {'name': '"""bias"""'}), "(self.net[layer], bias, name='bias')\n", (12854, 12890), True, 'import tensorflow as tf\n'), ((12999, 13019), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (13012, 13019), True, 'import tensorflow as tf\n'), ((13068, 13118), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['self.net[last_layer]'], {}), '(self.net[last_layer])\n', (13096, 13118), True, 'import tensorflow as tf\n'), ((13220, 13240), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (13233, 13240), True, 'import tensorflow as tf\n'), ((13289, 13321), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.net[last_layer]'], {}), '(self.net[last_layer])\n', (13299, 13321), True, 'import tensorflow as tf\n'), ((13424, 13444), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (13437, 13444), True, 'import tensorflow as tf\n'), ((13493, 13565), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.net[last_layer]'], {'keep_prob': "(1 - self.net['drop_prob'])"}), "(self.net[last_layer], keep_prob=1 - self.net['drop_prob'])\n", (13506, 13565), True, 'import tensorflow as tf\n'), ((20267, 20299), 'tensorflow.cast', 'tf.cast', (['label_size[0]', 'tf.int32'], 
{}), '(label_size[0], tf.int32)\n', (20274, 20299), True, 'import tensorflow as tf\n'), ((20388, 20420), 'tensorflow.cast', 'tf.cast', (['label_size[1]', 'tf.int32'], {}), '(label_size[1], tf.int32)\n', (20395, 20420), True, 'import tensorflow as tf\n'), ((22433, 22481), 'tensorflow.log', 'tf.log', (['(probs_smooth / (softmax + 1e-08) + 1e-08)'], {}), '(probs_smooth / (softmax + 1e-08) + 1e-08)\n', (22439, 22481), True, 'import tensorflow as tf\n'), ((2875, 2895), 'numpy.sum', 'np.sum', (['cue[x, y, :]'], {}), '(cue[x, y, :])\n', (2881, 2895), True, 'import numpy as np\n'), ((5743, 5771), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (5756, 5771), True, 'import tensorflow as tf\n'), ((6080, 6113), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (6091, 6113), True, 'import tensorflow as tf\n'), ((9330, 9350), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (9343, 9350), True, 'import tensorflow as tf\n'), ((9544, 9643), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['self.net[last_layer]', 'weights'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, strides=[1, 1, 1, 1], padding=\n 'SAME', name='conv')\n", (9556, 9643), True, 'import tensorflow as tf\n'), ((9681, 9731), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['self.net[layer]', 'bias'], {'name': '"""bias"""'}), "(self.net[layer], bias, name='bias')\n", (9695, 9731), True, 'import tensorflow as tf\n'), ((9837, 9857), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (9850, 9857), True, 'import tensorflow as tf\n'), ((10051, 10142), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['self.net[last_layer]', 'weights'], {'rate': '(2)', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, rate=2, padding='SAME',\n name='conv')\n", (10070, 10142), True, 'import tensorflow as 
tf\n'), ((10182, 10232), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['self.net[layer]', 'bias'], {'name': '"""bias"""'}), "(self.net[layer], bias, name='bias')\n", (10196, 10232), True, 'import tensorflow as tf\n'), ((10624, 10644), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (10637, 10644), True, 'import tensorflow as tf\n'), ((10758, 10870), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['self.net[last_layer]'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""pool"""'}), "(self.net[last_layer], ksize=[1, 3, 3, 1], strides=[1, 1, 1, \n 1], padding='SAME', name='pool')\n", (10772, 10870), True, 'import tensorflow as tf\n'), ((12540, 12641), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['self.net[last_layer]', 'weights'], {'rate': 'dilate_rate', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, rate=dilate_rate,\n padding='SAME', name='conv')\n", (12559, 12641), True, 'import tensorflow as tf\n'), ((12707, 12806), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['self.net[last_layer]', 'weights'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, strides=[1, 1, 1, 1], padding=\n 'SAME', name='conv')\n", (12719, 12806), True, 'import tensorflow as tf\n'), ((6270, 6298), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (6283, 6298), True, 'import tensorflow as tf\n'), ((6605, 6638), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (6616, 6638), True, 'import tensorflow as tf\n'), ((21683, 21712), 'tensorflow.log', 'tf.log', (['softmax[:, :, :, 0:1]'], {}), '(softmax[:, :, :, 0:1])\n', (21689, 21712), True, 'import tensorflow as tf\n'), ((21895, 21923), 'tensorflow.log', 'tf.log', (['softmax[:, :, :, 1:]'], {}), '(softmax[:, :, :, 1:])\n', (21901, 21923), True, 'import tensorflow as tf\n'), ((11012, 11032), 
'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (11025, 11032), True, 'import tensorflow as tf\n'), ((11158, 11270), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['self.net[last_layer]'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool"""'}), "(self.net[last_layer], ksize=[1, 3, 3, 1], strides=[1, 2, 2, \n 1], padding='SAME', name='pool')\n", (11172, 11270), True, 'import tensorflow as tf\n'), ((11369, 11389), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (11382, 11389), True, 'import tensorflow as tf\n'), ((11511, 11623), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['self.net[last_layer]'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""pool"""'}), "(self.net[last_layer], ksize=[1, 3, 3, 1], strides=[1, 1, 1, \n 1], padding='SAME', name='pool')\n", (11525, 11623), True, 'import tensorflow as tf\n')]
|
import os
import ndjson
import json
import time
from options import TestOptions
from framework import SketchModel
from utils import load_data
from writer import Writer
import numpy as np
from evalTool import *
def run_eval(opt=None, model=None, loader=None, dataset='test', write_result=False):
    """Evaluate a trained model on one dataset split.

    Any of `opt`, `model` and `loader` that are omitted are constructed on
    demand from the test options. Returns a tuple
    (average loss, P metric, C metric), each as a plain float.
    """
    if opt is None:
        opt = TestOptions().parse()
    if model is None:
        model = SketchModel(opt)
    if loader is None:
        loader = load_data(opt, datasetType=dataset, permutation=opt.permutation)

    # Run the prediction pass that matches the configured alignment mode.
    if opt.eval_way == 'align':
        predictions, losses = eval_align_batchN(model, loader, P=opt.points_num)
    elif opt.eval_way == 'unalign':
        predictions, losses = eval_unalign_batch1(model, loader)
    else:
        raise NotImplementedError('eval_way {} not implemented!'.format(opt.eval_way))

    # Load the ground-truth records the predictions will be scored against.
    gt_path = os.path.join('data', opt.dataset, 'train',
                           '{}_{}.ndjson'.format(opt.class_name, dataset))
    with open(gt_path, 'r') as f:
        ground_truth = ndjson.load(f)

    # Score with or without stroke-length weighting.
    if opt.metric_way == 'wlen':
        p_scores, c_scores = eval_with_len(ground_truth, predictions)
    elif opt.metric_way == 'wolen':
        p_scores, c_scores = eval_without_len(ground_truth, predictions)
    else:
        raise NotImplementedError('metric_way {} not implemented!'.format(opt.metric_way))

    if write_result:
        # Persist the per-sample predictions next to the input data.
        annotated = get_eval_result(ground_truth, predictions)
        result_path = os.path.join('data', opt.dataset, 'train',
                                   '{}_{}.ndjson'.format(opt.class_name, 'res'))
        with open(result_path, 'w') as f:
            ndjson.dump(annotated, f)

    return np.average(losses), np.average(p_scores), np.average(c_scores)
if __name__ == "__main__":
    # Full evaluation run, writing per-sample results back to disk.
    _, p_metric, c_metric = run_eval(write_result=True)
    print('P_metric:{:.4}%\tC_metric:{:.4}%'.format(p_metric * 100, c_metric * 100))
|
[
"utils.load_data",
"numpy.average",
"ndjson.load",
"options.TestOptions",
"ndjson.dump",
"framework.SketchModel"
] |
[((1682, 1702), 'numpy.average', 'np.average', (['lossList'], {}), '(lossList)\n', (1692, 1702), True, 'import numpy as np\n'), ((1718, 1743), 'numpy.average', 'np.average', (['p_metric_list'], {}), '(p_metric_list)\n', (1728, 1743), True, 'import numpy as np\n'), ((1759, 1784), 'numpy.average', 'np.average', (['c_metric_list'], {}), '(c_metric_list)\n', (1769, 1784), True, 'import numpy as np\n'), ((390, 406), 'framework.SketchModel', 'SketchModel', (['opt'], {}), '(opt)\n', (401, 406), False, 'from framework import SketchModel\n'), ((447, 511), 'utils.load_data', 'load_data', (['opt'], {'datasetType': 'dataset', 'permutation': 'opt.permutation'}), '(opt, datasetType=dataset, permutation=opt.permutation)\n', (456, 511), False, 'from utils import load_data\n'), ((1051, 1065), 'ndjson.load', 'ndjson.load', (['f'], {}), '(f)\n', (1062, 1065), False, 'import ndjson\n'), ((1637, 1661), 'ndjson.dump', 'ndjson.dump', (['testData', 'f'], {}), '(testData, f)\n', (1648, 1661), False, 'import ndjson\n'), ((330, 343), 'options.TestOptions', 'TestOptions', ([], {}), '()\n', (341, 343), False, 'from options import TestOptions\n')]
|
"""
Author: <NAME> (<EMAIL>)
Date: May 07, 2020
"""
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
from itertools import combinations
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self, temperature=0.07, contrast_mode='all',
base_temperature=0.07):
super(SupConLoss, self).__init__()
self.temperature = temperature
self.contrast_mode = contrast_mode
self.base_temperature = base_temperature
def forward(self, features, labels=None, mask=None):
"""Compute loss for model. If both `labels` and `mask` are None,
it degenerates to SimCLR unsupervised loss:
https://arxiv.org/pdf/2002.05709.pdf
Args:
features: hidden vector of shape [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
Returns:
A loss scalar.
"""
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
if len(features.shape) < 3:
raise ValueError('`features` needs to be [bsz, n_views, ...],'
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
batch_size = features.shape[0]
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
elif labels is not None:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).float().to(device)
else:
mask = mask.float().to(device)
contrast_count = features.shape[1]
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
if self.contrast_mode == 'one':
anchor_feature = features[:, 0]
anchor_count = 1
elif self.contrast_mode == 'all':
anchor_feature = contrast_feature
anchor_count = contrast_count
else:
raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(anchor_feature, contrast_feature.T),
self.temperature)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
a = anchor_feature.detach().cpu().numpy()
b = contrast_feature.T.detach().cpu().numpy()
c = anchor_dot_contrast.detach().cpu().numpy()
d = np.matmul(a, b)
# tile mask
mask = mask.repeat(anchor_count, contrast_count)
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.view(anchor_count, batch_size).mean()
return loss
def testNan(self, x):
x = x.detach().cpu().numpy()
return np.isnan(x).any()
# Contrastive (NCE) loss used by CLOCS.
def obtain_contrastive_loss(latent_embeddings, pids, trial):
    """ Calculate NCE Loss For Latent Embeddings in Batch
    Args:
        latent_embeddings (torch.Tensor): embeddings from model for different perturbations of same instance (BxHxN)
        pids (list): patient ids of instances in batch
        trial (str): one of 'CMC', 'SimCLR', 'CMSC', 'CMLC', 'CMSMLC';
            selects which pairs are treated as positives
    Outputs:
        loss (torch.Tensor): scalar NCE loss
    """
    if trial in ['CMSC', 'CMLC', 'CMSMLC']:
        # Build a boolean matrix marking every pair of instances that share a
        # patient id; off-diagonal matches become extra positive pairs below.
        # FIX: `np.object` was removed in NumPy 1.24 — plain `object` is the
        # supported equivalent and behaves identically here.
        pids = np.array(pids, dtype=object)
        pid1, pid2 = np.meshgrid(pids, pids)
        pid_matrix = pid1 + '-' + pid2
        pids_of_interest = np.unique(pids + '-' + pids)  # unique combinations of pids of interest i.e. matching
        bool_matrix_of_interest = np.zeros((len(pids), len(pids)))
        for pid in pids_of_interest:
            bool_matrix_of_interest += pid_matrix == pid
        rows1, cols1 = np.where(np.triu(bool_matrix_of_interest, 1))
        rows2, cols2 = np.where(np.tril(bool_matrix_of_interest, -1))

    nviews = set(range(latent_embeddings.shape[2]))
    view_combinations = combinations(nviews, 2)
    loss = 0
    ncombinations = 0
    # Defensive default so the final division is always well-defined; every
    # branch in the loop overwrites it.
    loss_terms = 2
    for combination in view_combinations:
        view1_array = latent_embeddings[:, :, combination[0]]  # (BxH)
        view2_array = latent_embeddings[:, :, combination[1]]  # (BxH)
        norm1_vector = view1_array.norm(dim=1).unsqueeze(0)
        norm2_vector = view2_array.norm(dim=1).unsqueeze(0)
        # Cosine-similarity matrix between the two views, temperature-scaled.
        sim_matrix = torch.mm(view1_array, view2_array.transpose(0, 1))
        norm_matrix = torch.mm(norm1_vector.transpose(0, 1), norm2_vector)
        temperature = 0.1
        argument = sim_matrix / (norm_matrix * temperature)
        sim_matrix_exp = torch.exp(argument)

        if trial == 'CMC':
            """ Obtain Off Diagonal Entries """
            # upper_triangle = torch.triu(sim_matrix_exp,1)
            # lower_triangle = torch.tril(sim_matrix_exp,-1)
            # off_diagonals = upper_triangle + lower_triangle
            diagonals = torch.diag(sim_matrix_exp)
            """ Obtain Loss Terms(s) """
            loss_term1 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 1)))
            loss_term2 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 0)))
            loss += loss_term1 + loss_term2
            loss_terms = 2
        elif trial == 'SimCLR':
            # Denominator additionally contains within-view similarities
            # (off-diagonal entries of each view's self-similarity matrix).
            self_sim_matrix1 = torch.mm(view1_array, view1_array.transpose(0, 1))
            self_norm_matrix1 = torch.mm(norm1_vector.transpose(0, 1), norm1_vector)
            temperature = 0.1
            argument = self_sim_matrix1 / (self_norm_matrix1 * temperature)
            self_sim_matrix_exp1 = torch.exp(argument)
            self_sim_matrix_off_diagonals1 = torch.triu(self_sim_matrix_exp1, 1) + torch.tril(self_sim_matrix_exp1, -1)

            self_sim_matrix2 = torch.mm(view2_array, view2_array.transpose(0, 1))
            self_norm_matrix2 = torch.mm(norm2_vector.transpose(0, 1), norm2_vector)
            temperature = 0.1
            argument = self_sim_matrix2 / (self_norm_matrix2 * temperature)
            self_sim_matrix_exp2 = torch.exp(argument)
            self_sim_matrix_off_diagonals2 = torch.triu(self_sim_matrix_exp2, 1) + torch.tril(self_sim_matrix_exp2, -1)

            denominator_loss1 = torch.sum(sim_matrix_exp, 1) + torch.sum(self_sim_matrix_off_diagonals1, 1)
            denominator_loss2 = torch.sum(sim_matrix_exp, 0) + torch.sum(self_sim_matrix_off_diagonals2, 0)

            diagonals = torch.diag(sim_matrix_exp)
            loss_term1 = -torch.mean(torch.log(diagonals / denominator_loss1))
            loss_term2 = -torch.mean(torch.log(diagonals / denominator_loss2))
            loss += loss_term1 + loss_term2
            loss_terms = 2
        elif trial in ['CMSC', 'CMLC', 'CMSMLC']:  # ours #CMSMLC = positive examples are same instance and same patient
            triu_elements = sim_matrix_exp[rows1, cols1]
            tril_elements = sim_matrix_exp[rows2, cols2]
            diag_elements = torch.diag(sim_matrix_exp)

            triu_sum = torch.sum(sim_matrix_exp, 1)
            tril_sum = torch.sum(sim_matrix_exp, 0)

            loss_diag1 = -torch.mean(torch.log(diag_elements / triu_sum))
            loss_diag2 = -torch.mean(torch.log(diag_elements / tril_sum))

            loss_triu = -torch.mean(torch.log(triu_elements / triu_sum[rows1]))
            loss_tril = -torch.mean(torch.log(tril_elements / tril_sum[cols2]))

            loss = loss_diag1 + loss_diag2
            loss_terms = 2

            if len(rows1) > 0:
                loss += loss_triu  # technically need to add 1 more term for symmetry
                loss_terms += 1

            if len(rows2) > 0:
                loss += loss_tril  # technically need to add 1 more term for symmetry
                loss_terms += 1

        ncombinations += 1
    # Average over the number of loss terms and view pairs.
    loss = loss / (loss_terms * ncombinations)
    return loss
|
[
"numpy.triu",
"torch.eye",
"numpy.isnan",
"torch.arange",
"torch.device",
"numpy.unique",
"numpy.meshgrid",
"torch.diag",
"torch.exp",
"torch.triu",
"torch.unbind",
"torch.matmul",
"torch.log",
"itertools.combinations",
"torch.max",
"torch.sum",
"torch.ones_like",
"torch.eq",
"numpy.tril",
"numpy.array",
"torch.tril",
"numpy.matmul"
] |
[((5136, 5159), 'itertools.combinations', 'combinations', (['nviews', '(2)'], {}), '(nviews, 2)\n', (5148, 5159), False, 'from itertools import combinations\n'), ((2896, 2947), 'torch.max', 'torch.max', (['anchor_dot_contrast'], {'dim': '(1)', 'keepdim': '(True)'}), '(anchor_dot_contrast, dim=1, keepdim=True)\n', (2905, 2947), False, 'import torch\n'), ((3180, 3195), 'numpy.matmul', 'np.matmul', (['a', 'b'], {}), '(a, b)\n', (3189, 3195), True, 'import numpy as np\n'), ((4530, 4561), 'numpy.array', 'np.array', (['pids'], {'dtype': 'np.object'}), '(pids, dtype=np.object)\n', (4538, 4561), True, 'import numpy as np\n'), ((4583, 4606), 'numpy.meshgrid', 'np.meshgrid', (['pids', 'pids'], {}), '(pids, pids)\n', (4594, 4606), True, 'import numpy as np\n'), ((4673, 4701), 'numpy.unique', 'np.unique', (["(pids + '-' + pids)"], {}), "(pids + '-' + pids)\n", (4682, 4701), True, 'import numpy as np\n'), ((5884, 5903), 'torch.exp', 'torch.exp', (['argument'], {}), '(argument)\n', (5893, 5903), False, 'import torch\n'), ((1213, 1233), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1225, 1233), False, 'import torch\n'), ((1295, 1314), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1307, 1314), False, 'import torch\n'), ((2308, 2337), 'torch.unbind', 'torch.unbind', (['features'], {'dim': '(1)'}), '(features, dim=1)\n', (2320, 2337), False, 'import torch\n'), ((2758, 2806), 'torch.matmul', 'torch.matmul', (['anchor_feature', 'contrast_feature.T'], {}), '(anchor_feature, contrast_feature.T)\n', (2770, 2806), False, 'import torch\n'), ((3363, 3384), 'torch.ones_like', 'torch.ones_like', (['mask'], {}), '(mask)\n', (3378, 3384), False, 'import torch\n'), ((3584, 3601), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', (3593, 3601), False, 'import torch\n'), ((4952, 4987), 'numpy.triu', 'np.triu', (['bool_matrix_of_interest', '(1)'], {}), '(bool_matrix_of_interest, 1)\n', (4959, 4987), True, 'import numpy as np\n'), ((5021, 5057), 
'numpy.tril', 'np.tril', (['bool_matrix_of_interest', '(-1)'], {}), '(bool_matrix_of_interest, -1)\n', (5028, 5057), True, 'import numpy as np\n'), ((6187, 6213), 'torch.diag', 'torch.diag', (['sim_matrix_exp'], {}), '(sim_matrix_exp)\n', (6197, 6213), False, 'import torch\n'), ((4064, 4075), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (4072, 4075), True, 'import numpy as np\n'), ((6846, 6865), 'torch.exp', 'torch.exp', (['argument'], {}), '(argument)\n', (6855, 6865), False, 'import torch\n'), ((7295, 7314), 'torch.exp', 'torch.exp', (['argument'], {}), '(argument)\n', (7304, 7314), False, 'import torch\n'), ((7677, 7703), 'torch.diag', 'torch.diag', (['sim_matrix_exp'], {}), '(sim_matrix_exp)\n', (7687, 7703), False, 'import torch\n'), ((6911, 6946), 'torch.triu', 'torch.triu', (['self_sim_matrix_exp1', '(1)'], {}), '(self_sim_matrix_exp1, 1)\n', (6921, 6946), False, 'import torch\n'), ((6949, 6985), 'torch.tril', 'torch.tril', (['self_sim_matrix_exp1', '(-1)'], {}), '(self_sim_matrix_exp1, -1)\n', (6959, 6985), False, 'import torch\n'), ((7360, 7395), 'torch.triu', 'torch.triu', (['self_sim_matrix_exp2', '(1)'], {}), '(self_sim_matrix_exp2, 1)\n', (7370, 7395), False, 'import torch\n'), ((7398, 7434), 'torch.tril', 'torch.tril', (['self_sim_matrix_exp2', '(-1)'], {}), '(self_sim_matrix_exp2, -1)\n', (7408, 7434), False, 'import torch\n'), ((7468, 7496), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(1)'], {}), '(sim_matrix_exp, 1)\n', (7477, 7496), False, 'import torch\n'), ((7499, 7543), 'torch.sum', 'torch.sum', (['self_sim_matrix_off_diagonals1', '(1)'], {}), '(self_sim_matrix_off_diagonals1, 1)\n', (7508, 7543), False, 'import torch\n'), ((7576, 7604), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(0)'], {}), '(sim_matrix_exp, 0)\n', (7585, 7604), False, 'import torch\n'), ((7607, 7651), 'torch.sum', 'torch.sum', (['self_sim_matrix_off_diagonals2', '(0)'], {}), '(self_sim_matrix_off_diagonals2, 0)\n', (7616, 7651), False, 'import torch\n'), ((8196, 
8222), 'torch.diag', 'torch.diag', (['sim_matrix_exp'], {}), '(sim_matrix_exp)\n', (8206, 8222), False, 'import torch\n'), ((8247, 8275), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(1)'], {}), '(sim_matrix_exp, 1)\n', (8256, 8275), False, 'import torch\n'), ((8299, 8327), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(0)'], {}), '(sim_matrix_exp, 0)\n', (8308, 8327), False, 'import torch\n'), ((1838, 1880), 'torch.eye', 'torch.eye', (['batch_size'], {'dtype': 'torch.float32'}), '(batch_size, dtype=torch.float32)\n', (1847, 1880), False, 'import torch\n'), ((7741, 7781), 'torch.log', 'torch.log', (['(diagonals / denominator_loss1)'], {}), '(diagonals / denominator_loss1)\n', (7750, 7781), False, 'import torch\n'), ((7820, 7860), 'torch.log', 'torch.log', (['(diagonals / denominator_loss2)'], {}), '(diagonals / denominator_loss2)\n', (7829, 7860), False, 'import torch\n'), ((3413, 3452), 'torch.arange', 'torch.arange', (['(batch_size * anchor_count)'], {}), '(batch_size * anchor_count)\n', (3425, 3452), False, 'import torch\n'), ((6314, 6342), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(1)'], {}), '(sim_matrix_exp, 1)\n', (6323, 6342), False, 'import torch\n'), ((6404, 6432), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(0)'], {}), '(sim_matrix_exp, 0)\n', (6413, 6432), False, 'import torch\n'), ((8366, 8401), 'torch.log', 'torch.log', (['(diag_elements / triu_sum)'], {}), '(diag_elements / triu_sum)\n', (8375, 8401), False, 'import torch\n'), ((8440, 8475), 'torch.log', 'torch.log', (['(diag_elements / tril_sum)'], {}), '(diag_elements / tril_sum)\n', (8449, 8475), False, 'import torch\n'), ((8514, 8556), 'torch.log', 'torch.log', (['(triu_elements / triu_sum[rows1])'], {}), '(triu_elements / triu_sum[rows1])\n', (8523, 8556), False, 'import torch\n'), ((8594, 8636), 'torch.log', 'torch.log', (['(tril_elements / tril_sum[cols2])'], {}), '(tril_elements / tril_sum[cols2])\n', (8603, 8636), False, 'import torch\n'), ((2124, 2150), 'torch.eq', 'torch.eq', 
(['labels', 'labels.T'], {}), '(labels, labels.T)\n', (2132, 2150), False, 'import torch\n')]
|
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import time
def tensor2im(input_image, imtype=np.uint8):
    """Convert a Tensor into an image array (numpy).

    Takes the first element of the batch, rescales a single-channel image
    to [0, 255] (tiling it to 3 channels), and returns an HxWx3 array.
    Non-tensor inputs are returned unchanged.

    Args:
        input_image: torch.Tensor of shape [batch, C, H, W] (C is 1 or 3,
            values assumed in [-1, 1] for 3-channel input), or any other
            object, which is passed through untouched.
        imtype: the desired dtype of the converted numpy array.
    """
    if isinstance(input_image, torch.Tensor):
        image_tensor = input_image.data
    else:
        return input_image
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        # Min-max normalize single-channel images to [-1, 1] before the
        # generic [-1, 1] -> [0, 255] mapping below.
        lo, hi = np.min(image_numpy), np.max(image_numpy)
        if hi > lo:
            image_numpy = (image_numpy - lo) / (hi - lo)
        else:
            # FIX: constant image previously divided by zero and produced NaNs;
            # map it to the bottom of the range instead.
            image_numpy = np.zeros_like(image_numpy)
        image_numpy = image_numpy * 2 - 1
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    image_numpy = np.clip(image_numpy, 0.0, 255.0)
    return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Print the network name and the mean absolute gradient over its parameters.

    Parameters without gradients are skipped; if none have gradients,
    0.0 is printed.
    """
    grad_total = 0.0
    n_with_grad = 0
    for param in net.parameters():
        if param.grad is None:
            continue
        grad_total += torch.mean(torch.abs(param.grad.data))
        n_with_grad += 1
    if n_with_grad > 0:
        grad_total = grad_total / n_with_grad
    print(name)
    print(grad_total)
def save_image(image_numpy, image_path):
    """Save a numpy image array to *image_path* via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of a numpy array.

    Args:
        x: array-like input (cast to float64 before reporting).
        val: if True, print mean/min/max/median/std of the flattened array.
        shp: if True, print the array's shape first.
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if not val:
        return
    flat = x.flatten()
    stats = (np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat))
    print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % stats)
def mkdirs(paths):
    """Create a directory, or every directory in a list of paths."""
    targets = paths if isinstance(paths, list) and not isinstance(paths, str) else [paths]
    for target in targets:
        mkdir(target)


def mkdir(path):
    """Create *path* (with parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True if a and b are approximately equal (PEP 485 semantics)."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
class Timer(object):
    """Simple wall-clock timer, usable directly or as a context manager.

    With acc=True elapsed time accumulates across start/stop pairs until
    reset() is called explicitly; otherwise every stop() reports and clears
    the total. With avg=True reset() reports the per-iteration average
    instead of the accumulated total.
    """

    def __init__(self, name=None, acc=False, avg=False):
        self.name = name
        self.acc = acc
        self.avg = avg
        self.total = 0.0
        self.iters = 0

    def __enter__(self):
        self.start()

    def __exit__(self, type, value, traceback):
        self.stop()

    def start(self):
        # Remember when this measurement began.
        self.tstart = time.time()

    def stop(self):
        self.iters += 1
        self.total += time.time() - self.tstart
        if not self.acc:
            self.reset()

    def reset(self):
        # Report the (possibly averaged) elapsed time, then clear the total.
        prefix = '[' + self.name + '] ' if self.name else ''
        value, msg = self.total, 'Elapsed'
        if self.avg:
            value, msg = value / self.iters, 'Avg Elapsed'
        print('%s%s: %.4f' % (prefix, msg, value))
        self.total = 0.0
|
[
"os.makedirs",
"numpy.median",
"numpy.std",
"os.path.exists",
"numpy.transpose",
"numpy.clip",
"time.time",
"numpy.min",
"numpy.max",
"numpy.mean",
"numpy.tile",
"PIL.Image.fromarray",
"torch.abs"
] |
[((772, 804), 'numpy.clip', 'np.clip', (['image_numpy', '(0.0)', '(255.0)'], {}), '(image_numpy, 0.0, 255.0)\n', (779, 804), True, 'import numpy as np\n'), ((1206, 1234), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (1221, 1234), False, 'from PIL import Image\n'), ((647, 678), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (654, 678), True, 'import numpy as np\n'), ((1773, 1793), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1787, 1793), False, 'import os\n'), ((1803, 1820), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1814, 1820), False, 'import os\n'), ((2255, 2266), 'time.time', 'time.time', ([], {}), '()\n', (2264, 2266), False, 'import time\n'), ((2324, 2335), 'time.time', 'time.time', ([], {}), '()\n', (2333, 2335), False, 'import time\n'), ((516, 535), 'numpy.min', 'np.min', (['image_numpy'], {}), '(image_numpy)\n', (522, 535), True, 'import numpy as np\n'), ((540, 559), 'numpy.max', 'np.max', (['image_numpy'], {}), '(image_numpy)\n', (546, 559), True, 'import numpy as np\n'), ((562, 581), 'numpy.min', 'np.min', (['image_numpy'], {}), '(image_numpy)\n', (568, 581), True, 'import numpy as np\n'), ((698, 734), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (710, 734), True, 'import numpy as np\n'), ((1018, 1044), 'torch.abs', 'torch.abs', (['param.grad.data'], {}), '(param.grad.data)\n', (1027, 1044), False, 'import torch\n'), ((1517, 1527), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1524, 1527), True, 'import numpy as np\n'), ((1529, 1538), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1535, 1538), True, 'import numpy as np\n'), ((1540, 1549), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1546, 1549), True, 'import numpy as np\n'), ((1551, 1563), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (1560, 1563), True, 'import numpy as np\n'), ((1565, 1574), 'numpy.std', 'np.std', (['x'], 
{}), '(x)\n', (1571, 1574), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 09:10:29 2021
Author: <NAME>
Functions for implementing the edge detection scheme first proposed by Zhang and Bao [1].
Modified for use with pywt's SWT2 transform and employs double thresholding similar to canny to improve noise resilience and revovery of weak edges.
Portions of code adapted from scikit-image's implementation of the canny edge detector;
Title: canny.py - Canny Edge detector
Author: <NAME>
Date: 11/02/2020
Code version: 0.17.2
Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
[1] <NAME>. and <NAME>., 2002. Edge detection by scale multiplication in wavelet domain. Pattern Recognition Letters, 23(14), pp.1771-1784.
"""
import numpy as np
from pywt import swt2, Wavelet
from scipy.ndimage import generate_binary_structure, binary_erosion, label
from scipy import ndimage as ndi
def wavelet_edge_detector(image, start_level=0, levels=2, wavelet='rbio3.1', c=0.15, noise_var=40, t1=1, t2=2, dbl_th=True):
    """Detect edges via the product of consecutive stationary wavelet scales.

    The image is decomposed with the 2D SWT, the horizontal / vertical
    detail coefficients of ``levels`` consecutive scales are multiplied
    (scale multiplication, Zhang & Bao 2002), and the modulus maxima of
    the resulting gradient field are extracted.  Optionally a double
    (hysteresis) threshold is applied to produce a binary edge mask.

    Parameters
    ----------
    image : 2D array
        Grayscale input image.
    start_level : int
        First SWT scale to include in the product.
    levels : int
        Number of consecutive scales to multiply; must be even.
    wavelet : str
        Wavelet name as listed by ``pywt.wavelist()``.
    c : float
        Threshold multiplier used by the double-thresholding step.
    noise_var : float
        Estimate of the Gaussian noise variance present in the image.
    t1 : float
        Multiplier for the lower hysteresis threshold.
    t2 : float
        Multiplier for the upper hysteresis threshold.
    dbl_th : bool
        When True, also compute the binary edge mask.

    Returns
    -------
    local_maxima : 2D array
        Modulus maxima of the scale-multiplied gradient.
    edge_mask : 2D array or None
        Binary edge mask, or None when ``dbl_th`` is False.
    """
    assert levels % 2 == 0
    n_scales = levels
    max_level = start_level + levels
    # Decompose with the 2D SWT; trim_approx=True drops the approximation
    # band, which this scheme does not use.
    coeffs = swt2(image, wavelet=wavelet, level=max_level,
                  start_level=start_level, norm=False,
                  trim_approx=True)
    # Collect the horizontal / vertical detail bands into (H, W, n_scales)
    # stacks, undoing the per-scale spatial shift of the SWT with np.roll.
    # Diagonal detail bands are not needed by the algorithm.
    h_stack = np.stack(
        [np.roll(coeffs[-1 - j][0], 2 ** (j + start_level)) for j in range(n_scales)],
        axis=2)
    v_stack = np.stack(
        [np.roll(coeffs[-1 - j][1], 2 ** (j + start_level)) for j in range(n_scales)],
        axis=2)
    # Scale multiplication: product of the detail coefficients across scales.
    Mdx = np.prod(h_stack, axis=2)
    Mdy = np.prod(v_stack, axis=2)
    # Negative products mean the scales disagree in sign -> attribute to noise.
    Mdx = np.where(Mdx >= 0, Mdx, 0)
    Mdy = np.where(Mdy >= 0, Mdy, 0)
    # Signed gradient components; the sign of the second stacked scale fixes
    # the direction of the edge normal.
    Adx = np.sign(h_stack[:, :, 1]) * np.sqrt(Mdx)
    Ady = np.sign(v_stack[:, :, 1]) * np.sqrt(Mdy)
    # Thin the response to modulus maxima along the edge normal.
    local_maxima = local_modulus_maxima(Adx, Ady, Mdx, Mdy)
    if not dbl_th:
        return local_maxima, None
    edge_mask = dbl_thresholding_ZhangBao(local_maxima, wavelet=wavelet,
                                          start_level=start_level,
                                          c=c, noise_var=noise_var,
                                          t1=t1, t2=t2)
    return local_maxima, edge_mask
def local_modulus_maxima(Adx, Ady, Mdx, Mdy, mask=None):
    """
    Code adapted from scikit-image's canny implementation for faster execution
    Title: canny.py - Canny Edge detector
    Author: <NAME>
    Date: 11/02/2020
    Code version: 0.17.2
    Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
    """
    """Fast computation of the local maxima using custom gradient and angle matrices
    Parameters
    -----------
    Adx : 2D array
        Gradient array along axis 0 (Horizontal Detail Coefficients) to be used
        for calculating the normal to the edges
    Ady : 2D array
        Gradient array along axis 1 (Vertical Detail Coefficients) to be used
        for calculating the normal to the edges
    Mdx : 2D array
        Gradient array along axis 0 (Horizontal Detail Coefficients) to be used
        for calculating the value of the edges
    Mdy : 2D array
        Gradient array along axis 1 (Vertical Detail Coefficients) to be used
        for calculating the value of the edges
    mask : array, dtype=bool, optional
        Mask to limit the application of Canny to a certain area.
    Returns
    -------
    output : 2D array
        The local maxima
    -----
    The steps of the algorithm are as follows:
    * Thin potential edges to 1-pixel wide curves. First, find the normal
      to the edge at each point. This is done by looking at the
      signs and the relative magnitude of the X-Sobel and Y-Sobel
      to sort the points into 4 categories: horizontal, vertical,
      diagonal and antidiagonal. Then look in the normal and reverse
      directions to see if the values in either of those directions are
      greater than the point in question. Use interpolation to get a mix of
      points instead of picking the one that's the closest to the normal.
    """
    #
    # The steps involved:
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # All four input arrays must share one shape; the two A* arrays supply
    # the edge-normal direction, the two M* arrays supply the edge strength.
    assert (Mdx.shape == Mdy.shape)
    assert (Mdx.shape == Adx.shape)
    assert (Adx.shape == Ady.shape)
    if mask is None:
        # default: consider the whole image
        mask = np.ones(Mdx.shape, dtype=bool)
    # Keep scikit-image's variable names: isobel/jsobel play the role the
    # Sobel responses play in canny, but here they are signed wavelet gradients.
    jsobel = Ady
    isobel = Adx
    abs_isobel = np.abs(isobel)
    abs_jsobel = np.abs(jsobel)
    # Edge strength is the magnitude of the (Mdx, Mdy) gradient vector.
    magnitude = np.hypot(Mdx, Mdy)
    #
    # Make the eroded mask. Setting the border value to zero will wipe
    # out the image edges for us.
    #
    # 8-connected structuring element: erosion removes the 1-pixel border so
    # the shifted-slice comparisons below never index outside the image.
    s = generate_binary_structure(2, 2)
    eroded_mask = binary_erosion(mask, s, border_value=0)
    # zero-magnitude pixels cannot be maxima; drop them up front
    eroded_mask = eroded_mask & (magnitude > 0)
    #
    #--------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = np.zeros(Mdx.shape)
    #----- 0 to 45 degrees ------
    # Normal points mostly horizontally; interpolate between the right
    # neighbour and the top-right neighbour (and their mirrors).
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    # interpolation weight along the normal.
    # NOTE(review): if both gradient components were exactly zero at a
    # selected pixel this would be 0/0; with inputs from
    # wavelet_edge_detector the magnitude > 0 guard appears to prevent
    # that, but verify for other callers.
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    # a pixel is a maximum when it dominates both interpolated neighbours
    local_maxima[pts] = c_plus & c_minus
    #----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:, 1:][pts[:, :-1]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1a = magnitude[:, 1:][pts[:, :-1]]
    c2a = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2a * w + c1a * (1.0 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1.0 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    # scale the boolean maxima map by the edge strength so the caller can
    # threshold it afterwards
    return local_maxima * magnitude
def dbl_thresholding_ZhangBao(local_maxima, start_level=0, wavelet='rbio3.1', c=20, noise_var=1, t1=1, t2=2):
    """
    Portions of code adapted from scikit-image's canny implementation for faster execution
    Title: canny.py - Canny Edge detector
    Author: <NAME>
    Date: 11/02/2020
    Code version: 0.17.2
    Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
    """
    """Hysteresis (double) thresholding based on wavelet energy and noise variance.

    The base threshold follows Zhang and Bao's formulation: it is built
    from the energies of the wavelet function at the two multiplied
    scales and the estimated noise variance.  Pixels above ``t2`` times
    the base threshold seed edges; every 8-connected low-threshold
    component containing at least one seed is kept.

    Parameters
    -----------
    local_maxima : 2D array
        Local maxima extracted by the local maxima method, same shape as input image
    start_level : int
        Initial coefficient scale level extracted by the SWT
    wavelet : string
        Name of wavelet as listed by pywt.wavelist()
    c : float
        Multiplier for calculating the threshold
    noise_var : float
        Estimate of the Gaussian Noise variance present in the image
    t1 : float
        Multiplier for the lower threshold
    t2 : float
        Multiplier for the upper threshold

    Returns
    -------
    edge_mask : 2D bool array
        Binary array marking edges present in the local maxima
    """
    # wavefun() returns (phi, psi, x) for orthogonal wavelets and
    # (phi_d, psi_d, phi_r, psi_r, x) for biorthogonal ones; in both
    # cases the decomposition wavelet function sits at index 1.
    wav = Wavelet(wavelet)
    psi_d1 = wav.wavefun(level=start_level + 1)[1]
    psi_d2 = wav.wavefun(level=start_level + 2)[1]
    # square roots of the wavelet energies at the two scales
    energy_d1 = np.linalg.norm(psi_d1)
    energy_d2 = np.linalg.norm(psi_d2)
    # dyadic upsampling (insert zeros) so psi_d1 aligns with psi_d2
    upsampled = np.zeros(2 * psi_d1.size, dtype=psi_d1.dtype)
    upsampled[0::2] = psi_d1
    if wavelet == 'haar':
        # haar's upsampled function carries one extra sample at each end
        upsampled = upsampled[1:-1]
    sigma_i_sq = 2 * np.sum((upsampled / energy_d1 + psi_d2 / energy_d2) ** 2)
    base_threshold = c * energy_d1 * energy_d2 * noise_var * sigma_i_sq
    low_mask = local_maxima >= base_threshold * t1
    high_mask = local_maxima >= base_threshold * t2
    # Hysteresis: label the low mask with 8-connectivity and keep every
    # component that overlaps the high mask.
    labels, n_components = label(low_mask, np.ones((3, 3), bool))
    if n_components == 0:
        return low_mask
    component_ids = np.arange(1, n_components + 1, dtype=np.int32)
    has_seed = np.atleast_1d(ndi.sum(high_mask, labels, component_ids)) > 0
    # index 0 is the background label -> never an edge
    keep = np.concatenate(([False], has_seed))
    return keep[labels]
# run demo: compare Haar and rbio3.1 wavelet edge detection on a test image
if __name__ == "__main__":
    import cv2 as cv

    # tuned detector parameters for the 12-bit USAF target
    lvl = 0
    c = 0.345
    t1 = 1.0
    t2 = 2.75
    noise_var = 7237.754103671255

    window_names = (
        'Camera Capture',
        'Product Local Maxima - Haar Wavelet',
        'Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet',
        'Edges - Haar Wavelet',
        'Edges - Reverse Biorthogonal 3.1 Wavelet',
        'Overlay - Haar Wavelet',
        'Overlay - Reverse Biorthogonal 3.1 Wavelet',
    )
    for name in window_names:
        cv.namedWindow(name, cv.WINDOW_NORMAL)

    image = cv.imread('test_images/USAF.tiff', cv.IMREAD_GRAYSCALE)
    # convert image from 8-bit to 12-bit, same as camera depth
    # (np.float was removed in NumPy 1.24 -- use np.float64)
    image = image.astype(np.float64)
    image = image * 4095 / 256
    image = image.astype(np.uint16)

    def _detect(wavelet_name):
        """Run the detector for one wavelet and scale outputs for 16-bit display.

        Returns (local_maxima, edges, overlay): the maxima map and edge mask
        as uint16 images, and a float BGR overlay with edges in one channel.
        """
        maxima, edges = wavelet_edge_detector(image, start_level=lvl,
                                              wavelet=wavelet_name, c=c,
                                              noise_var=noise_var,
                                              t1=t1, t2=t2)
        maxima = (maxima / np.max(maxima) * 65535).astype(np.uint16)
        edges16 = (edges * 65535).astype(np.uint16)
        # overlay: grayscale image in every channel, edge mask added to one
        overlay = np.zeros(image.shape + (3,))
        overlay[:, :, 0] = image / 4096
        overlay[:, :, 1] = overlay[:, :, 0]
        overlay[:, :, 2] = np.clip(overlay[:, :, 0] + edges16 / 65535, 0, 1)
        return maxima, edges16, overlay

    # identical pipeline for both wavelets, so the windows are comparable
    local_maxima_hr, edges_hr, comb_hr = _detect('haar')
    local_maxima_rb, edges_rb, comb_rb = _detect('rbio3.1')

    # rescale the 12-bit source image to the full 16-bit display range
    image = image.astype(np.float64)
    image = image * 65535 / 4096
    image = image.astype(np.uint16)
    try:
        while True:  # refresh until interrupted with Ctrl-C
            cv.imshow('Camera Capture', image)
            cv.imshow('Product Local Maxima - Haar Wavelet', local_maxima_hr)
            cv.imshow('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet', local_maxima_rb)
            cv.imshow('Edges - Haar Wavelet', edges_hr)
            cv.imshow('Edges - Reverse Biorthogonal 3.1 Wavelet', edges_rb)
            cv.imshow('Overlay - Haar Wavelet', comb_hr)
            cv.imshow('Overlay - Reverse Biorthogonal 3.1 Wavelet', comb_rb)
            cv.waitKey(1)
    except KeyboardInterrupt:
        cv.destroyAllWindows()
|
[
"scipy.ndimage.generate_binary_structure",
"numpy.abs",
"numpy.sum",
"numpy.empty",
"numpy.ones",
"numpy.clip",
"pywt.swt2",
"numpy.arange",
"cv2.imshow",
"numpy.prod",
"numpy.max",
"cv2.destroyAllWindows",
"numpy.roll",
"cv2.waitKey",
"numpy.hypot",
"pywt.Wavelet",
"scipy.ndimage.binary_erosion",
"numpy.zeros",
"cv2.imread",
"scipy.ndimage.label",
"numpy.sign",
"cv2.namedWindow",
"numpy.sqrt"
] |
[((2283, 2388), 'pywt.swt2', 'swt2', (['image'], {'wavelet': 'wavelet', 'level': 'max_level', 'start_level': 'start_level', 'norm': '(False)', 'trim_approx': '(True)'}), '(image, wavelet=wavelet, level=max_level, start_level=start_level, norm\n =False, trim_approx=True)\n', (2287, 2388), False, 'from pywt import swt2, Wavelet\n'), ((2599, 2649), 'numpy.empty', 'np.empty', (['(image.shape + (max_level - start_level,))'], {}), '(image.shape + (max_level - start_level,))\n', (2607, 2649), True, 'import numpy as np\n'), ((2669, 2719), 'numpy.empty', 'np.empty', (['(image.shape + (max_level - start_level,))'], {}), '(image.shape + (max_level - start_level,))\n', (2677, 2719), True, 'import numpy as np\n'), ((3074, 3102), 'numpy.prod', 'np.prod', (['coeff_arr_H'], {'axis': '(2)'}), '(coeff_arr_H, axis=2)\n', (3081, 3102), True, 'import numpy as np\n'), ((3114, 3142), 'numpy.prod', 'np.prod', (['coeff_arr_V'], {'axis': '(2)'}), '(coeff_arr_V, axis=2)\n', (3121, 3142), True, 'import numpy as np\n'), ((6917, 6931), 'numpy.abs', 'np.abs', (['isobel'], {}), '(isobel)\n', (6923, 6931), True, 'import numpy as np\n'), ((6950, 6964), 'numpy.abs', 'np.abs', (['jsobel'], {}), '(jsobel)\n', (6956, 6964), True, 'import numpy as np\n'), ((6982, 7000), 'numpy.hypot', 'np.hypot', (['Mdx', 'Mdy'], {}), '(Mdx, Mdy)\n', (6990, 7000), True, 'import numpy as np\n'), ((7137, 7168), 'scipy.ndimage.generate_binary_structure', 'generate_binary_structure', (['(2)', '(2)'], {}), '(2, 2)\n', (7162, 7168), False, 'from scipy.ndimage import generate_binary_structure, binary_erosion, label\n'), ((7188, 7227), 'scipy.ndimage.binary_erosion', 'binary_erosion', (['mask', 's'], {'border_value': '(0)'}), '(mask, s, border_value=0)\n', (7202, 7227), False, 'from scipy.ndimage import generate_binary_structure, binary_erosion, label\n'), ((7484, 7503), 'numpy.zeros', 'np.zeros', (['Mdx.shape'], {}), '(Mdx.shape)\n', (7492, 7503), True, 'import numpy as np\n'), ((12443, 12459), 'pywt.Wavelet', 'Wavelet', 
(['wavelet'], {}), '(wavelet)\n', (12450, 12459), False, 'from pywt import swt2, Wavelet\n'), ((13543, 13564), 'numpy.ones', 'np.ones', (['(3, 3)', 'bool'], {}), '((3, 3), bool)\n', (13550, 13564), True, 'import numpy as np\n'), ((13586, 13608), 'scipy.ndimage.label', 'label', (['low_mask', 'strel'], {}), '(low_mask, strel)\n', (13591, 13608), False, 'from scipy.ndimage import generate_binary_structure, binary_erosion, label\n'), ((13806, 13834), 'numpy.zeros', 'np.zeros', (['(count + 1,)', 'bool'], {}), '((count + 1,), bool)\n', (13814, 13834), True, 'import numpy as np\n'), ((14110, 14160), 'cv2.namedWindow', 'cv.namedWindow', (['"""Camera Capture"""', 'cv.WINDOW_NORMAL'], {}), "('Camera Capture', cv.WINDOW_NORMAL)\n", (14124, 14160), True, 'import cv2 as cv\n'), ((14166, 14237), 'cv2.namedWindow', 'cv.namedWindow', (['"""Product Local Maxima - Haar Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Product Local Maxima - Haar Wavelet', cv.WINDOW_NORMAL)\n", (14180, 14237), True, 'import cv2 as cv\n'), ((14243, 14338), 'cv2.namedWindow', 'cv.namedWindow', (['"""Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet',\n cv.WINDOW_NORMAL)\n", (14257, 14338), True, 'import cv2 as cv\n'), ((14340, 14396), 'cv2.namedWindow', 'cv.namedWindow', (['"""Edges - Haar Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Edges - Haar Wavelet', cv.WINDOW_NORMAL)\n", (14354, 14396), True, 'import cv2 as cv\n'), ((14402, 14478), 'cv2.namedWindow', 'cv.namedWindow', (['"""Edges - Reverse Biorthogonal 3.1 Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Edges - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)\n", (14416, 14478), True, 'import cv2 as cv\n'), ((14484, 14542), 'cv2.namedWindow', 'cv.namedWindow', (['"""Overlay - Haar Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Overlay - Haar Wavelet', cv.WINDOW_NORMAL)\n", (14498, 14542), True, 'import cv2 as cv\n'), ((14548, 14626), 'cv2.namedWindow', 
'cv.namedWindow', (['"""Overlay - Reverse Biorthogonal 3.1 Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Overlay - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)\n", (14562, 14626), True, 'import cv2 as cv\n'), ((14646, 14701), 'cv2.imread', 'cv.imread', (['"""test_images/USAF.tiff"""', 'cv.IMREAD_GRAYSCALE'], {}), "('test_images/USAF.tiff', cv.IMREAD_GRAYSCALE)\n", (14655, 14701), True, 'import cv2 as cv\n'), ((15426, 15454), 'numpy.zeros', 'np.zeros', (['(image.shape + (3,))'], {}), '(image.shape + (3,))\n', (15434, 15454), True, 'import numpy as np\n'), ((15641, 15672), 'numpy.clip', 'np.clip', (['comb_hr[:, :, 2]', '(0)', '(1)'], {}), '(comb_hr[:, :, 2], 0, 1)\n', (15648, 15672), True, 'import numpy as np\n'), ((16246, 16274), 'numpy.zeros', 'np.zeros', (['(image.shape + (3,))'], {}), '(image.shape + (3,))\n', (16254, 16274), True, 'import numpy as np\n'), ((16461, 16492), 'numpy.clip', 'np.clip', (['comb_rb[:, :, 2]', '(0)', '(1)'], {}), '(comb_rb[:, :, 2], 0, 1)\n', (16468, 16492), True, 'import numpy as np\n'), ((2860, 2910), 'numpy.roll', 'np.roll', (['coeffs[-1 - i][0]', '(2 ** (i + start_level))'], {}), '(coeffs[-1 - i][0], 2 ** (i + start_level))\n', (2867, 2910), True, 'import numpy as np\n'), ((2936, 2986), 'numpy.roll', 'np.roll', (['coeffs[-1 - i][1]', '(2 ** (i + start_level))'], {}), '(coeffs[-1 - i][1], 2 ** (i + start_level))\n', (2943, 2986), True, 'import numpy as np\n'), ((3393, 3422), 'numpy.sign', 'np.sign', (['coeff_arr_H[:, :, 1]'], {}), '(coeff_arr_H[:, :, 1])\n', (3400, 3422), True, 'import numpy as np\n'), ((3421, 3433), 'numpy.sqrt', 'np.sqrt', (['Mdx'], {}), '(Mdx)\n', (3428, 3433), True, 'import numpy as np\n'), ((3445, 3474), 'numpy.sign', 'np.sign', (['coeff_arr_V[:, :, 1]'], {}), '(coeff_arr_V[:, :, 1])\n', (3452, 3474), True, 'import numpy as np\n'), ((3473, 3485), 'numpy.sqrt', 'np.sqrt', (['Mdy'], {}), '(Mdy)\n', (3480, 3485), True, 'import numpy as np\n'), ((6830, 6860), 'numpy.ones', 'np.ones', (['Mdx.shape'], {'dtype': 
'bool'}), '(Mdx.shape, dtype=bool)\n', (6837, 6860), True, 'import numpy as np\n'), ((12847, 12866), 'numpy.sum', 'np.sum', (['(psi_d1 ** 2)'], {}), '(psi_d1 ** 2)\n', (12853, 12866), True, 'import numpy as np\n'), ((12895, 12914), 'numpy.sum', 'np.sum', (['(psi_d2 ** 2)'], {}), '(psi_d2 ** 2)\n', (12901, 12914), True, 'import numpy as np\n'), ((13158, 13223), 'numpy.sum', 'np.sum', (['((psi_d1_up / energy_psi_d1 + psi_d2 / energy_psi_d2) ** 2)'], {}), '((psi_d1_up / energy_psi_d1 + psi_d2 / energy_psi_d2) ** 2)\n', (13164, 13223), True, 'import numpy as np\n'), ((15208, 15231), 'numpy.max', 'np.max', (['local_maxima_hr'], {}), '(local_maxima_hr)\n', (15214, 15231), True, 'import numpy as np\n'), ((15330, 15353), 'numpy.ones', 'np.ones', (['edges_hr.shape'], {}), '(edges_hr.shape)\n', (15337, 15353), True, 'import numpy as np\n'), ((16028, 16051), 'numpy.max', 'np.max', (['local_maxima_rb'], {}), '(local_maxima_rb)\n', (16034, 16051), True, 'import numpy as np\n'), ((16150, 16173), 'numpy.ones', 'np.ones', (['edges_rb.shape'], {}), '(edges_rb.shape)\n', (16157, 16173), True, 'import numpy as np\n'), ((16652, 16686), 'cv2.imshow', 'cv.imshow', (['"""Camera Capture"""', 'image'], {}), "('Camera Capture', image)\n", (16661, 16686), True, 'import cv2 as cv\n'), ((16700, 16765), 'cv2.imshow', 'cv.imshow', (['"""Product Local Maxima - Haar Wavelet"""', 'local_maxima_hr'], {}), "('Product Local Maxima - Haar Wavelet', local_maxima_hr)\n", (16709, 16765), True, 'import cv2 as cv\n'), ((16779, 16868), 'cv2.imshow', 'cv.imshow', (['"""Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet"""', 'local_maxima_rb'], {}), "('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet',\n local_maxima_rb)\n", (16788, 16868), True, 'import cv2 as cv\n'), ((16878, 16921), 'cv2.imshow', 'cv.imshow', (['"""Edges - Haar Wavelet"""', 'edges_hr'], {}), "('Edges - Haar Wavelet', edges_hr)\n", (16887, 16921), True, 'import cv2 as cv\n'), ((16935, 16998), 'cv2.imshow', 'cv.imshow', 
(['"""Edges - Reverse Biorthogonal 3.1 Wavelet"""', 'edges_rb'], {}), "('Edges - Reverse Biorthogonal 3.1 Wavelet', edges_rb)\n", (16944, 16998), True, 'import cv2 as cv\n'), ((17012, 17056), 'cv2.imshow', 'cv.imshow', (['"""Overlay - Haar Wavelet"""', 'comb_hr'], {}), "('Overlay - Haar Wavelet', comb_hr)\n", (17021, 17056), True, 'import cv2 as cv\n'), ((17070, 17134), 'cv2.imshow', 'cv.imshow', (['"""Overlay - Reverse Biorthogonal 3.1 Wavelet"""', 'comb_rb'], {}), "('Overlay - Reverse Biorthogonal 3.1 Wavelet', comb_rb)\n", (17079, 17134), True, 'import cv2 as cv\n'), ((17162, 17175), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (17172, 17175), True, 'import cv2 as cv\n'), ((17230, 17252), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (17250, 17252), True, 'import cv2 as cv\n'), ((13705, 13737), 'numpy.arange', 'np.arange', (['count'], {'dtype': 'np.int32'}), '(count, dtype=np.int32)\n', (13714, 13737), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
import xarray as xr
import dask.array as da
from xrspatial import curvature
from xrspatial.utils import doesnt_have_cuda
from xrspatial.tests.general_checks import general_output_checks
# 7x6 float32 elevation fixture shared by the backend-equivalence tests
# below; the first row is all NaN so NaN handling is exercised too.
elevation = np.asarray([
    [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
    [1584.8767, 1584.8767, 1585.0546, 1585.2324, 1585.2324, 1585.2324],
    [1585.0546, 1585.0546, 1585.2324, 1585.588, 1585.588, 1585.588],
    [1585.2324, 1585.4102, 1585.588, 1585.588, 1585.588, 1585.588],
    [1585.588, 1585.588, 1585.7659, 1585.7659, 1585.7659, 1585.7659],
    [1585.7659, 1585.9437, 1585.7659, 1585.7659, 1585.7659, 1585.7659],
    [1585.9437, 1585.9437, 1585.9437, 1585.7659, 1585.7659, 1585.7659]],
    dtype=np.float32
)
def test_curvature_on_flat_surface():
    """A constant surface has zero curvature everywhere inside the border."""
    flat = np.zeros((5, 5), dtype=int)
    # border cells have no full neighbourhood -> NaN; interior is 0
    expected = np.full((5, 5), np.nan)
    expected[1:-1, 1:-1] = 0
    raster = xr.DataArray(flat, attrs={'res': (1, 1)})
    result = curvature(raster)
    general_output_checks(raster, result, expected)
def test_curvature_on_convex_surface():
    """A single dip at the centre produces a strong negative centre value
    surrounded by positive curvature at the 4-neighbours."""
    surface = np.zeros((5, 5), dtype=int)
    surface[2, 2] = -1
    expected = np.full((5, 5), np.nan)
    expected[1:-1, 1:-1] = np.asarray([[0., 100., 0.],
                                        [100., -400., 100.],
                                        [0., 100., 0.]])
    raster = xr.DataArray(surface, attrs={'res': (1, 1)})
    result = curvature(raster)
    general_output_checks(raster, result, expected)
def test_curvature_on_concave_surface():
    """A single bump at the centre mirrors the convex case with flipped signs."""
    surface = np.zeros((5, 5), dtype=int)
    surface[2, 2] = 1
    expected = np.full((5, 5), np.nan)
    expected[1:-1, 1:-1] = np.asarray([[0., -100., 0.],
                                        [-100., 400., -100.],
                                        [0., -100., 0.]])
    raster = xr.DataArray(surface, attrs={'res': (1, 1)})
    result = curvature(raster)
    general_output_checks(raster, result, expected)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_curvature_gpu_equals_cpu():
    """The cupy (GPU) backend must reproduce the numpy (CPU) result."""
    import cupy

    cpu_agg = xr.DataArray(elevation, attrs={'res': (10.0, 10.0)})
    cpu_result = curvature(cpu_agg, name='numpy_result')

    gpu_agg = xr.DataArray(cupy.asarray(elevation),
                          attrs={'res': (10.0, 10.0)})
    gpu_result = curvature(gpu_agg, name='cupy_result')
    general_output_checks(gpu_agg, gpu_result)
    # .get() copies the cupy result back to host memory for comparison
    np.testing.assert_allclose(cpu_result.data, gpu_result.data.get(),
                               equal_nan=True)
# NOTE: Dask + GPU code paths don't currently work because dask
# casts cupy arrays to numpy arrays during computation; see
# https://github.com/dask/dask/issues/4842
def test_curvature_numpy_equals_dask():
    """Chunked dask input must yield the same curvature as plain numpy."""
    numpy_agg = xr.DataArray(elevation, attrs={'res': (10.0, 10.0)})
    expected = curvature(numpy_agg, name='numpy_curvature')

    chunked = da.from_array(elevation, chunks=(3, 3))
    dask_agg = xr.DataArray(chunked, attrs={'res': (10.0, 10.0)})
    actual = curvature(dask_agg, name='dask_curvature')
    general_output_checks(dask_agg, actual)
    # .compute() materialises the lazy dask result before comparing
    np.testing.assert_allclose(expected.data, actual.data.compute(),
                               equal_nan=True)
|
[
"xrspatial.utils.doesnt_have_cuda",
"cupy.asarray",
"numpy.asarray",
"xrspatial.tests.general_checks.general_output_checks",
"numpy.array",
"xarray.DataArray",
"dask.array.from_array",
"xrspatial.curvature"
] |
[((248, 756), 'numpy.asarray', 'np.asarray', (['[[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [1584.8767, 1584.8767, \n 1585.0546, 1585.2324, 1585.2324, 1585.2324], [1585.0546, 1585.0546, \n 1585.2324, 1585.588, 1585.588, 1585.588], [1585.2324, 1585.4102, \n 1585.588, 1585.588, 1585.588, 1585.588], [1585.588, 1585.588, 1585.7659,\n 1585.7659, 1585.7659, 1585.7659], [1585.7659, 1585.9437, 1585.7659, \n 1585.7659, 1585.7659, 1585.7659], [1585.9437, 1585.9437, 1585.9437, \n 1585.7659, 1585.7659, 1585.7659]]'], {'dtype': 'np.float32'}), '([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [1584.8767, \n 1584.8767, 1585.0546, 1585.2324, 1585.2324, 1585.2324], [1585.0546, \n 1585.0546, 1585.2324, 1585.588, 1585.588, 1585.588], [1585.2324, \n 1585.4102, 1585.588, 1585.588, 1585.588, 1585.588], [1585.588, 1585.588,\n 1585.7659, 1585.7659, 1585.7659, 1585.7659], [1585.7659, 1585.9437, \n 1585.7659, 1585.7659, 1585.7659, 1585.7659], [1585.9437, 1585.9437, \n 1585.9437, 1585.7659, 1585.7659, 1585.7659]], dtype=np.float32)\n', (258, 756), True, 'import numpy as np\n'), ((851, 951), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (859, 951), True, 'import numpy as np\n'), ((1079, 1264), 'numpy.array', 'np.array', (['[[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0, 0, 0, np.nan], [np.\n nan, 0, 0, 0, np.nan], [np.nan, 0, 0, 0, np.nan], [np.nan, np.nan, np.\n nan, np.nan, np.nan]]'], {}), '([[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0, 0, 0, np.\n nan], [np.nan, 0, 0, 0, np.nan], [np.nan, 0, 0, 0, np.nan], [np.nan, np\n .nan, np.nan, np.nan, np.nan]])\n', (1087, 1264), True, 'import numpy as np\n'), ((1372, 1418), 'xarray.DataArray', 'xr.DataArray', (['test_arr1'], {'attrs': "{'res': (1, 1)}"}), "(test_arr1, attrs={'res': (1, 1)})\n", (1384, 1418), True, 'import xarray as xr\n'), ((1431, 
1454), 'xrspatial.curvature', 'curvature', (['test_raster1'], {}), '(test_raster1)\n', (1440, 1454), False, 'from xrspatial import curvature\n'), ((1460, 1519), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['test_raster1', 'curv', 'expected_results'], {}), '(test_raster1, curv, expected_results)\n', (1481, 1519), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((1596, 1697), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, -1, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, -1, 0, 0], [0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0]])\n', (1604, 1697), True, 'import numpy as np\n'), ((1825, 2041), 'numpy.asarray', 'np.asarray', (['[[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, 100.0, 0.0, np.nan\n ], [np.nan, 100.0, -400.0, 100.0, np.nan], [np.nan, 0.0, 100.0, 0.0, np\n .nan], [np.nan, np.nan, np.nan, np.nan, np.nan]]'], {}), '([[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, 100.0, \n 0.0, np.nan], [np.nan, 100.0, -400.0, 100.0, np.nan], [np.nan, 0.0, \n 100.0, 0.0, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan]])\n', (1835, 2041), True, 'import numpy as np\n'), ((2130, 2176), 'xarray.DataArray', 'xr.DataArray', (['test_arr2'], {'attrs': "{'res': (1, 1)}"}), "(test_arr2, attrs={'res': (1, 1)})\n", (2142, 2176), True, 'import xarray as xr\n'), ((2189, 2212), 'xrspatial.curvature', 'curvature', (['test_raster2'], {}), '(test_raster2)\n', (2198, 2212), False, 'from xrspatial import curvature\n'), ((2218, 2277), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['test_raster2', 'curv', 'expected_results'], {}), '(test_raster2, curv, expected_results)\n', (2239, 2277), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((2356, 2456), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 
0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (2364, 2456), True, 'import numpy as np\n'), ((2584, 2802), 'numpy.asarray', 'np.asarray', (['[[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, -100.0, 0.0, np.\n nan], [np.nan, -100.0, 400.0, -100.0, np.nan], [np.nan, 0.0, -100.0, \n 0.0, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan]]'], {}), '([[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, -100.0,\n 0.0, np.nan], [np.nan, -100.0, 400.0, -100.0, np.nan], [np.nan, 0.0, -\n 100.0, 0.0, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan]])\n', (2594, 2802), True, 'import numpy as np\n'), ((2889, 2935), 'xarray.DataArray', 'xr.DataArray', (['test_arr3'], {'attrs': "{'res': (1, 1)}"}), "(test_arr3, attrs={'res': (1, 1)})\n", (2901, 2935), True, 'import xarray as xr\n'), ((2948, 2971), 'xrspatial.curvature', 'curvature', (['test_raster3'], {}), '(test_raster3)\n', (2957, 2971), False, 'from xrspatial import curvature\n'), ((2977, 3036), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['test_raster3', 'curv', 'expected_results'], {}), '(test_raster3, curv, expected_results)\n', (2998, 3036), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((3194, 3246), 'xarray.DataArray', 'xr.DataArray', (['elevation'], {'attrs': "{'res': (10.0, 10.0)}"}), "(elevation, attrs={'res': (10.0, 10.0)})\n", (3206, 3246), True, 'import xarray as xr\n'), ((3258, 3299), 'xrspatial.curvature', 'curvature', (['agg_numpy'], {'name': '"""numpy_result"""'}), "(agg_numpy, name='numpy_result')\n", (3267, 3299), False, 'from xrspatial import curvature\n'), ((3412, 3451), 'xrspatial.curvature', 'curvature', (['agg_cupy'], {'name': '"""cupy_result"""'}), "(agg_cupy, name='cupy_result')\n", (3421, 3451), False, 'from xrspatial import curvature\n'), ((3459, 3495), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['agg_cupy', 'gpu'], {}), '(agg_cupy, gpu)\n', 
(3480, 3495), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((3062, 3080), 'xrspatial.utils.doesnt_have_cuda', 'doesnt_have_cuda', ([], {}), '()\n', (3078, 3080), False, 'from xrspatial.utils import doesnt_have_cuda\n'), ((3804, 3856), 'xarray.DataArray', 'xr.DataArray', (['elevation'], {'attrs': "{'res': (10.0, 10.0)}"}), "(elevation, attrs={'res': (10.0, 10.0)})\n", (3816, 3856), True, 'import xarray as xr\n'), ((3880, 3924), 'xrspatial.curvature', 'curvature', (['agg_numpy'], {'name': '"""numpy_curvature"""'}), "(agg_numpy, name='numpy_curvature')\n", (3889, 3924), False, 'from xrspatial import curvature\n'), ((4064, 4106), 'xrspatial.curvature', 'curvature', (['agg_dask'], {'name': '"""dask_curvature"""'}), "(agg_dask, name='dask_curvature')\n", (4073, 4106), False, 'from xrspatial import curvature\n'), ((4112, 4159), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['agg_dask', 'dask_curvature'], {}), '(agg_dask, dask_curvature)\n', (4133, 4159), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((3341, 3364), 'cupy.asarray', 'cupy.asarray', (['elevation'], {}), '(elevation)\n', (3353, 3364), False, 'import cupy\n'), ((3966, 4005), 'dask.array.from_array', 'da.from_array', (['elevation'], {'chunks': '(3, 3)'}), '(elevation, chunks=(3, 3))\n', (3979, 4005), True, 'import dask.array as da\n')]
|
import os
path = os.getcwd()
from cu__grid_cell.data_gen import data_gen
from cu__grid_cell.preparation import preparation
import numpy as np
from cu__grid_cell.Validation.validation_utils import plot_image, grid_based_eval_with_iou, plot_image3d, nms, concatenate_cells
import matplotlib.pyplot as plt
import cv2
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e**(-x)), applied elementwise."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
batch = 2
# Build the model in testing mode and pull its configuration object.
model_obj = preparation(testing = True)
config = model_obj.config
# Generator over the curve test set; yields input images, labels,
# ground-truth images and ground-truth lane polylines.
a = data_gen(dataset=config.CU_test6_curve_hdf5_path, batchsize=batch, config=config, augment=False)
generator = a.batch_gen(test=True)
x_img, y, gt_image, gt_lanes = next(generator)
y = y[0]
concatenate_cells(y[0], config)
prediction = model_obj.predict(x_img)
# Scale factors from network input resolution (config.img_w x config.img_h)
# back to the original 1640x590 image size.
scale_size_y = (1640 -1) / config.img_w
scale_size_x = (590 -1) /config.img_h
M = np.array([[scale_size_y, 0, 0],
            [0, scale_size_x, 0],
            [0, 0, 1.]])
# Keep only the first two rows: a 2x3 affine scaling matrix.
M=M[0:2]
if config.splitted:
    # Split heads: merge localization and confidence outputs back together.
    lok = prediction[-2]
    conf = prediction[-1]
    prediction = np.concatenate([lok, conf], axis=-1)
#elif config.staged:
#    prediction = prediction[-1]
for i, s in enumerate(prediction):
    s = nms(s, config)
    plt.figure(1)
    #f, axarr = plt.subplots(1, 2)
    #axarr[0].imshow(gt_image[i,:,:,::-1].astype(np.uint8))
    #axarr[0].set_title('Ground Truth', color='0.7')
    # Draw ground-truth lanes in green.
    for a in gt_lanes[i]:
        gt_image[i] = cv2.polylines(gt_image[i], np.int32([a]), isClosed=0, color=(0, 255, 0), thickness=10)
    lanes_pred = concatenate_cells(s, config, prediction=True)
    original_points = lanes_pred
    # Rescale every predicted lane from network to original image coordinates.
    for j, o in enumerate(original_points):
        o = np.array(o).T
        ones = np.ones_like(o[:, 0])
        ones = ones[..., None]
        original_points[j] = np.concatenate((o, ones),
                                  axis=1) # we reuse 3rd column in completely different way here, it is hack for matmul with M
        original_points[j] = np.matmul(M, original_points[j].T).T # transpose for multiplication
    lanes = original_points # take only coords!
    # Draw predicted lanes in red on top of the ground truth.
    for a in lanes:
        gt_image[i] = cv2.polylines(gt_image[i], np.int32([a]), isClosed=0,color=(0,0,255), thickness=10)
    #pred_img = plot_image(s, config, with_print=True, plot_image =x_img[i,:,:])
    plt.imshow(gt_image[i,:,:,::-1].astype(np.uint8))
    # plt.set_title('Predicted', color='0.7')
    # now 3d plot
    plot_image3d(s, config, True, with_print=False)
    # plot_image3d(y[i], config, False, with_print=False)
    plt.show()
test = 0
|
[
"matplotlib.pyplot.show",
"numpy.ones_like",
"os.getcwd",
"cu__grid_cell.Validation.validation_utils.concatenate_cells",
"matplotlib.pyplot.figure",
"cu__grid_cell.data_gen.data_gen",
"numpy.array",
"numpy.exp",
"numpy.int32",
"numpy.matmul",
"cu__grid_cell.Validation.validation_utils.nms",
"cu__grid_cell.Validation.validation_utils.plot_image3d",
"numpy.concatenate",
"cu__grid_cell.preparation.preparation"
] |
[((18, 29), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (27, 29), False, 'import os\n'), ((392, 417), 'cu__grid_cell.preparation.preparation', 'preparation', ([], {'testing': '(True)'}), '(testing=True)\n', (403, 417), False, 'from cu__grid_cell.preparation import preparation\n'), ((450, 551), 'cu__grid_cell.data_gen.data_gen', 'data_gen', ([], {'dataset': 'config.CU_test6_curve_hdf5_path', 'batchsize': 'batch', 'config': 'config', 'augment': '(False)'}), '(dataset=config.CU_test6_curve_hdf5_path, batchsize=batch, config=\n config, augment=False)\n', (458, 551), False, 'from cu__grid_cell.data_gen import data_gen\n'), ((638, 669), 'cu__grid_cell.Validation.validation_utils.concatenate_cells', 'concatenate_cells', (['y[0]', 'config'], {}), '(y[0], config)\n', (655, 669), False, 'from cu__grid_cell.Validation.validation_utils import plot_image, grid_based_eval_with_iou, plot_image3d, nms, concatenate_cells\n'), ((796, 863), 'numpy.array', 'np.array', (['[[scale_size_y, 0, 0], [0, scale_size_x, 0], [0, 0, 1.0]]'], {}), '([[scale_size_y, 0, 0], [0, scale_size_x, 0], [0, 0, 1.0]])\n', (804, 863), True, 'import numpy as np\n'), ((2483, 2493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2491, 2493), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1050), 'numpy.concatenate', 'np.concatenate', (['[lok, conf]'], {'axis': '(-1)'}), '([lok, conf], axis=-1)\n', (1028, 1050), True, 'import numpy as np\n'), ((1150, 1164), 'cu__grid_cell.Validation.validation_utils.nms', 'nms', (['s', 'config'], {}), '(s, config)\n', (1153, 1164), False, 'from cu__grid_cell.Validation.validation_utils import plot_image, grid_based_eval_with_iou, plot_image3d, nms, concatenate_cells\n'), ((1169, 1182), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1179, 1182), True, 'import matplotlib.pyplot as plt\n'), ((1486, 1531), 'cu__grid_cell.Validation.validation_utils.concatenate_cells', 'concatenate_cells', (['s', 'config'], {'prediction': '(True)'}), '(s, config, 
prediction=True)\n', (1503, 1531), False, 'from cu__grid_cell.Validation.validation_utils import plot_image, grid_based_eval_with_iou, plot_image3d, nms, concatenate_cells\n'), ((2377, 2424), 'cu__grid_cell.Validation.validation_utils.plot_image3d', 'plot_image3d', (['s', 'config', '(True)'], {'with_print': '(False)'}), '(s, config, True, with_print=False)\n', (2389, 2424), False, 'from cu__grid_cell.Validation.validation_utils import plot_image, grid_based_eval_with_iou, plot_image3d, nms, concatenate_cells\n'), ((1651, 1672), 'numpy.ones_like', 'np.ones_like', (['o[:, 0]'], {}), '(o[:, 0])\n', (1663, 1672), True, 'import numpy as np\n'), ((1733, 1766), 'numpy.concatenate', 'np.concatenate', (['(o, ones)'], {'axis': '(1)'}), '((o, ones), axis=1)\n', (1747, 1766), True, 'import numpy as np\n'), ((357, 367), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (363, 367), True, 'import numpy as np\n'), ((1409, 1422), 'numpy.int32', 'np.int32', (['[a]'], {}), '([a])\n', (1417, 1422), True, 'import numpy as np\n'), ((1622, 1633), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (1630, 1633), True, 'import numpy as np\n'), ((1926, 1960), 'numpy.matmul', 'np.matmul', (['M', 'original_points[j].T'], {}), '(M, original_points[j].T)\n', (1935, 1960), True, 'import numpy as np\n'), ((2114, 2127), 'numpy.int32', 'np.int32', (['[a]'], {}), '([a])\n', (2122, 2127), True, 'import numpy as np\n')]
|
import argparse
import torch
from pathlib import Path
import h5py
import logging
from tqdm import tqdm
import pprint
import numpy as np
from . import matchers
from .utils.base_model import dynamic_load
from .utils.parsers import names_to_pair
'''
A set of standard configurations that can be directly selected from the command
line using their name. Each is a dictionary with the following entries:
- output: the name of the match file that will be generated.
- model: the model configuration, as passed to a feature matcher.
'''
confs = {
    # Learned SuperGlue matcher with outdoor-trained weights.
    'superglue': {
        'output': 'matches-superglue',
        'model': {
            'name': 'superglue',
            'weights': 'outdoor',
            'sinkhorn_iterations': 50,
        },
    },
    # Mutual nearest-neighbor matching with a descriptor-distance cutoff of 0.7.
    'NN': {
        'output': 'matches-NN-mutual-dist.7',
        'model': {
            'name': 'nearest_neighbor',
            'mutual_check': True,
            'distance_threshold': 0.7,
        },
    }
}
def get_model(conf):
    """Instantiate the matcher described by ``conf['model']`` in eval mode.

    The model is placed on the GPU when CUDA is available, otherwise on CPU.
    """
    if torch.cuda.is_available():
        target_device = 'cuda'
    else:
        target_device = 'cpu'
    matcher_cls = dynamic_load(matchers, conf['model']['name'])
    return matcher_cls(conf['model']).eval().to(target_device)
@torch.no_grad()
def do_match (name0, name1, pairs, matched, num_matches_found, model, match_file, feature_file, query_feature_file, min_match_score, min_valid_ratio):
    """Match local features of ``name0`` against ``name1`` and store the result.

    Mutates ``pairs`` (name0 -> set of accepted partners), ``matched`` (set of
    ordered pairs already tried) and ``match_file`` (HDF5 output) in place.
    Returns the possibly-incremented ``num_matches_found`` counter.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    pair = names_to_pair(name0, name1)
    # Avoid to recompute duplicates to save time
    if len({(name0, name1), (name1, name0)} & matched) or pair in match_file:
        return num_matches_found
    data = {}
    feats0, feats1 = query_feature_file[name0], feature_file[name1]
    # Assemble the matcher input dict: query features suffixed '0',
    # database features suffixed '1'.
    for k in feats1.keys():
        data[k+'0'] = feats0[k].__array__()
    for k in feats1.keys():
        data[k+'1'] = feats1[k].__array__()
    data = {k: torch.from_numpy(v)[None].float().to(device)
            for k, v in data.items()}
    # some matchers might expect an image but only use its size
    data['image0'] = torch.empty((1, 1,)+tuple(feats0['image_size'])[::-1])
    data['image1'] = torch.empty((1, 1,)+tuple(feats1['image_size'])[::-1])
    pred = model(data)
    matches = pred['matches0'][0].cpu().short().numpy()
    scores = pred['matching_scores0'][0].cpu().half().numpy()
    # if score < min_match_score, set match to invalid
    matches[ scores < min_match_score ] = -1
    num_valid = np.count_nonzero(matches > -1)
    # Keep the pair only if enough keypoints survived the score threshold.
    if float(num_valid)/len(matches) > min_valid_ratio:
        v = pairs.get(name0)
        if v is None:
            v = set(())
        v.add(name1)
        pairs[name0] = v
        grp = match_file.create_group(pair)
        grp.create_dataset('matches0', data=matches)
        grp.create_dataset('matching_scores0', data=scores)
        matched |= {(name0, name1), (name1, name0)}
        num_matches_found += 1
    return num_matches_found
@torch.no_grad()
def best_match(conf, global_feature_path, feature_path, match_output_path, query_global_feature_path=None, query_feature_path=None, num_match_required=10,
        max_try=None, min_matched=None, pair_file_path=None, num_seq=False, sample_list=None, sample_list_path=None, min_match_score=0.85, min_valid_ratio=0.09):
    """Retrieve candidate pairs by global-descriptor similarity, then match them.

    For every query image, database images are ranked by the dot product of
    their global descriptors; local-feature matching (``do_match``) is run on
    sequential neighbours first (when ``num_seq`` is truthy) and on the
    top-ranked retrievals next, until ``num_match_required`` pairs pass the
    ``min_match_score`` / ``min_valid_ratio`` filters.

    Path arguments are ``pathlib.Path`` objects.  Matches are written to
    ``match_output_path`` (HDF5); accepted pairs are optionally written as a
    text file to ``pair_file_path`` (filtered by ``min_matched`` if given).
    """
    import json  # bug fix: json is used below but was never imported at module level

    logging.info('Dyn Matching local features with configuration:'
                 f'\n{pprint.pformat(conf)}')
    assert global_feature_path.exists(), feature_path
    global_feature_file = h5py.File(str(global_feature_path), 'r')
    if query_global_feature_path is not None:
        logging.info(f'(Using query_global_feature_path:{query_global_feature_path}')
        query_global_feature_file = h5py.File(str(query_global_feature_path), 'r')
    else:
        query_global_feature_file = global_feature_file
    assert feature_path.exists(), feature_path
    feature_file = h5py.File(str(feature_path), 'r')
    if query_feature_path is not None:
        logging.info(f'(Using query_feature_path:{query_feature_path}')
        query_feature_file = h5py.File(str(query_feature_path), 'r')
    else:
        query_feature_file = feature_file
    match_file = h5py.File(str(match_output_path), 'a')
    if sample_list_path is not None:
        # Bug fix: the original passed 'r' to str() instead of open()
        # -- str(sample_list_path, 'r') raises TypeError before the
        # sample list can ever be read.
        with open(str(sample_list_path), 'r') as f:
            sample_list = json.load(f)
    # get all sample names
    if sample_list is not None:
        names = sample_list
        q_names = names
    else:
        names = []
        global_feature_file.visititems(
            lambda _, obj: names.append(obj.parent.name.strip('/'))
            if isinstance(obj, h5py.Dataset) else None)
        names = list(set(names))
        names.sort()
        q_names = []
        query_global_feature_file.visititems(
            lambda _, obj: q_names.append(obj.parent.name.strip('/'))
            if isinstance(obj, h5py.Dataset) else None)
        q_names = list(set(q_names))
        q_names.sort()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    def tensor_from_names(names, hfile):
        # Stack per-image global descriptors into one (N, D) float tensor.
        desc = [hfile[i]['global_descriptor'].__array__() for i in names]
        desc = torch.from_numpy(np.stack(desc, 0)).to(device).float()
        return desc

    desc = tensor_from_names(names, global_feature_file)
    if query_global_feature_path is not None:
        q_desc = tensor_from_names(q_names, query_global_feature_file)
    else:
        q_desc = desc
    # descriptors are normalized, dot product indicates how close they are
    sim = torch.einsum('id,jd->ij', q_desc, desc)
    if max_try is None:
        max_try = len(names)
    topk = torch.topk(sim, max_try, dim=1).indices.cpu().numpy()
    Model = dynamic_load(matchers, conf['model']['name'])
    model = Model(conf['model']).eval().to(device)
    pairs = {}
    matched = set()
    for name0, indices in tqdm(zip(q_names, topk)):
        num_matches_found = 0
        # try sequential neighbor first
        # Robustness fix: the old test was `num_seq is not None`, but the
        # default is False, so the block always ran and names.index() could
        # raise ValueError for queries absent from `names`.  Treat any falsy
        # value (None, False, 0) as "sequential matching disabled".
        if num_seq:
            name0_at = names.index(name0)
            begin_from = name0_at - num_seq
            if begin_from < 0:
                begin_from = 0
            for i in range(begin_from, name0_at+num_seq):
                if i >= len(names):
                    break
                name1 = names[i]
                if name0 != name1:
                    num_matches_found = do_match(name0, name1, pairs, matched, num_matches_found, model, match_file, feature_file, query_feature_file, min_match_score, min_valid_ratio)
        # then the global retrieval
        for i in indices:
            name1 = names[i]
            if query_global_feature_path is not None or name0 != name1:
                num_matches_found = do_match(name0, name1, pairs, matched, num_matches_found, model, match_file, feature_file, query_feature_file, min_match_score, min_valid_ratio)
            if num_matches_found >= num_match_required:
                break
        if num_matches_found < num_match_required:
            logging.warning(f'num match for {name0} found {num_matches_found} less than num_match_required:{num_match_required}')
    match_file.close()
    if pair_file_path is not None:
        if min_matched is not None:
            # Drop queries that accumulated fewer than min_matched partners.
            pairs = {k:v for k,v in pairs.items() if len(v) >= min_matched }
        pairs_list = []
        for n0 in pairs.keys():
            for n1 in pairs.get(n0):
                pairs_list.append((n0,n1))
        with open(str(pair_file_path), 'w') as f:
            f.write('\n'.join(' '.join([i, j]) for i, j in pairs_list))
    logging.info('Finished exporting matches.')
@torch.no_grad()
def main(conf, pairs, features, export_dir, db_features=None, query_features=None, output_dir=None, exhaustive=False):
    """Match local features for every image pair listed in ``pairs``.

    ``pairs`` is a text file of "name0 name1" lines; with ``exhaustive`` the
    pair list is generated from all images in the feature file and written to
    ``pairs`` instead.  Matches go to ``<output_dir>/<features>_<conf
    output>_<pairs stem>.h5``.
    """
    logging.info('Matching local features with configuration:'
                 f'\n{pprint.pformat(conf)}')
    if db_features:
        feature_path = db_features
    else:
        feature_path = Path(export_dir, features+'.h5')
    assert feature_path.exists(), feature_path
    feature_file = h5py.File(str(feature_path), 'r')
    # Queries read from a separate feature file if given, else self-matching.
    if query_features is not None:
        logging.info(f'Using query_features {query_features}')
    else:
        logging.info('No query_features')
        query_features = feature_path
    assert query_features.exists(), query_features
    query_feature_file = h5py.File(str(query_features), 'r')
    pairs_name = pairs.stem
    if not exhaustive:
        assert pairs.exists(), pairs
        with open(pairs, 'r') as f:
            pair_list = f.read().rstrip('\n').split('\n')
    elif exhaustive:
        logging.info(f'Writing exhaustive match pairs to {pairs}.')
        assert not pairs.exists(), pairs
        # get the list of images from the feature file
        images = []
        feature_file.visititems(
            lambda name, obj: images.append(obj.parent.name.strip('/'))
            if isinstance(obj, h5py.Dataset) else None)
        images = list(set(images))
        # All unordered pairs (i, j) with j < i.
        pair_list = [' '.join((images[i], images[j]))
                     for i in range(len(images)) for j in range(i)]
        with open(str(pairs), 'w') as f:
            f.write('\n'.join(pair_list))
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    Model = dynamic_load(matchers, conf['model']['name'])
    model = Model(conf['model']).eval().to(device)
    match_name = f'{features}_{conf["output"]}_{pairs_name}'
    if output_dir is None:
        output_dir = export_dir
    match_path = Path(output_dir, match_name+'.h5')
    match_path.parent.mkdir(exist_ok=True, parents=True)
    match_file = h5py.File(str(match_path), 'a')
    matched = set()
    for pair in tqdm(pair_list, smoothing=.1):
        name0, name1 = pair.split(' ')
        pair = names_to_pair(name0, name1)
        # Avoid to recompute duplicates to save time
        if len({(name0, name1), (name1, name0)} & matched) \
                or pair in match_file:
            continue
        data = {}
        feats0, feats1 = query_feature_file[name0], feature_file[name1]
        # Matcher input: query features suffixed '0', database features '1'.
        for k in feats1.keys():
            data[k+'0'] = feats0[k].__array__()
        for k in feats1.keys():
            data[k+'1'] = feats1[k].__array__()
        data = {k: torch.from_numpy(v)[None].float().to(device)
                for k, v in data.items()}
        # some matchers might expect an image but only use its size
        data['image0'] = torch.empty((1, 1,)+tuple(feats0['image_size'])[::-1])
        data['image1'] = torch.empty((1, 1,)+tuple(feats1['image_size'])[::-1])
        pred = model(data)
        grp = match_file.create_group(pair)
        matches = pred['matches0'][0].cpu().short().numpy()
        grp.create_dataset('matches0', data=matches)
        if 'matching_scores0' in pred:
            scores = pred['matching_scores0'][0].cpu().half().numpy()
            grp.create_dataset('matching_scores0', data=scores)
        matched |= {(name0, name1), (name1, name0)}
    match_file.close()
    logging.info('Finished exporting matches.')
if __name__ == '__main__':
    # CLI entry point: pair-list/exhaustive matching (main) by default, or
    # retrieval-based matching (best_match) when --best_match is given.
    parser = argparse.ArgumentParser()
    parser.add_argument('--export_dir', type=Path)
    parser.add_argument('--output_dir', type=Path, required=False)
    parser.add_argument('--features', type=str,
                        default='feats-superpoint-n4096-r1024')
    parser.add_argument('--db_features', type=Path)
    parser.add_argument('--query_features', type=Path, required=False)
    parser.add_argument('--pairs', type=Path)
    parser.add_argument('--conf', type=str, default='superglue',
                        choices=list(confs.keys()))
    parser.add_argument('--exhaustive', action='store_true')
    # best_match
    parser.add_argument('--best_match', action='store_true')
    parser.add_argument('--global_feature_path', type=Path)
    parser.add_argument('--feature_path', type=Path)
    parser.add_argument('--query_global_feature_path', type=Path)
    parser.add_argument('--query_feature_path', type=Path)
    parser.add_argument('--match_output_path', type=Path)
    parser.add_argument('--num_match_required', type=int, default=10)
    parser.add_argument('--min_matched', type=int, default=1)
    parser.add_argument('--max_try', type=int)
    parser.add_argument('--num_seq', type=int)
    parser.add_argument('--min_match_score', type=float, default=0.85)
    parser.add_argument('--min_valid_ratio', type=float, default=0.09)
    parser.add_argument('--sample_list_path', type=Path)
    parser.add_argument('--pair_file_path', type=Path)
    args = parser.parse_args()
    if args.best_match:
        best_match(confs[args.conf], args.global_feature_path, args.feature_path, args.match_output_path,
                query_global_feature_path=args.query_global_feature_path, query_feature_path=args.query_feature_path,
                num_match_required=args.num_match_required, min_matched=args.min_matched, min_match_score=args.min_match_score, min_valid_ratio=args.min_valid_ratio,
                max_try=args.max_try, num_seq=args.num_seq, sample_list_path=args.sample_list_path, pair_file_path=args.pair_file_path)
    else:
        main(
            confs[args.conf], args.pairs, args.features,args.export_dir,
            db_features=args.db_features, query_features=args.query_features, output_dir=args.output_dir, exhaustive=args.exhaustive)
|
[
"numpy.stack",
"tqdm.tqdm",
"pprint.pformat",
"numpy.count_nonzero",
"argparse.ArgumentParser",
"torch.topk",
"logging.warning",
"logging.info",
"torch.einsum",
"pathlib.Path",
"torch.cuda.is_available",
"torch.no_grad",
"torch.from_numpy"
] |
[((1171, 1186), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1184, 1186), False, 'import torch\n'), ((2879, 2894), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2892, 2894), False, 'import torch\n'), ((7473, 7488), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7486, 7488), False, 'import torch\n'), ((2398, 2428), 'numpy.count_nonzero', 'np.count_nonzero', (['(matches > -1)'], {}), '(matches > -1)\n', (2414, 2428), True, 'import numpy as np\n'), ((5401, 5440), 'torch.einsum', 'torch.einsum', (['"""id,jd->ij"""', 'q_desc', 'desc'], {}), "('id,jd->ij', q_desc, desc)\n", (5413, 5440), False, 'import torch\n'), ((7427, 7470), 'logging.info', 'logging.info', (['"""Finished exporting matches."""'], {}), "('Finished exporting matches.')\n", (7439, 7470), False, 'import logging\n'), ((9339, 9375), 'pathlib.Path', 'Path', (['output_dir', "(match_name + '.h5')"], {}), "(output_dir, match_name + '.h5')\n", (9343, 9375), False, 'from pathlib import Path\n'), ((9517, 9547), 'tqdm.tqdm', 'tqdm', (['pair_list'], {'smoothing': '(0.1)'}), '(pair_list, smoothing=0.1)\n', (9521, 9547), False, 'from tqdm import tqdm\n'), ((10831, 10874), 'logging.info', 'logging.info', (['"""Finished exporting matches."""'], {}), "('Finished exporting matches.')\n", (10843, 10874), False, 'import logging\n'), ((10917, 10942), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10940, 10942), False, 'import argparse\n'), ((1006, 1031), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1029, 1031), False, 'import torch\n'), ((1361, 1386), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1384, 1386), False, 'import torch\n'), ((3509, 3586), 'logging.info', 'logging.info', (['f"""(Using query_global_feature_path:{query_global_feature_path}"""'], {}), "(f'(Using query_global_feature_path:{query_global_feature_path}')\n", (3521, 3586), False, 'import logging\n'), ((3884, 3947), 'logging.info', 'logging.info', 
(['f"""(Using query_feature_path:{query_feature_path}"""'], {}), "(f'(Using query_feature_path:{query_feature_path}')\n", (3896, 3947), False, 'import logging\n'), ((4866, 4891), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4889, 4891), False, 'import torch\n'), ((7806, 7840), 'pathlib.Path', 'Path', (['export_dir', "(features + '.h5')"], {}), "(export_dir, features + '.h5')\n", (7810, 7840), False, 'from pathlib import Path\n'), ((7983, 8037), 'logging.info', 'logging.info', (['f"""Using query_features {query_features}"""'], {}), "(f'Using query_features {query_features}')\n", (7995, 8037), False, 'import logging\n'), ((8056, 8089), 'logging.info', 'logging.info', (['"""No query_features"""'], {}), "('No query_features')\n", (8068, 8089), False, 'import logging\n'), ((9055, 9080), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9078, 9080), False, 'import torch\n'), ((6875, 7002), 'logging.warning', 'logging.warning', (['f"""num match for {name0} found {num_matches_found} less than num_match_required:{num_match_required}"""'], {}), "(\n f'num match for {name0} found {num_matches_found} less than num_match_required:{num_match_required}'\n )\n", (6890, 7002), False, 'import logging\n'), ((8452, 8511), 'logging.info', 'logging.info', (['f"""Writing exhaustive match pairs to {pairs}."""'], {}), "(f'Writing exhaustive match pairs to {pairs}.')\n", (8464, 8511), False, 'import logging\n'), ((3308, 3328), 'pprint.pformat', 'pprint.pformat', (['conf'], {}), '(conf)\n', (3322, 3328), False, 'import pprint\n'), ((7693, 7713), 'pprint.pformat', 'pprint.pformat', (['conf'], {}), '(conf)\n', (7707, 7713), False, 'import pprint\n'), ((5505, 5536), 'torch.topk', 'torch.topk', (['sim', 'max_try'], {'dim': '(1)'}), '(sim, max_try, dim=1)\n', (5515, 5536), False, 'import torch\n'), ((1840, 1859), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (1856, 1859), False, 'import torch\n'), ((5051, 5068), 'numpy.stack', 
'np.stack', (['desc', '(0)'], {}), '(desc, 0)\n', (5059, 5068), True, 'import numpy as np\n'), ((10075, 10094), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (10091, 10094), False, 'import torch\n')]
|
import LMRt
import os
import numpy as np
import pandas as pd
import xarray as xr
# preprocessing
print("\n======== Preprocessing ========\n")
config = 'configs.yml'
recon_iterations = 1
# Which preview to render at the end: 'map' (tas field) or 'graph' (NINO3.4).
# Bug fix: this was previously assigned to `figure` while the code below read
# `figure_type`, which raised a NameError at the preview step.
figure_type = 'graph'
job = LMRt.ReconJob()
job.load_configs(config, verbose=True)
job.load_proxydb(verbose=True)
job.filter_proxydb(verbose=True)
job.seasonalize_proxydb(verbose=True)
job.load_prior(verbose=True)
job.load_obs(verbose=True)
job_dirpath = job.configs['job_dirpath']
# Cache files produced/consumed by the PSM calibration step.
seasonalized_prior_path = os.path.join(job_dirpath, 'seasonalized_prior.pkl')
seasonalized_obs_path = os.path.join(job_dirpath, 'seasonalized_obs.pkl')
prior_loc_path = os.path.join(job_dirpath, 'prior_loc.pkl')
obs_loc_path = os.path.join(job_dirpath, 'obs_loc.pkl')
calibed_psm_path = os.path.join(job_dirpath, 'calibed_psm.pkl')
job.calibrate_psm(
    seasonalized_prior_path=seasonalized_prior_path,
    seasonalized_obs_path=seasonalized_obs_path,
    prior_loc_path=prior_loc_path,
    obs_loc_path=obs_loc_path,
    calibed_psm_path=calibed_psm_path,
    verbose=True,
)
job.forward_psm(verbose=True)
job.seasonalize_prior(verbose=True)
job.regrid_prior(verbose=True)
job.save()
print("\n======== Data Assimilation ========\n")
# Data assimilation
job.run(recon_seeds=np.arange(recon_iterations), verbose=True)
print("\n======== Preview of results ========\n")
# Preview of Results
# create the res object for reconstruction results
res = LMRt.ReconRes(job.configs['job_dirpath'], verbose=True)
# get the variables from the recon_paths
res.get_vars(['tas', 'nino3.4'], verbose=True)
if(figure_type == 'map'):
    # plot the tas field
    fig, ax = res.vars['tas'].field_list[0].plot()
    fig.savefig("./map.png")
elif(figure_type == 'graph'):
    # plot and validate the NINO3.4
    from scipy.io import loadmat
    data = loadmat('./data/obs/NINO34_BC09.mat')
    syr, eyr = 1873, 2000
    nyr = eyr-syr+1
    nino34 = np.zeros(nyr)
    # Collapse monthly observations to annual means.
    for i in range(nyr):
        nino34[i] = np.mean(data['nino34'][i*12:12+i*12])
    target_series = LMRt.Series(time=np.arange(syr, eyr+1), value=nino34, label='BC09')
    fig, ax = res.vars['nino3.4'].validate(target_series, verbose=True).plot(xlim=[1880, 2000])
    fig.savefig("./graph.png")
else:
    print("not a valid figure parameter \n")
|
[
"scipy.io.loadmat",
"LMRt.ReconJob",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"LMRt.ReconRes",
"os.path.join"
] |
[((213, 228), 'LMRt.ReconJob', 'LMRt.ReconJob', ([], {}), '()\n', (226, 228), False, 'import LMRt\n'), ((494, 545), 'os.path.join', 'os.path.join', (['job_dirpath', '"""seasonalized_prior.pkl"""'], {}), "(job_dirpath, 'seasonalized_prior.pkl')\n", (506, 545), False, 'import os\n'), ((570, 619), 'os.path.join', 'os.path.join', (['job_dirpath', '"""seasonalized_obs.pkl"""'], {}), "(job_dirpath, 'seasonalized_obs.pkl')\n", (582, 619), False, 'import os\n'), ((637, 679), 'os.path.join', 'os.path.join', (['job_dirpath', '"""prior_loc.pkl"""'], {}), "(job_dirpath, 'prior_loc.pkl')\n", (649, 679), False, 'import os\n'), ((695, 735), 'os.path.join', 'os.path.join', (['job_dirpath', '"""obs_loc.pkl"""'], {}), "(job_dirpath, 'obs_loc.pkl')\n", (707, 735), False, 'import os\n'), ((755, 799), 'os.path.join', 'os.path.join', (['job_dirpath', '"""calibed_psm.pkl"""'], {}), "(job_dirpath, 'calibed_psm.pkl')\n", (767, 799), False, 'import os\n'), ((1418, 1473), 'LMRt.ReconRes', 'LMRt.ReconRes', (["job.configs['job_dirpath']"], {'verbose': '(True)'}), "(job.configs['job_dirpath'], verbose=True)\n", (1431, 1473), False, 'import LMRt\n'), ((1245, 1272), 'numpy.arange', 'np.arange', (['recon_iterations'], {}), '(recon_iterations)\n', (1254, 1272), True, 'import numpy as np\n'), ((1806, 1843), 'scipy.io.loadmat', 'loadmat', (['"""./data/obs/NINO34_BC09.mat"""'], {}), "('./data/obs/NINO34_BC09.mat')\n", (1813, 1843), False, 'from scipy.io import loadmat\n'), ((1903, 1916), 'numpy.zeros', 'np.zeros', (['nyr'], {}), '(nyr)\n', (1911, 1916), True, 'import numpy as np\n'), ((1962, 2005), 'numpy.mean', 'np.mean', (["data['nino34'][i * 12:12 + i * 12]"], {}), "(data['nino34'][i * 12:12 + i * 12])\n", (1969, 2005), True, 'import numpy as np\n'), ((2038, 2061), 'numpy.arange', 'np.arange', (['syr', '(eyr + 1)'], {}), '(syr, eyr + 1)\n', (2047, 2061), True, 'import numpy as np\n')]
|
import json
import numpy as np
import cv2
from numpy.core.records import array
import cv2.aruco as aruco
import socket
from urllib.request import urlopen
from get_img import get_img as gi
ADDRESS = ('', 10000)
central = None
conn_pool = []
# Non-blocking TCP server socket that agent clients connect to.
central = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
central.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
central.setblocking(False)
central.bind(ADDRESS)
central.listen(5)
print("Waiting...")
# Latest known positions / headings, keyed by agent id (shared global state).
position = {}
orientation = {}
temp_threshold = 40  # max-temperature cutoff used to decide a target is "hot"
indication = [233]  # ids of agents whose commands are echoed to stdout
font = cv2.FONT_HERSHEY_SIMPLEX
# HSV lower/upper bounds for color segmentation.
green_lower = np.array([35, 110, 106])
green_upper = np.array([77, 255, 255])
red_lower = np.array([156, 43, 46])
red_upper = np.array([180, 255, 255])
yellow_lower = np.array([26, 43, 46])
yellow_upper = np.array([34, 255, 255])
bts = b''
fix_size = (640, 480)
CAMERA_BUFFRER_SIZE = 8192
class Agent():
def __init__(self, id, order, state=0, test=False) -> None:
self.id = id
self.state = state
self.order = order
self.position = np.inf
self.orientation = np.inf
self.tick = 0
self.come_from = str(self.id) + 'come_from'
self.target = str(self.id) + 'target'
self.flag = True
self.url = 'http://192.168.1.27:81/stream'
if test:
self.path = [15, 16]
pass
def set_location(self):
if self.id in position:
self.position = position[self.id]
def set_orientation(self):
if self.id in orientation:
self.orientation = orientation[self.id]
def set_path(self, path):
self.path = path
self.come_from = self.path.pop(0)
self.target = self.path.pop(0)
def set_agent_list(self, agent_list):
self.agent_list = agent_list
def forward(self):
msg = str.encode('w')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: forward..., target:{}'.format(self.id, self.target))
pass
def backward(self):
msg = str.encode('s')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: backward..., target:{}'.format(self.id, self.target))
pass
def turn_right(self):
msg = str.encode('d')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: right..., target:{}'.format(self.id, self.target))
pass
def turn_left(self):
msg = str.encode('a')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: left..., target:{}'.format(self.id, self.target))
pass
def turn_to(self, target):
v1 = position[target] - position[self.id]
v2 = np.array([1, 0])
cos_angle = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cos_angle) / np.pi * 180
if v1[1] < 0:
angle *= -1
agent_ori = self.orientation
# print(angle)
# print(agent_ori)
if abs(angle - agent_ori) > 180:
if angle > agent_ori:
self.turn_left()
else:
self.turn_right()
else:
if angle < agent_ori:
self.turn_left()
else:
self.turn_right()
def turn_to_ori(self, angle):
agent_ori = self.orientation
# print(angle)
# print(agent_ori)
if abs(angle - agent_ori) > 180:
if angle > agent_ori:
self.turn_left()
else:
self.turn_right()
else:
if angle < agent_ori:
self.turn_left()
else:
self.turn_right()
def stop(self):
msg = str.encode('t')
conn_pool[self.order].send(msg)
if self.id in indication:
print('Agent {}: stopping..., target:{}'.format(self.id, self.target))
pass
def look_for_target(self):
msg = str.encode('o')
conn_pool[self.order].send(msg)
pass
def thermal(self):
msg = str.encode('l')
conn_pool[self.order].send(msg)
pass
def attack(self):
msg = str.encode('k')
conn_pool[self.order].send(msg)
print('Agent {} is attacking!!'.format(self.id))
pass
def get_img(self):
return gi(self.url)
def find_edge(self):
msg = str.encode('g')
conn_pool[self.order].send(msg)
pass
def circle(self):
msg = str.encode('r')
conn_pool[self.order].send(msg)
pass
def quit(self):
msg = str.encode('q')
conn_pool[self.order].send(msg)
pass
def reach(self, target):
if cal_distance(target, self.id, position) < 0.04:
return True
else:
return False
def head_to(self, id):
v1 = position[id] - position[self.id]
v2 = np.array([1, 0])
cos_angle = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
angle = np.arccos(cos_angle) / np.pi * 180
if v1[1] < 0:
angle *= -1
if self.orientation - angle < 3 and self.orientation - angle > -3:
return True
else:
return False
def head_to_ori(self, angle):
if abs(self.orientation - angle) < 12:
return True
else:
return False
def set_state(self, new_state):
self.state = new_state
def state_control_2(self):
        """Run one tick of this agent's state machine.

        Dispatches on ``self.state`` through a sequence of fallthrough ``if``
        blocks, so a state transition set early in a tick can be acted on by a
        later block in the same tick. States observed in this code:

        * 0           -- start; agent 233 becomes the searcher (11), others idle.
        * 10/93/911/912/921/922 -- navigate toward ``self.target`` (drive,
          re-aim every 30 ticks, turn until heading matches).
        * 11/12       -- search for the object; wait for 'Reach the object'.
        * 21..24      -- read thermal sensor, branch on max temperature vs
          ``temp_threshold``.
        * 31..33      -- trace an edge, then back off 0.5 units and re-navigate.
        * 41          -- classify object color and delegate (red -> agent_list[2],
          yellow -> agent_list[1], green -> circle it ourselves).
        * 51/52       -- circle the object until 'Complete'.
        * 61          -- adopt the object's position as this agent's target.
        * -1..-4      -- approach the object along its recorded orientation and
          attack once at least 0.9 units past it.

        Socket reads use ``conn_pool[self.order]``; exceptions keep the agent
        in its current state (non-blocking socket polling).
        """
        if self.state == 0:
            # Agent 233 is the designated searcher; everyone else stays idle.
            if self.id == 233:
                self.set_state(11)
            else:
                self.set_state(0)
        if self.state == 10:
            # initialization
            self.set_state(93)
        if self.state == 911:
            # Drive forward, then monitor progress in 912.
            self.forward()
            self.set_state(912)
        if self.state == 912:
            if self.reach(self.target):
                self.set_state(-1)
            else:
                # Re-check heading only every 30 ticks to avoid jitter.
                if self.tick % 30 == 0:
                    if self.head_to(self.target):
                        self.set_state(912)
                    else:
                        self.set_state(921)
                else:
                    self.set_state(912)
        if self.state == 921:
            # Issue one turn step toward the target, then verify in 922.
            self.turn_to(self.target)
            self.set_state(922)
        if self.state == 922:
            if self.head_to(self.target):
                self.set_state(93)
            else:
                # self.turn_right()
                self.set_state(922)
        if self.state == 93:
            # Stopped; decide whether to drive (911) or keep turning (921).
            self.stop()
            if self.head_to(self.target):
                self.set_state(911)
            else:
                self.set_state(921)
        if self.state == 11:
            self.look_for_target()
            self.set_state(12)
        if self.state == 12:
            # Poll the robot until it reports reaching the object.
            try:
                data = conn_pool[self.order].recv(1064)
                if len(data) != 0:
                    msg = data.decode('utf-8')
                    print(msg)
                    if msg == 'Reach the object':
                        self.set_state(21)
            except Exception:
                # print('12 except')
                self.set_state(12)
                pass
        if self.state == 21:
            self.thermal()
            self.set_state(22)
        if self.state == 22:
            # Poll for the 64-value thermal readout and reshape it to 8x8.
            try:
                data = conn_pool[self.order].recv(1064)
                json_string = json.loads(data)
                self.array = format_thermal(json_string)
                print(self.array)
                self.set_state(23)
            except Exception:
                # print('22 except')
                self.set_state(22)
                pass
        if self.state == 23:
            # max over the 8x8 grid; 0 means an empty frame, so re-read.
            self.max_temp = max(max(self.array))
            if self.max_temp == 0:
                self.set_state(21)
            else:
                self.set_state(24)
        if self.state == 24:
            if self.max_temp > temp_threshold:
                self.set_state(31)
            else:
                self.set_state(41)
        if self.state == 31:
            self.find_edge()
            self.set_state(32)
        if self.state == 32:
            # Wait for the measured edge length, mark the start point, and drive off.
            try:
                data = conn_pool[self.order].recv(1064)
                self.edge_len = float(data.decode('utf-8'))
                print('edge length:', self.edge_len)
                position['start'] = position[self.id]
                self.forward()
                self.set_state(33)
            except Exception:
                self.set_state(32)
                pass
        if self.state == 33:
            # print('distance: ', cal_distance(self.id, 'start'))
            if cal_distance(self.id, 'start') < 0.5:
                self.set_state(33)
            else:
                position[str(self.id) + 'come_from'] = position[self.id]
                self.set_state(10)
        if self.state == 41:
            # Classify the object's color and record its pose for the delegate.
            color = self.get_img()
            position['obj'] = position[self.id]
            orientation['obj'] = orientation[self.id]
            if color == 'red':
                print('Red!!!!!')
                self.agent_list[2].set_state(61)
                self.set_state(10)
                pass
            elif color == 'yellow':
                print('Yellow!!!!!')
                self.agent_list[1].set_state(61)
                self.set_state(10)
                pass
            elif color == 'green':
                print('Green!!!!!')
                self.set_state(51)
                pass
            else:
                self.set_state(41)
                pass
        if self.state == 51:
            self.circle()
            self.set_state(52)
        if self.state == 52:
            # Poll until the robot reports the circling maneuver is done.
            try:
                data = conn_pool[self.order].recv(1064)
                msg = data.decode('utf-8')
                if msg == 'Complete':
                    self.set_state(-1)
            except Exception:
                self.set_state(52)
                pass
        if self.state == 61:
            # Delegated agent: adopt the recorded object position as target.
            position[str(self.id) + 'target'] = position['obj']
            self.set_state(10)
        if self.state == -1:
            # Searcher (233) just stops; delegates start the approach sequence.
            if self.id == 233:
                self.stop()
            else:
                self.set_state(-21)
            pass
        if self.state == -21:
            self.turn_to_ori(orientation['obj'])
            self.set_state(-22)
            pass
        if self.state == -22:
            if self.head_to_ori(orientation['obj']):
                self.set_state(-23)
            else:
                self.set_state(-22)
        if self.state == -23:
            self.forward()
            self.set_state(-24)
        if self.state == -24:
            # Drive along the object's orientation until 0.9 units past it.
            if self.head_to_ori(orientation['obj']):
                if cal_distance('obj', self.id) >= 0.9:
                    self.set_state(-4)
                else:
                    self.set_state(-24)
            else:
                self.set_state(-31)
            # if cal_distance('obj', self.id) >= 1:
            #     self.set_state(-4)
            # else:
            #     self.set_state(-24)
        if self.state == -31:
            self.turn_to_ori(orientation['obj'])
            self.set_state(-32)
        if self.state == -32:
            print('Ori: {}, OBJ_ori: {}'.format(self.orientation, orientation['obj']))
            if self.head_to_ori(orientation['obj']):
                self.set_state(-23)
            else:
                self.set_state(-32)
        if self.state == -4:
            self.stop()
            self.attack()
        # Periodic status print for agents listed in `indication`.
        if self.tick % 50 ==0:
            if self.id in indication:
                print(str(self.id) + ' state: ' + str(self.state))
        self.tick += 1
def open_camera(index=1):
    """Open video capture device *index* and configure 1920x1080 resolution.

    Parameters
    ----------
    index : int
        Camera device index. Defaults to 1, preserving the original
        hard-coded behavior.

    Returns
    -------
    cv2.VideoCapture
        The opened (and configured) capture handle.
    """
    cap = cv2.VideoCapture(index)
    # Property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
    cap.set(3, 1920)
    cap.set(4, 1080)
    return cap
def init_parameters():
    """Return the fixed camera calibration: (intrinsic 3x3 matrix, distortion coefficients 1x5)."""
    camera_matrix = np.array([
        [1051.1, 0, 695.0741],
        [0, 1052.2, 297.7604],
        [0., 0., 1.],
    ])
    distortion = np.array([[-0.4223, 0.1412, 0, 0, 0.0921]])
    return camera_matrix, distortion
def capture_frame(cap):
    """Grab one frame from *cap* and return it blurred with a 5x5 Gaussian kernel."""
    _, raw = cap.read()
    return cv2.GaussianBlur(raw, (5, 5), 0)
def detect_aruco(frame):
    """Detect 6x6-250 ArUco markers in a BGR *frame*.

    Returns the (corners, ids, rejectedImgPoints) triple from
    ``aruco.detectMarkers`` run on the grayscale conversion of the frame.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detector_params = aruco.DetectorParameters_create()
    dictionary = aruco.Dictionary_get(aruco.DICT_6X6_250)
    return aruco.detectMarkers(gray, dictionary, parameters=detector_params)
def get_position(ids, tvec, position):
    """Store each detected marker's (x, y) translation into the *position* dict, keyed by marker id."""
    for marker, translation in zip(ids, tvec):
        # translation[0] is (x, y, z); keep only the planar coordinates.
        position[marker[0]] = translation[0][:2]
def get_orientation(ids, rvec, orientation):
    """Store each detected marker's in-plane yaw (degrees) into the *orientation* dict, keyed by marker id."""
    for marker, rotation in zip(ids, rvec):
        rot_matrix, _ = cv2.Rodrigues(rotation[0])
        # Yaw about the z axis extracted from the rotation matrix.
        yaw_deg = np.arctan2(rot_matrix[1][0], rot_matrix[0][0]) / np.pi * 180
        orientation[marker[0]] = yaw_deg
def cal_distance(id1, id2, pos=None):
    """Euclidean distance between two tracked markers.

    Parameters
    ----------
    id1, id2 : hashable
        Keys into *pos* (marker ids or labels such as 'obj'/'start').
    pos : dict, optional
        Mapping from id to a 2D numpy coordinate. Defaults to the
        module-level ``position`` table. (The original bound the global as a
        def-time default argument — the ``None`` sentinel defers the lookup
        to call time.)

    Returns
    -------
    float
        The distance, or ``np.inf`` if either id is missing from *pos*.
    """
    if pos is None:
        pos = position
    if id1 in pos and id2 in pos:
        return np.linalg.norm(pos[id1] - pos[id2])
    return np.inf
def cal_angle(agent, vertex_id, next_id, pos):
    """Angle in degrees at ``pos[vertex_id]`` between the agent's position and ``pos[next_id]``.

    Returns ``np.inf`` on any failure (missing keys, zero-length arms, ...)
    — the broad except is deliberate.
    """
    try:
        arm_a = agent.position - pos[vertex_id]
        arm_b = pos[next_id] - pos[vertex_id]
        cos_val = arm_a.dot(arm_b) / (np.linalg.norm(arm_a) * np.linalg.norm(arm_b))
        return np.arccos(cos_val) / np.pi * 180
    except Exception:
        return np.inf
def format_thermal(one_d_array, rows=8, cols=8):
    """Reshape a flat sensor readout into a ``rows`` x ``cols`` nested list.

    Parameters
    ----------
    one_d_array : sequence
        Flat readout of at least ``rows * cols`` values (row-major order).
    rows, cols : int
        Grid dimensions. Defaults (8, 8) preserve the original hard-coded
        thermal-sensor shape.

    Returns
    -------
    list[list]
        ``rows`` lists of ``cols`` values each.

    Raises
    ------
    IndexError
        If *one_d_array* has fewer than ``rows * cols`` elements (same as
        the original index-based loop).
    """
    # Index-based access (not slicing) so short inputs still raise IndexError.
    return [[one_d_array[r * cols + c] for c in range(cols)] for r in range(rows)]
def main():
    """Top-level tracking/control loop.

    First accepts up to three robot TCP connections on the module-level
    ``central`` socket, then repeatedly: captures a camera frame, detects
    ArUco markers, updates the shared ``position``/``orientation`` tables,
    lazily constructs the three agents once at least 4 markers are visible,
    and ticks each agent's state machine. 'q' in the display window stops
    and quits all agents and exits.
    """
    mtx, dist = init_parameters()
    cap = open_camera()
    initialization = True
    while True:
        if len(conn_pool) < 3:
            # Still waiting for robot connections (non-blocking accept).
            try:
                client, _ = central.accept()
                # print('address: {},port: {} is connected'.format(addr[0], addr[1]))
                conn_pool.append(client)
            except BlockingIOError:
                pass
        else:
            try:
                frame = capture_frame(cap)
                corners, ids, _ = detect_aruco(frame)
                if ids is not None:
                    aruco.drawDetectedMarkers(frame, corners, ids)
                    # 0.158 is the physical marker side length (meters) used for pose estimation.
                    rvec, tvec, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.158, mtx, dist)
                    for i in range(rvec.shape[0]):
                        aruco.drawAxis(frame, mtx, dist, rvec[i, :, :], tvec[i, :, :], 0.1)
                        aruco.drawDetectedMarkers(frame, corners, ids, (0, 0, 255))
                    get_position(ids, tvec, position)
                    get_orientation(ids, rvec, orientation)
                    if initialization:
                        # Wait until enough markers are visible, then build the agents.
                        # NOTE(review): markers 101-103 appear to seed each agent's
                        # 'come_from' and 104 the shared target — confirm marker layout.
                        if ids.shape[0] >= 4:
                            initialization = False
                            agent_1 = Agent(233, order=0, state=21)
                            agent_2 = Agent(234, order=1)
                            agent_3 = Agent(235, order=2)
                            agent_list = [agent_1, agent_2, agent_3]
                            for agent_id, id in zip((agent_1.id, agent_2.id, agent_3.id), (101, 102, 103)):
                                position[str(agent_id) + 'come_from'] = position[id]
                                position[str(agent_id) + 'target'] = position[104]
                            for agent in agent_list:
                                agent.set_agent_list(agent_list)
                            print('initialization complete...')
                        else:
                            print('initializing...')
                    if not initialization:
                        # Tick every agent only when all three are currently tracked.
                        if agent_1.id in position and agent_2.id in position and agent_3.id in position:
                            for agent in agent_list:
                                agent.set_location()
                                agent.set_orientation()
                                agent.state_control_2()
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    for agent in agent_list:
                        agent.stop()
                        agent.quit()
                    break
                cv2.imshow("Capture", frame)
            except(BlockingIOError, ConnectionResetError):
                print("Error 2")
                pass
    cap.release()
    cv2.destroyAllWindows()
# Script entry point: run the tracking/control loop when executed directly.
if __name__ == '__main__':
    main()
|
[
"cv2.GaussianBlur",
"cv2.aruco.drawDetectedMarkers",
"numpy.arctan2",
"socket.socket",
"cv2.aruco.detectMarkers",
"numpy.linalg.norm",
"cv2.imshow",
"json.loads",
"cv2.cvtColor",
"cv2.aruco.drawAxis",
"numpy.arccos",
"cv2.destroyAllWindows",
"cv2.aruco.estimatePoseSingleMarkers",
"cv2.waitKey",
"cv2.aruco.Dictionary_get",
"cv2.Rodrigues",
"get_img.get_img",
"cv2.aruco.DetectorParameters_create",
"cv2.VideoCapture",
"numpy.array"
] |
[((252, 301), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (265, 301), False, 'import socket\n'), ((570, 594), 'numpy.array', 'np.array', (['[35, 110, 106]'], {}), '([35, 110, 106])\n', (578, 594), True, 'import numpy as np\n'), ((609, 633), 'numpy.array', 'np.array', (['[77, 255, 255]'], {}), '([77, 255, 255])\n', (617, 633), True, 'import numpy as np\n'), ((646, 669), 'numpy.array', 'np.array', (['[156, 43, 46]'], {}), '([156, 43, 46])\n', (654, 669), True, 'import numpy as np\n'), ((682, 707), 'numpy.array', 'np.array', (['[180, 255, 255]'], {}), '([180, 255, 255])\n', (690, 707), True, 'import numpy as np\n'), ((723, 745), 'numpy.array', 'np.array', (['[26, 43, 46]'], {}), '([26, 43, 46])\n', (731, 745), True, 'import numpy as np\n'), ((761, 785), 'numpy.array', 'np.array', (['[34, 255, 255]'], {}), '([34, 255, 255])\n', (769, 785), True, 'import numpy as np\n'), ((11882, 11901), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (11898, 11901), False, 'import cv2\n'), ((11994, 12067), 'numpy.array', 'np.array', (['[[1051.1, 0, 695.0741], [0, 1052.2, 297.7604], [0.0, 0.0, 1.0]]'], {}), '([[1051.1, 0, 695.0741], [0, 1052.2, 297.7604], [0.0, 0.0, 1.0]])\n', (12002, 12067), True, 'import numpy as np\n'), ((12116, 12159), 'numpy.array', 'np.array', (['[[-0.4223, 0.1412, 0, 0, 0.0921]]'], {}), '([[-0.4223, 0.1412, 0, 0, 0.0921]])\n', (12124, 12159), True, 'import numpy as np\n'), ((12247, 12281), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame', '(5, 5)', '(0)'], {}), '(frame, (5, 5), 0)\n', (12263, 12281), False, 'import cv2\n'), ((12337, 12376), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (12349, 12376), False, 'import cv2\n'), ((12394, 12427), 'cv2.aruco.DetectorParameters_create', 'aruco.DetectorParameters_create', ([], {}), '()\n', (12425, 12427), True, 'import cv2.aruco as aruco\n'), ((12445, 12485), 
'cv2.aruco.Dictionary_get', 'aruco.Dictionary_get', (['aruco.DICT_6X6_250'], {}), '(aruco.DICT_6X6_250)\n', (12465, 12485), True, 'import cv2.aruco as aruco\n'), ((12510, 12570), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', (['gray', 'aruco_dict'], {'parameters': 'parameters'}), '(gray, aruco_dict, parameters=parameters)\n', (12529, 12570), True, 'import cv2.aruco as aruco\n'), ((16575, 16598), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (16596, 16598), False, 'import cv2\n'), ((2757, 2773), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2765, 2773), True, 'import numpy as np\n'), ((4390, 4402), 'get_img.get_img', 'gi', (['self.url'], {}), '(self.url)\n', (4392, 4402), True, 'from get_img import get_img as gi\n'), ((4961, 4977), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (4969, 4977), True, 'import numpy as np\n'), ((12843, 12862), 'cv2.Rodrigues', 'cv2.Rodrigues', (['temp'], {}), '(temp)\n', (12856, 12862), False, 'import cv2\n'), ((13062, 13097), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos[id1] - pos[id2])'], {}), '(pos[id1] - pos[id2])\n', (13076, 13097), True, 'import numpy as np\n'), ((2808, 2826), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (2822, 2826), True, 'import numpy as np\n'), ((2829, 2847), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (2843, 2847), True, 'import numpy as np\n'), ((2865, 2885), 'numpy.arccos', 'np.arccos', (['cos_angle'], {}), '(cos_angle)\n', (2874, 2885), True, 'import numpy as np\n'), ((5012, 5030), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (5026, 5030), True, 'import numpy as np\n'), ((5033, 5051), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (5047, 5051), True, 'import numpy as np\n'), ((5069, 5089), 'numpy.arccos', 'np.arccos', (['cos_angle'], {}), '(cos_angle)\n', (5078, 5089), True, 'import numpy as np\n'), ((7501, 7517), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (7511, 7517), 
False, 'import json\n'), ((12881, 12909), 'numpy.arctan2', 'np.arctan2', (['r[1][0]', 'r[0][0]'], {}), '(r[1][0], r[0][0])\n', (12891, 12909), True, 'import numpy as np\n'), ((13370, 13388), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (13384, 13388), True, 'import numpy as np\n'), ((13391, 13409), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (13405, 13409), True, 'import numpy as np\n'), ((13427, 13447), 'numpy.arccos', 'np.arccos', (['cos_angle'], {}), '(cos_angle)\n', (13436, 13447), True, 'import numpy as np\n'), ((16409, 16437), 'cv2.imshow', 'cv2.imshow', (['"""Capture"""', 'frame'], {}), "('Capture', frame)\n", (16419, 16437), False, 'import cv2\n'), ((14360, 14406), 'cv2.aruco.drawDetectedMarkers', 'aruco.drawDetectedMarkers', (['frame', 'corners', 'ids'], {}), '(frame, corners, ids)\n', (14385, 14406), True, 'import cv2.aruco as aruco\n'), ((14452, 14510), 'cv2.aruco.estimatePoseSingleMarkers', 'aruco.estimatePoseSingleMarkers', (['corners', '(0.158)', 'mtx', 'dist'], {}), '(corners, 0.158, mtx, dist)\n', (14483, 14510), True, 'import cv2.aruco as aruco\n'), ((14691, 14750), 'cv2.aruco.drawDetectedMarkers', 'aruco.drawDetectedMarkers', (['frame', 'corners', 'ids', '(0, 0, 255)'], {}), '(frame, corners, ids, (0, 0, 255))\n', (14716, 14750), True, 'import cv2.aruco as aruco\n'), ((14603, 14670), 'cv2.aruco.drawAxis', 'aruco.drawAxis', (['frame', 'mtx', 'dist', 'rvec[i, :, :]', 'tvec[i, :, :]', '(0.1)'], {}), '(frame, mtx, dist, rvec[i, :, :], tvec[i, :, :], 0.1)\n', (14617, 14670), True, 'import cv2.aruco as aruco\n'), ((16217, 16231), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (16228, 16231), False, 'import cv2\n')]
|
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.Input.WaterBudget import Percolation
class TestPercolation(VariableUnitTest):
    """Unit tests for the WaterBudget Percolation variable."""

    def _percolation_args(self):
        """Collect the long Percolation argument list from the test fixture once."""
        z = self.z
        return (z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
                z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
                z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap)

    def test_Percolation_ground_truth(self):
        """The vectorized implementation must reproduce the stored ground-truth array."""
        np.testing.assert_array_almost_equal(
            np.load(self.basepath + "/Percolation.npy"),
            Percolation.Percolation(*self._percolation_args()), decimal=7)

    def test_Percolation(self):
        """The loop (``_f``) and vectorized implementations must agree."""
        np.testing.assert_array_almost_equal(
            Percolation.Percolation_f(*self._percolation_args()),
            Percolation.Percolation(*self._percolation_args()), decimal=7)
|
[
"gwlfe.Input.WaterBudget.Percolation.Percolation_f",
"numpy.load",
"gwlfe.Input.WaterBudget.Percolation.Percolation"
] |
[((281, 324), 'numpy.load', 'np.load', (["(self.basepath + '/Percolation.npy')"], {}), "(self.basepath + '/Percolation.npy')\n", (288, 324), True, 'import numpy as np\n'), ((338, 579), 'gwlfe.Input.WaterBudget.Percolation.Percolation', 'Percolation.Percolation', (['z.NYrs', 'z.DaysMonth', 'z.Temp', 'z.InitSnow_0', 'z.Prec', 'z.NRur', 'z.NUrb', 'z.Area', 'z.CNI_0', 'z.AntMoist_0', 'z.Grow_0', 'z.CNP_0', 'z.Imper', 'z.ISRR', 'z.ISRA', 'z.CN', 'z.UnsatStor_0', 'z.KV', 'z.PcntET', 'z.DayHrs', 'z.MaxWaterCap'], {}), '(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec,\n z.NRur, z.NUrb, z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.\n Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z\n .MaxWaterCap)\n', (361, 579), False, 'from gwlfe.Input.WaterBudget import Percolation\n'), ((760, 1003), 'gwlfe.Input.WaterBudget.Percolation.Percolation_f', 'Percolation.Percolation_f', (['z.NYrs', 'z.DaysMonth', 'z.Temp', 'z.InitSnow_0', 'z.Prec', 'z.NRur', 'z.NUrb', 'z.Area', 'z.CNI_0', 'z.AntMoist_0', 'z.Grow_0', 'z.CNP_0', 'z.Imper', 'z.ISRR', 'z.ISRA', 'z.CN', 'z.UnsatStor_0', 'z.KV', 'z.PcntET', 'z.DayHrs', 'z.MaxWaterCap'], {}), '(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec,\n z.NRur, z.NUrb, z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.\n Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z\n .MaxWaterCap)\n', (785, 1003), False, 'from gwlfe.Input.WaterBudget import Percolation\n'), ((1079, 1320), 'gwlfe.Input.WaterBudget.Percolation.Percolation', 'Percolation.Percolation', (['z.NYrs', 'z.DaysMonth', 'z.Temp', 'z.InitSnow_0', 'z.Prec', 'z.NRur', 'z.NUrb', 'z.Area', 'z.CNI_0', 'z.AntMoist_0', 'z.Grow_0', 'z.CNP_0', 'z.Imper', 'z.ISRR', 'z.ISRA', 'z.CN', 'z.UnsatStor_0', 'z.KV', 'z.PcntET', 'z.DayHrs', 'z.MaxWaterCap'], {}), '(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec,\n z.NRur, z.NUrb, z.Area, z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.\n Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z\n 
.MaxWaterCap)\n', (1102, 1320), False, 'from gwlfe.Input.WaterBudget import Percolation\n')]
|
"""
Solution-based probabilistic linear solvers.
Implementations of solution-based linear solvers which perform inference on the solution
of a linear system given linear observations.
"""
import warnings
import numpy as np
from probnum.linalg.linearsolvers.matrixbased import ProbabilisticLinearSolver
class SolutionBasedSolver(ProbabilisticLinearSolver):
    """
    Solver iteration of BayesCG.

    Implements the solve iteration of the solution-based solver BayesCG [1]_.

    Parameters
    ----------
    A : array-like or LinearOperator or RandomVariable, shape=(n,n)
        The square matrix or linear operator of the linear system.
    b : array_like, shape=(n,) or (n, nrhs)
        Right-hand side vector or matrix in :math:`A x = b`.

    References
    ----------
    .. [1] <NAME> al., A Bayesian Conjugate Gradient Method, *Bayesian
       Analysis*, 2019, 14, 937-1012
    """

    def __init__(self, A, b, x0=None):
        self.x0 = x0
        super().__init__(A=A, b=b)

    def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None):
        """
        Check convergence of a linear solver.

        Parameters
        ----------
        iter : int
            Current iteration of solver.
        maxiter : int
            Maximum number of iterations.
        resid : array-like
            Residual vector :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b \\rVert`
            of the current iteration.
        atol : float
            Absolute residual tolerance; stop when
            :math:`\\lVert r_i \\rVert < \\text{atol}`.
        rtol : float
            Relative residual tolerance; stop when
            :math:`\\lVert r_i \\rVert < \\text{rtol} \\lVert b \\rVert`.

        Returns
        -------
        has_converged : bool
            True if the method has converged.
        convergence_criterion : str
            Convergence criterion which caused termination.
        """
        # Iteration budget exhausted -- checked before touching the residual.
        if iter >= maxiter:
            warnings.warn(
                "Iteration terminated. Solver reached the maximum number of iterations."
            )
            return True, "maxiter"
        # Compute the residual norm once for both tolerance checks.
        resid_norm = np.linalg.norm(resid)
        if resid_norm <= atol:
            return True, "resid_atol"
        if resid_norm <= rtol * np.linalg.norm(self.b):
            return True, "resid_rtol"
        return False, ""

    def solve(self, callback=None, maxiter=None, atol=None, rtol=None):
        raise NotImplementedError
|
[
"warnings.warn",
"numpy.linalg.norm"
] |
[((2132, 2224), 'warnings.warn', 'warnings.warn', (['"""Iteration terminated. Solver reached the maximum number of iterations."""'], {}), "(\n 'Iteration terminated. Solver reached the maximum number of iterations.')\n", (2145, 2224), False, 'import warnings\n'), ((2339, 2360), 'numpy.linalg.norm', 'np.linalg.norm', (['resid'], {}), '(resid)\n', (2353, 2360), True, 'import numpy as np\n'), ((2421, 2442), 'numpy.linalg.norm', 'np.linalg.norm', (['resid'], {}), '(resid)\n', (2435, 2442), True, 'import numpy as np\n'), ((2453, 2475), 'numpy.linalg.norm', 'np.linalg.norm', (['self.b'], {}), '(self.b)\n', (2467, 2475), True, 'import numpy as np\n')]
|
import numpy as np
#from ..utils import *
from ..metrics import Metrics
from .map_data import StdMapData
class StdMapMetrics():
"""
Class used for calculating pattern attributes and difficulty.
.. warning::
Undocumented functions in this class are not supported and are experimental.
"""
@staticmethod
def calc_tapping_intervals(map_data=[]):
"""
Gets the timing difference between note starting times.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, intervals)``. ``times`` are hitobject timings. ``intervals`` are the timings
difference between current and previous note. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.start_times(map_data)
dt = np.diff(t)
return t[1:], dt
@staticmethod
def calc_notes_per_sec(map_data=[]):
"""
Gets number of notes tapped per second based on immidiate duration between notes.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, nps)``. ``times`` are hitobject timings. ``nps`` is notes per second.
Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.start_times(map_data)
dt = 1000/np.diff(t)
return t[1:], dt
@staticmethod
def calc_path_dist(map_data=[]):
"""
Calculates distance between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, dists)``. ``times`` are aimpoint timings. ``dists`` are distances
between aimpoints. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.dists(x, y)
@staticmethod
def calc_path_vel(map_data=[]):
"""
Calculates velocity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, vels)``. ``times`` are aimpoint timings. ``vels`` are based on time and distance
between aimpoints. Resultant array size is ``len(map_data) - 2``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.vel_2d(x, y, t)
@staticmethod
def calc_path_accel(map_data=[]):
"""
Calculates acceleration between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of (times, accels). ``times`` are aimpoint timings. ``accels`` are based on
change in velocity between aimpoints. Resultant array size is ``len(map_data) - 3``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.accel_2d(x, y, t)
@staticmethod
def calc_xy_dist(map_data=[]):
"""
Calculates parametric distance between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
map_data
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, x_dists, y_dists)``. ``times`` are aimpoint timings. ``x_dists`` are distances
between aimpoints in the x-coordinate direction. ``y_dists`` are distances between aimpoints
in the y-coordinate direction. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
dx = np.diff(x)
dy = np.diff(y)
return t[1:], dx, dy
@staticmethod
def calc_xy_vel(map_data=[]):
"""
Calculates parametric velocity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, x_vels, y_vels)``. ``times`` are aimpoint timings. ``x_vels`` are velocities
between aimpoints in the x-coordinate direction. ``y_vels`` are velocities between aimpoints
in the y-coordinate direction. Resultant array size is ``len(map_data) - 2``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
dt = np.diff(t)
dx = np.diff(x)
dy = np.diff(y)
return t[1:], dx/dt, dy/dt
@staticmethod
def calc_xy_accel(map_data=[]):
"""
Calculates parametric acceleration between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, x_accels, y_accels)``. ``times`` are aimpoint timings. ``x_accels`` are
accelerations between aimpoints in the x-coordinate direction. ``y_accels`` are accelerations
between aimpoints in the y-coordinate direction. Resultant array size is ``len(map_data) - 3``.
"""
t, vx, vy = StdMapMetrics.calc_xy_vel(map_data.iloc[2:])
dvx = np.diff(vx)
dvy = np.diff(vy)
dt = np.diff(t)
return t[1:], dvx/dt, dvy/dt
@staticmethod
def calc_xy_jerk(map_data=[]):
"""
Calculates parametric jerks between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, x_jerks, y_jerks)``. ``times`` are aimpoint timings. ``x_jerks`` are
jerks between aimpoints in the x-coordinate direction. ``y_jerks`` are jerks
between aimpoints in the y-coordinate direction. Resultant array size is ``len(map_data) - 4``.
"""
map_data = np.asarray(map_data[2:])
t, ax, ay = StdMapMetrics.calc_xy_accel(map_data)
dax = np.diff(ax)
day = np.diff(ay)
dt = np.diff(t)
return t[1:], dax/dt, day/dt
@staticmethod
def calc_velocity_start(map_data=[]):
t = StdMapData.start_times(map_data)
x, y = StdMapData.start_positions(map_data)
return t[1:], Metrics.vel_2d(x, y, t)
@staticmethod
def calc_intensity(map_data=[]):
t, v = StdMapMetrics.calc_velocity_start(map_data)
t, nps = StdMapMetrics.calc_notes_per_sec(map_data)
intensity = v*nps
return t, intensity
@staticmethod
def calc_angles(map_data=[]):
"""
Calculates angle between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, angles)``. ``times`` are aimpoint timings. ``angles`` are
angles between aimpoints. Resultant array size is ``len(map_data) - 1``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[1:], Metrics.angle(x, y, t)
@staticmethod
def calc_theta_per_second(map_data=[]):
"""
Calculates immediate path rotation (in radians per second) from previous aimpoint.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, rps)``. ``times`` are aimpoint timings. ``rps`` are
radians per second between aimpoints. Resultant array size is ``len(map_data) - 1``.
"""
t, thetas = StdMapMetrics.calc_angles(map_data)
dt = np.diff(t)
return t[1:], thetas*(1000/dt)
@staticmethod
def calc_radial_velocity(map_data=[]):
"""
Calculates radial velocity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks. Radial velocity is how fast a path
moves in a circle in radians per second. Unlike ``calc_theta_per_second``, which
calculates immediate rotation, this calculates average rotation.
The difference between the two implemtations is apparent when considering zig-zag and circular patterns.
Zig-zag patterns has no average angular velocity, but have average linear velocity. In a zig-zag
pattern one angle would be positive indicating a rotation in a clockwise direction, and another angle
would be negative indicating a rotation in a counter-clockwise direction. Ultimately those two cancel
out to result in no overall rotation direction. A circular pattern would have either both angles positive
or both angles negative, yielding a net negative or a net positive rotation direction.
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, avg_rad_vels)``. ``times`` are aimpoint timings. ``avg_rad_vels`` are
average radial velocities. Resultant array size is ``len(map_data) - 2``.
"""
t = StdMapData.all_times(map_data)
x, y = StdMapData.all_positions(map_data)
return t[2:], Metrics.avg_ang_vel(x, y, t[1:])
@staticmethod
def calc_perp_int(map_data=[]):
"""
Calculates perpendicular intensity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks. Perpendicular intensity is how much strongly the path
between aimpoints turns 90 deg, factoring in average radial velocity of the path as well as
overall velocity throughout the path (measured in osu!px*radians/millisconds^2).
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, perp_ints)``. ``times`` are aimpoint timings. ``perp_ints`` are
perpendicular intensities. Resultant array size is ``len(map_data) - 2``.
"""
times, rv = StdMapMetrics.calc_radial_velocity(map_data)
times, x_vel, y_vel = StdMapMetrics.calc_xy_vel(map_data)
# Construct vector angles from parametric velocities
theta1 = np.arctan2(y_vel[1:], x_vel[1:])
theta2 = np.arctan2(y_vel[:-1], x_vel[:-1])
# Make stacks 0 angle change
mask = np.where(np.logical_and(y_vel[1:] == 0, x_vel[1:] == 0))[0]
theta1[mask] = theta1[mask - 1]
mask = np.where(np.logical_and(y_vel[:-1] == 0, x_vel[:-1] == 0))[0]
theta2[mask] = theta2[mask - 1]
# Velocity in the perpendicular direction relative to current
dy_vel = np.sin(theta2 - theta1)
return times, rv*dy_vel[1:]
# Linear intensity
@staticmethod
def calc_lin_int(map_data=[]):
"""
Calculates linear intensity between aimpoints. Aimpoints are hitobject start
and end times, and slider ticks. Linear intensity is how much strongly the path
between aimpoints is linear, factoring in average radial velocity of the path as well as
overall velocity throughout the path (measured in osu!px*radians/millisconds^2).
Parameters
----------
map_data : numpy.array
Hitobject data from ``StdMapData.get_aimpoint_data``
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, lin_ints)``. ``times`` are aimpoint timings. ``lin_ints`` are
linear intensities. Resultant array size is ``len(map_data) - 2``.
"""
times, rv = StdMapMetrics.calc_radial_velocity(map_data)
times, x_vel, y_vel = StdMapMetrics.calc_xy_vel(map_data)
# Construct vector angles from parametric velocities
theta1 = np.arctan2(y_vel[1:], x_vel[1:])
theta2 = np.arctan2(y_vel[:-1], x_vel[:-1])
# Make stacks 0 angle change
mask = np.where(np.logical_and(y_vel[1:] == 0, x_vel[1:] == 0))[0]
theta1[mask] = theta1[mask - 1]
mask = np.where(np.logical_and(y_vel[:-1] == 0, x_vel[:-1] == 0))[0]
theta2[mask] = theta2[mask - 1]
# Velocity in the parellel direction relative to current
dx_vel = np.cos(theta2 - theta1)
return times, rv*dx_vel[1:]
all_times = StdMapData.all_times(map_data)
all_positions = StdMapData.all_positions(map_data)
if len(all_positions) < 3: return [], []
positions = [ Pos(*pos) for pos in all_positions ]
angles = [ get_angle(*param) for param in zip(positions[:-2], positions[1:-1], positions[2:]) ]
return all_times[1:-1], angles
@staticmethod
def calc_acceleration(map_data=[]):
pass
pass
'''
Response metrics
'''
@staticmethod
def calc_speed_response(resolution=1, x_range=(1, 100)):
return ([x for x in range(*x_range)], [ 1/x for x in range(*x_range) ])
    '''
    Advanced metrics
    '''
    @staticmethod
    def calc_rhythmic_complexity(map_data=[]):
        # NOTE(review): mutable default argument (map_data=[]); safe only
        # because the list is never mutated here.
        def calc_harmonic(prev_note_interval, curr_note_interval, target_time, v_scale):
            # Cosine weight of the current interval relative to the previous
            # one, scaled into [0, v_scale].
            if prev_note_interval == 0: print('WARNING: 0 note interval detected at ', target_time, ' ms')
            return -(v_scale/2)*math.cos((2*math.pi)/prev_note_interval*curr_note_interval) + (v_scale/2)
        def decay(interval, decay_factor):
            # Exponential decay with interval length.
            return math.exp(-decay_factor*interval)
        def speed(interval, speed_factor):
            # Shorter intervals -> larger speed weight.
            # NOTE(review): a 0 ms interval would raise ZeroDivisionError here.
            return speed_factor/interval
        def calc_note(time, curr_interval, prev_interval, decay_factor, v_scale):
            # Per-note contribution: decayed harmonic weight.
            return decay(curr_interval, decay_factor) * calc_harmonic(prev_interval, curr_interval, time, v_scale)
        speed_factor = 600.0
        v_factor = 10.0
        decay_factor = 0.005
        time, intervals = StdMapMetrics.calc_tapping_intervals(map_data)
        # harmonics has len(intervals) - 1 entries (starts at i = 1).
        harmonics = [ calc_note(time[i], intervals[i], intervals[i - 1], decay_factor, v_factor) for i in range(1, len(intervals)) ]
        # Running sum of harmonics weighted by the per-interval speed factor.
        return time, [ sum(harmonics[:i])*speed(intervals[i], speed_factor) for i in range(0, len(intervals)) ]
    @staticmethod
    def calc_path_curvature(hitobjects):
        # Not implemented yet.
        pass
    @staticmethod
    def calc_visual_density(hitobjects):
        # Not implemented yet.
        pass
    '''
    Skill metrics
    '''
    @staticmethod
    def calc_speed_skill(hitobjects):
        # Not implemented yet.
        pass
    @staticmethod
    def calc_tapping_skill(hitobjects):
        # Not implemented yet.
        pass
    @staticmethod
    def calc_targeting_skill(hitobjects):
        # Not implemented yet.
        pass
    @staticmethod
    def calc_agility_skill(hitobjects):
        # Not implemented yet.
        pass
|
[
"numpy.arctan2",
"numpy.logical_and",
"numpy.asarray",
"numpy.sin",
"numpy.diff",
"numpy.cos"
] |
[((944, 954), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (951, 954), True, 'import numpy as np\n'), ((4638, 4648), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (4645, 4648), True, 'import numpy as np\n'), ((4662, 4672), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (4669, 4672), True, 'import numpy as np\n'), ((5533, 5543), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (5540, 5543), True, 'import numpy as np\n'), ((5557, 5567), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (5564, 5567), True, 'import numpy as np\n'), ((5581, 5591), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (5588, 5591), True, 'import numpy as np\n'), ((6444, 6455), 'numpy.diff', 'np.diff', (['vx'], {}), '(vx)\n', (6451, 6455), True, 'import numpy as np\n'), ((6470, 6481), 'numpy.diff', 'np.diff', (['vy'], {}), '(vy)\n', (6477, 6481), True, 'import numpy as np\n'), ((6496, 6506), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (6503, 6506), True, 'import numpy as np\n'), ((7280, 7304), 'numpy.asarray', 'np.asarray', (['map_data[2:]'], {}), '(map_data[2:])\n', (7290, 7304), True, 'import numpy as np\n'), ((7386, 7397), 'numpy.diff', 'np.diff', (['ax'], {}), '(ax)\n', (7393, 7397), True, 'import numpy as np\n'), ((7412, 7423), 'numpy.diff', 'np.diff', (['ay'], {}), '(ay)\n', (7419, 7423), True, 'import numpy as np\n'), ((7438, 7448), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (7445, 7448), True, 'import numpy as np\n'), ((9283, 9293), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (9290, 9293), True, 'import numpy as np\n'), ((11994, 12026), 'numpy.arctan2', 'np.arctan2', (['y_vel[1:]', 'x_vel[1:]'], {}), '(y_vel[1:], x_vel[1:])\n', (12004, 12026), True, 'import numpy as np\n'), ((12044, 12078), 'numpy.arctan2', 'np.arctan2', (['y_vel[:-1]', 'x_vel[:-1]'], {}), '(y_vel[:-1], x_vel[:-1])\n', (12054, 12078), True, 'import numpy as np\n'), ((12438, 12461), 'numpy.sin', 'np.sin', (['(theta2 - theta1)'], {}), '(theta2 - theta1)\n', (12444, 12461), True, 'import numpy as np\n'), ((13547, 
13579), 'numpy.arctan2', 'np.arctan2', (['y_vel[1:]', 'x_vel[1:]'], {}), '(y_vel[1:], x_vel[1:])\n', (13557, 13579), True, 'import numpy as np\n'), ((13597, 13631), 'numpy.arctan2', 'np.arctan2', (['y_vel[:-1]', 'x_vel[:-1]'], {}), '(y_vel[:-1], x_vel[:-1])\n', (13607, 13631), True, 'import numpy as np\n'), ((13986, 14009), 'numpy.cos', 'np.cos', (['(theta2 - theta1)'], {}), '(theta2 - theta1)\n', (13992, 14009), True, 'import numpy as np\n'), ((1581, 1591), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (1588, 1591), True, 'import numpy as np\n'), ((12141, 12187), 'numpy.logical_and', 'np.logical_and', (['(y_vel[1:] == 0)', '(x_vel[1:] == 0)'], {}), '(y_vel[1:] == 0, x_vel[1:] == 0)\n', (12155, 12187), True, 'import numpy as np\n'), ((12257, 12305), 'numpy.logical_and', 'np.logical_and', (['(y_vel[:-1] == 0)', '(x_vel[:-1] == 0)'], {}), '(y_vel[:-1] == 0, x_vel[:-1] == 0)\n', (12271, 12305), True, 'import numpy as np\n'), ((13694, 13740), 'numpy.logical_and', 'np.logical_and', (['(y_vel[1:] == 0)', '(x_vel[1:] == 0)'], {}), '(y_vel[1:] == 0, x_vel[1:] == 0)\n', (13708, 13740), True, 'import numpy as np\n'), ((13810, 13858), 'numpy.logical_and', 'np.logical_and', (['(y_vel[:-1] == 0)', '(x_vel[:-1] == 0)'], {}), '(y_vel[:-1] == 0, x_vel[:-1] == 0)\n', (13824, 13858), True, 'import numpy as np\n')]
|
#%%
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
def loss_fn(X, Y, beta):
    """Squared l2 norm of the residual X @ beta - Y (a CVXPY expression)."""
    residual = cp.matmul(X, beta) - Y
    return cp.norm2(residual)**2
def regularizer(beta):
    """l1-norm penalty used as the lasso regularization term."""
    return cp.norm1(beta)
def objective_fn(X, Y, beta, lambd):
    """Lasso objective: squared loss plus a lambd-weighted l1 penalty."""
    penalty = lambd * regularizer(beta)
    return loss_fn(X, Y, beta) + penalty
def mse(X, Y, beta):
    """Mean squared prediction error of *beta* on the data set (X, Y)."""
    scale = 1.0 / X.shape[0]
    return scale * loss_fn(X, Y, beta).value
def generate_data(m=100, n=20, sigma=5, density=0.2):
    """Generate a sparse regression problem.

    Returns the (m x n) design matrix X, noisy observations Y, and the
    ground-truth coefficient vector with roughly ``density * n`` nonzeros.
    The RNG is seeded with 1, so output is deterministic.
    """
    np.random.seed(1)
    true_beta = np.random.randn(n)
    # Zero out a (1 - density) fraction of the coefficients.
    zero_idxs = np.random.choice(range(n), int((1-density)*n), replace=False)
    true_beta[zero_idxs] = 0
    X = np.random.randn(m, n)
    noise = np.random.normal(0, sigma, size=m)
    Y = X.dot(true_beta) + noise
    return X, Y, true_beta
# Problem dimensions and noise level for the synthetic lasso example.
m = 100
n = 20
sigma = 5
density = 0.2
# NOTE(review): `density` is defined but generate_data is called with its
# default density, not this value -- confirm intent.
X, Y, _ = generate_data(m, n, sigma)
# First 50 rows for training, remaining 50 held out for testing.
X_train = X[:50, :]
Y_train = Y[:50]
X_test = X[50:, :]
Y_test = Y[50:]
# lambd is a CVXPY Parameter so the same problem can be re-solved for many
# regularization strengths without rebuilding the expression graph.
beta = cp.Variable(n)
lambd = cp.Parameter(nonneg=True)
problem = cp.Problem(cp.Minimize(objective_fn(X_train, Y_train, beta, lambd)))
# Sweep 50 lambda values log-spaced between 1e-2 and 1e3.
lambd_values = np.logspace(-2, 3, 50)
train_errors = []
test_errors = []
beta_values = []
for v in lambd_values:
    lambd.value = v
    problem.solve()
    train_errors.append(mse(X_train, Y_train, beta))
    test_errors.append(mse(X_test, Y_test, beta))
    beta_values.append(beta.value)
# matplotlib inline
# config InlineBackend.figure_format = 'svg'
def plot_train_test_errors(train_errors, test_errors, lambd_values):
    """Plot train and test MSE against lambda on a log-scaled x axis."""
    for errors, curve_label in ((train_errors, "Train error"),
                                (test_errors, "Test error")):
        plt.plot(lambd_values, errors, label=curve_label)
    plt.xscale("log")
    plt.legend(loc="upper left")
    plt.xlabel(r"$\lambda$", fontsize=16)
    plt.title("Mean Squared Error (MSE)")
    plt.show()
plot_train_test_errors(train_errors, test_errors, lambd_values)
print('done')
|
[
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.title",
"numpy.random.seed",
"cvxpy.Parameter",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.show",
"numpy.logspace",
"matplotlib.pyplot.legend",
"cvxpy.matmul",
"cvxpy.norm1",
"numpy.random.normal",
"cvxpy.Variable",
"matplotlib.pyplot.xlabel"
] |
[((936, 950), 'cvxpy.Variable', 'cp.Variable', (['n'], {}), '(n)\n', (947, 950), True, 'import cvxpy as cp\n'), ((959, 984), 'cvxpy.Parameter', 'cp.Parameter', ([], {'nonneg': '(True)'}), '(nonneg=True)\n', (971, 984), True, 'import cvxpy as cp\n'), ((1080, 1102), 'numpy.logspace', 'np.logspace', (['(-2)', '(3)', '(50)'], {}), '(-2, 3, 50)\n', (1091, 1102), True, 'import numpy as np\n'), ((183, 197), 'cvxpy.norm1', 'cp.norm1', (['beta'], {}), '(beta)\n', (191, 197), True, 'import cvxpy as cp\n'), ((484, 501), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (498, 501), True, 'import numpy as np\n'), ((518, 536), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (533, 536), True, 'import numpy as np\n'), ((666, 687), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (681, 687), True, 'import numpy as np\n'), ((1496, 1553), 'matplotlib.pyplot.plot', 'plt.plot', (['lambd_values', 'train_errors'], {'label': '"""Train error"""'}), "(lambd_values, train_errors, label='Train error')\n", (1504, 1553), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1613), 'matplotlib.pyplot.plot', 'plt.plot', (['lambd_values', 'test_errors'], {'label': '"""Test error"""'}), "(lambd_values, test_errors, label='Test error')\n", (1566, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1635), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1628, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1668), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1650, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1710), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$"""'], {'fontsize': '(16)'}), "('$\\\\lambda$', fontsize=16)\n", (1683, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1752), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Squared Error (MSE)"""'], {}), "('Mean Squared Error (MSE)')\n", (1724, 1752), 
True, 'import matplotlib.pyplot as plt\n'), ((1757, 1767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1765, 1767), True, 'import matplotlib.pyplot as plt\n'), ((714, 748), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma'], {'size': 'm'}), '(0, sigma, size=m)\n', (730, 748), True, 'import numpy as np\n'), ((121, 139), 'cvxpy.matmul', 'cp.matmul', (['X', 'beta'], {}), '(X, beta)\n', (130, 139), True, 'import cvxpy as cp\n')]
|
import numpy as np
def stringify_vec(vec):
    """Return the elements of *vec* as a space-separated string.

    Each element is followed by a single space, so a non-empty result
    carries a trailing space (matching the original behavior).
    """
    # str.join is linear time; the old `s += ...` loop was quadratic.
    return "".join(str(x) + " " for x in vec)
def distance(p1, p2):
    """Euclidean distance between two points given as array-likes."""
    delta = np.asarray(p1) - np.asarray(p2)
    return np.linalg.norm(delta)
def multireplace(arr, x, sub_arr):
    """Return a new list where every entry of *arr* that equals *x*
    (element-wise, via ``.all()``) is expanded into the entries of
    *sub_arr*; other entries are kept as-is."""
    result = []
    for item in arr:
        if (item == x).all():
            result.extend(sub_arr)
        else:
            result.append(item)
    return result
def rotate_about_line(point, base_pt, vec, theta):
    """Rotate *point* by *theta* radians about the line through *base_pt*
    with direction *vec*; returns the rotated point as a numpy array.

    NOTE: degenerate (division by zero) when the point lies on the line.
    """
    p = np.asarray(point)
    base = np.asarray(base_pt)
    axis = np.asarray(vec)
    # Project the point onto the line to find the rotation center.
    offset = p - base
    along = axis * np.dot(offset, axis) / np.linalg.norm(axis)**2
    center = base + along
    # Build an orthogonal in-plane pair (radial, tangential) of equal length.
    radial = p - center
    tangential = np.cross(axis, radial)
    tangential = tangential * np.linalg.norm(radial) / np.linalg.norm(tangential)
    return center + radial * np.cos(theta) + tangential * np.sin(theta)
|
[
"numpy.asarray",
"numpy.cross",
"numpy.sin",
"numpy.linalg.norm",
"numpy.cos",
"numpy.dot"
] |
[((137, 151), 'numpy.asarray', 'np.asarray', (['p1'], {}), '(p1)\n', (147, 151), True, 'import numpy as np\n'), ((162, 176), 'numpy.asarray', 'np.asarray', (['p2'], {}), '(p2)\n', (172, 176), True, 'import numpy as np\n'), ((188, 213), 'numpy.linalg.norm', 'np.linalg.norm', (['(pv1 - pv2)'], {}), '(pv1 - pv2)\n', (202, 213), True, 'import numpy as np\n'), ((476, 493), 'numpy.asarray', 'np.asarray', (['point'], {}), '(point)\n', (486, 493), True, 'import numpy as np\n'), ((504, 523), 'numpy.asarray', 'np.asarray', (['base_pt'], {}), '(base_pt)\n', (514, 523), True, 'import numpy as np\n'), ((533, 548), 'numpy.asarray', 'np.asarray', (['vec'], {}), '(vec)\n', (543, 548), True, 'import numpy as np\n'), ((690, 707), 'numpy.cross', 'np.cross', (['lv', 'rv1'], {}), '(lv, rv1)\n', (698, 707), True, 'import numpy as np\n'), ((746, 765), 'numpy.linalg.norm', 'np.linalg.norm', (['rv2'], {}), '(rv2)\n', (760, 765), True, 'import numpy as np\n'), ((590, 607), 'numpy.dot', 'np.dot', (['diffv', 'lv'], {}), '(diffv, lv)\n', (596, 607), True, 'import numpy as np\n'), ((610, 628), 'numpy.linalg.norm', 'np.linalg.norm', (['lv'], {}), '(lv)\n', (624, 628), True, 'import numpy as np\n'), ((724, 743), 'numpy.linalg.norm', 'np.linalg.norm', (['rv1'], {}), '(rv1)\n', (738, 743), True, 'import numpy as np\n'), ((815, 828), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (821, 828), True, 'import numpy as np\n'), ((793, 806), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (799, 806), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Column layout of the simulation hit file (no header row in the CSV).
colNames = ['x', 'y', 'z', 'particle.type', 'BPM no']
# Human-readable names for the numeric particle-type codes.
particleTypeNames = {
    -1: 'other',
    0: 'e-',
    1: 'e+',
    2: 'gamma'
}
# Comment lines in the file start with '#'.
data = pd.read_csv(
    '../build-10/out_nt_bpmScreenHits.csv',
    header=None,
    names=colNames,
    comment='#'
)
# Distinct particle-type codes present in this run, sorted.
# NOTE(review): appears unused below -- plot() takes a typeName argument.
particleTypes = sorted(data['particle.type'].unique())
def plot(data, typeName, pp):
    """Render a 2D hit-density histogram for one BPM screen and append the
    figure to the open PdfPages object *pp*.

    Parameters
    ----------
    data : pandas.DataFrame
        Hit rows with at least 'x' and 'y' columns.
    typeName : str
        Label used in the plot title.
    pp : matplotlib.backends.backend_pdf.PdfPages
        Open multi-page PDF to save the figure into.
    """
    # 300x300 bins over +/-300 in both axes.
    histo, xEdges, yEdges = np.histogram2d(
        data['x'], data['y'],
        bins=300, range=[[-300, 300], [-300, 300]]
    )
    histo = histo.T
    # Mask empty bins so they render as background instead of color value 0.
    histoMasked = np.ma.masked_where(histo == 0, histo)
    fig, ax = plt.subplots()
    cm = ax.pcolormesh(
        xEdges, yEdges, histoMasked,
        cmap='viridis', rasterized=True,
        zorder=6
    )
    cb = fig.colorbar(cm)
    # Screen outline: 13.125 inch diameter converted to mm.
    circle = plt.Circle(
        (0, 0), 13.125/2*2.54*10,
        color=(1.0, 0.0, 1.0), fill=False,
        zorder=5
    )
    ax.add_artist(circle)
    ax.grid(True)
    # Flip the x axis.
    xlims = ax.get_xlim()
    ax.set_xlim(xlims[1], xlims[0])
    ax.set_title('{} hits'.format(typeName))
    # Raw strings: the TeX labels contain backslashes (\m, \q, \#) that would
    # otherwise be invalid Python escape sequences (SyntaxWarning on 3.12+).
    # The resulting string values are unchanged.
    ax.set_xlabel(r'$x_\mathrm{dump} \quad [\mathrm{mm}]$')
    ax.set_ylabel(r'$y_\mathrm{dump} \quad [\mathrm{mm}]$')
    cb.set_label(r'$\#_\mathrm{counts}$')
    fig.tight_layout()
    pp.savefig(dpi=150)
# Render one page per BPM screen into a single multi-page PDF.
with PdfPages('plot_BPM.pdf') as pp:
    # `range` instead of the Python-2-only `xrange`: the file already uses
    # `from __future__ import ...` for 2/3 compatibility, and `xrange` raises
    # NameError on Python 3.
    for bpmNo in range(1, 3):
        plot(data[data['BPM no'] == bpmNo], 'BPM {}'.format(bpmNo), pp)
|
[
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.ma.masked_where",
"pandas.read_csv",
"numpy.histogram2d",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.subplots"
] |
[((345, 443), 'pandas.read_csv', 'pd.read_csv', (['"""../build-10/out_nt_bpmScreenHits.csv"""'], {'header': 'None', 'names': 'colNames', 'comment': '"""#"""'}), "('../build-10/out_nt_bpmScreenHits.csv', header=None, names=\n colNames, comment='#')\n", (356, 443), True, 'import pandas as pd\n'), ((573, 658), 'numpy.histogram2d', 'np.histogram2d', (["data['x']", "data['y']"], {'bins': '(300)', 'range': '[[-300, 300], [-300, 300]]'}), "(data['x'], data['y'], bins=300, range=[[-300, 300], [-300, 300]]\n )\n", (587, 658), True, 'import numpy as np\n'), ((715, 752), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(histo == 0)', 'histo'], {}), '(histo == 0, histo)\n', (733, 752), True, 'import numpy as np\n'), ((768, 782), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (780, 782), True, 'import matplotlib.pyplot as plt\n'), ((949, 1041), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(13.125 / 2 * 2.54 * 10)'], {'color': '(1.0, 0.0, 1.0)', 'fill': '(False)', 'zorder': '(5)'}), '((0, 0), 13.125 / 2 * 2.54 * 10, color=(1.0, 0.0, 1.0), fill=\n False, zorder=5)\n', (959, 1041), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1451), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['"""plot_BPM.pdf"""'], {}), "('plot_BPM.pdf')\n", (1435, 1451), False, 'from matplotlib.backends.backend_pdf import PdfPages\n')]
|
"""
LSH for euclidean distance.
"""
from pyspark import SparkContext, RDD
from datming.utils import join_multiple_keys
import numpy as np
__all__ = [
"EuclideanDistance"
]
class EuclideanDistanceLSH(object):
    """
    Find item pairs between which Euclidean Distance is closed enough.
    """
    def __init__(self, n_dimension: int, threshold: int,
                 block_size: int=1, n_bands: int=20, signature_length: int=200,
                 random_seed: int=None, n_partitions=5):
        """
        :param n_dimension: Dimension of vector
        :param block_size: size of block to split the dimensions.
        :param threshold: Maximum distance to consider a pair of vectors as similar vectors.
        :param n_bands: Number of LSH bands; items agreeing on any one band
            become candidate pairs.
        :param signature_length: Requested signature length; rounded down to a
            multiple of n_bands.
        :param random_seed: Seed for the random hyperplanes; a random seed is
            drawn when this is not an int.
        :param n_partitions: Maximum number of partitions during the computation.
        """
        self.__block_size = block_size
        self.__n_dim = n_dimension
        self.__threshold = threshold
        self.__n_bands = n_bands
        # Rows per band; effective signature length is n_rows * n_bands.
        self.__n_rows = signature_length // n_bands
        self.__signature_length = self.__n_rows * self.__n_bands
        self.__random_seed = (random_seed if isinstance(random_seed, int)
                              else np.random.randint(0, 2**32-1))
        self.__n_partitions = n_partitions
    def _lsh_predict(self, data: RDD) -> RDD:
        """
        :param data: RDD<(int, np.array)>
            = RDD<(id, vector)>
        :return: RDD<(int, int, float)>
            = RDD<(id, id, distance)>
        """
        hyperplanes = self.__init_hyperplanes(
            self.__n_dim, self.__signature_length, self.__random_seed
        )
        candidates = self.__compute_candidates(
            data, hyperplanes,
            self.__block_size, self.__n_bands, self.__n_rows, self.__n_partitions
        )
        similarity = self.__compute_similarity(
            data, candidates
        )
        threshold = self.__threshold
        # Keep only pairs within the distance threshold; cache + count forces
        # evaluation so downstream actions reuse the materialized result.
        similarity = similarity.filter(lambda u: u[2] <= threshold).cache()
        similarity.count()
        return similarity
    @staticmethod
    def __init_hyperplanes(n_dim: int, signature_length: int,
                           random_seed: int):
        """
        Initialize random n-D Unit vectors.
        Muller, <NAME>. "A note on a method for generating points uniformly on n-dimensional spheres."
        Communications of the ACM 2.4 (1959): 19-20.
        """
        np.random.seed(random_seed)
        hyperplanes = np.random.randn(signature_length, n_dim)
        # Normalize each row so every hyperplane normal is a unit vector.
        hyperplanes = (hyperplanes / np.linalg.norm(hyperplanes, axis=1)
                       .reshape(-1, 1))
        return hyperplanes
    @staticmethod
    def __compute_candidates(data, hyperplanes,
                             block_size, n_bands, n_rows, num_partitions):
        """
        Compute signatures, group items according to signature and generate candidate pairs.
        """
        def compute(generator_of_key_values):
            # Quantize each item's hyperplane projections into blocks, then
            # emit one ((band index, band signature), item id) pair per band.
            for key, values in generator_of_key_values:
                blocks = np.floor(
                    np.dot(hyperplanes, values) / block_size
                )
                for i in range(n_bands):
                    yield (
                        (i, tuple(blocks[i*n_rows:(i+1)*n_rows])), key
                    )
        def generate_pairs(list_of_keys: list):
            # Emit every (smaller id, larger id) pair within one bucket.
            if len(list_of_keys) < 2:
                return []
            list_of_keys.sort()
            for idxA, keyA in enumerate(list_of_keys[:-1]):
                for keyB in list_of_keys[idxA+1:]:
                    yield ((keyA, keyB), -1)
        candidates = (data
                      .mapPartitions(compute)
                      .coalesce(num_partitions)
                      .aggregateByKey(list(), lambda u, v: u + [v], lambda u1, u2: u1 + u2)
                      .map(lambda u: u[1])
                      .flatMap(generate_pairs)
                      .distinct()
                      .coalesce(num_partitions)
                      .cache()
                      )
        return candidates
    @staticmethod
    def __compute_similarity(data, candidates):
        def compute(key_values):
            # Joined rows look like ((id1, id2), (placeholder, vec1, vec2)).
            (key1, key2), (_, vector1, vector2) = key_values
            return key1, key2, euclidean_distance(vector1, vector2)
        similarity = (join_multiple_keys(left=candidates, right=data, n=2)
                      .map(compute)
                      )
        return similarity
class Euclidean(EuclideanDistanceLSH):
    """Facade selecting a Euclidean-distance similarity backend by name.

    Only the "lsh" backend is currently implemented.
    """
    def __init__(self, mode: str="lsh", **kwargs):
        normalized = mode.lower()
        self.mode = normalized
        if normalized != "lsh":
            raise NotImplementedError
        EuclideanDistanceLSH.__init__(self, **kwargs)
    def predict(self, data: RDD) -> RDD:
        """Run the configured backend on an RDD of (id, vector) pairs."""
        if self.mode != "lsh":
            raise NotImplementedError
        return self._lsh_predict(data)
def euclidean_distance(vector1, vector2):
    """l2 distance between two numpy vectors."""
    difference = vector1 - vector2
    return np.linalg.norm(difference)
def test_case_with_random_data():
    # Smoke test: compare LSH-selected pairs against brute force on 1000
    # random 5-d vectors and report true-positive / false-negative rates.
    test_data = [
        np.random.randn(5) for _ in range(1000)
    ]
    sc = SparkContext.getOrCreate()
    test_rdd = sc.parallelize(
        [(i, arr) for i, arr in enumerate(test_data)]
    )
    _threshold = 1
    lsh_result = Euclidean(
        block_size=8, n_dimension=5, threshold=_threshold, n_bands=10, signature_length=50
    ).predict(data=test_rdd).collect()
    lsh_result = set([
        (i, j) for i, j, _ in lsh_result
    ])
    print("number of LSH-selected pairs: ", len(lsh_result))
    # Brute-force ground truth over all unordered index pairs (i < j).
    truth = set()
    for i, arr1 in enumerate(test_data[:-1]):
        for j, arr2 in enumerate(test_data[i + 1:]):
            if euclidean_distance(arr1, arr2) <= _threshold:
                truth.add((i, j + i + 1))
    print("number of true pairs: ", len(truth))
    print("TP rate=", len(lsh_result & truth) / len(truth))
    print("FN rate=", len(truth - lsh_result) / len(truth))
if __name__ == '__main__':
    test_case_with_random_data()
|
[
"numpy.random.seed",
"numpy.random.randn",
"pyspark.SparkContext.getOrCreate",
"datming.utils.join_multiple_keys",
"numpy.random.randint",
"numpy.linalg.norm",
"numpy.dot"
] |
[((4908, 4941), 'numpy.linalg.norm', 'np.linalg.norm', (['(vector1 - vector2)'], {}), '(vector1 - vector2)\n', (4922, 4941), True, 'import numpy as np\n'), ((5059, 5085), 'pyspark.SparkContext.getOrCreate', 'SparkContext.getOrCreate', ([], {}), '()\n', (5083, 5085), False, 'from pyspark import SparkContext, RDD\n'), ((2389, 2416), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2403, 2416), True, 'import numpy as np\n'), ((2439, 2479), 'numpy.random.randn', 'np.random.randn', (['signature_length', 'n_dim'], {}), '(signature_length, n_dim)\n', (2454, 2479), True, 'import numpy as np\n'), ((5004, 5022), 'numpy.random.randn', 'np.random.randn', (['(5)'], {}), '(5)\n', (5019, 5022), True, 'import numpy as np\n'), ((1181, 1214), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 32 - 1)'], {}), '(0, 2 ** 32 - 1)\n', (1198, 1214), True, 'import numpy as np\n'), ((4277, 4329), 'datming.utils.join_multiple_keys', 'join_multiple_keys', ([], {'left': 'candidates', 'right': 'data', 'n': '(2)'}), '(left=candidates, right=data, n=2)\n', (4295, 4329), False, 'from datming.utils import join_multiple_keys\n'), ((2517, 2552), 'numpy.linalg.norm', 'np.linalg.norm', (['hyperplanes'], {'axis': '(1)'}), '(hyperplanes, axis=1)\n', (2531, 2552), True, 'import numpy as np\n'), ((3036, 3063), 'numpy.dot', 'np.dot', (['hyperplanes', 'values'], {}), '(hyperplanes, values)\n', (3042, 3063), True, 'import numpy as np\n')]
|
# =============================================================================
#
# Explicit Finite Difference Method Code
# Solves the 2D Temperature Convection-Diffusion Equation
# Assumes Tubular Plug-Flow-Reactor in Laminar Regime
# Assumes hagen poiseuille velocity profile
# Heat Source-Sink Included Uses Laminar Nusselt Correlation for "h"
# Written by: <NAME> (2020)
# Institution: Virginia Commonwealth University
#
# =============================================================================
# Required Modules
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import math
from array import array
# --- physical and numerical parameters ---
D = 0.0015875            # tubing diameter in m
xl = 30/100              # tubing length in m & x range
yl = D                   # tubing diameter & y range
nx = 300                 # x grid points
ny = 50                  # y grid points
dx = xl/(nx-1)           # x stepsize
dy = yl/(ny-1)           # y stepsize
k = .12                  # thermal conductvity W/(m*K)
p = 1750                 # density (kg/m3)
Cp = 1172                # specifc heat (J/kg/K)
a = k/(p*Cp)             # thermal diffusivity (m2/s)
sigma = .001             # time step factor
dt = sigma * dx * dy / a # time stepsize
Vr = math.pi*(D/2)**2*xl # tubing volume (m3)
Qmlm = 1                 # volumetric flowrate (mL/min)
Q = (Qmlm*10**-6)/60     # volumetric flowrate (m3/s)
Ac = math.pi*(D/2)**2    # cross-sectional area (m2)
lamx = a*dt/dx**2        # lumped coefficient
lamy = a*dt/dy**2        # lumped coefficient
Nu = 3.66                # nusselt laminar flow in tube
h = Nu*k/D               # convective heat transfer coefficient (W/m2/K)
T0 = 130+273.15          # stream inlet temperature (degK)
Tw = 25+273.15           # wall temperature (degK)
reltol = 1e-8            # tolerance for convergence
# grid formation
x = np.linspace(0, xl, nx)
y = np.linspace(0, yl, ny)
X, Y = np.meshgrid(x, y)
# hagen poiseuille velocity field generation
uAvg = Q/Ac              # average velocity (m/s)
uMax = 2*uAvg            # max velocity (m/s)
u = np.zeros(ny)         # array initilization
u[:] = np.linspace(-(D/2),(D/2),ny) # array intialization
u[:] = uMax*(1-(u[:]/(D/2))**2)     # hagan-poiselle profile
u[0]=u[-1]=0             # no slip BC
u = np.array([u,]*nx)    # velocity field: replicate the radial profile at every x
u = u.T                  # transpose/align field to (ny, nx)
maxCFL = np.max(u*dt/dx)  # CFL condition calc.
print('The max CFL is %s'%(maxCFL))
# main function loop
def lets_get_tubular():
    """Solve the 2D convection-diffusion equation on the module-level grid
    with explicit Euler time stepping and central differences, iterating
    until the relative change between successive solutions drops below
    ``reltol``, then plot the temperature field and two line profiles.

    Uses the module-level globals (grid, velocity field, material constants)
    defined above.
    """
    # array initialization
    Ttol = np.zeros((ny,nx))
    T = np.ones((ny, nx))*Tw
    Tn = np.ones((ny, nx))*Tw
    # initialize termination condition
    # compares norms of current and previous solution arrays
    termcond = (np.abs((np.linalg.norm(Ttol)-np.linalg.norm(Tn))))/np.linalg.norm(Tn)
    stepcount = 1 # step counter
    while termcond >= reltol:
        termcond = np.abs((np.linalg.norm(Ttol)-np.linalg.norm(Tn)))/np.linalg.norm(Tn)
        Tn = T.copy()
        # FDM vectorized solution using explicit euler and CDS:
        # upwind-style convection term, diffusion in x and y, and a
        # wall heat-loss source term based on the laminar Nusselt "h".
        T[1:-1, 1:-1] = (Tn[1:-1,1:-1] - (u[1:-1,1:-1]*(dt/(2*dx))*(Tn[1:-1,2:] \
            -Tn[1:-1,:-2])) \
            + lamx *(Tn[1:-1, 2:] - 2 * Tn[1:-1, 1:-1] + Tn[1:-1, :-2]) \
            + lamy* (Tn[2:,1:-1] - 2 * Tn[1:-1, 1:-1] + Tn[:-2, 1:-1])) \
            - h*D*math.pi*(Tn[1:-1,1:-1]-Tw)*dt/p/Cp*xl/Vr
        # BCs
        T[0, :] = Tw # tubing wall temp dirichlet BC
        T[-1, :] = Tw # tubing wall temp dirichlet BC
        T[:, 0] = T0 # inlet flow temp dirichlet BC
        T[:, -1] = T[:,-2] # outlet flow temp neumann BC
        Ttol=T.copy() # update solution
        stepcount += 1 # update counter
    # (optional 3D surface plot kept for reference)
    # fig = plt.figure()
    # ax = fig.gca(projection='3d')
    # surf = ax.plot_surface(X, Y, T[:], rstride=1, cstride=1, cmap=cm.viridis,
    # linewidth=0, antialiased=True)
    # ax.set_xlabel('$x$')
    # ax.set_ylabel('$y$');
    T[:]=T[:]-273.15 # converts back to degC
    # generates plots
    # top plot is 2D filled contour plot
    # bottom plot is centerline and near-wall line data points
    fig1 = plt.subplot(211)
    # ax = fig1.gca()
    # plt.imshow(T[:])
    cont = plt.contourf(X,Y,T[:],50)
    ax = plt.gca()
    ax.axis('scaled')
    ax.axes.get_yaxis().set_visible(False)
    plt.xlim(0,.05)
    plt.xlabel('Tubing Length (m)')
    cbar = plt.colorbar(cont)
    cbar.ax.set_ylabel('Temperature (degC)')
    # center row and a row 5 points from the wall for the line profiles
    centerline = ny/2
    wallline = ny-5
    centerline = int(centerline)
    wallline = int(wallline)
    centerT = T[centerline,:]
    wallT = T[wallline,:]
    fig2 = plt.subplot(212)
    plt.plot(x, centerT,label='center')
    plt.plot(x,wallT,label='wall')
    plt.legend()
    plt.ylabel('Temperature (degC)')
    plt.xlabel('Tubing Length (m)')
    plt.show()
    print('Stepcount = %s' %(stepcount))
if __name__ == "__main__":
    lets_get_tubular()
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.contourf",
"numpy.linspace",
"matplotlib.pyplot.gca",
"numpy.linalg.norm",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((2506, 2528), 'numpy.linspace', 'np.linspace', (['(0)', 'xl', 'nx'], {}), '(0, xl, nx)\n', (2517, 2528), True, 'import numpy as np\n'), ((2535, 2557), 'numpy.linspace', 'np.linspace', (['(0)', 'yl', 'ny'], {}), '(0, yl, ny)\n', (2546, 2557), True, 'import numpy as np\n'), ((2567, 2584), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2578, 2584), True, 'import numpy as np\n'), ((2783, 2795), 'numpy.zeros', 'np.zeros', (['ny'], {}), '(ny)\n', (2791, 2795), True, 'import numpy as np\n'), ((2857, 2889), 'numpy.linspace', 'np.linspace', (['(-(D / 2))', '(D / 2)', 'ny'], {}), '(-(D / 2), D / 2, ny)\n', (2868, 2889), True, 'import numpy as np\n'), ((3061, 3079), 'numpy.array', 'np.array', (['([u] * nx)'], {}), '([u] * nx)\n', (3069, 3079), True, 'import numpy as np\n'), ((3205, 3224), 'numpy.max', 'np.max', (['(u * dt / dx)'], {}), '(u * dt / dx)\n', (3211, 3224), True, 'import numpy as np\n'), ((3393, 3411), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (3401, 3411), True, 'import numpy as np\n'), ((5093, 5109), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (5104, 5109), True, 'from matplotlib import pyplot as plt\n'), ((5167, 5195), 'matplotlib.pyplot.contourf', 'plt.contourf', (['X', 'Y', 'T[:]', '(50)'], {}), '(X, Y, T[:], 50)\n', (5179, 5195), True, 'from matplotlib import pyplot as plt\n'), ((5203, 5212), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5210, 5212), True, 'from matplotlib import pyplot as plt\n'), ((5285, 5302), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (5293, 5302), True, 'from matplotlib import pyplot as plt\n'), ((5306, 5337), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tubing Length (m)"""'], {}), "('Tubing Length (m)')\n", (5316, 5337), True, 'from matplotlib import pyplot as plt\n'), ((5350, 5368), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cont'], {}), '(cont)\n', (5362, 5368), True, 'from matplotlib import pyplot as plt\n'), 
((5599, 5615), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (5610, 5615), True, 'from matplotlib import pyplot as plt\n'), ((5621, 5657), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'centerT'], {'label': '"""center"""'}), "(x, centerT, label='center')\n", (5629, 5657), True, 'from matplotlib import pyplot as plt\n'), ((5662, 5694), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'wallT'], {'label': '"""wall"""'}), "(x, wallT, label='wall')\n", (5670, 5694), True, 'from matplotlib import pyplot as plt\n'), ((5698, 5710), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5708, 5710), True, 'from matplotlib import pyplot as plt\n'), ((5716, 5748), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature (degC)"""'], {}), "('Temperature (degC)')\n", (5726, 5748), True, 'from matplotlib import pyplot as plt\n'), ((5754, 5785), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tubing Length (m)"""'], {}), "('Tubing Length (m)')\n", (5764, 5785), True, 'from matplotlib import pyplot as plt\n'), ((5797, 5807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5805, 5807), True, 'from matplotlib import pyplot as plt\n'), ((3421, 3438), 'numpy.ones', 'np.ones', (['(ny, nx)'], {}), '((ny, nx))\n', (3428, 3438), True, 'import numpy as np\n'), ((3454, 3471), 'numpy.ones', 'np.ones', (['(ny, nx)'], {}), '((ny, nx))\n', (3461, 3471), True, 'import numpy as np\n'), ((3646, 3664), 'numpy.linalg.norm', 'np.linalg.norm', (['Tn'], {}), '(Tn)\n', (3660, 3664), True, 'import numpy as np\n'), ((3801, 3819), 'numpy.linalg.norm', 'np.linalg.norm', (['Tn'], {}), '(Tn)\n', (3815, 3819), True, 'import numpy as np\n'), ((3603, 3623), 'numpy.linalg.norm', 'np.linalg.norm', (['Ttol'], {}), '(Ttol)\n', (3617, 3623), True, 'import numpy as np\n'), ((3624, 3642), 'numpy.linalg.norm', 'np.linalg.norm', (['Tn'], {}), '(Tn)\n', (3638, 3642), True, 'import numpy as np\n'), ((3759, 3779), 'numpy.linalg.norm', 'np.linalg.norm', (['Ttol'], {}), '(Ttol)\n', 
(3773, 3779), True, 'import numpy as np\n'), ((3780, 3798), 'numpy.linalg.norm', 'np.linalg.norm', (['Tn'], {}), '(Tn)\n', (3794, 3798), True, 'import numpy as np\n')]
|
import os
from stray.scene import Scene
from stray.renderer import Renderer
import numpy as np
import pycocotools.mask as mask_util
import pickle
def write_segmentation_masks(scene_path):
    """Render and save a COCO-RLE segmentation mask for every frame of every
    bounding-box instance in the scene at *scene_path*.

    Masks are pickled under
    ``<scene_path>/segmentation/instance_<k>/<frame>.pickle``.
    """
    scene = Scene(scene_path)
    renderer = Renderer(scene)
    parent_dir = os.path.join(scene_path, "segmentation")
    os.makedirs(parent_dir, exist_ok=True)
    for bbox_id, bbox in enumerate(scene.bounding_boxes):
        instance_dir = os.path.join(parent_dir, f"instance_{bbox_id}")
        os.makedirs(instance_dir, exist_ok=True)
        renderer.add_scene_instance(bbox)
        for i in range(len(scene)):
            print(f"Processing frame {i:06}", end='\r')
            frame_mask = renderer.render_segmentation(i)
            # COCO run-length encoding expects a Fortran-ordered array.
            rle = mask_util.encode(np.asarray(frame_mask, order="F"))
            out_path = os.path.join(instance_dir, f"{i:06}.pickle")
            with open(out_path, 'wb') as handle:
                pickle.dump(rle, handle, protocol=pickle.HIGHEST_PROTOCOL)
        print(f"Saved segmetations to {instance_dir} for instance {bbox_id}")
        renderer.clear_scene_instances()
|
[
"pickle.dump",
"os.makedirs",
"numpy.asarray",
"stray.renderer.Renderer",
"stray.scene.Scene",
"os.path.join"
] |
[((202, 219), 'stray.scene.Scene', 'Scene', (['scene_path'], {}), '(scene_path)\n', (207, 219), False, 'from stray.scene import Scene\n'), ((235, 250), 'stray.renderer.Renderer', 'Renderer', (['scene'], {}), '(scene)\n', (243, 250), False, 'from stray.renderer import Renderer\n'), ((282, 322), 'os.path.join', 'os.path.join', (['scene_path', '"""segmentation"""'], {}), "(scene_path, 'segmentation')\n", (294, 322), False, 'import os\n'), ((327, 379), 'os.makedirs', 'os.makedirs', (['segmentation_parent_path'], {'exist_ok': '(True)'}), '(segmentation_parent_path, exist_ok=True)\n', (338, 379), False, 'import os\n'), ((466, 527), 'os.path.join', 'os.path.join', (['segmentation_parent_path', 'f"""instance_{bbox_id}"""'], {}), "(segmentation_parent_path, f'instance_{bbox_id}')\n", (478, 527), False, 'import os\n'), ((536, 581), 'os.makedirs', 'os.makedirs', (['segmentation_path'], {'exist_ok': '(True)'}), '(segmentation_path, exist_ok=True)\n', (547, 581), False, 'import os\n'), ((817, 844), 'numpy.asarray', 'np.asarray', (['mask'], {'order': '"""F"""'}), "(mask, order='F')\n", (827, 844), True, 'import numpy as np\n'), ((952, 1019), 'pickle.dump', 'pickle.dump', (['segmentation', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(segmentation, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (963, 1019), False, 'import pickle\n'), ((868, 917), 'os.path.join', 'os.path.join', (['segmentation_path', 'f"""{i:06}.pickle"""'], {}), "(segmentation_path, f'{i:06}.pickle')\n", (880, 917), False, 'import os\n')]
|
from setuptools import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
import numpy
import mpi4py
import os
class build_ext_subclass(build_ext):
    """build_ext that compiles and links with an MPI compiler wrapper.

    The compiler is chosen from, in order: the ``--mpicc`` command-line
    option, the ``MPICC`` environment variable, mpi4py's recorded
    configuration, and finally plain ``"mpicc"``.
    """
    user_options = build_ext.user_options + \
            [
                ('mpicc', None, 'MPICC')
            ]
    def initialize_options(self):
        try:
            compiler = str(mpi4py.get_config()['mpicc'])
        except Exception:
            # mpi4py missing or without an 'mpicc' entry: fall back to the
            # conventional wrapper name.  (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            compiler = "mpicc"
        self.mpicc = os.environ.get('MPICC', compiler)
        build_ext.initialize_options(self)
    def finalize_options(self):
        build_ext.finalize_options(self)
    def build_extensions(self):
        # set_executables only works for linker_so, not compiler_so, so
        # patch argv[0] of both command lines directly.
        self.compiler.compiler_so[0] = self.mpicc
        self.compiler.linker_so[0] = self.mpicc
        build_ext.build_extensions(self)
extensions = [
Extension("mpsort.binding", [
"mpsort/binding.pyx",
"radixsort.c",
"mp-mpiu.c",
"mpsort-mpi.c"],
include_dirs = ["./", numpy.get_include()],
depends=[
"mpsort.h",
"mpsort-mpi.h",
"mp-mpiu.h",
]
)
]
def find_version(path):
import re
# path shall be a plain ascii text file.
s = open(path, 'rt').read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
s, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Version not found")
setup(
name="mpsort",
version=find_version("mpsort/version.py"),
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/rainwoodman/mpsort",
description="python binding of MP-sort, a peta scale sorting routine",
zip_safe = False,
package_dir = {'mpsort': 'mpsort'},
install_requires=['cython', 'numpy', 'mpi4py'],
packages= ['mpsort', 'mpsort.tests'],
license='BSD-2-Clause',
cmdclass = {
"build_ext": build_ext_subclass
},
ext_modules = cythonize(extensions)
)
|
[
"Cython.Build.cythonize",
"distutils.command.build_ext.build_ext.finalize_options",
"mpi4py.get_config",
"os.environ.get",
"distutils.command.build_ext.build_ext.initialize_options",
"distutils.command.build_ext.build_ext.build_extensions",
"numpy.get_include",
"re.search"
] |
[((1444, 1506), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 's', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', s, re.M)\n', (1453, 1506), False, 'import re\n'), ((516, 549), 'os.environ.get', 'os.environ.get', (['"""MPICC"""', 'compiler'], {}), "('MPICC', compiler)\n", (530, 549), False, 'import os\n'), ((559, 593), 'distutils.command.build_ext.build_ext.initialize_options', 'build_ext.initialize_options', (['self'], {}), '(self)\n', (587, 593), False, 'from distutils.command.build_ext import build_ext\n'), ((635, 667), 'distutils.command.build_ext.build_ext.finalize_options', 'build_ext.finalize_options', (['self'], {}), '(self)\n', (661, 667), False, 'from distutils.command.build_ext import build_ext\n'), ((889, 921), 'distutils.command.build_ext.build_ext.build_extensions', 'build_ext.build_extensions', (['self'], {}), '(self)\n', (915, 921), False, 'from distutils.command.build_ext import build_ext\n'), ((2148, 2169), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {}), '(extensions)\n', (2157, 2169), False, 'from Cython.Build import cythonize\n'), ((1141, 1160), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1158, 1160), False, 'import numpy\n'), ((417, 436), 'mpi4py.get_config', 'mpi4py.get_config', ([], {}), '()\n', (434, 436), False, 'import mpi4py\n')]
|
"""Tests for graphmode_tensornetwork."""
import numpy as np
import tensorflow as tf
from tensornetwork import (contract, connect, flatten_edges_between,
contract_between, Node)
import pytest
class GraphmodeTensorNetworkTest(tf.test.TestCase):
def test_basic_graphmode(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.ones(10), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
sess = tf.compat.v1.Session()
final_val = sess.run(final_tensor)
self.assertAllClose(final_val, 10.0)
def test_gradient_decent(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.Variable(tf.ones(10)), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
opt = tf.compat.v1.train.GradientDescentOptimizer(0.001)
train_op = opt.minimize(final_tensor)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
self.assertAllClose(sess.run(final_tensor), 10.0)
sess.run(train_op)
self.assertLess(sess.run(final_tensor), 10.0)
def test_dynamic_network_sizes(self):
@tf.function
def f(x, n):
x_slice = x[:n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
e = connect(n1[0], n2[0])
return contract(e).get_tensor()
x = np.ones(10)
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 2.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 3.0)
@pytest.mark.skip(reason="Test fails due to probable bug in tensorflow 2.0.0")
def test_dynamic_network_sizes_contract_between(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract_between(n1, n2).get_tensor()
x = tf.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_standard(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract(flatten_edges_between(n1, n2)).get_tensor()
x = np.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_trace(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
connect(n1[0], n1[2])
connect(n1[1], n1[3])
return contract(flatten_edges_between(n1, n1)).get_tensor()
x = np.ones((3, 4, 3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), np.ones((2,)) * 12)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), np.ones((3,)) * 12)
def test_batch_usage(self,):
def build_tensornetwork(tensors):
a = Node(tensors[0], backend="tensorflow")
b = Node(tensors[1], backend="tensorflow")
e = connect(a[0], b[0])
return contract(e).get_tensor()
tensors = [np.ones((5, 10)), np.ones((5, 10))]
result = tf.map_fn(build_tensornetwork, tensors, dtype=tf.float64)
np.testing.assert_allclose(result, np.ones(5) * 10)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow.ones",
"tensornetwork.Node",
"tensorflow.convert_to_tensor",
"tensornetwork.contract",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensornetwork.contract_between",
"numpy.ones",
"tensorflow.compat.v1.Session",
"tensornetwork.connect",
"tensorflow.map_fn",
"tensornetwork.flatten_edges_between",
"pytest.mark.skip",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.global_variables_initializer"
] |
[((1743, 1820), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Test fails due to probable bug in tensorflow 2.0.0"""'}), "(reason='Test fails due to probable bug in tensorflow 2.0.0')\n", (1759, 1820), False, 'import pytest\n'), ((3750, 3764), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3762, 3764), True, 'import tensorflow as tf\n'), ((1607, 1618), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1614, 1618), True, 'import numpy as np\n'), ((2178, 2196), 'tensorflow.ones', 'tf.ones', (['(3, 4, 5)'], {}), '((3, 4, 5))\n', (2185, 2196), True, 'import tensorflow as tf\n'), ((2692, 2710), 'numpy.ones', 'np.ones', (['(3, 4, 5)'], {}), '((3, 4, 5))\n', (2699, 2710), True, 'import numpy as np\n'), ((3128, 3152), 'numpy.ones', 'np.ones', (['(3, 4, 3, 4, 5)'], {}), '((3, 4, 3, 4, 5))\n', (3135, 3152), True, 'import numpy as np\n'), ((3605, 3662), 'tensorflow.map_fn', 'tf.map_fn', (['build_tensornetwork', 'tensors'], {'dtype': 'tf.float64'}), '(build_tensornetwork, tensors, dtype=tf.float64)\n', (3614, 3662), True, 'import tensorflow as tf\n'), ((503, 522), 'tensornetwork.connect', 'connect', (['a[0]', 'b[0]'], {}), '(a[0], b[0])\n', (510, 522), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((583, 605), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (603, 605), True, 'import tensorflow as tf\n'), ((934, 953), 'tensornetwork.connect', 'connect', (['a[0]', 'b[0]'], {}), '(a[0], b[0])\n', (941, 953), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((1012, 1062), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(0.001)'], {}), '(0.001)\n', (1055, 1062), True, 'import tensorflow as tf\n'), ((1120, 1142), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1140, 1142), True, 'import tensorflow as tf\n'), ((1445, 1480), 
'tensornetwork.Node', 'Node', (['x_slice'], {'backend': '"""tensorflow"""'}), "(x_slice, backend='tensorflow')\n", (1449, 1480), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((1492, 1527), 'tensornetwork.Node', 'Node', (['x_slice'], {'backend': '"""tensorflow"""'}), "(x_slice, backend='tensorflow')\n", (1496, 1527), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((1538, 1559), 'tensornetwork.connect', 'connect', (['n1[0]', 'n2[0]'], {}), '(n1[0], n2[0])\n', (1545, 1559), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((1951, 1986), 'tensornetwork.Node', 'Node', (['x_slice'], {'backend': '"""tensorflow"""'}), "(x_slice, backend='tensorflow')\n", (1955, 1986), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((1998, 2033), 'tensornetwork.Node', 'Node', (['x_slice'], {'backend': '"""tensorflow"""'}), "(x_slice, backend='tensorflow')\n", (2002, 2033), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2040, 2061), 'tensornetwork.connect', 'connect', (['n1[0]', 'n2[0]'], {}), '(n1[0], n2[0])\n', (2047, 2061), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2068, 2089), 'tensornetwork.connect', 'connect', (['n1[1]', 'n2[1]'], {}), '(n1[1], n2[1])\n', (2075, 2089), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2096, 2117), 'tensornetwork.connect', 'connect', (['n1[2]', 'n2[2]'], {}), '(n1[2], n2[2])\n', (2103, 2117), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2450, 2485), 'tensornetwork.Node', 'Node', (['x_slice'], {'backend': '"""tensorflow"""'}), "(x_slice, backend='tensorflow')\n", (2454, 2485), False, 'from 
tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2497, 2532), 'tensornetwork.Node', 'Node', (['x_slice'], {'backend': '"""tensorflow"""'}), "(x_slice, backend='tensorflow')\n", (2501, 2532), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2539, 2560), 'tensornetwork.connect', 'connect', (['n1[0]', 'n2[0]'], {}), '(n1[0], n2[0])\n', (2546, 2560), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2567, 2588), 'tensornetwork.connect', 'connect', (['n1[1]', 'n2[1]'], {}), '(n1[1], n2[1])\n', (2574, 2588), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2595, 2616), 'tensornetwork.connect', 'connect', (['n1[2]', 'n2[2]'], {}), '(n1[2], n2[2])\n', (2602, 2616), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2961, 2996), 'tensornetwork.Node', 'Node', (['x_slice'], {'backend': '"""tensorflow"""'}), "(x_slice, backend='tensorflow')\n", (2965, 2996), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3003, 3024), 'tensornetwork.connect', 'connect', (['n1[0]', 'n1[2]'], {}), '(n1[0], n1[2])\n', (3010, 3024), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3031, 3052), 'tensornetwork.connect', 'connect', (['n1[1]', 'n1[3]'], {}), '(n1[1], n1[3])\n', (3038, 3052), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3384, 3422), 'tensornetwork.Node', 'Node', (['tensors[0]'], {'backend': '"""tensorflow"""'}), "(tensors[0], backend='tensorflow')\n", (3388, 3422), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3433, 3471), 'tensornetwork.Node', 'Node', (['tensors[1]'], {'backend': 
'"""tensorflow"""'}), "(tensors[1], backend='tensorflow')\n", (3437, 3471), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3482, 3501), 'tensornetwork.connect', 'connect', (['a[0]', 'b[0]'], {}), '(a[0], b[0])\n', (3489, 3501), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3556, 3572), 'numpy.ones', 'np.ones', (['(5, 10)'], {}), '((5, 10))\n', (3563, 3572), True, 'import numpy as np\n'), ((3574, 3590), 'numpy.ones', 'np.ones', (['(5, 10)'], {}), '((5, 10))\n', (3581, 3590), True, 'import numpy as np\n'), ((408, 419), 'tensorflow.ones', 'tf.ones', (['(10)'], {}), '(10)\n', (415, 419), True, 'import tensorflow as tf\n'), ((458, 469), 'tensorflow.ones', 'tf.ones', (['(10)'], {}), '(10)\n', (465, 469), True, 'import tensorflow as tf\n'), ((889, 900), 'tensorflow.ones', 'tf.ones', (['(10)'], {}), '(10)\n', (896, 900), True, 'import tensorflow as tf\n'), ((1158, 1201), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (1199, 1201), True, 'import tensorflow as tf\n'), ((1648, 1671), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(2)'], {}), '(2)\n', (1668, 1671), True, 'import tensorflow as tf\n'), ((1708, 1731), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(3)'], {}), '(3)\n', (1728, 1731), True, 'import tensorflow as tf\n'), ((2226, 2249), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(2)'], {}), '(2)\n', (2246, 2249), True, 'import tensorflow as tf\n'), ((2287, 2310), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(3)'], {}), '(3)\n', (2307, 2310), True, 'import tensorflow as tf\n'), ((2740, 2763), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(2)'], {}), '(2)\n', (2760, 2763), True, 'import tensorflow as tf\n'), ((2801, 2824), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(3)'], {}), '(3)\n', (2821, 2824), 
True, 'import tensorflow as tf\n'), ((3182, 3205), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(2)'], {}), '(2)\n', (3202, 3205), True, 'import tensorflow as tf\n'), ((3208, 3221), 'numpy.ones', 'np.ones', (['(2,)'], {}), '((2,))\n', (3215, 3221), True, 'import numpy as np\n'), ((3257, 3280), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(3)'], {}), '(3)\n', (3277, 3280), True, 'import tensorflow as tf\n'), ((3283, 3296), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (3290, 3296), True, 'import numpy as np\n'), ((3702, 3712), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3709, 3712), True, 'import numpy as np\n'), ((358, 378), 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (376, 378), True, 'import tensorflow as tf\n'), ((544, 555), 'tensornetwork.contract', 'contract', (['e'], {}), '(e)\n', (552, 555), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((776, 796), 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (794, 796), True, 'import tensorflow as tf\n'), ((838, 849), 'tensorflow.ones', 'tf.ones', (['(10)'], {}), '(10)\n', (845, 849), True, 'import tensorflow as tf\n'), ((975, 986), 'tensornetwork.contract', 'contract', (['e'], {}), '(e)\n', (983, 986), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((1573, 1584), 'tensornetwork.contract', 'contract', (['e'], {}), '(e)\n', (1581, 1584), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((2131, 2155), 'tensornetwork.contract_between', 'contract_between', (['n1', 'n2'], {}), '(n1, n2)\n', (2147, 2155), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3515, 3526), 'tensornetwork.contract', 'contract', (['e'], {}), '(e)\n', (3523, 3526), False, 'from tensornetwork import contract, connect, flatten_edges_between, 
contract_between, Node\n'), ((2639, 2668), 'tensornetwork.flatten_edges_between', 'flatten_edges_between', (['n1', 'n2'], {}), '(n1, n2)\n', (2660, 2668), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n'), ((3075, 3104), 'tensornetwork.flatten_edges_between', 'flatten_edges_between', (['n1', 'n1'], {}), '(n1, n1)\n', (3096, 3104), False, 'from tensornetwork import contract, connect, flatten_edges_between, contract_between, Node\n')]
|
import os
import sys
import glob
import numpy as np
import tensorflow as tf
import scipy
import scipy.io
import keras
from keras.models import Model, Sequential
from keras.layers import *
from keras.optimizers import Adam
from keras import regularizers
from keras import backend as K
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from Benchmark import Benchmark
from Config import MODEL_PARAMS_DIR
class ResNet50(Benchmark):
def buildModel(self):
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
bn_axis = 1
x = Conv2D(filters1, (1, 1))(input_tensor)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same')(x)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1))(x)
x = BatchNormalization(axis=bn_axis)(x)
x = add([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2)):
filters1, filters2, filters3 = filters
bn_axis = 1
x = Conv2D(filters1, (1, 1), strides=strides)(input_tensor)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same')(x)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1))(x)
x = BatchNormalization(axis=bn_axis)(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides)(input_tensor)
shortcut = BatchNormalization(
axis=bn_axis)(shortcut)
x = add([x, shortcut])
x = Activation('relu')(x)
return x
img_input = Input(shape=(3, 224, 224))
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2))(x)
# x = BatchNormalization(axis=bn_axis)(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = BatchNormalization(axis=bn_axis)(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7))(x)
x = Flatten()(x)
x = Dense(1000)(x)
x = Activation('softmax')(x)
model = Model(img_input, x)
return model
def data_preprocess(self):
X_train, y_train = None, None
X_test = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_input.bin', dtype=np.float32)
X_test = X_test.reshape((-1, 3, 224, 224))
y_test = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_labels.bin', dtype=np.uint32)
X_tuner = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_input.bin', dtype=np.float32)
X_tuner = X_tuner.reshape((-1, 3, 224, 224))
y_tuner = np.fromfile(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_labels.bin', dtype=np.uint32)
return X_train, y_train, X_test, y_test, X_tuner, y_tuner
def trainModel(self, model):
assert False, "ImageNet training not supported - use Pretrained weights"
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Changing to NCHW format
K.set_image_data_format('channels_first')
### Parameters specific to each benchmark
reload_dir = MODEL_PARAMS_DIR + '/resnet50_imagenet/'
keras_model_file = MODEL_PARAMS_DIR + '/keras/resnet50_imagenet.h5'
data_dir = 'data/resnet50_imagenet/'
src_dir = 'src/resnet50_imagenet_src/'
num_classes = 1000
batch_size = 50
ResNet50 = ResNet50('ResNet50_imagenet', reload_dir, keras_model_file, data_dir, src_dir, num_classes, batch_size)
ResNet50.exportToHPVM(sys.argv)
|
[
"numpy.fromfile",
"keras.models.Model",
"keras.backend.set_image_data_format"
] |
[((4799, 4840), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_first"""'], {}), "('channels_first')\n", (4822, 4840), True, 'from keras import backend as K\n'), ((3827, 3846), 'keras.models.Model', 'Model', (['img_input', 'x'], {}), '(img_input, x)\n', (3832, 3846), False, 'from keras.models import Model, Sequential\n'), ((3978, 4068), 'numpy.fromfile', 'np.fromfile', (["(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_input.bin')"], {'dtype': 'np.float32'}), "(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_input.bin', dtype=\n np.float32)\n", (3989, 4068), True, 'import numpy as np\n'), ((4133, 4223), 'numpy.fromfile', 'np.fromfile', (["(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_labels.bin')"], {'dtype': 'np.uint32'}), "(MODEL_PARAMS_DIR + '/resnet50_imagenet/test_labels.bin', dtype=\n np.uint32)\n", (4144, 4223), True, 'import numpy as np\n'), ((4246, 4336), 'numpy.fromfile', 'np.fromfile', (["(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_input.bin')"], {'dtype': 'np.float32'}), "(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_input.bin', dtype=\n np.float32)\n", (4257, 4336), True, 'import numpy as np\n'), ((4404, 4494), 'numpy.fromfile', 'np.fromfile', (["(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_labels.bin')"], {'dtype': 'np.uint32'}), "(MODEL_PARAMS_DIR + '/resnet50_imagenet/tune_labels.bin', dtype=\n np.uint32)\n", (4415, 4494), True, 'import numpy as np\n')]
|
# encoding utf-8
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels.tools import eval_measures
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
class Preprocessing:
def __init__(self, data_raw):
self.data_clean = data_raw
def run(self):
# EDAで安価な物件に外れ値が見受けられた
# 下位1%をとりあえず除外とする(適当ではないが、正確でもない)
THRESHOLD = 0.01
self.exclude_outlier(THRESHOLD)
# 上記以外の明らかな外れ値
self.exclude_idx([524, 1299])
# 正規分布に近づけ、線形回帰の精度を高める
self.convert_log(["SalePrice"])
# 多重共線性をなくす
self.create_adding_column("AllSF", ["GrLivArea", "TotalBsmtSF"])
self.create_adding_column("AllFlrsSF", ["1stFlrSF", "2ndFlrSF"])
def exclude_outlier(self, THRESHOLD):
low_row = round(self.data_clean.shape[0] * THRESHOLD)
low_ids = self.data_clean.iloc[:low_row]
low_ids = list(low_ids['Id'].unique())
self.data_clean = self.data_clean.query("Id not in @low_ids")
def exclude_idx(self, ids):
self.data_clean = self.data_clean.query("Id not in @ids")
def convert_log(self, columns):
for c in columns:
self.data_clean[c] = self.data_clean[c].apply(lambda x: np.log(x))
def create_adding_column(self, create, adding):
c1, c2 = adding
self.data_clean[create] = self.data_clean[c1] + self.data_clean[c2]
class Glm:
def __init__(self, preprocessing, X_columns, y_column):
self.X = preprocessing.data_clean[X_columns]
self.y = preprocessing.data_clean[y_column]
def fit(self):
TRAIN_SIZE = 0.8 # >=0.7 なら自由
RANDOM_STATE = 0 # チューニングはしていない
x_train, x_test, y_train, y_test = \
self.train_test_split(TRAIN_SIZE, RANDOM_STATE)
x_train, x_test = self.normalization(x_train, x_test)
self.model = sm.OLS(y_train, sm.add_constant(x_train))
self.model = self.model.fit()
def train_test_split(self, TRAIN_SIZE, RANDOM_STATE):
x_train, x_test, y_train, y_test = train_test_split(self.X, self.y,
train_size=TRAIN_SIZE,
random_state=RANDOM_STATE)
return x_train, x_test, y_train, y_test
def normalization(self, x_train, x_test):
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
return x_train, x_test
def write_summary(self, write_path):
with open(write_path, "w") as f:
f.write(str(self.model.summary()))
def main():
data_raw = pd.read_csv("./../../data/house_prices/train.csv")
preprocessing = Preprocessing(data_raw)
preprocessing.run()
X_columns = ["OverallQual", "GarageArea", "YearBuilt", "AllSF",
"AllFlrsSF", "YearRemodAdd", "OverallCond"]
y_column = ["SalePrice"]
model = Glm(preprocessing, X_columns, y_column)
model.fit()
model.write_summary("./GLM_summary.txt")
if __name__ == "__main__":
main()
|
[
"sklearn.preprocessing.StandardScaler",
"numpy.log",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"statsmodels.api.add_constant"
] |
[((2743, 2793), 'pandas.read_csv', 'pd.read_csv', (['"""./../../data/house_prices/train.csv"""'], {}), "('./../../data/house_prices/train.csv')\n", (2754, 2793), True, 'import pandas as pd\n'), ((2112, 2199), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'train_size': 'TRAIN_SIZE', 'random_state': 'RANDOM_STATE'}), '(self.X, self.y, train_size=TRAIN_SIZE, random_state=\n RANDOM_STATE)\n', (2128, 2199), False, 'from sklearn.model_selection import train_test_split\n'), ((2413, 2429), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2427, 2429), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1946, 1970), 'statsmodels.api.add_constant', 'sm.add_constant', (['x_train'], {}), '(x_train)\n', (1961, 1970), True, 'import statsmodels.api as sm\n'), ((1284, 1293), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1290, 1293), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# stdlib imports
import urllib.request as request
import tempfile
import os.path
import sys
from datetime import datetime
# third party imports
import numpy as np
# local imports
from losspager.utils.expocat import ExpoCat
def commify(value):
if np.isnan(value):
return 'NaN'
return format(int(value), ",d")
def get_max_mmi(tdict, minimum=1000):
indices = ['MMI1', 'MMI2', 'MMI3', 'MMI4',
'MMI5', 'MMI6', 'MMI7', 'MMI8', 'MMI9+']
exparray = np.array([tdict[idx] for idx in indices])
imax = (exparray > 1000).nonzero()[0].max()
return (imax + 1, exparray[imax])
def test():
homedir = os.path.dirname(os.path.abspath(
__file__)) # where is this script?
expocat = ExpoCat.fromDefault()
clat = 0.37
clon = -79.94
radius = 400
ndeaths = 9
minicat = expocat.selectByRadius(clat, clon, radius)
print('Testing that historical events returned are correct...')
maxmmi = 8
nmaxmmi = 103000
events = minicat.getHistoricalEvents(maxmmi, nmaxmmi, ndeaths, clat, clon)
assert events[0]['EventID'] == '199603282303'
assert events[1]['EventID'] == '197912120759'
assert events[2]['EventID'] == '198703060410'
print('Passed.')
print('Testing that events selected by hazard are correct...')
fire = expocat.selectByHazard('fire')
tsunami = expocat.selectByHazard('tsunami')
liquefaction = expocat.selectByHazard('liquefaction')
landslide = expocat.selectByHazard('landslide')
assert fire._dataframe['Fire'].sum() == len(fire)
assert tsunami._dataframe['Tsunami'].sum() == len(tsunami)
assert liquefaction._dataframe['Liquefaction'].sum() == len(liquefaction)
assert landslide._dataframe['Landslide'].sum() == len(landslide)
# test exclusion method
test_time = datetime(1994, 1, 1)
expocat.excludeFutureEvents(test_time)
assert expocat._dataframe['Time'].max() < test_time
print('Passed.')
if __name__ == '__main__':
test()
|
[
"losspager.utils.expocat.ExpoCat.fromDefault",
"numpy.array",
"numpy.isnan",
"datetime.datetime"
] |
[((277, 292), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (285, 292), True, 'import numpy as np\n'), ((509, 550), 'numpy.array', 'np.array', (['[tdict[idx] for idx in indices]'], {}), '([tdict[idx] for idx in indices])\n', (517, 550), True, 'import numpy as np\n'), ((756, 777), 'losspager.utils.expocat.ExpoCat.fromDefault', 'ExpoCat.fromDefault', ([], {}), '()\n', (775, 777), False, 'from losspager.utils.expocat import ExpoCat\n'), ((1836, 1856), 'datetime.datetime', 'datetime', (['(1994)', '(1)', '(1)'], {}), '(1994, 1, 1)\n', (1844, 1856), False, 'from datetime import datetime\n')]
|
import numpy as np
import pandas as pd
import fasttext
from sklearn.preprocessing import MultiLabelBinarizer
from skmultilearn.model_selection import IterativeStratification, \
iterative_train_test_split
from functools import reduce
CIP_TAGS = list(map(lambda x: x.strip(),
"gratis, mat, musik, kurs, casino, dans, musuem, inlines, "
"båt, barn, film, språk, hockey, bowling, fika, sport, "
"biljard, bingo, bio, opera, kultur, grilla, kubb, "
"festival, cykel, brännboll, picknick, konsert, pub, "
"frisbeegolf, mc, gokart, svamp, bangolf, teater, "
"afterwork, promenad, humor, utmaning, fest, shopping, "
"resa, sällskapsspel, träna, pubquiz, poker, bok, foto, "
"hund, skridskor, karaoke, dart, bada, diskussion, "
"badminton, pyssel, golf, klättring, loppis, boule, mässa, "
"flytthjälp, yoga, innebandy, pingis, handboll, jogga, "
"tennis, högtid, astronomi, fiske, beachvolleyboll, "
"friluftsliv, volleyboll, geocaching, vindsurfing, "
"shuffleboard, SUP, standup, paddel".split(',')))
def load_raw_normalized_dataset(path, drop_missing):
"""Load raw CiP dataset.
Args:
path: Path to raw CSV file
drop_missing: If true, drop events with missing titles or descriptions
Returns:
events_df, tags_df: Event and tag dataframes as tuple
"""
# FIXME: Import 'id' as integer
cip_df = pd.read_csv(path,
header=None,
names=['id', 'weekday', 'time', 'title', 'description',
'tag_status', 'tag'],
na_values=['-01:00:00'])
# Drop any events with missing titles or descriptions
cip_df.dropna(subset=['title', 'description'], inplace=True)
# Convert time strings to actual times
cip_df['time'] = pd.to_datetime(cip_df['time']).dt.time
events_df = cip_df.groupby('id').first().drop(
columns=['tag_status', 'tag']).reset_index()
tags_df = pd.DataFrame({
'id': cip_df['id'],
'tag': cip_df['tag'],
'verified': cip_df['tag_status'] == 1,
'removed': cip_df['tag_status'] == 2
})
# Ignore verified and remove 'removed' tags
tags_df = tags_df[~tags_df['removed']]
tags_df.drop(columns=['verified', 'removed'], inplace=True)
return events_df, tags_df
def calculate_top_tags(tags_df, n_tags, use_cip_tags=True):
"""Calculate top tags from tags dataset
Args:
tags_df: Dataset to extract top tags from
n_tags: Number of topmost tags to get if generating
use_cip_tags: Use pre-defined tags from CiP (ignores `n_tags`)
Returns:
List of topmost tags
"""
tag_counts = tags_df['tag'].value_counts()
if use_cip_tags:
# Not all CiP tags are necessarily present in the dataset
# and not necessarily in sufficient amounts
present_tags = set(tag_counts[tag_counts > 5].index)
return list(filter(lambda t: t in present_tags, CIP_TAGS))
else:
return tag_counts.index[:n_tags]
def tags_to_matrix(events_df, tags_df, top_tags):
    """Convert tags to a binary feature matrix.

    Args:
        events_df: Events dataset
        tags_df: Tags dataset
        top_tags: Tags to include

    Returns:
        Feature matrix for tags (one row per event, one column per tag)
    """
    # Collect each event's tags into a single list-valued row
    grouped = tags_df.groupby('id')['tag'].agg(list).reset_index()
    # Events that have none of the top tags get an explicit empty list
    # TODO: Kludge, write nicer
    untagged_ids = events_df.loc[~events_df['id'].isin(grouped['id']), 'id'].unique()
    untagged = pd.DataFrame({'id': untagged_ids})
    untagged['tag'] = [[] for _ in range(len(untagged))]
    grouped = pd.concat([grouped, untagged])
    # Align tag lists with the event ordering before binarizing
    aligned = events_df.merge(grouped, on='id')
    binarizer = MultiLabelBinarizer(classes=top_tags)
    return binarizer.fit_transform(aligned['tag'])
def matrix_to_tags(tags, top_tags):
    """Convert a binary tag matrix back to comma-joined tag strings.

    Args:
        tags: 2D array-like (n_events x n_tags); nonzero entries mark
            assigned tags
        top_tags: Tag names corresponding to the matrix columns

    Returns:
        np.array of comma-joined tag strings, one per row. A row with no
        positive entries yields "" (the previous reduce-based version
        raised TypeError on such rows).
    """
    top_array = np.array(top_tags)
    return np.array([",".join(top_array[row > 0]) for row in tags])
def load_datasets(path, drop_missing=True, n_tags=72,
                  test_size=0.2, random_state=42):
    """Load and split dataset from raw CiP data.

    Args:
        path: Path to raw CiP dataset
        drop_missing: Drop events with no description or title
        n_tags: Number of top tags to use (ignored)
        test_size: Fraction of events to include in test set
        random_state: Random state for the split

    Returns:
        (events_train, tags_train, events_test, tags_test, top_tags,
         tags_train_stats)
    """
    events_df, tags_df = load_raw_normalized_dataset(path,
                                                     drop_missing=drop_missing)
    top_tags = calculate_top_tags(tags_df, n_tags=n_tags)
    # Discard tags outside the top list before building the matrix
    tags_df = tags_df[tags_df['tag'].isin(top_tags)]
    tag_matrix = tags_to_matrix(events_df, tags_df, top_tags)
    # Multi-label-aware split into public training and private test sets
    splitter = IterativeStratification(
        n_splits=2, order=2,
        sample_distribution_per_fold=[test_size, 1.0 - test_size],
        random_state=random_state)
    train_idx, test_idx = next(splitter.split(events_df, tag_matrix))
    events_train = events_df.iloc[train_idx]
    tags_train = tag_matrix[train_idx, :]
    events_test = events_df.iloc[test_idx]
    tags_test = tag_matrix[test_idx, :]
    # Per-tag frequency table for the training split, most common first
    tags_train_stats = pd.DataFrame(
        {'tag': top_tags, 'count': tags_train.sum(axis=0)}
    ).sort_values('count', ascending=False)
    return (events_train, tags_train, events_test, tags_test, top_tags,
            tags_train_stats)
def extract_corpus(events_df):
    """Extract text corpus from event descriptions.

    Args:
        events_df: Event dataset

    Returns:
        List of event descriptions as raw, cleaned text
    """
    from sklearn.pipeline import Pipeline
    from tagger._preprocessing.characterset import CharacterSet
    from tagger._preprocessing.html import HTMLToText
    from tagger._preprocessing.lowercase import Lowercase
    # Strip HTML, normalize the character set, then lowercase everything
    pipeline = Pipeline([
        ('html', HTMLToText()),
        ('cset', CharacterSet(punctuation=False)),
        ('lcase', Lowercase()),
    ])
    cleaned = pipeline.fit_transform(events_df['description'])
    return list(cleaned)
def fasttext_wordvectors(corpus_path, model_path):
    """Train unsupervised FastText word vectors on a text corpus.

    Args:
        corpus_path: Path to corpus (one document per line)
        model_path: Path for storing the trained FastText model

    Returns:
        The trained FastText model
    """
    wv_model = fasttext.train_unsupervised(corpus_path)
    # Persist the model so later runs can load instead of retraining
    wv_model.save_model(model_path)
    return wv_model
def save_corpus(events_df, path):
    """Extract the description corpus and write it to disk.

    Args:
        events_df: Events dataset
        path: Destination file; one cleaned document per line
    """
    docs = extract_corpus(events_df)
    with open(path, 'w') as outfile:
        outfile.writelines(doc + '\n' for doc in docs)
if __name__ == '__main__':
    # Generate static datasets and wordvectors for local dev
    import os
    print("Current working directory:", os.getcwd())
    # Compute word vectors
    # Load events once here just to build the corpus for FastText training.
    events_df, tags_df = load_raw_normalized_dataset(
        "../../../data/raw/citypolarna_public_events_out.csv",
        drop_missing=True)
    CORPUS_PATH = "../../../data/corpus.txt"
    MODEL_PATH = "../../../data/wordvectors.bin"
    save_corpus(events_df, CORPUS_PATH)
    model = fasttext_wordvectors(CORPUS_PATH, MODEL_PATH)
    # Split datasets
    # NOTE: load_datasets re-reads the same raw CSV a second time.
    events_train, tags_train, events_test, tags_test, top_tags, tags_train_stats = load_datasets(
        "../../../data/raw/citypolarna_public_events_out.csv"
    )
    print(f"Number of train events: {len(events_train)}")
    print(f"Number of test events: {len(events_test)}")
    # TODO: Proper path handling
    DATA_PATH = "../../../data/"
    events_train.to_csv(DATA_PATH + "events_train.csv", index=False)
    events_test.to_csv(DATA_PATH + "events_test.csv", index=False)
    # A kludge, but convenient — pandas can load from URL:s
    # The binary tag matrices are persisted as plain CSV as well.
    pd.DataFrame(tags_train).to_csv(DATA_PATH + "tags_train.csv", index=False)
    pd.DataFrame(tags_test).to_csv(DATA_PATH + "tags_test.csv", index=False)
    pd.DataFrame({'tag': top_tags}).to_csv(DATA_PATH + "top_tags.csv",
                                           index=False)
    tags_train_stats.to_csv(DATA_PATH + "tags_train_stats.csv", index=False)
|
[
"pandas.DataFrame",
"pandas.read_csv",
"fasttext.train_unsupervised",
"os.getcwd",
"sklearn.preprocessing.MultiLabelBinarizer",
"tagger._preprocessing.characterset.CharacterSet",
"tagger._preprocessing.html.HTMLToText",
"skmultilearn.model_selection.IterativeStratification",
"numpy.array",
"pandas.to_datetime",
"functools.reduce",
"tagger._preprocessing.lowercase.Lowercase",
"pandas.concat"
] |
[((1601, 1738), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'names': "['id', 'weekday', 'time', 'title', 'description', 'tag_status', 'tag']", 'na_values': "['-01:00:00']"}), "(path, header=None, names=['id', 'weekday', 'time', 'title',\n 'description', 'tag_status', 'tag'], na_values=['-01:00:00'])\n", (1612, 1738), True, 'import pandas as pd\n'), ((2189, 2327), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': cip_df['id'], 'tag': cip_df['tag'], 'verified': cip_df['tag_status'] ==\n 1, 'removed': cip_df['tag_status'] == 2}"], {}), "({'id': cip_df['id'], 'tag': cip_df['tag'], 'verified': cip_df[\n 'tag_status'] == 1, 'removed': cip_df['tag_status'] == 2})\n", (2201, 2327), True, 'import pandas as pd\n'), ((3884, 3915), 'pandas.concat', 'pd.concat', (['[tags, missing_tags]'], {}), '([tags, missing_tags])\n', (3893, 3915), True, 'import pandas as pd\n'), ((4044, 4081), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'classes': 'top_tags'}), '(classes=top_tags)\n', (4063, 4081), False, 'from sklearn.preprocessing import MultiLabelBinarizer\n'), ((4186, 4204), 'numpy.array', 'np.array', (['top_tags'], {}), '(top_tags)\n', (4194, 4204), True, 'import numpy as np\n'), ((4339, 4360), 'numpy.array', 'np.array', (['joined_tags'], {}), '(joined_tags)\n', (4347, 4360), True, 'import numpy as np\n'), ((5332, 5467), 'skmultilearn.model_selection.IterativeStratification', 'IterativeStratification', ([], {'n_splits': '(2)', 'order': '(2)', 'sample_distribution_per_fold': '[test_size, 1.0 - test_size]', 'random_state': 'random_state'}), '(n_splits=2, order=2, sample_distribution_per_fold=[\n test_size, 1.0 - test_size], random_state=random_state)\n', (5355, 5467), False, 'from skmultilearn.model_selection import IterativeStratification, iterative_train_test_split\n'), ((6963, 7003), 'fasttext.train_unsupervised', 'fasttext.train_unsupervised', (['corpus_path'], {}), '(corpus_path)\n', (6990, 7003), False, 'import fasttext\n'), ((7499, 
7510), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7508, 7510), False, 'import os\n'), ((2031, 2061), 'pandas.to_datetime', 'pd.to_datetime', (["cip_df['time']"], {}), "(cip_df['time'])\n", (2045, 2061), True, 'import pandas as pd\n'), ((4274, 4326), 'functools.reduce', 'reduce', (["(lambda a, b: a + ',' + b)", 'top_array[row > 0]'], {}), "(lambda a, b: a + ',' + b, top_array[row > 0])\n", (4280, 4326), False, 'from functools import reduce\n'), ((8447, 8471), 'pandas.DataFrame', 'pd.DataFrame', (['tags_train'], {}), '(tags_train)\n', (8459, 8471), True, 'import pandas as pd\n'), ((8526, 8549), 'pandas.DataFrame', 'pd.DataFrame', (['tags_test'], {}), '(tags_test)\n', (8538, 8549), True, 'import pandas as pd\n'), ((8604, 8635), 'pandas.DataFrame', 'pd.DataFrame', (["{'tag': top_tags}"], {}), "({'tag': top_tags})\n", (8616, 8635), True, 'import pandas as pd\n'), ((6531, 6543), 'tagger._preprocessing.html.HTMLToText', 'HTMLToText', ([], {}), '()\n', (6541, 6543), False, 'from tagger._preprocessing.html import HTMLToText\n'), ((6563, 6594), 'tagger._preprocessing.characterset.CharacterSet', 'CharacterSet', ([], {'punctuation': '(False)'}), '(punctuation=False)\n', (6575, 6594), False, 'from tagger._preprocessing.characterset import CharacterSet\n'), ((6615, 6626), 'tagger._preprocessing.lowercase.Lowercase', 'Lowercase', ([], {}), '()\n', (6624, 6626), False, 'from tagger._preprocessing.lowercase import Lowercase\n')]
|
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer, WeightedObsDictRelabelingBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import GoalConditionedPathCollector
from rlkit.torch.her.her import HERTrainer
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from robosuite.wrappers import Wrapper, GymWrapper
import robosuite as suite
from robosuite import load_controller_config
import numpy as np
class GoalMountainCar(gym.Wrapper):
    """MountainCar wrapped to expose the goal-conditioned dict observation API.

    Observations become {'observation', 'achieved_goal', 'desired_goal'}
    dicts; the desired goal is (goal_position, goal_velocity).
    """

    def reset(self, **kwargs):
        """Reset the wrapped env and return a goal-style observation dict."""
        state = self.env.reset(**kwargs)
        ag = np.array(self.env.state)
        g = np.array([self.env.goal_position, self.env.goal_velocity])
        state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
        return state

    def compute_reward(self, achieved_goal, desired_goal, info):
        """Sparse reward: 0 at the goal position, -1 otherwise; `shape`
        switches to a dense mechanical-energy-difference reward."""
        shape = False
        # BUG FIX: the original called math.sin, but `math` is never
        # imported in this module; np.sin is equivalent for scalars.
        dense = 100 * ((np.sin(3 * achieved_goal[0]) * 0.0025
                        + 0.5 * achieved_goal[1] * achieved_goal[1])
                       - (np.sin(3 * desired_goal[0]) * 0.0025
                          + 0.5 * desired_goal[1] * desired_goal[1]))
        if achieved_goal[0] != desired_goal[0]:
            return -1 if not shape else dense
        else:
            return 0 if achieved_goal[0] >= desired_goal[0] else (-1 if not shape else dense)

    def step(self, action):
        """Step the env and repackage the transition in goal-dict form."""
        state, _, done, info = super().step(action)
        ag = np.array(self.env.state)
        g = np.array([self.env.goal_position, self.env.goal_velocity])
        reward = self.compute_reward(ag, g, info)
        state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
        info['is_success'] = reward == 0
        return state, reward, done, info
class GoalMountainCarContinuous(gym.Wrapper):
    """MountainCarContinuous wrapped to expose the goal-conditioned dict API.

    The desired goal is (goal_position, 0): success means reaching the goal
    position with any non-negative velocity.
    """

    def __init__(self, env):
        super().__init__(env=env)
        env = env.env
        print(env)
        self.observation_space = gym.spaces.Dict({
            "observation": env.observation_space,
            "achieved_goal": env.observation_space,
            "desired_goal": env.observation_space,
        })
        self.action_space = env.action_space
        # Default goal_Velocity is 0 - any speed will do (>=)
        self.goal = np.array([env.goal_position, 0])

    def _as_goal_obs(self, raw_state):
        # Package a raw observation in the goal-API dict format.
        return {'observation': raw_state,
                'achieved_goal': np.array(raw_state),
                'desired_goal': self.goal}

    def reset(self, **kwargs):
        return self._as_goal_obs(self.env.reset(**kwargs))

    def compute_reward(self, achieved_goal, desired_goal, info):
        # Both the position and velocity components must meet the goal (>=).
        succeeded = (achieved_goal[1] >= desired_goal[1]
                     and achieved_goal[0] >= desired_goal[0])
        return 100 if succeeded else -1

    def step(self, action):
        raw_state, _, done, info = super().step(action)
        obs = self._as_goal_obs(raw_state)
        ag, g = obs['achieved_goal'], obs['desired_goal']
        reward = self.compute_reward(ag, g, None)
        info['is_success'] = int(ag[1] >= g[1] and ag[0] >= g[0])
        return obs, reward, done, info
class DoorWrapper(Wrapper):
    """
    Initializes the Gym wrapper. Mimics many of the required functionalities of the Wrapper class
    found in the gym.core module, and additionally exposes observations as
    {'observation', 'achieved_goal', 'desired_goal'} dicts where the goal is
    the door hinge angle.
    Args:
        env (MujocoEnv): The environment to wrap.
        keys (None or list of str): If provided, each observation will
            consist of concatenated keys from the wrapped environment's
            observation dictionary. Defaults to robot-state and object-state.
    Raises:
        AssertionError: [Object observations must be enabled if no keys]
    """
    def __init__(self, env, keys=None):
        # Run super method
        super().__init__(env=env)
        # Create name for gym (e.g. "Panda_Door")
        robots = "".join([type(robot.robot_model).__name__ for robot in self.env.robots])
        self.name = robots + "_" + type(self.env).__name__
        # Get reward range
        self.reward_range = (0, self.env.reward_scale)
        if keys is None:
            assert self.env.use_object_obs, "Object observations need to be enabled."
            keys = ["object-state"]
            # Iterate over all robots to add to state
            for idx in range(len(self.env.robots)):
                keys += ["robot{}_robot-state".format(idx)]
        self.keys = keys
        # Gym specific attributes
        self.env.spec = None
        self.metadata = None
        # Target hinge angle for the door — presumably "open enough";
        # NOTE(review): confirm 0.3 matches the success criterion used elsewhere.
        self.goal = np.array([.3])
        # set up observation and action spaces
        # (reset once so the flattened observation size can be measured)
        flat_ob = self._flatten_obs(self.env.reset(), verbose=True)
        self.obs_dim = flat_ob.size
        high = np.inf * np.ones(self.obs_dim)
        low = -high
        self.observation_space = gym.spaces.Dict({"observation": gym.spaces.Box(low=low, high=high), "achieved_goal": gym.spaces.Box(low=np.zeros(1), high=np.ones(1), shape=(1,)), "desired_goal": gym.spaces.Box(low=np.zeros(1), high=np.ones(1), shape=(1,))})
        low, high = self.env.action_spec
        self.action_space = gym.spaces.Box(low=low, high=high)
    def _flatten_obs(self, obs_dict, verbose=False):
        """
        Filters keys of interest out and concatenate the information.
        Args:
            obs_dict (OrderedDict): ordered dictionary of observations
            verbose (bool): Whether to print out to console as observation keys are processed
        Returns:
            np.array: observations flattened into a 1d array
        """
        ob_lst = []
        for key in obs_dict:
            if key in self.keys:
                if verbose:
                    print("adding key: {}".format(key))
                ob_lst.append(obs_dict[key])
        return np.concatenate(ob_lst)
    def reset(self):
        """
        Extends env reset method to return flattened observation instead of normal OrderedDict.
        Returns:
            dict: goal-API observation dict built from the flattened state
        """
        ob_dict = self.env.reset()
        state = self._flatten_obs(ob_dict)
        # Achieved goal = current hinge joint angle read from the MuJoCo state
        ag = np.array([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])
        g = self.goal
        return {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
    def step(self, action):
        """
        Extends vanilla step() function call to return flattened observation instead of normal OrderedDict.
        Args:
            action (np.array): Action to take in environment
        Returns:
            4-tuple:
                - (dict) goal-API observation dict from the environment
                - (float) reward from the environment
                - (bool) whether the current episode is completed or not
                - (dict) misc information (with 'is_success' added)
        """
        ob_dict, reward, done, info = self.env.step(action)
        state = self._flatten_obs(ob_dict)
        # Achieved goal = current hinge joint angle read from the MuJoCo state
        ag = np.array([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])
        g = self.goal
        ob_dict = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
        info['is_success'] = int(ag[0] > g[0])
        return ob_dict, reward, done, info
    def seed(self, seed=None):
        """
        Utility function to set numpy seed
        Args:
            seed (None or int): If specified, numpy seed to set
        Raises:
            TypeError: [Seed must be integer]
        """
        # Seed the generator
        if seed is not None:
            try:
                np.random.seed(seed)
            except:
                TypeError("Seed must be an integer type!")
    def compute_reward(self, achieved_goal, desired_goal, info):
        # Sparse goal reward: 1 once the hinge angle exceeds the target.
        return 1 if achieved_goal[0] > desired_goal[0] else 0
def make_env():
    """Build a flattened robosuite PickPlaceCan environment.

    Returns:
        GymWrapper around a Panda PickPlaceCan task with dense rewards,
        no rendering, and an OSC_POSE controller at 20 Hz.
    """
    controller_cfg = load_controller_config(default_controller="OSC_POSE")
    raw_env = suite.make(
        "PickPlaceCan",
        robots="Panda",                 # use Sawyer robot
        use_camera_obs=False,           # do not use pixel observations
        has_offscreen_renderer=False,   # not needed since not using pixel obs
        has_renderer=False,             # make sure we can render to the screen
        reward_shaping=True,            # use dense rewards
        reward_scale=1.0,               # scale max 1 per timestep
        control_freq=20,                # control should happen fast enough so that simulation looks smooth
        horizon=500,
        ignore_done=True,
        hard_reset=False,
        controller_configs=controller_cfg,
    )
    return GymWrapper(raw_env)
# GoalMountainCarContinuous(gym.make("MountainCarContinuous-v0"))
# GoalMountainCar(gym.make(MountainCar-v0))
def experiment(variant):
    """Train HER+SAC on the robosuite environment built by `make_env`.

    Args:
        variant: dict of hyperparameter groups (algo_kwargs,
            sac_trainer_kwargs, replay_buffer_kwargs, qf_kwargs,
            policy_kwargs) as assembled in __main__.
    """
    # unwrap the TimeLimitEnv wrapper since we manually termiante after 50 steps
    eval_env = make_env()
    expl_env = make_env()
    print(eval_env.observation_space)
    observation_key = 'observation'
    desired_goal_key = 'desired_goal'
    achieved_goal_key = desired_goal_key.replace("desired", "achieved")
    # BUG FIX: the replay buffer construction was commented out although
    # `replay_buffer` is passed to TorchBatchRLAlgorithm below, and
    # `goal_dim` was used without being defined — both raised NameError.
    replay_buffer = ObsDictRelabelingBuffer(
        env=eval_env,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
        achieved_goal_key=achieved_goal_key,
        **variant['replay_buffer_kwargs']
    )
    # Goal-dict envs expose per-key sub-spaces; a flat (Box) env has no
    # separate goal, so the goal contributes 0 extra input dimensions.
    spaces = getattr(eval_env.observation_space, 'spaces', None)
    if spaces is not None:
        obs_dim = spaces[observation_key].low.size
        goal_dim = spaces[desired_goal_key].low.size
    else:
        obs_dim = eval_env.observation_space.low.size
        goal_dim = 0
    action_dim = eval_env.action_space.low.size
    print(obs_dim)
    print(action_dim)
    # Twin Q-networks and their targets all see (obs, goal, action)
    qf1 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    qf2 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    target_qf1 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    target_qf2 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim + goal_dim,
        action_dim=action_dim,
        **variant['policy_kwargs']
    )
    eval_policy = MakeDeterministic(policy)
    trainer = SACTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant['sac_trainer_kwargs']
    )
    # Wrap SAC with hindsight experience replay relabeling
    trainer = HERTrainer(trainer, use_per=False)
    eval_path_collector = GoalConditionedPathCollector(
        eval_env,
        eval_policy,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
    )
    expl_path_collector = GoalConditionedPathCollector(
        expl_env,
        policy,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algo_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # Hyperparameter groups consumed by experiment() and logged via setup_logger.
    variant = dict(
        algorithm='HER-SAC',
        version='normal',
        # Outer training loop: epochs, per-epoch step budgets, batch size.
        algo_kwargs=dict(
            batch_size=512,
            num_epochs=500,
            num_eval_steps_per_epoch=5000,
            num_expl_steps_per_train_loop=500,
            num_trains_per_train_loop=500,
            min_num_steps_before_training=1000,
            max_path_length=500,
        ),
        # Core SAC settings; the entropy coefficient is tuned automatically.
        sac_trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            use_automatic_entropy_tuning=True,
        ),
        replay_buffer_kwargs=dict(
            max_size=int(50000),
            fraction_goals_rollout_goals=0.2,  # equal to k = 4 in HER paper
            fraction_goals_env_goals=0,
        ),
        # Hidden-layer widths for the twin Q-networks and the policy.
        qf_kwargs=dict(
            hidden_sizes=[256, 256],
        ),
        policy_kwargs=dict(
            hidden_sizes=[256, 256],
        ),
    )
    setup_logger('her-sac-door-experiment', variant=variant)
    experiment(variant)
|
[
"rlkit.torch.sac.sac.SACTrainer",
"rlkit.torch.sac.policies.MakeDeterministic",
"rlkit.torch.torch_rl_algorithm.TorchBatchRLAlgorithm",
"numpy.concatenate",
"rlkit.samplers.data_collector.GoalConditionedPathCollector",
"robosuite.make",
"numpy.random.seed",
"rlkit.torch.her.her.HERTrainer",
"numpy.zeros",
"numpy.ones",
"rlkit.torch.networks.ConcatMlp",
"numpy.array",
"rlkit.launchers.launcher_util.setup_logger",
"rlkit.torch.sac.policies.TanhGaussianPolicy",
"gym.spaces.Box",
"robosuite.load_controller_config",
"gym.spaces.Dict"
] |
[((7709, 7762), 'robosuite.load_controller_config', 'load_controller_config', ([], {'default_controller': '"""OSC_POSE"""'}), "(default_controller='OSC_POSE')\n", (7731, 7762), False, 'from robosuite import load_controller_config\n'), ((9648, 9745), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (9657, 9745), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((9781, 9878), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (9790, 9878), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((9921, 10018), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (9930, 10018), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((10061, 10158), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (10070, 10158), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((10197, 10299), 'rlkit.torch.sac.policies.TanhGaussianPolicy', 'TanhGaussianPolicy', ([], {'obs_dim': '(obs_dim + goal_dim)', 'action_dim': 'action_dim'}), "(obs_dim=obs_dim + goal_dim, action_dim=action_dim, **\n variant['policy_kwargs'])\n", (10215, 10299), False, 'from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy\n'), ((10343, 10368), 'rlkit.torch.sac.policies.MakeDeterministic', 'MakeDeterministic', (['policy'], {}), '(policy)\n', (10360, 10368), False, 'from rlkit.torch.sac.policies import 
MakeDeterministic, TanhGaussianPolicy\n'), ((10383, 10524), 'rlkit.torch.sac.sac.SACTrainer', 'SACTrainer', ([], {'env': 'eval_env', 'policy': 'policy', 'qf1': 'qf1', 'qf2': 'qf2', 'target_qf1': 'target_qf1', 'target_qf2': 'target_qf2'}), "(env=eval_env, policy=policy, qf1=qf1, qf2=qf2, target_qf1=\n target_qf1, target_qf2=target_qf2, **variant['sac_trainer_kwargs'])\n", (10393, 10524), False, 'from rlkit.torch.sac.sac import SACTrainer\n'), ((10596, 10630), 'rlkit.torch.her.her.HERTrainer', 'HERTrainer', (['trainer'], {'use_per': '(False)'}), '(trainer, use_per=False)\n', (10606, 10630), False, 'from rlkit.torch.her.her import HERTrainer\n'), ((10657, 10781), 'rlkit.samplers.data_collector.GoalConditionedPathCollector', 'GoalConditionedPathCollector', (['eval_env', 'eval_policy'], {'observation_key': 'observation_key', 'desired_goal_key': 'desired_goal_key'}), '(eval_env, eval_policy, observation_key=\n observation_key, desired_goal_key=desired_goal_key)\n', (10685, 10781), False, 'from rlkit.samplers.data_collector import GoalConditionedPathCollector\n'), ((10842, 10961), 'rlkit.samplers.data_collector.GoalConditionedPathCollector', 'GoalConditionedPathCollector', (['expl_env', 'policy'], {'observation_key': 'observation_key', 'desired_goal_key': 'desired_goal_key'}), '(expl_env, policy, observation_key=\n observation_key, desired_goal_key=desired_goal_key)\n', (10870, 10961), False, 'from rlkit.samplers.data_collector import GoalConditionedPathCollector\n'), ((11012, 11264), 'rlkit.torch.torch_rl_algorithm.TorchBatchRLAlgorithm', 'TorchBatchRLAlgorithm', ([], {'trainer': 'trainer', 'exploration_env': 'expl_env', 'evaluation_env': 'eval_env', 'exploration_data_collector': 'expl_path_collector', 'evaluation_data_collector': 'eval_path_collector', 'replay_buffer': 'replay_buffer'}), "(trainer=trainer, exploration_env=expl_env,\n evaluation_env=eval_env, exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector, replay_buffer=\n 
replay_buffer, **variant['algo_kwargs'])\n", (11033, 11264), False, 'from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm\n'), ((12398, 12454), 'rlkit.launchers.launcher_util.setup_logger', 'setup_logger', (['"""her-sac-door-experiment"""'], {'variant': 'variant'}), "('her-sac-door-experiment', variant=variant)\n", (12410, 12454), False, 'from rlkit.launchers.launcher_util import setup_logger\n'), ((825, 849), 'numpy.array', 'np.array', (['self.env.state'], {}), '(self.env.state)\n', (833, 849), True, 'import numpy as np\n'), ((862, 920), 'numpy.array', 'np.array', (['[self.env.goal_position, self.env.goal_velocity]'], {}), '([self.env.goal_position, self.env.goal_velocity])\n', (870, 920), True, 'import numpy as np\n'), ((1595, 1619), 'numpy.array', 'np.array', (['self.env.state'], {}), '(self.env.state)\n', (1603, 1619), True, 'import numpy as np\n'), ((1632, 1690), 'numpy.array', 'np.array', (['[self.env.goal_position, self.env.goal_velocity]'], {}), '([self.env.goal_position, self.env.goal_velocity])\n', (1640, 1690), True, 'import numpy as np\n'), ((2077, 2216), 'gym.spaces.Dict', 'gym.spaces.Dict', (["{'observation': env.observation_space, 'achieved_goal': env.\n observation_space, 'desired_goal': env.observation_space}"], {}), "({'observation': env.observation_space, 'achieved_goal': env\n .observation_space, 'desired_goal': env.observation_space})\n", (2092, 2216), False, 'import gym\n'), ((2332, 2364), 'numpy.array', 'np.array', (['[env.goal_position, 0]'], {}), '([env.goal_position, 0])\n', (2340, 2364), True, 'import numpy as np\n'), ((2452, 2467), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2460, 2467), True, 'import numpy as np\n'), ((2857, 2872), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2865, 2872), True, 'import numpy as np\n'), ((4489, 4504), 'numpy.array', 'np.array', (['[0.3]'], {}), '([0.3])\n', (4497, 4504), True, 'import numpy as np\n'), ((5050, 5084), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 
'low', 'high': 'high'}), '(low=low, high=high)\n', (5064, 5084), False, 'import gym\n'), ((5716, 5738), 'numpy.concatenate', 'np.concatenate', (['ob_lst'], {}), '(ob_lst)\n', (5730, 5738), True, 'import numpy as np\n'), ((6070, 6130), 'numpy.array', 'np.array', (['[self.env.sim.data.qpos[self.env.hinge_qpos_addr]]'], {}), '([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])\n', (6078, 6130), True, 'import numpy as np\n'), ((6863, 6923), 'numpy.array', 'np.array', (['[self.env.sim.data.qpos[self.env.hinge_qpos_addr]]'], {}), '([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])\n', (6871, 6923), True, 'import numpy as np\n'), ((7784, 8046), 'robosuite.make', 'suite.make', (['"""PickPlaceCan"""'], {'robots': '"""Panda"""', 'use_camera_obs': '(False)', 'has_offscreen_renderer': '(False)', 'has_renderer': '(False)', 'reward_shaping': '(True)', 'reward_scale': '(1.0)', 'control_freq': '(20)', 'horizon': '(500)', 'ignore_done': '(True)', 'hard_reset': '(False)', 'controller_configs': 'controller'}), "('PickPlaceCan', robots='Panda', use_camera_obs=False,\n has_offscreen_renderer=False, has_renderer=False, reward_shaping=True,\n reward_scale=1.0, control_freq=20, horizon=500, ignore_done=True,\n hard_reset=False, controller_configs=controller)\n", (7794, 8046), True, 'import robosuite as suite\n'), ((4680, 4701), 'numpy.ones', 'np.ones', (['self.obs_dim'], {}), '(self.obs_dim)\n', (4687, 4701), True, 'import numpy as np\n'), ((4787, 4821), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high'}), '(low=low, high=high)\n', (4801, 4821), False, 'import gym\n'), ((7447, 7467), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7461, 7467), True, 'import numpy as np\n'), ((4859, 4870), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (4867, 4870), True, 'import numpy as np\n'), ((4877, 4887), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (4884, 4887), True, 'import numpy as np\n'), ((4937, 4948), 'numpy.zeros', 'np.zeros', (['(1)'], {}), 
'(1)\n', (4945, 4948), True, 'import numpy as np\n'), ((4955, 4965), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (4962, 4965), True, 'import numpy as np\n')]
|
import numpy as np
from PIL import Image
import time
import cv2
global img
global point1, point2
global min_x, min_y, width, height, max_x, max_y
def on_mouse(event, x, y, flags, param):
    """OpenCV mouse callback: drag a rectangle to select the restricted region.

    Updates the module-level selection state (point1/point2 and
    min_x/min_y/width/height/max_x/max_y).  NOTE: OpenCV reports (x, y) as
    (column, row); this code stores row coordinates in *_x and column
    coordinates in *_y so they can index numpy image arrays directly.
    """
    global img, point1, point2, min_x, min_y, width, height, max_x, max_y
    img2 = img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:  # left button pressed
        point1 = (x, y)
        cv2.circle(img2, point1, 10, (0, 255, 0), 2)
        cv2.imshow('image', img2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging with left button held
        cv2.rectangle(img2, point1, (x, y), (255, 0, 0), 2)
        cv2.imshow('image', img2)
    elif event == cv2.EVENT_LBUTTONUP:  # left button released
        point2 = (x, y)
        cv2.rectangle(img2, point1, point2, (0, 0, 255), 2)
        cv2.imshow('image', img2)
        # Convert the two corner points into a (row, col) bounding box.
        min_y = min(point1[0], point2[0])
        min_x = min(point1[1], point2[1])
        width = abs(point1[0] - point2[0])
        height = abs(point1[1] - point2[1])
        max_x = min_x + height
        max_y = min_y + width
def overlap_restricted_area(x, y, patch_size, min_x, max_x, min_y, max_y):
    """Return True if the patch centred at (x, y) intersects the restricted
    rectangle [min_x, max_x] x [min_y, max_y].

    Two axis-aligned boxes intersect exactly when their intervals overlap
    on both axes.
    """
    half = patch_size // 2
    overlaps_rows = max(x - half, min_x) <= min(x + half, max_x)
    overlaps_cols = max(y - half, min_y) <= min(y + half, max_y)
    return overlaps_rows and overlaps_cols
def cal_distance(a, b, A_padding, B, p_size):
    """Mean squared difference between the patch at `a` in the padded source
    and the patch centred at `b` in B, averaged over non-NaN pixels only.
    """
    half = p_size // 2
    patch_a = A_padding[a[0]:a[0] + p_size, a[1]:a[1] + p_size, :]
    patch_b = B[b[0] - half:b[0] + half + 1, b[1] - half:b[1] + half + 1, :]
    diff = patch_b - patch_a
    # Count valid (non-NaN) entries; NaNs contribute 0 to the sum below.
    valid_count = np.sum(np.int32(~np.isnan(diff)))
    return np.sum(np.square(np.nan_to_num(diff))) / valid_count
def cal_alpha(dis, gamma=2.0):
    """Similarity weight that decays exponentially with patch distance."""
    weight = gamma ** (-dis)
    return weight
def reconstruction(f, A, B, p_size, dist, min_x, max_x, min_y, max_y, itter):
    """Blend a new estimate of the filled region from its matched patches.

    Every patch containing pixel (i, j) of A votes with the corresponding
    pixel of its matched patch in B, weighted by cal_alpha of the patch
    distance (smaller distance -> larger weight).

    Args:
        f: nearest-neighbour field; f[i][j] = (row, col) match in B
        A: current estimate of the region being filled (H x W x 3)
        B: full image the patches are sampled from
        p_size: odd patch size
        dist: per-pixel patch distances aligned with f
        min_x, max_x, min_y, max_y: restricted (masked) rectangle in B
        itter: iteration counter (currently unused)

    Returns:
        (tmp, temp): the full image with the region pasted back in, and the
        blended region on its own.
    """
    A_h = np.size(A, 0)
    A_w = np.size(A, 1)
    B_h = np.size(B, 0)
    B_w = np.size(B, 1)
    temp = np.zeros_like(A)
    p = p_size // 2
    for i in range(A_h):
        for j in range(A_w):
            cnt = 0
            ans = np.zeros(3)
            for m in range(-p, p + 1, 1):
                for n in range(-p, p + 1, 1):
                    # Skip offsets whose centre pixel falls outside A ...
                    if not ((0 <= i + m < A_h) and (0 <= j + n < A_w)):
                        continue
                    # ... or whose shifted match falls outside B ...
                    if not ((0 <= f[i + m][j + n][0] - m < B_h) and (0 <= f[i + m][j + n][1] - n < B_w)):
                        continue
                    # ... or whose source pixel touches the restricted area.
                    if overlap_restricted_area(f[i + m][j + n][0] - m, f[i + m][j + n][1] - n, p_size, min_x, max_x,
                                               min_y,
                                               max_y):
                        continue
                    alpha = cal_alpha(dis=dist[i + m, j + n])
                    cnt += alpha
                    ans += alpha * B[f[i + m][j + n][0] - m, f[i + m][j + n][1] - n, :]
            # NOTE(review): cnt stays 0 if every candidate was skipped, which
            # would divide by zero here — confirm inputs preclude that case.
            temp[i, j, :] = ans / cnt
    tmp = np.copy(B)
    # temp = cv2.GaussianBlur(temp, (3, 3), 0)
    tmp[min_x:min_x + A_h, min_y:min_y + A_w, :] = temp
    # Image.fromarray(tmp).show()
    return tmp, temp
def initialization(A, B, f, p_size, min_x, max_x, min_y, max_y, create_f=False):
    """Initialise (or partially re-randomise) the NNF and its distances.

    Args:
        A: region being filled (H x W x 3)
        B: full image patches are sampled from
        f: existing NNF; ignored (rebuilt) when create_f is True
        p_size: odd patch size
        min_x, max_x, min_y, max_y: restricted rectangle in B to avoid
        create_f: when True build a brand-new random NNF; when False only
            reseed entries whose row or column index is even

    Returns:
        (f, dist, A_padding): the NNF, its patch distances, and A embedded
        in a p-pixel border taken from B.
    """
    A_h = np.size(A, 0)
    A_w = np.size(A, 1)
    B_h = np.size(B, 0)
    B_w = np.size(B, 1)
    p = p_size // 2
    # A_padding = np.ones([A_h+p*2, A_w+p*2, 3]) * np.nan
    # NOTE(review): this slice is a *view* into B, so the assignment below
    # also writes A back into B — confirm that side effect is intended.
    A_padding = B[min_x - p:min_x + A_h + p, min_y - p:min_y + A_w + p, :]
    A_padding[p:A_h + p, p:A_w + p, :] = A
    # Draw a random candidate match per pixel, re-drawing until it clears
    # the restricted area.
    random_B_r = np.random.randint(p, B_h - p, [A_h, A_w])
    random_B_c = np.random.randint(p, B_w - p, [A_h, A_w])
    for i in range(A_h):
        for j in range(A_w):
            while overlap_restricted_area(random_B_r[i][j], random_B_c[i][j], p_size, min_x, max_x, min_y, max_y):
                random_B_r[i][j] = np.random.randint(p, B_h - p)
                random_B_c[i][j] = np.random.randint(p, B_w - p)
    if create_f:
        f = np.zeros([A_h, A_w], dtype=object)
    dist = np.zeros([A_h, A_w])
    for i in range(A_h):
        for j in range(A_w):
            a = np.array([i, j])
            if create_f:
                b = np.array([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)
                f[i, j] = b
            else:
                b = np.array([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)
                # Keep the old match at odd/odd positions; reseed elsewhere.
                if (i % 2 == 0) or (j % 2 == 0):
                    f[i, j] = b
                else:
                    b = f[i, j]
            dist[i, j] = cal_distance(a, b, A_padding, B, p_size)
    return f, dist, A_padding
def propagation(f, a, dist, A_padding, B, p_size, is_odd, min_x, max_x, min_y, max_y):
    """PatchMatch propagation step for pixel `a` = (x, y).

    On odd sweeps candidates come from the left/upper neighbours, on even
    sweeps from the right/lower ones. `f` and `dist` are updated in place
    when a neighbour's match is better and its shifted source does not
    touch the restricted area.
    """
    A_h = np.size(A_padding, 0) - p_size + 1
    A_w = np.size(A_padding, 1) - p_size + 1
    # print(A_h, A_w)
    x = a[0]
    y = a[1]
    if is_odd:
        d_left = dist[max(x - 1, 0), y]
        d_up = dist[x, max(y - 1, 0)]
        d_current = dist[x, y]
        # idx: 0 = keep current, 1 = adopt left neighbour, 2 = adopt upper
        idx = np.argmin(np.array([d_current, d_left, d_up]))
        if idx == 1 and (not overlap_restricted_area(f[max(x - 1, 0), y][0] + 1, f[max(x - 1, 0), y][1], p_size,
                                                     min_x, max_x, min_y, max_y)):
            f[x, y] = f[max(x - 1, 0), y]
            dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
        if idx == 2 and (not overlap_restricted_area(f[x, max(y - 1, 0)][0], f[x, max(y - 1, 0)][1] + 1, p_size,
                                                     min_x, max_x, min_y, max_y)):
            f[x, y] = f[x, max(y - 1, 0)]
            dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
    else:
        # print(dist.shape)
        # print(min(x + 1, A_h - 1), y)
        d_right = dist[min(x + 1, A_h - 1), y]
        d_down = dist[x, min(y + 1, A_w - 1)]
        d_current = dist[x, y]
        # idx: 0 = keep current, 1 = adopt right neighbour, 2 = adopt lower
        idx = np.argmin(np.array([d_current, d_right, d_down]))
        if idx == 1 and (
                not overlap_restricted_area(f[min(x + 1, A_h - 1), y][0] - 1, f[min(x + 1, A_h - 1), y][1], p_size,
                                            min_x, max_x, min_y, max_y)):
            f[x, y] = f[min(x + 1, A_h - 1), y]
            dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
        if idx == 2 and (
                not overlap_restricted_area(f[x, min(y + 1, A_w - 1)][0], f[x, min(y + 1, A_w - 1)][1] - 1, p_size,
                                            min_x, max_x, min_y, max_y)):
            f[x, y] = f[x, min(y + 1, A_w - 1)]
            dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
def random_search(f, a, dist, A_padding, B, p_size, min_x, max_x, min_y, max_y, alpha=0.5):
    """PatchMatch random-search step for the patch at position ``a``.

    Samples candidate matches from windows centred on the current match
    whose radius shrinks geometrically (B * alpha**i, starting at i=4),
    accepting a candidate when it is closer than the current match and does
    not overlap the restricted rectangle.  ``f`` and ``dist`` are updated
    in place.
    """
    x = a[0]
    y = a[1]
    B_h = np.size(B, 0)
    B_w = np.size(B, 1)
    p = p_size // 2
    i = 4
    search_h = B_h * alpha ** i
    search_w = B_w * alpha ** i
    # Window stays centred on the match found *before* the search started.
    b_x = f[x, y][0]
    b_y = f[x, y][1]
    while search_h > 1 and search_w > 1:
        # Clamp the sampling window so full patches stay inside B.
        search_min_r = max(b_x - search_h, p)
        search_max_r = min(b_x + search_h, B_h - p)
        random_b_x = np.random.randint(search_min_r, search_max_r)
        search_min_c = max(b_y - search_w, p)
        search_max_c = min(b_y + search_w, B_w - p)
        random_b_y = np.random.randint(search_min_c, search_max_c)
        # Recompute the window with the pre-increment i, so the first real
        # shrink only takes effect on the following round.
        search_h = B_h * alpha ** i
        search_w = B_w * alpha ** i
        b = np.array([random_b_x, random_b_y])
        d = cal_distance(a, b, A_padding, B, p_size)
        if d < dist[x, y] and (not overlap_restricted_area(b[0], b[1], p_size, min_x, max_x, min_y, max_y)):
            dist[x, y] = d
            f[x, y] = b
        i += 1
def NNS(img, ref, p_size, itr, f, dist, img_padding, min_x, max_x, min_y, max_y):
    """Run PatchMatch nearest-neighbour search for ``itr`` sweeps.

    Odd sweeps scan top-left to bottom-right (propagating from left/up);
    even sweeps scan in reverse (propagating from right/down).  Each visit
    performs a propagation step followed by a random-search step.  ``f``
    and ``dist`` are mutated in place; the final NNF is returned.
    """
    height = np.size(img, 0)
    width = np.size(img, 1)
    for sweep in range(1, itr + 1):
        forward = sweep % 2 != 0
        if forward:
            rows = range(height)
            cols = range(width)
        else:
            rows = range(height - 1, -1, -1)
            cols = range(width - 1, -1, -1)
        for row in rows:
            for col in cols:
                pos = np.array([row, col])
                propagation(f, pos, dist, img_padding, ref, p_size, forward, min_x, max_x, min_y, max_y)
                random_search(f, pos, dist, img_padding, ref, p_size, min_x, max_x, min_y, max_y)
        print("iteration: %d" % (sweep))
    return f
def upsample_nnf(nnf):
    """Upsample a nearest-neighbour field to twice its resolution.

    Each NNF entry (a (row, col) coordinate pair into the reference image)
    is nearest-neighbour resized by a factor of 2 and the stored
    coordinates are scaled by the same factor so they index the upsampled
    reference.  Returns an integer array of shape (2*H, 2*W, 2).
    """
    temp = np.zeros((nnf.shape[0], nnf.shape[1], 3))
    for x in range(nnf.shape[0]):
        for y in range(nnf.shape[1]):
            # Pack (row, col) into a 3-channel image so cv2.resize accepts it.
            temp[x][y] = [nnf[x][y][0], nnf[x][y][1], 0]
    aw_ratio = 2  # horizontal upsample factor
    ah_ratio = 2  # vertical upsample factor
    temp = cv2.resize(temp, None, fx=aw_ratio, fy=aw_ratio, interpolation=cv2.INTER_NEAREST)
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    imge = np.zeros(shape=(temp.shape[0], temp.shape[1], 2), dtype=int)
    for i in range(temp.shape[0]):
        for j in range(temp.shape[1]):
            pos = temp[i, j]
            imge[i, j] = pos[0] * aw_ratio, pos[1] * ah_ratio
    return imge
# Patch size used at each pyramid level (indexed by level l in main()).
padding_size = [15, 15, 13, 9, 5, 2]
# padding_size = [9, 7, 5, 3, 3, 2]
# Number of NNS/reconstruction passes to run at each pyramid level.
iter_arr = [2, 2, 16, 40, 64, 64]
def main(img_path):
    """Interactively select a rectangle on the image and inpaint it.

    Shows the image and lets the user drag a rectangle (handled by the
    module-level `on_mouse`, which sets the min_x/max_x/min_y/max_y
    globals), then runs coarse-to-fine PatchMatch reconstruction over an
    image pyramid and saves the result to temp.jpg, returning that path.
    """
    # img_path = 'IMAGE/face.jpg'
    global img
    img = cv2.imread(img_path)
    cv2.namedWindow('image')
    cv2.setMouseCallback('image', on_mouse)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # print(min_x, min_y, height, width)
    # Snapshot the user-selected hole rectangle (globals set by on_mouse).
    global_min_x = min_x
    global_min_y = min_y
    global_max_x = max_x
    global_max_y = max_y
    # img = np.array(Image.open("./cup_a.jpg"))
    origin_ref = np.array(Image.open(img_path))
    # ref = cv2.pyrDown(origin_ref, (np.size(origin_ref, 0)//2, np.size(origin_ref, 1)//2))
    # Image.fromarray(ref).show()
    itr = 4
    start = time.time()
    # origin_img = origin_ref[min_x: max_x + 1, min_y:max_y + 1, :]
    # img = cv2.resize(origin_img, None, fx=2 ** (-4), fy=2 ** (-4), interpolation=cv2.INTER_NEAREST)
    f = 0
    depth = 3
    # Coarse-to-fine: level `depth` is the most downsampled, level 0 full-res.
    for l in range(depth, -1, -1):
        p_size = padding_size[l]
        # Scale the hole rectangle to this pyramid level.
        gmin_x = global_min_x // (2 ** l)
        gmin_y = global_min_y // (2 ** l)
        gmax_x = global_max_x // (2 ** l)
        gmax_y = global_max_y // (2 ** l)
        # print(origin_ref.shape)
        # ref = cv2.resize(origin_ref, None, fx=2 ** (-l), fy=2 ** (-l), interpolation=cv2.INTER_LINEAR)
        ref = origin_ref
        # Downsample the reference l times to reach this pyramid level.
        for kk in range(l):
            ref = cv2.pyrDown(ref, (np.size(origin_ref, 0) // 2, np.size(origin_ref, 1) // 2))
        # print(ref.shape)
        # print(gmin_x, gmin_y, gmax_x, gmax_y)
        # !!!!!!!!!
        img = ref[gmin_x: gmax_x + 1, gmin_y:gmax_y + 1, :]
        # !!!!!!!!!
        if l == depth:
            # img = ref[gmin_x: gmax_x + 1, gmin_y:gmax_y + 1, :]
            # img = np.zeros([gmax_x - gmin_x + 1, gmax_y - gmin_y + 1, 3])
            # !!!!!!!!!!
            # img = np.random.randint(0, 256, size=(gmax_x - gmin_x + 1, gmax_y - gmin_y + 1, 3), dtype=np.uint8)
            # !!!!!!!!!!
            # print(np.shape(img)[0] // 4)
            # Coarsest level: build a fresh random NNF.
            f, dist, img_padding = initialization(img, ref, f, p_size, gmin_x, gmax_x, gmin_y, gmax_y, create_f=True)
        else:
            # print(img.shape)
            # Finer levels: reuse the NNF upsampled from the previous level.
            fake, dist, img_padding = initialization(img, ref, f, p_size, gmin_x, gmax_x, gmin_y, gmax_y,
                                                     create_f=False)
        # Image.fromarray(ref).show()
        # Image.fromarray(img).show()
        # print(img.shape)
        # print(img_padding.shape)
        for itter in range(iter_arr[l]):
            f = NNS(img, ref, p_size, itr, f, dist, img_padding, gmin_x, gmax_x, gmin_y, gmax_y)
            end = time.time()
            print(end - start)
            print(l, itter + 1, '/', iter_arr[l])
            tmp, img = reconstruction(f, img, ref, p_size, dist, gmin_x, gmax_x, gmin_y, gmax_y, itter)
            # if itter == iter_arr[l] - 1:
            #     Image.fromarray(tmp).show()
        # img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
        # Image.fromarray(img).show()
        # Prepare the next (finer) level: double image and NNF resolution.
        img = cv2.pyrUp(img, (np.size(img, 0) * 2, np.size(img, 1) * 2))
        f = upsample_nnf(f)
    # Image.fromarray(img).show()
    tmp = Image.fromarray(tmp)
    tmp.save("temp.jpg")
    return "temp.jpg"
if __name__ == '__main__':
    img_path = 'D://project//Image_Completion//IMAGE//face.jpg'
    # img_path = 'D://project//Image_Completion//IMAGE//birds.jpg'
    # Repeatedly re-run completion on the last saved result (stop with Ctrl+C).
    while True:
        img_path = main(img_path)
|
[
"numpy.nan_to_num",
"numpy.isnan",
"numpy.random.randint",
"cv2.rectangle",
"cv2.imshow",
"numpy.zeros_like",
"numpy.copy",
"cv2.setMouseCallback",
"cv2.destroyAllWindows",
"cv2.resize",
"numpy.size",
"cv2.circle",
"cv2.waitKey",
"numpy.zeros",
"time.time",
"PIL.Image.open",
"cv2.imread",
"numpy.array",
"PIL.Image.fromarray",
"cv2.namedWindow"
] |
[((1982, 1995), 'numpy.size', 'np.size', (['A', '(0)'], {}), '(A, 0)\n', (1989, 1995), True, 'import numpy as np\n'), ((2006, 2019), 'numpy.size', 'np.size', (['A', '(1)'], {}), '(A, 1)\n', (2013, 2019), True, 'import numpy as np\n'), ((2030, 2043), 'numpy.size', 'np.size', (['B', '(0)'], {}), '(B, 0)\n', (2037, 2043), True, 'import numpy as np\n'), ((2054, 2067), 'numpy.size', 'np.size', (['B', '(1)'], {}), '(B, 1)\n', (2061, 2067), True, 'import numpy as np\n'), ((2079, 2095), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (2092, 2095), True, 'import numpy as np\n'), ((3042, 3052), 'numpy.copy', 'np.copy', (['B'], {}), '(B)\n', (3049, 3052), True, 'import numpy as np\n'), ((3304, 3317), 'numpy.size', 'np.size', (['A', '(0)'], {}), '(A, 0)\n', (3311, 3317), True, 'import numpy as np\n'), ((3328, 3341), 'numpy.size', 'np.size', (['A', '(1)'], {}), '(A, 1)\n', (3335, 3341), True, 'import numpy as np\n'), ((3352, 3365), 'numpy.size', 'np.size', (['B', '(0)'], {}), '(B, 0)\n', (3359, 3365), True, 'import numpy as np\n'), ((3376, 3389), 'numpy.size', 'np.size', (['B', '(1)'], {}), '(B, 1)\n', (3383, 3389), True, 'import numpy as np\n'), ((3603, 3644), 'numpy.random.randint', 'np.random.randint', (['p', '(B_h - p)', '[A_h, A_w]'], {}), '(p, B_h - p, [A_h, A_w])\n', (3620, 3644), True, 'import numpy as np\n'), ((3662, 3703), 'numpy.random.randint', 'np.random.randint', (['p', '(B_w - p)', '[A_h, A_w]'], {}), '(p, B_w - p, [A_h, A_w])\n', (3679, 3703), True, 'import numpy as np\n'), ((4078, 4098), 'numpy.zeros', 'np.zeros', (['[A_h, A_w]'], {}), '([A_h, A_w])\n', (4086, 4098), True, 'import numpy as np\n'), ((6754, 6767), 'numpy.size', 'np.size', (['B', '(0)'], {}), '(B, 0)\n', (6761, 6767), True, 'import numpy as np\n'), ((6778, 6791), 'numpy.size', 'np.size', (['B', '(1)'], {}), '(B, 1)\n', (6785, 6791), True, 'import numpy as np\n'), ((7740, 7755), 'numpy.size', 'np.size', (['img', '(0)'], {}), '(img, 0)\n', (7747, 7755), True, 'import numpy as np\n'), 
((7766, 7781), 'numpy.size', 'np.size', (['img', '(1)'], {}), '(img, 1)\n', (7773, 7781), True, 'import numpy as np\n'), ((8651, 8692), 'numpy.zeros', 'np.zeros', (['(nnf.shape[0], nnf.shape[1], 3)'], {}), '((nnf.shape[0], nnf.shape[1], 3))\n', (8659, 8692), True, 'import numpy as np\n'), ((9010, 9096), 'cv2.resize', 'cv2.resize', (['temp', 'None'], {'fx': 'aw_ratio', 'fy': 'aw_ratio', 'interpolation': 'cv2.INTER_NEAREST'}), '(temp, None, fx=aw_ratio, fy=aw_ratio, interpolation=cv2.\n INTER_NEAREST)\n', (9020, 9096), False, 'import cv2\n'), ((9103, 9166), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp.shape[0], temp.shape[1], 2)', 'dtype': 'np.int'}), '(shape=(temp.shape[0], temp.shape[1], 2), dtype=np.int)\n', (9111, 9166), True, 'import numpy as np\n'), ((9539, 9559), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (9549, 9559), False, 'import cv2\n'), ((9564, 9588), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (9579, 9588), False, 'import cv2\n'), ((9593, 9632), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'on_mouse'], {}), "('image', on_mouse)\n", (9613, 9632), False, 'import cv2\n'), ((9637, 9661), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (9647, 9661), False, 'import cv2\n'), ((9666, 9680), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (9677, 9680), False, 'import cv2\n'), ((9685, 9708), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9706, 9708), False, 'import cv2\n'), ((10096, 10107), 'time.time', 'time.time', ([], {}), '()\n', (10105, 10107), False, 'import time\n'), ((12563, 12583), 'PIL.Image.fromarray', 'Image.fromarray', (['tmp'], {}), '(tmp)\n', (12578, 12583), False, 'from PIL import Image\n'), ((365, 409), 'cv2.circle', 'cv2.circle', (['img2', 'point1', '(10)', '(0, 255, 0)', '(2)'], {}), '(img2, point1, 10, (0, 255, 0), 2)\n', (375, 409), False, 'import cv2\n'), ((418, 443), 'cv2.imshow', 'cv2.imshow', 
(['"""image"""', 'img2'], {}), "('image', img2)\n", (428, 443), False, 'import cv2\n'), ((4032, 4066), 'numpy.zeros', 'np.zeros', (['[A_h, A_w]'], {'dtype': 'object'}), '([A_h, A_w], dtype=object)\n', (4040, 4066), True, 'import numpy as np\n'), ((7088, 7133), 'numpy.random.randint', 'np.random.randint', (['search_min_r', 'search_max_r'], {}), '(search_min_r, search_max_r)\n', (7105, 7133), True, 'import numpy as np\n'), ((7253, 7298), 'numpy.random.randint', 'np.random.randint', (['search_min_c', 'search_max_c'], {}), '(search_min_c, search_max_c)\n', (7270, 7298), True, 'import numpy as np\n'), ((7383, 7417), 'numpy.array', 'np.array', (['[random_b_x, random_b_y]'], {}), '([random_b_x, random_b_y])\n', (7391, 7417), True, 'import numpy as np\n'), ((9924, 9944), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (9934, 9944), False, 'from PIL import Image\n'), ((538, 589), 'cv2.rectangle', 'cv2.rectangle', (['img2', 'point1', '(x, y)', '(255, 0, 0)', '(2)'], {}), '(img2, point1, (x, y), (255, 0, 0), 2)\n', (551, 589), False, 'import cv2\n'), ((598, 623), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img2'], {}), "('image', img2)\n", (608, 623), False, 'import cv2\n'), ((2208, 2219), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2216, 2219), True, 'import numpy as np\n'), ((4169, 4185), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (4177, 4185), True, 'import numpy as np\n'), ((4753, 4774), 'numpy.size', 'np.size', (['A_padding', '(0)'], {}), '(A_padding, 0)\n', (4760, 4774), True, 'import numpy as np\n'), ((4798, 4819), 'numpy.size', 'np.size', (['A_padding', '(1)'], {}), '(A_padding, 1)\n', (4805, 4819), True, 'import numpy as np\n'), ((5029, 5064), 'numpy.array', 'np.array', (['[d_current, d_left, d_up]'], {}), '([d_current, d_left, d_up])\n', (5037, 5064), True, 'import numpy as np\n'), ((5912, 5950), 'numpy.array', 'np.array', (['[d_current, d_right, d_down]'], {}), '([d_current, d_right, d_down])\n', (5920, 5950), True, 
'import numpy as np\n'), ((12008, 12019), 'time.time', 'time.time', ([], {}), '()\n', (12017, 12019), False, 'import time\n'), ((703, 754), 'cv2.rectangle', 'cv2.rectangle', (['img2', 'point1', 'point2', '(0, 0, 255)', '(2)'], {}), '(img2, point1, point2, (0, 0, 255), 2)\n', (716, 754), False, 'import cv2\n'), ((763, 788), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img2'], {}), "('image', img2)\n", (773, 788), False, 'import cv2\n'), ((1743, 1757), 'numpy.isnan', 'np.isnan', (['temp'], {}), '(temp)\n', (1751, 1757), True, 'import numpy as np\n'), ((1788, 1807), 'numpy.nan_to_num', 'np.nan_to_num', (['temp'], {}), '(temp)\n', (1801, 1807), True, 'import numpy as np\n'), ((3908, 3937), 'numpy.random.randint', 'np.random.randint', (['p', '(B_h - p)'], {}), '(p, B_h - p)\n', (3925, 3937), True, 'import numpy as np\n'), ((3973, 4002), 'numpy.random.randint', 'np.random.randint', (['p', '(B_w - p)'], {}), '(p, B_w - p)\n', (3990, 4002), True, 'import numpy as np\n'), ((4231, 4293), 'numpy.array', 'np.array', (['[random_B_r[i, j], random_B_c[i, j]]'], {'dtype': 'np.int32'}), '([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)\n', (4239, 4293), True, 'import numpy as np\n'), ((4360, 4422), 'numpy.array', 'np.array', (['[random_B_r[i, j], random_B_c[i, j]]'], {'dtype': 'np.int32'}), '([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)\n', (4368, 4422), True, 'import numpy as np\n'), ((8012, 8028), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (8020, 8028), True, 'import numpy as np\n'), ((8342, 8358), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (8350, 8358), True, 'import numpy as np\n'), ((12444, 12459), 'numpy.size', 'np.size', (['img', '(0)'], {}), '(img, 0)\n', (12451, 12459), True, 'import numpy as np\n'), ((12465, 12480), 'numpy.size', 'np.size', (['img', '(1)'], {}), '(img, 1)\n', (12472, 12480), True, 'import numpy as np\n'), ((10766, 10788), 'numpy.size', 'np.size', (['origin_ref', '(0)'], {}), '(origin_ref, 0)\n', (10773, 
10788), True, 'import numpy as np\n'), ((10795, 10817), 'numpy.size', 'np.size', (['origin_ref', '(1)'], {}), '(origin_ref, 1)\n', (10802, 10817), True, 'import numpy as np\n')]
|
from hydra.experimental import compose, initialize
from random import randint
from random import seed
from soundbay.data import ClassifierDataset
import numpy as np
def test_dataloader() -> None:
    """Smoke-test ClassifierDataset: metadata shape and a random sample's
    label type and feature shape, driven by the hydra run config."""
    seed(1)
    with initialize(config_path="../soundbay/conf"):
        # config is relative to a module
        cfg = compose(config_name="runs/main")
        data_loader = ClassifierDataset(cfg.data.train_dataset.data_path, cfg.data.train_dataset.metadata_path,
                            augmentations=cfg.data.train_dataset.augmentations,
                            augmentations_p=cfg.data.train_dataset.augmentations_p,
                            preprocessors=cfg.data.train_dataset.preprocessors)
        assert data_loader.metadata.shape[1] == 5  # make sure metadata has 5 columns
        assert data_loader.metadata.shape[0] > 0  # make sure metadata is not empty
        data_size = data_loader.metadata.shape[0]
        # FIX: random.randint is inclusive on BOTH endpoints, so the upper
        # bound must be data_size - 1; randint(0, data_size) could yield an
        # out-of-range index and an IndexError.
        value = randint(0, data_size - 1)
        sample = data_loader[value]
        assert np.issubdtype(sample[1], np.integer)  # label is an integer class id
        if 'spectrogram' in cfg.data.train_dataset.preprocessors:
            assert len(sample[0].shape) == 3
            if 'utils.LibrosaMelSpectrogram' in cfg.data.train_dataset.preprocessors.spectrogram._target_:
                assert sample[0].shape[1] == cfg.data.train_dataset.preprocessors.spectrogram.n_mels
            else:
                assert sample[0].shape[1] == (cfg.data.train_dataset.preprocessors.spectrogram.n_fft // 2 + 1)
        else:
            assert sample[0].shape[1] == 1
|
[
"hydra.experimental.compose",
"random.randint",
"soundbay.data.ClassifierDataset",
"random.seed",
"hydra.experimental.initialize",
"numpy.issubdtype"
] |
[((202, 209), 'random.seed', 'seed', (['(1)'], {}), '(1)\n', (206, 209), False, 'from random import seed\n'), ((219, 261), 'hydra.experimental.initialize', 'initialize', ([], {'config_path': '"""../soundbay/conf"""'}), "(config_path='../soundbay/conf')\n", (229, 261), False, 'from hydra.experimental import compose, initialize\n'), ((318, 350), 'hydra.experimental.compose', 'compose', ([], {'config_name': '"""runs/main"""'}), "(config_name='runs/main')\n", (325, 350), False, 'from hydra.experimental import compose, initialize\n'), ((373, 636), 'soundbay.data.ClassifierDataset', 'ClassifierDataset', (['cfg.data.train_dataset.data_path', 'cfg.data.train_dataset.metadata_path'], {'augmentations': 'cfg.data.train_dataset.augmentations', 'augmentations_p': 'cfg.data.train_dataset.augmentations_p', 'preprocessors': 'cfg.data.train_dataset.preprocessors'}), '(cfg.data.train_dataset.data_path, cfg.data.train_dataset.\n metadata_path, augmentations=cfg.data.train_dataset.augmentations,\n augmentations_p=cfg.data.train_dataset.augmentations_p, preprocessors=\n cfg.data.train_dataset.preprocessors)\n', (390, 636), False, 'from soundbay.data import ClassifierDataset\n'), ((979, 1000), 'random.randint', 'randint', (['(0)', 'data_size'], {}), '(0, data_size)\n', (986, 1000), False, 'from random import randint\n'), ((1052, 1088), 'numpy.issubdtype', 'np.issubdtype', (['sample[1]', 'np.integer'], {}), '(sample[1], np.integer)\n', (1065, 1088), True, 'import numpy as np\n')]
|
import yaml
import os
import numpy as np
class DataOrganizer:
    """Load analysis parameters from ``params.yaml`` and provide small
    numeric-array lookup helpers."""

    def __init__(self, parameter_file_path):
        self.base_path = parameter_file_path
        self.load_params()

    def load_params(self):
        """Read ``params.yaml`` from the base path into ``self.params``."""
        with open(os.path.join(self.base_path, 'params.yaml')) as stream:
            self.params = yaml.load(stream, Loader=yaml.SafeLoader)

    def get_closest_index_and_value(self, value, array):
        """Return (index, element) of the array entry nearest to ``value``."""
        nearest = np.argmin(np.abs(array - value))
        return nearest, array[nearest]
|
[
"numpy.abs",
"yaml.load",
"os.path.join"
] |
[((229, 272), 'os.path.join', 'os.path.join', (['self.base_path', '"""params.yaml"""'], {}), "(self.base_path, 'params.yaml')\n", (241, 272), False, 'import os\n'), ((344, 389), 'yaml.load', 'yaml.load', (['yamlstream'], {'Loader': 'yaml.SafeLoader'}), '(yamlstream, Loader=yaml.SafeLoader)\n', (353, 389), False, 'import yaml\n'), ((471, 492), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (477, 492), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import os
import os.path as osp
def save_network(model, network_label, epoch, iteration, args):
    """Serialize `model` plus training metadata under args.save_dir/<dataset>.

    The state dict is taken from the CPU copy so the checkpoint is
    device-independent; the model is moved back to the GPU afterwards.
    """
    dataset = args.data_path.split(os.sep)[-1]
    save_filename = "{0}_net_{1}_{2}_{3}.pth".format(network_label, args.model, epoch, iteration)
    model_save_dir = osp.join(args.save_dir, dataset)
    # FIX: exist_ok avoids the check-then-create race of the original
    # os.path.exists + os.makedirs pair.
    os.makedirs(model_save_dir, exist_ok=True)
    save_path = osp.join(model_save_dir, save_filename)
    model_state = {
        'state_dict': model.cpu().state_dict(),
        'epoch': epoch,
        'iteration': iteration,
        'model': args.model,
        'color_space': args.color_space,
        'batch_size': args.batch_size,
        'dataset': dataset,
        'image_size': args.image_size
    }
    torch.save(model_state, save_path)
    model.cuda()  # restore the model to the GPU after the cpu() call above
    print("Saved {0} at epoch: {1}, iter: {2}".format(network_label, epoch, iteration))
def load_network(model, network_label, epoch, iteration, args):
    """Load weights for `model` from args.load_dir and move it to args.gpu."""
    dataset = args.data_path.split(os.sep)[-1]
    save_filename = "{0}_net_{1}_{2}_{3}.pth".format(network_label, args.model, epoch, iteration)
    # model_save_dir = osp.join(args.load_dir, dataset)
    save_path = osp.join(args.load_dir, save_filename)
    model_state = torch.load(save_path)
    # Checkpoints written by save_network wrap the weights in a dict under
    # "state_dict"; older/bare checkpoints are the state dict itself.
    if "state_dict" in model_state:
        model.load_state_dict(model_state["state_dict"])
    else:
        model.load_state_dict(model_state)
    # NOTE(review): this dict is never used or saved; building it calls
    # model.cpu() as a side effect (moving the model to CPU) — confirm
    # whether the dict is dead code.
    model_state = {
        'state_dict': model.cpu().state_dict(),
        'epoch': epoch,
        'iteration': iteration,
        'model': args.model,
        'color_space': args.color_space,
        'batch_size': args.batch_size,
        'dataset': dataset,
        'image_size': args.image_size
    }
    # NOTE(review): cuda(device_id=...) is a legacy PyTorch signature; the
    # modern API is cuda(device=...).
    model.cuda(device_id=args.gpu)
    # NOTE(review): prints args.load rather than the `iteration` argument —
    # verify args.load holds the intended iteration number.
    print('Loaded {0} from epoch: {1} itr: {2}'.format(network_label, epoch, args.load))
def weights_init(m):
    """DCGAN-style initializer for `net.apply()`: conv weights ~ N(0, 0.02);
    norm-layer weights ~ N(1, 0.02) with zero bias."""
    layer_type = m.__class__.__name__
    if layer_type.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif layer_type.find('BatchNorm2d') != -1 or layer_type.find('InstanceNorm2d') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_norm_layer(norm_type):
    """Map a normalization name to its nn layer class.

    Args:
        norm_type: 'batch' for nn.BatchNorm2d or 'instance' for
            nn.InstanceNorm2d.

    Raises:
        NotImplementedError: for any other name.  (FIX: the original
        printed a message and then returned the unbound local
        ``norm_layer``, raising an opaque NameError.)
    """
    if norm_type == 'batch':
        return nn.BatchNorm2d
    if norm_type == 'instance':
        return nn.InstanceNorm2d
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def define_G(input_nc, output_nc, ngf, norm='batch', use_dropout=False, gpu_ids=[]):
    """Build a 9-block ResNet generator, DCGAN-initialized, optionally
    placed on the first listed GPU."""
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        # A GPU was requested, so CUDA must actually be present.
        assert torch.cuda.is_available()
    generator = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    if len(gpu_ids) > 0:
        generator.cuda(device_id=gpu_ids[0])
    generator.apply(weights_init)
    return generator
def define_D(input_nc, ndf, norm='batch', use_sigmoid=False, gpu_ids=[]):
    """Build a 3-layer PatchGAN discriminator, DCGAN-initialized,
    optionally placed on the first listed GPU."""
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        # A GPU was requested, so CUDA must actually be present.
        assert torch.cuda.is_available()
    discriminator = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    if len(gpu_ids) > 0:
        discriminator.cuda(device_id=gpu_ids[0])
    discriminator.apply(weights_init)
    return discriminator
def print_network(net):
    """Print a network's structure followed by its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
# Defines the GAN loss which uses either LSGAN or the regular GAN.
class GANLoss(nn.Module):
    """Adversarial loss with lazily-built constant target tensors.

    Uses MSE (LSGAN) or BCE (vanilla GAN) against real/fake label values.
    Target tensors are cached and rebuilt only when the input's element
    count changes.  ``__call__`` moves the target to the GPU, so it
    expects CUDA inputs.
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a cached constant tensor shaped like ``input``."""
        if target_is_real:
            stale = (self.real_label_var is None
                     or self.real_label_var.numel() != input.numel())
            if stale:
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        stale = (self.fake_label_var is None
                 or self.fake_label_var.numel() != input.numel())
        if stale:
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        target = self.get_target_tensor(input, target_is_real)
        return self.loss(input, target.cuda())
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
class ResnetGenerator(nn.Module):
    """ResNet-based generator: a 7x7 stem conv, two stride-2 downsampling
    convs, ``n_blocks`` residual blocks, two upsampling transposed convs,
    and a Tanh-activated 7x7 output conv."""

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[]):
        # input_nc/output_nc: input/output channel counts; ngf: base filter
        # count; norm_layer: normalization class; gpu_ids: GPUs for
        # data_parallel in forward().
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # 7x7 stem conv keeps spatial size (padding=3).
        model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),
                 norm_layer(ngf, affine=True),
                 nn.ReLU(True)]
        n_downsampling = 2
        # Each downsample halves the resolution and doubles the channels.
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                stride=2, padding=1),
                      norm_layer(ngf * mult * 2, affine=True),
                      nn.ReLU(True)]
        mult = 2**n_downsampling
        # Residual core at the lowest resolution.
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]
        # Mirror the downsampling with transposed convs back to full size.
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2), affine=True),
                      nn.ReLU(True)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, x):
        # Split across GPUs only when the input is already a CUDA tensor.
        if self.gpu_ids and isinstance(x.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
        else:
            return self.model(x)
# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: x + (Conv-Norm-ReLU-[Dropout]-Conv-Norm)(x).

    Only zero padding is supported (``padding_type`` must be 'zero').
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
        """Assemble the two-convolution body of the residual branch."""
        assert(padding_type == 'zero')
        pad = 1
        layers = [
            nn.Conv2d(dim, dim, kernel_size=3, padding=pad),
            norm_layer(dim, affine=True),
            nn.ReLU(True),
        ]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        layers.append(nn.Conv2d(dim, dim, kernel_size=3, padding=pad))
        layers.append(norm_layer(dim, affine=True))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Identity shortcut plus the convolutional branch.
        return x + self.conv_block(x)
# Defines the PatchGAN discriminator.
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: ``n_layers`` stride-2 conv blocks, one
    stride-1 block, then a 1-channel patch-prediction conv (optionally
    sigmoid-activated)."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kw = 4
        # NOTE(review): under Python 3 true division this is int(ceil(1.5)) == 2;
        # the classic (Python 2) pix2pix code yielded 1 — confirm the padding
        # of 2 is intended here.
        padw = int(np.ceil((kw-1)/2))
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        # Stride-2 blocks; the channel multiplier doubles per layer, capped at 8.
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,
                          padding=padw), norm_layer(ndf * nf_mult,
                                                     affine=True), nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        # Final stride-1 block before the prediction conv.
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,
                      padding=padw), norm_layer(ndf * nf_mult,
                                                 affine=True), nn.LeakyReLU(0.2, True)
        ]
        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            sequence += [nn.Sigmoid()]
        self.model = nn.Sequential(*sequence)

    def forward(self, x):
        # Split across GPUs only when the input is already a CUDA tensor.
        if len(self.gpu_ids) and isinstance(x.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
        else:
            return self.model(x)
class GramMatrix(nn.Module):
    """Batched Gram matrix of feature maps, normalized by b*c*d so the
    magnitude is independent of spatial resolution."""

    def forward(self, input):
        batch, channels, height, width = input.size()
        # Flatten each feature map to a row vector: (batch, channels, H*W).
        flat = input.view(batch, channels, height * width)
        gram = torch.bmm(flat, flat.transpose(1, 2))
        # Divide by the number of elements in each feature-map stack.
        return gram.div(channels * height * width)
class FeatureExtractor(nn.Module):
    """Run ``submodule`` layer by layer, collecting outputs of the layers
    named in ``extracted_layers``; returns the collected intermediate
    activations followed by the final output."""

    def __init__(self, submodule, extracted_layers):
        super(FeatureExtractor, self).__init__()
        self.submodule = submodule
        self.extracted_layers = extracted_layers

    def forward(self, x):
        collected = []
        for layer_name, layer in self.submodule._modules.items():
            x = layer(x)
            if layer_name in self.extracted_layers:
                collected.append(x)
        return collected + [x]
|
[
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.nn.ReLU",
"os.makedirs",
"torch.nn.BCELoss",
"torch.nn.Sequential",
"torch.nn.Tanh",
"numpy.ceil",
"torch.load",
"torch.nn.Conv2d",
"os.path.exists",
"torch.autograd.Variable",
"torch.nn.Sigmoid",
"torch.save",
"torch.cuda.is_available",
"torch.nn.LeakyReLU",
"os.path.join",
"torch.nn.parallel.data_parallel"
] |
[((355, 387), 'os.path.join', 'osp.join', (['args.save_dir', 'dataset'], {}), '(args.save_dir, dataset)\n', (363, 387), True, 'import os.path as osp\n'), ((484, 527), 'os.path.join', 'os.path.join', (['model_save_dir', 'save_filename'], {}), '(model_save_dir, save_filename)\n', (496, 527), False, 'import os\n'), ((839, 873), 'torch.save', 'torch.save', (['model_state', 'save_path'], {}), '(model_state, save_path)\n', (849, 873), False, 'import torch\n'), ((1263, 1301), 'os.path.join', 'osp.join', (['args.load_dir', 'save_filename'], {}), '(args.load_dir, save_filename)\n', (1271, 1301), True, 'import os.path as osp\n'), ((1325, 1346), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (1335, 1346), False, 'import torch\n'), ((399, 429), 'os.path.exists', 'os.path.exists', (['model_save_dir'], {}), '(model_save_dir)\n', (413, 429), False, 'import os\n'), ((439, 466), 'os.makedirs', 'os.makedirs', (['model_save_dir'], {}), '(model_save_dir)\n', (450, 466), False, 'import os\n'), ((2754, 2779), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2777, 2779), False, 'import torch\n'), ((3226, 3251), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3249, 3251), False, 'import torch\n'), ((6976, 6997), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (6989, 6997), True, 'import torch.nn as nn\n'), ((8015, 8041), 'torch.nn.Sequential', 'nn.Sequential', (['*conv_block'], {}), '(*conv_block)\n', (8028, 8041), True, 'import torch.nn as nn\n'), ((9545, 9569), 'torch.nn.Sequential', 'nn.Sequential', (['*sequence'], {}), '(*sequence)\n', (9558, 9569), True, 'import torch.nn as nn\n'), ((4164, 4176), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4174, 4176), True, 'import torch.nn as nn\n'), ((4215, 4227), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4225, 4227), True, 'import torch.nn as nn\n'), ((5830, 5880), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ngf'], {'kernel_size': 
'(7)', 'padding': '(3)'}), '(input_nc, ngf, kernel_size=7, padding=3)\n', (5839, 5880), True, 'import torch.nn as nn\n'), ((5946, 5959), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (5953, 5959), True, 'import torch.nn as nn\n'), ((6872, 6923), 'torch.nn.Conv2d', 'nn.Conv2d', (['ngf', 'output_nc'], {'kernel_size': '(7)', 'padding': '(3)'}), '(ngf, output_nc, kernel_size=7, padding=3)\n', (6881, 6923), True, 'import torch.nn as nn\n'), ((6943, 6952), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6950, 6952), True, 'import torch.nn as nn\n'), ((7116, 7170), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'x', 'self.gpu_ids'], {}), '(self.model, x, self.gpu_ids)\n', (7141, 7170), True, 'import torch.nn as nn\n'), ((7670, 7715), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (7679, 7715), True, 'import torch.nn as nn\n'), ((7793, 7806), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7800, 7806), True, 'import torch.nn as nn\n'), ((7899, 7944), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (7908, 7944), True, 'import torch.nn as nn\n'), ((8433, 8454), 'numpy.ceil', 'np.ceil', (['((kw - 1) / 2)'], {}), '((kw - 1) / 2)\n', (8440, 8454), True, 'import numpy as np\n'), ((8485, 8549), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ndf'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)\n', (8494, 8549), True, 'import torch.nn as nn\n'), ((8563, 8586), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (8575, 8586), True, 'import torch.nn as nn\n'), ((9138, 9226), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, 
stride=1,\n padding=padw)\n', (9147, 9226), True, 'import torch.nn as nn\n'), ((9334, 9357), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (9346, 9357), True, 'import torch.nn as nn\n'), ((9390, 9457), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult)', '(1)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n', (9399, 9457), True, 'import torch.nn as nn\n'), ((9694, 9748), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'x', 'self.gpu_ids'], {}), '(self.model, x, self.gpu_ids)\n', (9719, 9748), True, 'import torch.nn as nn\n'), ((4624, 4666), 'torch.autograd.Variable', 'Variable', (['real_tensor'], {'requires_grad': '(False)'}), '(real_tensor, requires_grad=False)\n', (4632, 4666), False, 'from torch.autograd import Variable\n'), ((5012, 5054), 'torch.autograd.Variable', 'Variable', (['fake_tensor'], {'requires_grad': '(False)'}), '(fake_tensor, requires_grad=False)\n', (5020, 5054), False, 'from torch.autograd import Variable\n'), ((6075, 6148), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ngf * mult)', '(ngf * mult * 2)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1)\n', (6084, 6148), True, 'import torch.nn as nn\n'), ((6267, 6280), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6274, 6280), True, 'import torch.nn as nn\n'), ((6838, 6851), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6845, 6851), True, 'import torch.nn as nn\n'), ((7859, 7874), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (7869, 7874), True, 'import torch.nn as nn\n'), ((8792, 8880), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,\n padding=padw)\n', (8801, 8880), True, 'import torch.nn as 
nn\n'), ((8996, 9019), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (9008, 9019), True, 'import torch.nn as nn\n'), ((9509, 9521), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9519, 9521), True, 'import torch.nn as nn\n')]
|
# -*- coding: utf-8 -*-
"""Wrapper to run RCSCON from the command line.
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo import simulation_db
from sirepo.template import sdds_util
from sirepo.template import template_common
import numpy as np
import py.path
import sirepo.template.rcscon as template
def run(cfg_dir):
    """Run an RCSCON report simulation and extract its report data.

    Executes the generated parameters file, then hands the run directory
    and the parsed simulation input to the template's report extractor.

    Args:
        cfg_dir: directory containing the simulation input files
    """
    template_common.exec_parameters()
    template.extract_report_data(
        py.path.local(cfg_dir),
        simulation_db.read_json(template_common.INPUT_BASE_NAME),
    )
def run_background(cfg_dir):
    """Run a background simulation for the parsed input's report type.

    The 'elegantAnimation' report is delegated to the elegant-based
    pipeline; any other report executes the generated parameters file.

    Args:
        cfg_dir: directory containing the simulation input files
    """
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data.report == 'elegantAnimation':
        return _run_elegant_simulation(cfg_dir)
    template_common.exec_parameters()
def _build_arrays():
    """Build ML input/output arrays from the elegant sigma-matrix output.

    Each input row couples one error page's values (mean phase, total
    volts), a beamline position, and the sigma values at the first row of
    that page's block; the matching output row holds the sigma values at
    the position itself.

    Returns:
        Tuple of (inputs, outputs) 2-D numpy arrays.
    """
    sigma = sdds_util.read_sdds_pages(
        'run_setup.sigma.sdds',
        ['s', 's1', 's12', 's2', 's3', 's34', 's4', 's5', 's56', 's6'],
    )
    errors = _error_values()
    inputs = []
    outputs = []
    # k walks over all sigma rows; the rows are assumed to be the
    # concatenation of one equal-length block per error page -- TODO confirm
    k = 0
    for i in range(len(errors)):
        for j in range(int(len(sigma.s) / len(errors))):
            # k - j points back to the first row of the current page's block
            initial_index = k - j
            inputs.append([
                errors[i, 1], errors[i, 2], sigma.s[k],
                sigma.s1[initial_index], sigma.s12[initial_index], sigma.s2[initial_index],
                sigma.s3[initial_index], sigma.s34[initial_index], sigma.s4[initial_index],
                sigma.s5[initial_index], sigma.s56[initial_index], sigma.s6[initial_index],
            ])
            outputs.append([
                sigma.s1[k], sigma.s12[k], sigma.s2[k],
                sigma.s3[k], sigma.s34[k], sigma.s4[k],
                sigma.s5[k], sigma.s56[k], sigma.s6[k],
            ])
            k+=1
    return np.asarray(inputs), np.asarray(outputs)
def _error_values():
    """Summarize the elegant error log, one row per SDDS page.

    Returns:
        numpy array of rows [page index, mean PHASE, total VOLT].
    """
    pages = sdds_util.read_sdds_pages(
        'error_control.error_log.sdds',
        ['ElementParameter', 'ParameterValue'],
        True)
    rows = []
    for page_idx, names in enumerate(pages.ElementParameter):
        # Group the page's parameter values by parameter name.
        by_name = PKDict()
        for name, value in zip(names, pages.ParameterValue[page_idx]):
            by_name.setdefault(name, []).append(value)
        rows.append([
            page_idx,
            np.mean(np.asarray(by_name.PHASE)),
            np.sum(np.asarray(by_name.VOLT)),
        ])
    return np.asarray(rows)
def _run_elegant_simulation(cfg_dir):
    """Run the elegant simulation and export training data as CSV.

    Writes `inputs.csv` and `outputs.csv` (with header rows) into the
    current working directory.

    Args:
        cfg_dir: directory containing the simulation input files (unused
            here beyond being the conventional entry-point signature)
    """
    import sirepo.pkcli.elegant
    sirepo.pkcli.elegant.run_elegant()
    inputs, outputs = _build_arrays()
    # Sigma-matrix column labels shared by the input and output files.
    common = [
        's1', 's12', 's2',
        's3', 's34', 's4',
        's5', 's56', 's6',
    ]
    in_cols = ['average phase', 'total volts', 'position']
    in_header = ','.join(in_cols + ['initial ' + x for x in common])
    out_header = ','.join(common)
    np.savetxt('inputs.csv', inputs, delimiter=',', comments='', header=in_header)
    np.savetxt('outputs.csv', outputs, delimiter=',', comments='', header=out_header)
|
[
"sirepo.template.template_common.exec_parameters",
"numpy.asarray",
"numpy.savetxt",
"sirepo.simulation_db.read_json",
"sirepo.template.sdds_util.read_sdds_pages",
"pykern.pkcollections.PKDict"
] |
[((569, 602), 'sirepo.template.template_common.exec_parameters', 'template_common.exec_parameters', ([], {}), '()\n', (600, 602), False, 'from sirepo.template import template_common\n'), ((783, 839), 'sirepo.simulation_db.read_json', 'simulation_db.read_json', (['template_common.INPUT_BASE_NAME'], {}), '(template_common.INPUT_BASE_NAME)\n', (806, 839), False, 'from sirepo import simulation_db\n'), ((934, 967), 'sirepo.template.template_common.exec_parameters', 'template_common.exec_parameters', ([], {}), '()\n', (965, 967), False, 'from sirepo.template import template_common\n'), ((1003, 1120), 'sirepo.template.sdds_util.read_sdds_pages', 'sdds_util.read_sdds_pages', (['"""run_setup.sigma.sdds"""', "['s', 's1', 's12', 's2', 's3', 's34', 's4', 's5', 's56', 's6']"], {}), "('run_setup.sigma.sdds', ['s', 's1', 's12', 's2',\n 's3', 's34', 's4', 's5', 's56', 's6'])\n", (1028, 1120), False, 'from sirepo.template import sdds_util\n'), ((2026, 2134), 'sirepo.template.sdds_util.read_sdds_pages', 'sdds_util.read_sdds_pages', (['"""error_control.error_log.sdds"""', "['ElementParameter', 'ParameterValue']", '(True)'], {}), "('error_control.error_log.sdds', [\n 'ElementParameter', 'ParameterValue'], True)\n", (2051, 2134), False, 'from sirepo.template import sdds_util\n'), ((2629, 2644), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (2639, 2644), True, 'import numpy as np\n'), ((3062, 3140), 'numpy.savetxt', 'np.savetxt', (['"""inputs.csv"""', 'inputs'], {'delimiter': '""","""', 'comments': '""""""', 'header': 'in_header'}), "('inputs.csv', inputs, delimiter=',', comments='', header=in_header)\n", (3072, 3140), True, 'import numpy as np\n'), ((3145, 3231), 'numpy.savetxt', 'np.savetxt', (['"""outputs.csv"""', 'outputs'], {'delimiter': '""","""', 'comments': '""""""', 'header': 'out_header'}), "('outputs.csv', outputs, delimiter=',', comments='', header=\n out_header)\n", (3155, 3231), True, 'import numpy as np\n'), ((677, 733), 'sirepo.simulation_db.read_json', 
'simulation_db.read_json', (['template_common.INPUT_BASE_NAME'], {}), '(template_common.INPUT_BASE_NAME)\n', (700, 733), False, 'from sirepo import simulation_db\n'), ((1951, 1969), 'numpy.asarray', 'np.asarray', (['inputs'], {}), '(inputs)\n', (1961, 1969), True, 'import numpy as np\n'), ((1971, 1990), 'numpy.asarray', 'np.asarray', (['outputs'], {}), '(outputs)\n', (1981, 1990), True, 'import numpy as np\n'), ((2237, 2245), 'pykern.pkcollections.PKDict', 'PKDict', ([], {}), '()\n', (2243, 2245), False, 'from pykern.pkcollections import PKDict\n'), ((2547, 2571), 'numpy.asarray', 'np.asarray', (['values.PHASE'], {}), '(values.PHASE)\n', (2557, 2571), True, 'import numpy as np\n'), ((2581, 2604), 'numpy.asarray', 'np.asarray', (['values.VOLT'], {}), '(values.VOLT)\n', (2591, 2604), True, 'import numpy as np\n')]
|
# Main.py - Pixels Fighting #
# Author: <NAME> #
# ---------------------#
# Imports #
import pygame
from pygame.locals import *
from helpers import *
import random
import numpy as np
import time
# ---------------------#
# Initialize number of rows/columns
INT = 100
INT_SQ = INT*INT
# Initialize size (in pixels) of each cell
SIZE = 5
# Initialize Pygame
pygame.init()
# Initialize screen, status and clock
screen = pygame.display.set_mode((80+INT*SIZE,160+INT*SIZE))
running = True
clock = pygame.time.Clock()
# Random team colors.  randint bounds are inclusive, so the upper bound
# must be 255: pygame rejects channel values outside 0-255.  (Previously
# the upper bound was 256, which could raise an invalid-color error.)
COLOR_ALIVE = (random.randint(1,255),random.randint(0,255),random.randint(0,255))
COLOR_DEAD = (random.randint(1,255),random.randint(0,255),random.randint(0,255))
# Initialize Status Array - Making an array with half dead and half alive
zero = np.zeros((INT,INT//2), dtype=int)
one = np.ones((INT,INT//2), dtype=int)
current_status_array = np.concatenate((zero,one), axis=1)
# ---------------------#
# For Title Text to be displayed
# Defining font style and size
font = pygame.font.Font('freesansbold.ttf', 32)
text_title = font.render('Pixels Fighting', True, (255,255,255), (0,0,0))
textRectTitle = text_title.get_rect()
textRectTitle.center = (40+INT*SIZE/2, 40)
# ---------------------#
# Defining Box Class
class Box():
    """One grid cell, drawn as a SIZE x SIZE square on the screen.

    The cell state mirrors ``current_status_array``: 0 is dead, anything
    else is alive.
    """

    def __init__(self, x, y, alive):
        self.x = x
        self.y = y
        self.alive = alive
        self.surf = pygame.Surface((SIZE,SIZE))
        # Screen position: column (y) offsets horizontally, row (x) vertically.
        self.rect = (40 + SIZE*self.y, 100 + SIZE*self.x)

    def assign_color(self):
        """Fill the surface with the state's color and blit it to the screen."""
        color = COLOR_DEAD if self.alive == 0 else COLOR_ALIVE
        self.surf.fill(color)
        screen.blit(self.surf,self.rect)

    def update(self):
        """Refresh the state from the global status array and redraw."""
        self.alive = current_status_array[self.x][self.y]
        self.assign_color()
# ---------------------#
# Creating 'INT_SQ' instances of box class, and appending them to a list for accessibility
boxes = []
for i in range(INT_SQ):
    # x,y will be filled sequentially (row-major order)
    x = i//INT
    y = i%INT
    # Alive status depending on the current status array
    boxes.append(Box(x,y,current_status_array[x][y]))
# ---------------------#
# For Ratio Text to be displayed and updated continuously
# Defining font style and size
# Smaller font for the live alive/dead ratio readout
font = pygame.font.Font('freesansbold.ttf', 25)
def UpdateRatioText():
    """Render the current alive and dead fractions at the window bottom.

    Uses the IsAliveWinning helper (presumably from the `helpers` star
    import -- confirm) to compute the alive fraction of
    current_status_array.
    """
    # For the alive ones
    text_alive = font.render('Alive: {:.4f}'.format(IsAliveWinning(current_status_array, INT_SQ)), True, COLOR_ALIVE, (0,0,0))
    textRectAlive = text_alive.get_rect()
    textRectAlive.x = 80 + INT*SIZE - 210
    textRectAlive.y = 115 + INT*SIZE
    # For the dead ones
    text_dead = font.render('Dead: {:.4f}'.format(1-IsAliveWinning(current_status_array, INT_SQ)), True, COLOR_DEAD, (0,0,0))
    textRectDead = text_dead.get_rect()
    textRectDead.x = 60
    textRectDead.y = 115 + INT*SIZE
    # Blit both readouts onto the screen
    screen.blit(text_alive, textRectAlive)
    screen.blit(text_dead, textRectDead)
# ---------------------#
# Main python loop
while running:
    # Handle window events; quit cleanly when the window is closed
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # Advance the simulation one step and redraw every cell
    current_status_array = UpdateArray(current_status_array, INT)
    for box in boxes:
        box.update()
    # Update Ratio text
    UpdateRatioText()
    # Display Title
    screen.blit(text_title, textRectTitle)
    # Refresh screen
    pygame.display.update()
    # Fixed ~10 FPS pacing, used here instead of clock.tick()
    time.sleep(0.1)
# ---------------------#
|
[
"random.randint",
"pygame.Surface",
"pygame.event.get",
"pygame.display.set_mode",
"numpy.zeros",
"numpy.ones",
"pygame.init",
"time.sleep",
"pygame.display.update",
"pygame.font.Font",
"pygame.time.Clock",
"numpy.concatenate"
] |
[((346, 359), 'pygame.init', 'pygame.init', ([], {}), '()\n', (357, 359), False, 'import pygame\n'), ((408, 468), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(80 + INT * SIZE, 160 + INT * SIZE)'], {}), '((80 + INT * SIZE, 160 + INT * SIZE))\n', (431, 468), False, 'import pygame\n'), ((483, 502), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (500, 502), False, 'import pygame\n'), ((768, 804), 'numpy.zeros', 'np.zeros', (['(INT, INT // 2)'], {'dtype': 'int'}), '((INT, INT // 2), dtype=int)\n', (776, 804), True, 'import numpy as np\n'), ((808, 843), 'numpy.ones', 'np.ones', (['(INT, INT // 2)'], {'dtype': 'int'}), '((INT, INT // 2), dtype=int)\n', (815, 843), True, 'import numpy as np\n'), ((864, 899), 'numpy.concatenate', 'np.concatenate', (['(zero, one)'], {'axis': '(1)'}), '((zero, one), axis=1)\n', (878, 899), True, 'import numpy as np\n'), ((998, 1038), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(32)'], {}), "('freesansbold.ttf', 32)\n", (1014, 1038), False, 'import pygame\n'), ((2375, 2415), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(25)'], {}), "('freesansbold.ttf', 25)\n", (2391, 2415), False, 'import pygame\n'), ((538, 560), 'random.randint', 'random.randint', (['(1)', '(256)'], {}), '(1, 256)\n', (552, 560), False, 'import random\n'), ((560, 582), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (574, 582), False, 'import random\n'), ((582, 604), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (596, 604), False, 'import random\n'), ((619, 641), 'random.randint', 'random.randint', (['(1)', '(256)'], {}), '(1, 256)\n', (633, 641), False, 'import random\n'), ((641, 663), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (655, 663), False, 'import random\n'), ((663, 685), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (677, 685), False, 'import random\n'), ((3204, 3222), 
'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3220, 3222), False, 'import pygame\n'), ((3579, 3602), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3600, 3602), False, 'import pygame\n'), ((3703, 3718), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3713, 3718), False, 'import time\n'), ((1427, 1455), 'pygame.Surface', 'pygame.Surface', (['(SIZE, SIZE)'], {}), '((SIZE, SIZE))\n', (1441, 1455), False, 'import pygame\n')]
|
import json
import numpy as np
import os
import pkg_resources
import re
from typing import Any, AnyStr, Dict, List, Optional, Tuple
def load_peripheral(pdata, templates=None):
    """Load a peripheral definition, applying its type template if any.

    When a template is registered for the peripheral's type, each
    electrode entry is merged with the template electrode of the same id;
    the peripheral's own fields win on duplicate keys.  The input dict is
    updated in place and returned.

    Args:
        pdata: A dict containing the peripheral definition
        templates: A dict mapping types to template definitions
    """
    if 'type' not in pdata:
        raise ValueError("Peripheral definition requires a type field")

    template = None
    if templates is not None:
        template = templates.get(pdata['type'])

    def merge_electrode(e):
        if template is None:
            return e
        match = None
        for candidate in template['electrodes']:
            if candidate['id'] == e['id']:
                match = candidate
                break
        if match is None:
            return e
        # Merge dicts; values in e take priority on duplicate keys.
        return {**match, **e}

    pdata['electrodes'] = [merge_electrode(e) for e in pdata['electrodes']]
    return pdata
class Fiducial(object):
    """A labeled fiducial marker, stored as its corner coordinates."""

    def __init__(self, corners: List[List[int]], label: str=""):
        self.corners = corners
        self.label = label

    @staticmethod
    def from_dict(data):
        """Build a Fiducial from its dict representation."""
        return Fiducial(**data)

    def to_dict(self):
        """Return the dict representation (inverse of ``from_dict``)."""
        return dict(corners=self.corners, label=self.label)
class ControlPoint(object):
    """Represents a control point in an image

    A control point is a pair of corresponding points -- one in image coordinates
    and the other in grid coordinates -- used to calibrate the position of
    the electrode grid relative to fiducials.
    """
    def __init__(self, grid_coord: Tuple[float, float], image_coord: Tuple[float, float]):
        self.grid = grid_coord
        self.image = image_coord

    # `@staticmethod` was missing: the plain function only worked when
    # called via the class; calling it on an instance passed the instance
    # as `data`.  Also matches the Fiducial.from_dict convention.
    @staticmethod
    def from_dict(data):
        """Build a ControlPoint from a dict with `grid` and `image` keys.

        Raises:
            ValueError: if either required attribute is missing.
        """
        for attr in ('grid', 'image'):
            if attr not in data:
                raise ValueError(f'A control point must have a `grid` and `image` attribute: {data}')
        return ControlPoint(data['grid'], data['image'])
class Registration(object):
    """A collection of fiducials and control points which together define
    the relationship between the electrode locations and fiducials.

    Raises:
        ValueError: if either required attribute is missing or not a list.
    """
    def __init__(self, data: dict):
        if not 'fiducials' in data:
            raise ValueError(f'A Registration requires a fiducials attribute, not found in: {data}')
        if not 'control_points' in data:
            raise ValueError(f'A Registration requires a control points attribute, not found in: {data}')
        if not isinstance(data['fiducials'], list):
            raise ValueError(f'A Registration `fiducial` attribute must be a list: {data}')
        if not isinstance(data['control_points'], list):
            raise ValueError(f'a Registration `control_points` attribute must be a list: {data}')
        # Deserialize each entry into its typed wrapper.
        self.fiducials = [Fiducial.from_dict(f) for f in data['fiducials']]
        self.control_points = [ControlPoint.from_dict(cp) for cp in data['control_points']]
class Layout(object):
    """Represents the 'layout' property of a board definition

    A layout defines the placement and pin mapping for the electrodes on
    the board.
    """

    def __init__(self, layout_def: Dict[str, Any]):
        self.peripherals = None
        self.grids = []

        def clean_pins(pin_rows):
            # Normalize pin entries: -1 or None mean "no electrode here",
            # anything else is coerced to int.
            cleaned = []
            for src_row in pin_rows:
                dst_row: List[Optional[int]] = []
                for entry in src_row:
                    dst_row.append(None if entry == -1 or entry is None else int(entry))
                cleaned.append(dst_row)
            return cleaned

        # Old format files use 'grid' to define a single grid
        # New format uses an array of objects, under the key 'grids'
        if 'grid' in layout_def:
            self.grids.append({
                'origin': [0.0, 0.0],
                'pitch': 1.0,
                'pins': clean_pins(layout_def['grid']),
            })
        elif 'grids' in layout_def:
            self.grids.extend(
                {
                    'origin': g['origin'],
                    'pitch': g['pitch'],
                    'pins': clean_pins(g['pins']),
                }
                for g in layout_def['grids']
            )

        if 'peripherals' in layout_def:
            peripheral_templates = layout_def.get('peripheral_templates', None)
            self.peripherals = [
                load_peripheral(p, peripheral_templates)
                for p in layout_def['peripherals']
            ]

    def grid_location_to_pin(self, x: int, y: int, grid_number: int =0):
        """Return the pin number at given grid location, or None if no pin
        is defined there.
        """
        if grid_number < len(self.grids):
            pins = self.grids[grid_number]['pins']
        else:
            pins = [[]]  # No such grid; treat as empty
        if not 0 <= y < len(pins):
            return None
        if not 0 <= x < len(pins[y]):
            return None
        return pins[y][x]

    def pin_to_grid_location(self, pin: int) -> Optional[Tuple[Tuple[int, int], int]]:
        """Return the grid location of a given pin number, as
        ((x, y), grid index), or None if the pin is in no grid.
        """
        for grid_idx, grid in enumerate(self.grids):
            for row_idx, row in enumerate(grid['pins']):
                for col_idx, candidate in enumerate(row):
                    if candidate == pin:
                        return ((col_idx, row_idx), grid_idx)
        return None

    def pin_polygon(self, pin: int) -> Optional[List[Tuple[int, int]]]:
        """Get the polygon defining a pin in board coordinates
        """
        # Grid pins are unit squares scaled by the grid pitch.
        located = self.pin_to_grid_location(pin)
        if located is not None:
            loc, grid_idx = located
            unit_square = np.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.]])
            grid = self.grids[grid_idx]
            return ((unit_square + loc) * grid['pitch'] + grid['origin']).tolist()
        # Fall back to searching the peripherals
        if self.peripherals is None:
            return None
        for periph in self.peripherals:
            for el in periph['electrodes']:
                if el['pin'] != pin:
                    continue
                polygon = np.array(el['polygon'])
                theta = np.deg2rad(periph.get('rotation', 0.0))
                # 2-D rotation matrix for the peripheral's orientation
                R = np.array([
                    [np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)],
                ])
                polygon = np.dot(R, polygon.T).T
                return (polygon + periph['origin']).tolist()
        return None

    def as_dict(self) -> dict:
        """Return a serializable dict version of the board definition
        """
        return {
            "grids": self.grids,
            "peripherals": self.peripherals,
        }
class Board(object):
    """Represents the top-level object in an electrode board definition file
    """

    def __init__(self, board_def: Dict[str, Any]):
        self.registration: Optional[Registration] = None
        if 'layout' not in board_def:
            raise RuntimeError("Board definition file must contain a 'layout' object")
        self.layout = Layout(board_def['layout'])
        self.oversized_electrodes = board_def.get('oversized_electrodes', [])
        if 'registration' in board_def:
            self.registration = Registration(board_def['registration'])

    @staticmethod
    def load_from_file(filepath):
        """Create a Board from a board definition file
        """
        with open(filepath, 'r') as f:
            return Board(json.loads(f.read()))

    @staticmethod
    def load_from_string(data: AnyStr) -> 'Board':
        """Create a board from a JSON string in memory
        """
        return Board(json.loads(data))

    def as_dict(self) -> dict:
        """Return a serializable dict representation of the board
        """
        return dict(
            layout=self.layout.as_dict(),
            oversized_electrodes=self.oversized_electrodes,
        )
def list_boards():
    """Find all available board definitions.

    Uses same search rules as load_board; see :func:`load_board`.

    Returns:
        A list of board names, which can be passed to `load_board`
    """
    config_dir = os.path.expanduser("~/.config/purpledrop/boards")
    package_files = pkg_resources.resource_listdir('purpledrop', 'boards')
    if os.path.isdir(config_dir):
        config_files = os.listdir(config_dir)
    else:
        config_files = []

    board_names = []

    def add_files(files):
        for f in files:
            # Only files named exactly `<name>.json` define a board; the
            # dot is escaped and the match anchored so e.g. `fooXjson` or
            # `a.json.bak` are not picked up.
            match = re.match(r'(.+)\.json$', os.path.basename(f))
            # A board defined in both locations is listed once;
            # load_board resolves which copy wins.
            if match and match.group(1) not in board_names:
                board_names.append(match.group(1))

    add_files(package_files)
    add_files(config_files)
    return board_names
def load_board(name) -> Optional[Board]:
    """Load a board definition by name or path

    Attempt to load a board definition from the name, using the following
    priorities (the first to succeed is returned):

    1. Load as a full path
    2. Load from ~/.config/purpledrop/boards/{name}.json
    3. Load from package resources (`purpledrop/boards` in repo)
    """
    candidates = (
        name,
        os.path.expanduser(f"~/.config/purpledrop/boards/{name}.json"),
    )
    for path in candidates:
        if os.path.isfile(path):
            return Board.load_from_file(path)
    try:
        resource_data = pkg_resources.resource_string('purpledrop', f"boards/{name}.json")
    except FileNotFoundError:
        return None
    return Board.load_from_string(resource_data)
|
[
"pkg_resources.resource_listdir",
"json.loads",
"os.path.basename",
"os.path.isdir",
"os.path.isfile",
"numpy.sin",
"numpy.array",
"pkg_resources.resource_string",
"numpy.cos",
"numpy.dot",
"os.path.expanduser",
"os.listdir"
] |
[((8557, 8606), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.config/purpledrop/boards"""'], {}), "('~/.config/purpledrop/boards')\n", (8575, 8606), False, 'import os\n'), ((8627, 8681), 'pkg_resources.resource_listdir', 'pkg_resources.resource_listdir', (['"""purpledrop"""', '"""boards"""'], {}), "('purpledrop', 'boards')\n", (8657, 8681), False, 'import pkg_resources\n'), ((8689, 8714), 'os.path.isdir', 'os.path.isdir', (['config_dir'], {}), '(config_dir)\n', (8702, 8714), False, 'import os\n'), ((9567, 9587), 'os.path.isfile', 'os.path.isfile', (['name'], {}), '(name)\n', (9581, 9587), False, 'import os\n'), ((9648, 9710), 'os.path.expanduser', 'os.path.expanduser', (['f"""~/.config/purpledrop/boards/{name}.json"""'], {}), "(f'~/.config/purpledrop/boards/{name}.json')\n", (9666, 9710), False, 'import os\n'), ((9718, 9743), 'os.path.isfile', 'os.path.isfile', (['home_path'], {}), '(home_path)\n', (9732, 9743), False, 'import os\n'), ((8739, 8761), 'os.listdir', 'os.listdir', (['config_dir'], {}), '(config_dir)\n', (8749, 8761), False, 'import os\n'), ((9826, 9892), 'pkg_resources.resource_string', 'pkg_resources.resource_string', (['"""purpledrop"""', 'f"""boards/{name}.json"""'], {}), "('purpledrop', f'boards/{name}.json')\n", (9855, 9892), False, 'import pkg_resources\n'), ((6070, 6128), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])\n', (6078, 6128), True, 'import numpy as np\n'), ((8056, 8072), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (8066, 8072), False, 'import json\n'), ((8948, 8967), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (8964, 8967), False, 'import os\n'), ((6525, 6548), 'numpy.array', 'np.array', (["el['polygon']"], {}), "(el['polygon'])\n", (6533, 6548), True, 'import numpy as np\n'), ((6762, 6782), 'numpy.dot', 'np.dot', (['R', 'polygon.T'], {}), '(R, polygon.T)\n', (6768, 6782), True, 'import numpy as np\n'), 
((6655, 6671), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (6661, 6671), True, 'import numpy as np\n'), ((6694, 6710), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (6700, 6710), True, 'import numpy as np\n'), ((6712, 6728), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (6718, 6728), True, 'import numpy as np\n'), ((6674, 6690), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (6680, 6690), True, 'import numpy as np\n')]
|
from multiprocessing import Queue, Process
from threading import Thread
import numpy as np
import utils
from agent import PPOAgent
from policy import get_policy
from worker import Worker
import environments
class SimpleMaster:
    """Master process for distributed PPO training.

    Spawns one worker process per the environment's configured worker
    count, then loops forever: broadcast the current network/optimizer
    weights (plus beta and lr multiplier) to every worker over its queue,
    collect each worker's updated values from a shared inbound queue,
    average them back into the master graph, and periodically checkpoint.

    NOTE(review): the constructor calls ``start()``, which never returns,
    so creating a SimpleMaster blocks the calling thread.
    """
    def __init__(self, env_producer):
        self.env_name = env_producer.get_env_name()
        self.config = environments.get_config(self.env_name)
        self.worker_size = self.config["worker_num"]
        self.env_producer = env_producer
        # One outbound queue per worker; a single shared inbound queue.
        self.queues = []
        self.w_in_queue = Queue()
        self.init_workers()
        self.session = None
        self.trainable_vars = None
        self.accum_vars = None
        self.p_opt_vars = None
        self.v_opt_vars = None
        self.assign_op = None
        self.agent = None
        self.saver = None
        self.summary_writer = None
        # Training hyper-state shared with workers (KL penalty weight and
        # learning-rate scale), persisted alongside checkpoints.
        self.beta = 1
        self.lr_multiplier = 1.0
        self.iter_count = 1
        self.variables_file_path = "models/%s/variables.txt" % self.env_name
        self.model_path = "models/%s/model" % self.env_name
        self.initialized = False
        self.cur_step = -1
        self.start()

    def init_workers(self):
        """Spawn one worker process per configured worker, each with its own queue."""
        for i in range(self.worker_size):
            q = Queue()
            self.queues.append(q)
            t = Process(target=make_worker, args=(self.env_producer, i, q, self.w_in_queue))
            t.start()

    def start(self):
        """Build the TF graph, restore any checkpoint, and run the training loop forever."""
        import tensorflow as tf
        env_opts = environments.get_env_options(self.env_name, self.env_producer.get_use_gpu())
        self.summary_writer = tf.summary.FileWriter("logs/%s" % self.env_name)
        self.session = utils.create_session(env_opts, True)
        with tf.variable_scope("master-0"):
            pol = get_policy(env_opts, self.session)
            self.agent = PPOAgent(pol, self.session, "master-0", env_opts)
            self.trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "master-0")
            # Staging variables used to write merged weights back into the
            # graph: network weights plus both optimizers' state.
            self.accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in
                               self.trainable_vars]
            p_vars = self.agent.p_opt.variables()
            v_vars = self.agent.v_opt.variables()
            self.p_opt_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in p_vars]
            self.v_opt_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in v_vars]
            p_assign_ops = [p_vars[i].assign(self.p_opt_vars[i]) for i in range(len(p_vars))]
            v_assign_ops = [v_vars[i].assign(self.v_opt_vars[i]) for i in range(len(v_vars))]
            assign_ops = [self.trainable_vars[i].assign(self.accum_vars[i]) for i in
                          range(len(self.trainable_vars))]
            self.assign_op = tf.group(assign_ops + p_assign_ops + v_assign_ops)
        self.restore_variables()
        self.saver = tf.train.Saver(max_to_keep=1)
        self.session.run(tf.global_variables_initializer())
        # Best-effort checkpoint restore; a fresh run has no checkpoint.
        try:
            self.saver = tf.train.import_meta_graph(
                tf.train.latest_checkpoint("models/%s/" % env_opts["env_name"]) + ".meta")
            self.saver.restore(self.session,
                               tf.train.latest_checkpoint("models/%s/" % env_opts["env_name"]))
        except:
            print("failed to restore model")
        while True:
            # Checkpoint every 10 iterations.
            if self.iter_count % 10 == 0:
                print("Saving model...")
                self.save_variables()
                self.saver.save(self.session, self.model_path, self.iter_count)
                print("Model saved")
            self.broadcast_weights()
            self.merge_weights()
            self.iter_count += 1

    def restore_variables(self):
        """Best-effort restore of iter_count, beta and lr_multiplier from the variables file."""
        try:
            lines = open(self.variables_file_path).readlines()
            result = {}
            for l in lines:
                a, b = l.split("=")
                b = b.strip()
                result[a] = b
            self.iter_count = int(result["global_step"]) + 1
            self.beta = float(result["beta"])
            self.lr_multiplier = float(result["lr_multiplier"])
        except:
            print("failed to restore variables")

    def save_variables(self):
        """Persist iter_count, beta and lr_multiplier as key=value lines."""
        f = open(self.variables_file_path, "w")
        lines = []
        lines.append("global_step=%s\n" % self.iter_count)
        lines.append("beta=%s\n" % self.beta)
        lines.append("lr_multiplier=%s\n" % self.lr_multiplier)
        f.writelines(lines)
        f.close()

    def broadcast_weights(self):
        """Send current weights, optimizer state, beta and lr multiplier to every worker."""
        weights, p_opt_weights, v_opt_weights = self.session.run([self.trainable_vars,
                                                                  self.agent.p_opt.variables(),
                                                                  self.agent.v_opt.variables()])
        arr = [self.beta, self.lr_multiplier, p_opt_weights, v_opt_weights, weights]
        for q in self.queues:
            q.put(arr)

    def merge_weights(self):
        """Collect every worker's results and average them into the master graph."""
        results = []
        for i in range(self.worker_size):
            results.append(self.w_in_queue.get())
        # Per-result layout: [beta, lr_multiplier, p_opt, v_opt, weights, stats].
        self.beta = np.mean([x[0] for x in results])
        self.lr_multiplier = np.mean([x[1] for x in results])
        p_opt_weights = self.make_means([x[2] for x in results])
        v_opt_weights = self.make_means([x[3] for x in results])
        weights = self.make_means([x[4] for x in results])
        # Only worker 0's stats are recorded, to avoid duplicate summaries.
        first_worker = [x for x in results if x[5]["idx"] == 0][0]
        self.record_stats(first_worker[5])
        fd = {}
        for i, t in enumerate(self.accum_vars):
            fd[t] = weights[i]
        for i, t in enumerate(self.p_opt_vars):
            fd[t] = p_opt_weights[i]
        for i, t in enumerate(self.v_opt_vars):
            fd[t] = v_opt_weights[i]
        self.session.run(self.assign_op, feed_dict=fd)

    def make_means(self, weights):
        """Element-wise average of a list of per-worker weight lists."""
        result = []
        for i in range(len(weights[0])):
            acc = []
            for j in range(len(weights)):
                acc.append(weights[j][i])
            acc = np.mean(acc, axis=0)
            result.append(acc)
        return result

    def record_stats(self, stats):
        """Record the losses and per-episode summaries reported by worker 0."""
        # Skip if this training step was already recorded.
        if self.cur_step == stats["step"]:
            return
        self.cur_step = stats["step"]
        self.record_losses(stats["kl"], stats["entropy"], stats["hinge"], stats["src_policy_loss"],
                           stats["vloss"], stats["ploss"], stats["step"])
        cum_rew = 0
        for s in stats["stats"]:
            self.log_summary(s["reward"], s["step"], s["a_probs"], s["picked_a"], s["a_dim"], s["discrete"])
            cum_rew += s["reward"]
        cum_rew /= max(1, len(stats["stats"]))
        print("Average reward: %s" % cum_rew)

    def record_losses(self, cur_kl, entropy, hinge, src_policy_loss, vloss, ploss, step):
        """Write loss and hyper-parameter scalars to TensorBoard."""
        import tensorflow as tf
        summary = tf.Summary()
        summary.value.add(tag='Losses/value_loss', simple_value=vloss)
        summary.value.add(tag='Losses/policy_loss', simple_value=ploss)
        summary.value.add(tag='Losses/kl_divergence', simple_value=cur_kl)
        summary.value.add(tag='Losses/entropy', simple_value=entropy)
        summary.value.add(tag='Losses/src_policy_loss', simple_value=src_policy_loss)
        summary.value.add(tag='Losses/hinge', simple_value=hinge)
        summary.value.add(tag='Vars/beta', simple_value=self.beta)
        summary.value.add(tag='Vars/lr_multiplier', simple_value=self.lr_multiplier)
        self.summary_writer.add_summary(summary, step)
        self.summary_writer.flush()

    def log_summary(self, reward, step, a_probs, picked_a, a_dim, discrete):
        """Write per-episode reward and action-distribution scalars to TensorBoard."""
        import tensorflow as tf
        summary = tf.Summary()
        summary.value.add(tag='Reward/per_episode', simple_value=float(reward))
        if not discrete:
            # Continuous actions: a_probs holds the means in the first
            # a_dim slots and the stddevs in the next a_dim slots.
            for i in range(a_dim):
                prefix = "Action" + str(i)
                summary.value.add(tag=prefix + '/mean', simple_value=float(a_probs[i]))
                summary.value.add(tag=prefix + "/std", simple_value=float(a_probs[i + a_dim]))
                summary.value.add(tag=prefix + '/picked', simple_value=float(picked_a[i]))
        else:
            for i in range(a_dim):
                prefix = "Action" + str(i)
                summary.value.add(tag=prefix + '/prob', simple_value=float(a_probs[i]))
            summary.value.add(tag='Action/picked', simple_value=float(picked_a))
        self.summary_writer.add_summary(summary, step)
        self.summary_writer.flush()
def make_worker(env_producer, i, q, w_in_queue):
    """Create the Worker for process index ``i``.

    Module-level so it can be pickled as a multiprocessing Process target;
    presumably the Worker constructor runs the worker loop -- confirm.
    """
    return Worker(env_producer, i, q, w_in_queue)
|
[
"policy.get_policy",
"environments.get_config",
"utils.create_session",
"tensorflow.train.Saver",
"tensorflow.Summary",
"tensorflow.get_collection",
"tensorflow.global_variables_initializer",
"worker.Worker",
"tensorflow.variable_scope",
"tensorflow.summary.FileWriter",
"numpy.mean",
"agent.PPOAgent",
"multiprocessing.Queue",
"tensorflow.group",
"tensorflow.train.latest_checkpoint",
"multiprocessing.Process"
] |
[((8593, 8631), 'worker.Worker', 'Worker', (['env_producer', 'i', 'q', 'w_in_queue'], {}), '(env_producer, i, q, w_in_queue)\n', (8599, 8631), False, 'from worker import Worker\n'), ((343, 381), 'environments.get_config', 'environments.get_config', (['self.env_name'], {}), '(self.env_name)\n', (366, 381), False, 'import environments\n'), ((527, 534), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (532, 534), False, 'from multiprocessing import Queue, Process\n'), ((1561, 1609), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["('logs/%s' % self.env_name)"], {}), "('logs/%s' % self.env_name)\n", (1582, 1609), True, 'import tensorflow as tf\n'), ((1633, 1669), 'utils.create_session', 'utils.create_session', (['env_opts', '(True)'], {}), '(env_opts, True)\n', (1653, 1669), False, 'import utils\n'), ((2903, 2932), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1)'}), '(max_to_keep=1)\n', (2917, 2932), True, 'import tensorflow as tf\n'), ((5136, 5168), 'numpy.mean', 'np.mean', (['[x[0] for x in results]'], {}), '([x[0] for x in results])\n', (5143, 5168), True, 'import numpy as np\n'), ((5198, 5230), 'numpy.mean', 'np.mean', (['[x[1] for x in results]'], {}), '([x[1] for x in results])\n', (5205, 5230), True, 'import numpy as np\n'), ((6885, 6897), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (6895, 6897), True, 'import tensorflow as tf\n'), ((7709, 7721), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (7719, 7721), True, 'import tensorflow as tf\n'), ((1224, 1231), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1229, 1231), False, 'from multiprocessing import Queue, Process\n'), ((1282, 1358), 'multiprocessing.Process', 'Process', ([], {'target': 'make_worker', 'args': '(self.env_producer, i, q, self.w_in_queue)'}), '(target=make_worker, args=(self.env_producer, i, q, self.w_in_queue))\n', (1289, 1358), False, 'from multiprocessing import Queue, Process\n'), ((1683, 1712), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""master-0"""'], {}), "('master-0')\n", (1700, 1712), True, 'import tensorflow as tf\n'), ((1732, 1766), 'policy.get_policy', 'get_policy', (['env_opts', 'self.session'], {}), '(env_opts, self.session)\n', (1742, 1766), False, 'from policy import get_policy\n'), ((1792, 1841), 'agent.PPOAgent', 'PPOAgent', (['pol', 'self.session', '"""master-0"""', 'env_opts'], {}), "(pol, self.session, 'master-0', env_opts)\n", (1800, 1841), False, 'from agent import PPOAgent\n'), ((1876, 1939), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', '"""master-0"""'], {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, 'master-0')\n", (1893, 1939), True, 'import tensorflow as tf\n'), ((2797, 2847), 'tensorflow.group', 'tf.group', (['(assign_ops + p_assign_ops + v_assign_ops)'], {}), '(assign_ops + p_assign_ops + v_assign_ops)\n', (2805, 2847), True, 'import tensorflow as tf\n'), ((2958, 2991), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2989, 2991), True, 'import tensorflow as tf\n'), ((6070, 6090), 'numpy.mean', 'np.mean', (['acc'], {'axis': '(0)'}), '(acc, axis=0)\n', (6077, 6090), True, 'import numpy as np\n'), ((3226, 3289), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["('models/%s/' % env_opts['env_name'])"], {}), "('models/%s/' % env_opts['env_name'])\n", (3252, 3289), True, 'import tensorflow as tf\n'), ((3075, 3138), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["('models/%s/' % env_opts['env_name'])"], {}), "('models/%s/' % env_opts['env_name'])\n", (3101, 3138), True, 'import tensorflow as tf\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import rospkg
import threading
import yaml
from copy import deepcopy
import message_filters
import numpy as np
import pyrobot.utils.util as prutil
import rospy
from pyrobot.core import Camera
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
sys.path.append(ros_path)
from cv_bridge import CvBridge, CvBridgeError
class Kinect2Camera(Camera):
"""
This is camera class that interfaces with the KinectV2 camera
"""
def __init__(self, configs):
"""
Constructor of the KinectV2Camera class.
:param configs: Camera specific configuration object
:type configs: YACS CfgNode
"""
super(Kinect2Camera, self).__init__(configs=configs)
self.cv_bridge = CvBridge()
self.camera_info_lock = threading.RLock()
self.camera_img_lock = threading.RLock()
self.rgb_img = None
self.depth_img = None
self.camera_info = None
self.camera_P = None
rospy.Subscriber(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,
CameraInfo,
self._camera_info_callback)
rgb_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_RGB_STREAM
self.rgb_sub = message_filters.Subscriber(rgb_topic, Image)
depth_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_DEPTH_STREAM
self.depth_sub = message_filters.Subscriber(depth_topic, Image)
img_subs = [self.rgb_sub, self.depth_sub]
self.sync = message_filters.ApproximateTimeSynchronizer(img_subs,
queue_size=10,
slop=0.2)
self.sync.registerCallback(self._sync_callback)
self.DepthMapFactor = float(self.configs.CAMERA.DEPTH_MAP_FACTOR)
self.intrinsic_mat = None
def _sync_callback(self, rgb, depth):
self.camera_img_lock.acquire()
try:
self.rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb, "bgr8")
self.rgb_img = self.rgb_img[:, :, ::-1]
self.depth_img = self.cv_bridge.imgmsg_to_cv2(depth, "passthrough")
except CvBridgeError as e:
rospy.logerr(e)
self.camera_img_lock.release()
def _camera_info_callback(self, msg):
self.camera_info_lock.acquire()
self.camera_info = msg
self.camera_P = np.array(msg.P).reshape((3, 4))
self.camera_info_lock.release()
def get_rgb(self):
'''
This function returns the RGB image perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
rgb = deepcopy(self.rgb_img)
self.camera_img_lock.release()
return rgb
def get_depth(self):
'''
This function returns the depth image perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
depth = deepcopy(self.depth_img)
self.camera_img_lock.release()
return depth
def get_rgb_depth(self):
'''
This function returns both the RGB and depth
images perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
rgb = deepcopy(self.rgb_img)
depth = deepcopy(self.depth_img)
self.camera_img_lock.release()
return rgb, depth
def get_intrinsics(self):
"""
This function returns the camera intrinsics.
:rtype: np.ndarray
"""
if self.camera_P is None:
return self.camera_P
self.camera_info_lock.acquire()
P = deepcopy(self.camera_P)
self.camera_info_lock.release()
return P[:3, :3]
def get_current_pcd(self):
"""
Return the point cloud at current time step (one frame only)
:returns: tuple (pts, colors)
pts: point coordinates in camera frame (shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
rgb_im, depth_im = self.get_rgb_depth()
depth = depth_im.reshape(-1) / self.DepthMapFactor
rgb = rgb_im.reshape(-1, 3)
if self.intrinsic_mat is None:
self.intrinsic_mat = self.get_intrinsics()
self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)
#TODO: image height --> rgb_im.shape[0] and width--> rgb_im.shape[1]
img_pixs = np.mgrid[0: rgb_im.shape[0]: 1,
0: rgb_im.shape[1]: 1]
img_pixs = img_pixs.reshape(2, -1)
img_pixs[[0, 1], :] = img_pixs[[1, 0], :]
self.uv_one = np.concatenate((img_pixs,
np.ones((1, img_pixs.shape[1]))))
self.uv_one_in_cam = np.dot(self.intrinsic_mat_inv, self.uv_one)
pts_in_cam = np.multiply(self.uv_one_in_cam, depth)
pts_in_cam = np.concatenate((pts_in_cam,
np.ones((1, pts_in_cam.shape[1]))),
axis=0)
pts = pts_in_cam[:3, :].T
return pts, rgb
def pix_to_3dpt(self, rs, cs, reduce = 'none', k=5):
"""
Get the 3D points of the pixels in RGB images.
:param rs: rows of interest in the RGB image.
It can be a list or 1D numpy array
which contains the row indices.
The default value is None,
which means all rows.
:param cs: columns of interest in the RGB image.
It can be a list or 1D numpy array
which contains the column indices.
The default value is None,
which means all columns.
:param reduce: whether to consider the depth at nearby pixels
'none': no neighbour consideration
'mean': depth based on the mean of kernel sized k centered at [rs,cs]
'max': depth based on the max of kernel sized k centered at [rs,cs]
'min': depth based on the min of kernel sized k centered at [rs,cs]
:param k: kernel size for reduce type['mean', 'max', 'min']
:type rs: list or np.ndarray
:type cs: list or np.ndarray
:type reduce: str
:tyep k: int
:returns: tuple (pts, colors)
pts: point coordinates in world frame
(shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam
(shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
assert isinstance(rs,
int) or isinstance(rs,
list) or isinstance(rs,
np.ndarray)
assert isinstance(cs,
int) or isinstance(cs,
list) or isinstance(cs,
np.ndarray)
if isinstance(rs, int):
rs = [rs]
if isinstance(cs, int):
cs = [cs]
if isinstance(rs, np.ndarray):
rs = rs.flatten()
if isinstance(cs, np.ndarray):
cs = cs.flatten()
rgb_im, depth_im = self.get_rgb_depth()
R,C,_ = rgb_im.shape
if reduce == 'none':
depth_im = depth_im[rs, cs]
elif reduce == 'mean':
depth_im = np.array([np.mean(depth_im[max(i-k,0):min(i+k,R), max(j-k,0):min(j+k,C)]) for i,j in zip(rs,cs)])
elif reduce == 'max':
depth_im = np.array([np.max(depth_im[max(i-k,0):min(i+k,R), max(j-k,0):min(j+k,C)]) for i,j in zip(rs,cs)])
elif reduce == 'min':
depth_im = np.array([np.min(depth_im[max(i-k,0):min(i+k,R), max(j-k,0):min(j+k,C)]) for i,j in zip(rs,cs)])
else:
raise ValueError('Invalid reduce name provided, only the following'
' are currently available: [{}, {}, {}, {}]'.format('none','mean', 'max', 'min'))
#depth_im = depth_im[rs, cs]
depth = depth_im.reshape(-1) / self.DepthMapFactor
img_pixs = np.stack((rs, cs)).reshape(2, -1)
img_pixs[[0, 1], :] = img_pixs[[1, 0], :]
uv_one = np.concatenate((img_pixs,
np.ones((1, img_pixs.shape[1]))))
if self.intrinsic_mat is None:
self.intrinsic_mat = self.get_intrinsics()
self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)
uv_one_in_cam = np.dot(self.intrinsic_mat_inv, uv_one)
pts_in_cam = np.multiply(uv_one_in_cam, depth)
pts_in_cam = np.concatenate((pts_in_cam,
np.ones((1, pts_in_cam.shape[1]))),
axis=0)
pts = pts_in_cam[:3, :].T
colors = rgb_im[rs, cs].reshape(-1, 3)
return pts, colors
|
[
"sys.path.append",
"numpy.stack",
"cv_bridge.CvBridge",
"sys.path.remove",
"rospy.Subscriber",
"copy.deepcopy",
"numpy.multiply",
"rospy.logerr",
"threading.RLock",
"message_filters.ApproximateTimeSynchronizer",
"numpy.ones",
"numpy.linalg.inv",
"numpy.array",
"message_filters.Subscriber",
"numpy.dot"
] |
[((627, 652), 'sys.path.append', 'sys.path.append', (['ros_path'], {}), '(ros_path)\n', (642, 652), False, 'import sys\n'), ((586, 611), 'sys.path.remove', 'sys.path.remove', (['ros_path'], {}), '(ros_path)\n', (601, 611), False, 'import sys\n'), ((1101, 1111), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1109, 1111), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1144, 1161), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1159, 1161), False, 'import threading\n'), ((1193, 1210), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1208, 1210), False, 'import threading\n'), ((1338, 1447), 'rospy.Subscriber', 'rospy.Subscriber', (['self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM', 'CameraInfo', 'self._camera_info_callback'], {}), '(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,\n CameraInfo, self._camera_info_callback)\n', (1354, 1447), False, 'import rospy\n'), ((1585, 1629), 'message_filters.Subscriber', 'message_filters.Subscriber', (['rgb_topic', 'Image'], {}), '(rgb_topic, Image)\n', (1611, 1629), False, 'import message_filters\n'), ((1726, 1772), 'message_filters.Subscriber', 'message_filters.Subscriber', (['depth_topic', 'Image'], {}), '(depth_topic, Image)\n', (1752, 1772), False, 'import message_filters\n'), ((1843, 1921), 'message_filters.ApproximateTimeSynchronizer', 'message_filters.ApproximateTimeSynchronizer', (['img_subs'], {'queue_size': '(10)', 'slop': '(0.2)'}), '(img_subs, queue_size=10, slop=0.2)\n', (1886, 1921), False, 'import message_filters\n'), ((3035, 3057), 'copy.deepcopy', 'deepcopy', (['self.rgb_img'], {}), '(self.rgb_img)\n', (3043, 3057), False, 'from copy import deepcopy\n'), ((3327, 3351), 'copy.deepcopy', 'deepcopy', (['self.depth_img'], {}), '(self.depth_img)\n', (3335, 3351), False, 'from copy import deepcopy\n'), ((3647, 3669), 'copy.deepcopy', 'deepcopy', (['self.rgb_img'], {}), '(self.rgb_img)\n', (3655, 3669), False, 'from copy import deepcopy\n'), ((3686, 3710), 'copy.deepcopy', 
'deepcopy', (['self.depth_img'], {}), '(self.depth_img)\n', (3694, 3710), False, 'from copy import deepcopy\n'), ((4030, 4053), 'copy.deepcopy', 'deepcopy', (['self.camera_P'], {}), '(self.camera_P)\n', (4038, 4053), False, 'from copy import deepcopy\n'), ((5306, 5344), 'numpy.multiply', 'np.multiply', (['self.uv_one_in_cam', 'depth'], {}), '(self.uv_one_in_cam, depth)\n', (5317, 5344), True, 'import numpy as np\n'), ((9059, 9097), 'numpy.dot', 'np.dot', (['self.intrinsic_mat_inv', 'uv_one'], {}), '(self.intrinsic_mat_inv, uv_one)\n', (9065, 9097), True, 'import numpy as np\n'), ((9119, 9152), 'numpy.multiply', 'np.multiply', (['uv_one_in_cam', 'depth'], {}), '(uv_one_in_cam, depth)\n', (9130, 9152), True, 'import numpy as np\n'), ((4759, 4792), 'numpy.linalg.inv', 'np.linalg.inv', (['self.intrinsic_mat'], {}), '(self.intrinsic_mat)\n', (4772, 4792), True, 'import numpy as np\n'), ((5232, 5275), 'numpy.dot', 'np.dot', (['self.intrinsic_mat_inv', 'self.uv_one'], {}), '(self.intrinsic_mat_inv, self.uv_one)\n', (5238, 5275), True, 'import numpy as np\n'), ((9001, 9034), 'numpy.linalg.inv', 'np.linalg.inv', (['self.intrinsic_mat'], {}), '(self.intrinsic_mat)\n', (9014, 9034), True, 'import numpy as np\n'), ((2565, 2580), 'rospy.logerr', 'rospy.logerr', (['e'], {}), '(e)\n', (2577, 2580), False, 'import rospy\n'), ((2758, 2773), 'numpy.array', 'np.array', (['msg.P'], {}), '(msg.P)\n', (2766, 2773), True, 'import numpy as np\n'), ((5431, 5464), 'numpy.ones', 'np.ones', (['(1, pts_in_cam.shape[1])'], {}), '((1, pts_in_cam.shape[1]))\n', (5438, 5464), True, 'import numpy as np\n'), ((8676, 8694), 'numpy.stack', 'np.stack', (['(rs, cs)'], {}), '((rs, cs))\n', (8684, 8694), True, 'import numpy as np\n'), ((8836, 8867), 'numpy.ones', 'np.ones', (['(1, img_pixs.shape[1])'], {}), '((1, img_pixs.shape[1]))\n', (8843, 8867), True, 'import numpy as np\n'), ((9239, 9272), 'numpy.ones', 'np.ones', (['(1, pts_in_cam.shape[1])'], {}), '((1, pts_in_cam.shape[1]))\n', (9246, 9272), 
True, 'import numpy as np\n'), ((5165, 5196), 'numpy.ones', 'np.ones', (['(1, img_pixs.shape[1])'], {}), '((1, img_pixs.shape[1]))\n', (5172, 5196), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from typing import List, Union
from ..storage import History
from .util import to_lists_or_default
def plot_sample_numbers(
histories: Union[List, History],
labels: Union[List, str] = None,
rotation: int = 0,
title: str = "Total required samples",
size: tuple = None):
"""
Plot required numbers of samples over all iterations.
Parameters
----------
histories: Union[List, History]
The histories to plot from. History ids must be set correctly.
labels: Union[List ,str], optional
Labels corresponding to the histories. If None are provided,
indices are used as labels.
rotation: int, optional (default = 0)
Rotation to apply to the plot's x tick labels. For longer labels,
a tilting of 45 or even 90 can be preferable.
title: str, optional (default = "Total required samples")
Title for the plot.
size: tuple of float, optional
The size of the plot in inches.
Returns
-------
ax: Axis of the generated plot.
"""
# preprocess input
histories, labels = to_lists_or_default(histories, labels)
# create figure
fig, ax = plt.subplots()
n_run = len(histories)
# extract sample numbers
samples = []
for history in histories:
# note: the first entry corresponds to the calibration and should
# be included here to be fair against methods not requiring
# calibration
samples.append(np.array(history.get_all_populations()['samples']))
# create matrix
n_pop = max(len(sample) for sample in samples)
matrix = np.zeros((n_pop, n_run))
for i_sample, sample in enumerate(samples):
matrix[:len(sample), i_sample] = sample
# plot bars
for i_pop in range(n_pop):
ax.bar(x=np.arange(n_run),
height=matrix[i_pop, :],
bottom=np.sum(matrix[:i_pop, :], axis=0))
# add labels
ax.set_xticks(np.arange(n_run))
ax.set_xticklabels(labels, rotation=rotation)
ax.set_title(title)
ax.set_ylabel("Samples")
ax.set_xlabel("Run")
# set size
if size is not None:
fig.set_size_inches(size)
fig.tight_layout()
return ax
|
[
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((1238, 1252), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1250, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1682, 1706), 'numpy.zeros', 'np.zeros', (['(n_pop, n_run)'], {}), '((n_pop, n_run))\n', (1690, 1706), True, 'import numpy as np\n'), ((2019, 2035), 'numpy.arange', 'np.arange', (['n_run'], {}), '(n_run)\n', (2028, 2035), True, 'import numpy as np\n'), ((1868, 1884), 'numpy.arange', 'np.arange', (['n_run'], {}), '(n_run)\n', (1877, 1884), True, 'import numpy as np\n'), ((1948, 1981), 'numpy.sum', 'np.sum', (['matrix[:i_pop, :]'], {'axis': '(0)'}), '(matrix[:i_pop, :], axis=0)\n', (1954, 1981), True, 'import numpy as np\n')]
|
import random
import math
import numpy as np
import matplotlib.pyplot as plt
# Calculating Pi using Monte Carlo algorithm.
def montecarlo_pi(times:int):
inside = 0
total = times
for i in range(times):
x_i = random.random()
y_i = random.random()
delta = x_i ** 2 + y_i **2 - 1
if delta <= 0:
inside += 1
approx_pi = 4 * inside / total
print('\nRandom test: ' + str(times))
print('Approximation of pi is:{:.8f}'.format(approx_pi))
return approx_pi
if __name__ == '__main__':
numlist = [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000, 10000000, 30000000, 50000000, 75000000, 100000000]
x_list = list(np.log10(numlist))
pi_ = []
for times in numlist:
pi_.append(montecarlo_pi(times))
plt.figure()
plt.plot([min(x_list), max(x_list)], [math.pi, math.pi], color='red', label='true value')
plt.plot(x_list, pi_, 'b.-', label='approximation')
plt.legend()
plt.xlabel('log10(n)')
plt.ylabel('pi')
my_y_ticks = np.arange(3, 3.4, 0.02)
plt.yticks(my_y_ticks)
plt.ylim((min(pi_)-0.1, max(pi_)+0.1))
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"random.random",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel"
] |
[((816, 828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (826, 828), True, 'import matplotlib.pyplot as plt\n'), ((927, 978), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'pi_', '"""b.-"""'], {'label': '"""approximation"""'}), "(x_list, pi_, 'b.-', label='approximation')\n", (935, 978), True, 'import matplotlib.pyplot as plt\n'), ((984, 996), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (994, 996), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log10(n)"""'], {}), "('log10(n)')\n", (1011, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1028, 1044), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pi"""'], {}), "('pi')\n", (1038, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1086), 'numpy.arange', 'np.arange', (['(3)', '(3.4)', '(0.02)'], {}), '(3, 3.4, 0.02)\n', (1072, 1086), True, 'import numpy as np\n'), ((1091, 1113), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_y_ticks'], {}), '(my_y_ticks)\n', (1101, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1170, 1172), True, 'import matplotlib.pyplot as plt\n'), ((231, 246), 'random.random', 'random.random', ([], {}), '()\n', (244, 246), False, 'import random\n'), ((261, 276), 'random.random', 'random.random', ([], {}), '()\n', (274, 276), False, 'import random\n'), ((712, 729), 'numpy.log10', 'np.log10', (['numlist'], {}), '(numlist)\n', (720, 729), True, 'import numpy as np\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# TODO: Remove this when https://github.com/parejkoj/astropy/tree/luptonRGB
# is in Astropy.
"""
Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004).
For details, see : http://adsabs.harvard.edu/abs/2004PASP..116..133L
The three images must be aligned and have the same pixel scale and size.
Example usage:
imageR = np.random.random((100,100))
imageG = np.random.random((100,100))
imageB = np.random.random((100,100))
image = lupton_rgb.makeRGB(imageR, imageG, imageB, fileName='randoms.png')
lupton_rgb.displayRGB(image)
"""
import numpy as np
try:
import scipy.misc
HAVE_SCIPY_MISC = True
except ImportError:
HAVE_SCIPY_MISC = False
# from lsst.afw.display.displayLib import replaceSaturatedPixels, getZScale
def compute_intensity(imageR, imageG=None, imageB=None):
"""
Return a naive total intensity from the red, blue, and green intensities.
Parameters
----------
imageR : `~numpy.ndarray`
Intensity of image to be mapped to red; or total intensity if imageG and
imageB are None.
imageG : `~numpy.ndarray`
Intensity of image to be mapped to green; or None.
imageB : `~numpy.ndarray`
Intensity of image to be mapped to blue; or None.
"""
if imageG is None or imageB is None:
assert imageG is None and imageB is None, \
"Please specify either a single image or red, green, and blue images"
return imageR
intensity = (imageR + imageG + imageB)/3.0
# Repack into whatever type was passed to us
return np.array(intensity, dtype=imageR.dtype)
def zscale(image, nSamples=1000, contrast=0.25):
"""
TBD: replace with newly added astropy.zscale function.
This emulates ds9's zscale feature. Returns the suggested minimum and
maximum values to display.
Parameters
----------
image : `~numpy.ndarray`
The image to compute the scaling on.
nSamples : int
How many samples to take when building the histogram.
contrast : float
???
"""
stride = image.size/nSamples
samples = image.flatten()[::stride]
samples.sort()
chop_size = int(0.10*len(samples))
subset = samples[chop_size:-chop_size]
i_midpoint = int(len(subset)/2)
I_mid = subset[i_midpoint]
fit = np.polyfit(np.arange(len(subset)) - i_midpoint, subset, 1)
# fit = [ slope, intercept]
z1 = I_mid + fit[0]/contrast * (1-i_midpoint)/1.0
z2 = I_mid + fit[0]/contrast * (len(subset)-i_midpoint)/1.0
return z1, z2
class Mapping(object):
"""Baseclass to map red, blue, green intensities into uint8 values"""
def __init__(self, minimum=None, image=None):
"""
Create a mapping
Parameters
----------
minimum : float or sequence(3)
Intensity that should be mapped to black (a scalar or array for R, G, B).
image : `~numpy.ndarray`
The image to be used to calculate the mapping.
If provided, it is also used as the default for makeRgbImage().
"""
self._uint8Max = float(np.iinfo(np.uint8).max)
try:
len(minimum)
except:
minimum = 3*[minimum]
assert len(minimum) == 3, "Please provide 1 or 3 values for minimum"
self.minimum = minimum
self._image = image
def makeRgbImage(self, imageR=None, imageG=None, imageB=None,
xSize=None, ySize=None, rescaleFactor=None):
"""
Convert 3 arrays, imageR, imageG, and imageB into a numpy RGB image.
Parameters
----------
imageR : `~numpy.ndarray`
Image to map to red (if None, use the image passed to the constructor).
imageG : `~numpy.ndarray`
Image to map to green (if None, use imageR).
imageB : `~numpy.ndarray`
Image to map to blue (if None, use imageR).
xSize : int
Desired width of RGB image (or None). If ySize is None, preserve
aspect ratio.
ySize : int
Desired height of RGB image (or None).
rescaleFactor : float
Make size of output image rescaleFactor*size of the input image.
Cannot be specified if xSize or ySize are given.
"""
if imageR is None:
if self._image is None:
raise RuntimeError("You must provide an image or pass one to the constructor")
imageR = self._image
if imageG is None:
imageG = imageR
if imageB is None:
imageB = imageR
if xSize is not None or ySize is not None:
assert rescaleFactor is None, "You may not specify a size and rescaleFactor"
h, w = imageR.shape
if ySize is None:
ySize = int(xSize*h/float(w) + 0.5)
elif xSize is None:
xSize = int(ySize*w/float(h) + 0.5)
# need to cast to int when passing tuple to imresize.
size = (int(ySize), int(xSize)) # n.b. y, x order for scipy
elif rescaleFactor is not None:
size = float(rescaleFactor) # a float is intepreted as a percentage
else:
size = None
if size is not None:
if not HAVE_SCIPY_MISC:
raise RuntimeError("Unable to rescale as scipy.misc is unavailable.")
imageR = scipy.misc.imresize(imageR, size, interp='bilinear', mode='F')
imageG = scipy.misc.imresize(imageG, size, interp='bilinear', mode='F')
imageB = scipy.misc.imresize(imageB, size, interp='bilinear', mode='F')
return np.dstack(self._convertImagesToUint8(imageR, imageG, imageB)).astype(np.uint8)
def intensity(self, imageR, imageG, imageB):
"""
Return the total intensity from the red, blue, and green intensities.
This is a naive computation, and may be overridden by subclasses.
"""
return compute_intensity(imageR, imageG, imageB)
def mapIntensityToUint8(self, I):
"""
Return an array which, when multiplied by an image, returns that image
mapped to the range of a uint8, [0, 255] (but not converted to uint8).
The intensity is assumed to have had minimum subtracted (as that can be
done per-band).
"""
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0, np.where(I < self._uint8Max, I, self._uint8Max))
def _convertImagesToUint8(self, imageR, imageG, imageB):
"""Use the mapping to convert images imageR, imageG, and imageB to a triplet of uint8 images"""
imageR = imageR - self.minimum[0] # n.b. makes copy
imageG = imageG - self.minimum[1]
imageB = imageB - self.minimum[2]
fac = self.mapIntensityToUint8(self.intensity(imageR, imageG, imageB))
imageRGB = [imageR, imageG, imageB]
for c in imageRGB:
c *= fac
c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't
pixmax = self._uint8Max
r0, g0, b0 = imageRGB # copies -- could work row by row to minimise memory usage
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
for i, c in enumerate(imageRGB):
c = np.where(r0 > g0,
np.where(r0 > b0,
np.where(r0 >= pixmax, c*pixmax/r0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c)),
np.where(g0 > b0,
np.where(g0 >= pixmax, c*pixmax/g0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8)
c[c > pixmax] = pixmax
imageRGB[i] = c
return imageRGB
class LinearMapping(Mapping):
"""A linear map map of red, blue, green intensities into uint8 values"""
def __init__(self, minimum=None, maximum=None, image=None):
"""
A linear stretch from [minimum, maximum].
If one or both are omitted use image min and/or max to set them.
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
maximum : float
Intensity that should be mapped to white (a scalar).
"""
if minimum is None or maximum is None:
assert image is not None, "You must provide an image if you don't set both minimum and maximum"
if minimum is None:
minimum = image.min()
if maximum is None:
maximum = image.max()
Mapping.__init__(self, minimum=minimum, image=image)
self.maximum = maximum
if maximum is None:
self._range = None
else:
assert maximum - minimum != 0, "minimum and maximum values must not be equal"
self._range = float(maximum - minimum)
def mapIntensityToUint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0,
np.where(I >= self._range, self._uint8Max/I, self._uint8Max/self._range))
class ZScaleMapping(LinearMapping):
"""
A mapping for a linear stretch chosen by the zscale algorithm.
(preserving colours independent of brightness)
x = (I - minimum)/range
"""
def __init__(self, image, nSamples=1000, contrast=0.25):
"""
A linear stretch from [z1, z2] chosen by the zscale algorithm.
Parameters
----------
nSamples : int
The number of samples to use to estimate the zscale parameters.
contrast : float
The number of samples to use to estimate the zscale parameters.
"""
z1, z2 = zscale(image, nSamples, contrast)
LinearMapping.__init__(self, z1, z2, image)
class AsinhMapping(Mapping):
"""
A mapping for an asinh stretch (preserving colours independent of brightness)
x = asinh(Q (I - minimum)/range)/Q
This reduces to a linear stretch if Q == 0
See http://adsabs.harvard.edu/abs/2004PASP..116..133L
"""
def __init__(self, minimum, dataRange, Q=8):
"""
asinh stretch from minimum to minimum + dataRange, scaled by Q, via:
x = asinh(Q (I - minimum)/dataRange)/Q
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
dataRange : float
minimum+dataRange defines the white level of the image.
Q : float
The asinh softening parameter.
"""
Mapping.__init__(self, minimum)
epsilon = 1.0/2**23 # 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit
if abs(Q) < epsilon:
Q = 0.1
else:
Qmax = 1e10
if Q > Qmax:
Q = Qmax
if False:
self._slope = self._uint8Max/Q # gradient at origin is self._slope
else:
frac = 0.1 # gradient estimated using frac*range is _slope
self._slope = frac*self._uint8Max/np.arcsinh(frac*Q)
self._soften = Q/float(dataRange)
def mapIntensityToUint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0, np.arcsinh(I*self._soften)*self._slope/I)
class AsinhZScaleMapping(AsinhMapping):
"""
A mapping for an asinh stretch, estimating the linear stretch by zscale.
x = asinh(Q (I - z1)/(z2 - z1))/Q
See AsinhMapping
"""
def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None):
"""
Create an asinh mapping from an image, setting the linear part of the
stretch using zscale.
Parameters
----------
image1 : `~numpy.ndarray`
The image to analyse,
# or a list of 3 images to be converted to an intensity image.
image2 : `~numpy.ndarray`
the second image to analyse (must be specified with image3).
image3 : `~numpy.ndarray`
the third image to analyse (must be specified with image2).
Q : float
The asinh softening parameter.
pedestal : float or sequence(3)
The value, or array of 3 values, to subtract from the images; or None.
pedestal, if not None, is removed from the images when calculating
the zscale stretch, and added back into Mapping.minimum.
"""
if image2 is None or image3 is None:
assert image2 is None and image3 is None, "Please specify either a single image or three images."
image = [image1]
else:
image = [image1, image2, image3]
if pedestal is not None:
try:
assert len(pedestal) in (1, 3,), "Please provide 1 or 3 pedestals."
except TypeError:
pedestal = 3*[pedestal]
image = list(image) # needs to be mutable
for i, im in enumerate(image):
if pedestal[i] != 0.0:
image[i] = im - pedestal[i] # n.b. a copy
else:
pedestal = len(image)*[0.0]
image = compute_intensity(*image)
zscale = ZScaleMapping(image)
dataRange = zscale.maximum - zscale.minimum[0] # zscale.minimum is always a triple
minimum = zscale.minimum
for i, level in enumerate(pedestal):
minimum[i] += level
AsinhMapping.__init__(self, minimum, dataRange, Q)
self._image = image
def makeRGB(imageR, imageG=None, imageB=None, minimum=0, dataRange=5, Q=8,
saturatedBorderWidth=0, saturatedPixelValue=None,
xSize=None, ySize=None, rescaleFactor=None,
fileName=None):
"""
Make an RGB color image from 3 images using an asinh stretch.
Parameters
----------
imageR : `~numpy.ndarray`
Image to map to red (if None, use the image passed to the constructor).
imageG : `~numpy.ndarray`
Image to map to green (if None, use imageR).
imageB : `~numpy.ndarray`
Image to map to blue (if None, use imageR).
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
dataRange : float
minimum+dataRange defines the white level of the image.
Q : float
The asinh softening parameter.
saturatedBorderWidth : int
If saturatedBorderWidth is non-zero, replace saturated pixels with saturatedPixelValue.
Note that replacing saturated pixels requires that the input images be MaskedImages.
saturatedPixelValue : float
Value to replace saturated pixels with.
xSize : int
Desired width of RGB image (or None). If ySize is None, preserve aspect ratio.
ySize : int
Desired height of RGB image (or None).
rescaleFactor : float
Make size of output image rescaleFactor*size of the input image.
Cannot be specified if xSize or ySize are given.
"""
if imageG is None:
imageG = imageR
if imageB is None:
imageB = imageR
if saturatedBorderWidth:
if saturatedPixelValue is None:
raise ValueError("saturatedPixelValue must be set if saturatedBorderWidth is set")
msg = "Cannot do this until we extract replaceSaturatedPixels out of afw/display/saturated.cc"
raise NotImplementedError(msg)
# replaceSaturatedPixels(imageR, imageG, imageB, saturatedBorderWidth, saturatedPixelValue)
asinhMap = AsinhMapping(minimum, dataRange, Q)
rgb = asinhMap.makeRgbImage(imageR, imageG, imageB,
xSize=xSize, ySize=ySize, rescaleFactor=rescaleFactor)
if fileName:
writeRGB(fileName, rgb)
return rgb
def displayRGB(rgb, show=True, title=None):
"""
Display an rgb image using matplotlib.
Parameters
----------
rgb : `~numpy.ndarray`
The RGB image to display
show : bool
If true, call plt.show()
title : str
Title to use for the displayed image.
"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation='nearest', origin="lower")
if title:
plt.title(title)
if show:
plt.show()
return plt
def writeRGB(fileName, rgbImage):
"""
Write an RGB image to disk.
Most versions of matplotlib support png and pdf (although the eps/pdf/svg
writers may be buggy, possibly due an interaction with useTeX=True in the
matplotlib settings).
If your matplotlib bundles pil/pillow you should also be able to write jpeg
and tiff files.
Parameters
----------
fileName : str
The output file. The extension defines the format, and must be
supported by matplotlib.imsave().
rgbImage : `~numpy.ndarray`
The RGB image to save.
"""
import matplotlib.image
matplotlib.image.imsave(fileName, rgbImage)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.iinfo",
"numpy.errstate",
"numpy.where",
"numpy.array",
"numpy.arcsinh"
] |
[((1660, 1699), 'numpy.array', 'np.array', (['intensity'], {'dtype': 'imageR.dtype'}), '(intensity, dtype=imageR.dtype)\n', (1668, 1699), True, 'import numpy as np\n'), ((16626, 16682), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(rgb, interpolation='nearest', origin='lower')\n", (16636, 16682), True, 'import matplotlib.pyplot as plt\n'), ((16705, 16721), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (16714, 16721), True, 'import matplotlib.pyplot as plt\n'), ((16743, 16753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16751, 16753), True, 'import matplotlib.pyplot as plt\n'), ((6445, 6491), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (6456, 6491), True, 'import numpy as np\n'), ((7355, 7401), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (7366, 7401), True, 'import numpy as np\n'), ((9274, 9320), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (9285, 9320), True, 'import numpy as np\n'), ((11647, 11693), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (11658, 11693), True, 'import numpy as np\n'), ((3197, 3215), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (3205, 3215), True, 'import numpy as np\n'), ((6581, 6628), 'numpy.where', 'np.where', (['(I < self._uint8Max)', 'I', 'self._uint8Max'], {}), '(I < self._uint8Max, I, self._uint8Max)\n', (6589, 6628), True, 'import numpy as np\n'), ((9438, 9514), 'numpy.where', 'np.where', (['(I >= self._range)', '(self._uint8Max / I)', '(self._uint8Max / self._range)'], {}), '(I >= self._range, self._uint8Max / I, self._uint8Max / self._range)\n', (9446, 
9514), True, 'import numpy as np\n'), ((11533, 11553), 'numpy.arcsinh', 'np.arcsinh', (['(frac * Q)'], {}), '(frac * Q)\n', (11543, 11553), True, 'import numpy as np\n'), ((11783, 11811), 'numpy.arcsinh', 'np.arcsinh', (['(I * self._soften)'], {}), '(I * self._soften)\n', (11793, 11811), True, 'import numpy as np\n'), ((7620, 7662), 'numpy.where', 'np.where', (['(r0 >= pixmax)', '(c * pixmax / r0)', 'c'], {}), '(r0 >= pixmax, c * pixmax / r0, c)\n', (7628, 7662), True, 'import numpy as np\n'), ((7698, 7740), 'numpy.where', 'np.where', (['(b0 >= pixmax)', '(c * pixmax / b0)', 'c'], {}), '(b0 >= pixmax, c * pixmax / b0, c)\n', (7706, 7740), True, 'import numpy as np\n'), ((7824, 7866), 'numpy.where', 'np.where', (['(g0 >= pixmax)', '(c * pixmax / g0)', 'c'], {}), '(g0 >= pixmax, c * pixmax / g0, c)\n', (7832, 7866), True, 'import numpy as np\n'), ((7902, 7944), 'numpy.where', 'np.where', (['(b0 >= pixmax)', '(c * pixmax / b0)', 'c'], {}), '(b0 >= pixmax, c * pixmax / b0, c)\n', (7910, 7944), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
import pandas as pd
from .stats import IV, WOE, gini, gini_cond, entropy_cond, quality, _IV, VIF
np.random.seed(1)

# Deterministic synthetic fixtures shared by every test below.
# NOTE: the draw order must not change -- the expected values in the
# tests are tied to this exact RNG sequence.
feature = np.random.rand(500)
target = np.random.randint(2, size=500)
A = np.random.randint(100, size=500)
B = np.random.randint(100, size=500)
mask = np.random.randint(8, size=500)

df = pd.DataFrame(dict(feature=feature, target=target, A=A, B=B))
def test_woe():
    """WOE of (0.2, 0.3) matches the known reference value."""
    assert WOE(0.2, 0.3) == -0.4054651081081643


def test_iv_priv():
    """Private _IV returns the raw information value as first element."""
    iv_value, _ = _IV(df['feature'], df['target'])
    assert iv_value == 0.010385942643745403


def test_iv():
    """Public IV with decision-tree binning matches the reference value."""
    assert IV(df['feature'], df['target'], n_bins=10, method='dt') == 0.2735917707743619


def test_iv_return_sub():
    """With return_sub=True, one sub-IV per distinct mask value is returned."""
    _, sub = IV(mask, df['target'], return_sub=True, n_bins=10, method='dt')
    assert len(sub) == 8
    assert sub[4] == 0.006449386778057019
def test_iv_frame():
    """IV over a whole frame (chi-square binning) yields per-column values."""
    res = IV(df, 'target', n_bins=10, method='chi')
    assert res.loc[0, 'A'] == 0.226363832867123


def test_gini():
    """Unconditional Gini of the binary target."""
    assert gini(df['target']) == 0.499352


def test_gini_cond():
    """Gini of the target conditioned on the continuous feature."""
    assert gini_cond(df['feature'], df['target']) == 0.4970162601626016


def test_entropy_cond():
    """Entropy of the target conditioned on the continuous feature."""
    assert entropy_cond(df['feature'], df['target']) == 0.6924990371522171
def test_quality():
    """quality() reports iv/gini/entropy/unique for every column."""
    report = quality(df, 'target')
    assert report.loc['feature', 'iv'] == 0.2735917707743619
    assert report.loc['A', 'gini'] == 0.49284164671885444
    assert report.loc['B', 'entropy'] == 0.6924956879070063
    assert report.loc['feature', 'unique'] == 500


def test_quality_iv_only():
    """iv_only=True leaves the other metrics as NaN."""
    report = quality(df, 'target', iv_only=True)
    assert np.isnan(report.loc['feature', 'gini'])


def test_quality_object_type_array_with_nan():
    """Object-dtype features containing NaN are still scored."""
    levels = np.array([np.nan, 'A', 'B', 'C', 'D', 'E', 'F', 'G'], dtype='O')
    frame = pd.DataFrame({
        'feature': levels[mask],
        'target': target,
    })
    assert frame.pipe(quality).loc['feature', 'iv'] == 0.016379338180530334


def test_vif():
    """Variance inflation factor for column A."""
    assert VIF(df)['A'] == 2.969336442640111
|
[
"pandas.DataFrame",
"numpy.random.seed",
"numpy.isnan",
"numpy.random.randint",
"numpy.array",
"numpy.random.rand"
] |
[((133, 150), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (147, 150), True, 'import numpy as np\n'), ((162, 181), 'numpy.random.rand', 'np.random.rand', (['(500)'], {}), '(500)\n', (176, 181), True, 'import numpy as np\n'), ((191, 221), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(500)'}), '(2, size=500)\n', (208, 221), True, 'import numpy as np\n'), ((228, 260), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(500)'}), '(100, size=500)\n', (245, 260), True, 'import numpy as np\n'), ((267, 299), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(500)'}), '(100, size=500)\n', (284, 299), True, 'import numpy as np\n'), ((309, 339), 'numpy.random.randint', 'np.random.randint', (['(8)'], {'size': '(500)'}), '(8, size=500)\n', (326, 339), True, 'import numpy as np\n'), ((348, 416), 'pandas.DataFrame', 'pd.DataFrame', (["{'feature': feature, 'target': target, 'A': A, 'B': B}"], {}), "({'feature': feature, 'target': target, 'A': A, 'B': B})\n", (360, 416), True, 'import pandas as pd\n'), ((1746, 1785), 'numpy.isnan', 'np.isnan', (["result.loc['feature', 'gini']"], {}), "(result.loc['feature', 'gini'])\n", (1754, 1785), True, 'import numpy as np\n'), ((1931, 1983), 'pandas.DataFrame', 'pd.DataFrame', (["{'feature': feature, 'target': target}"], {}), "({'feature': feature, 'target': target})\n", (1943, 1983), True, 'import pandas as pd\n'), ((1848, 1912), 'numpy.array', 'np.array', (["[np.nan, 'A', 'B', 'C', 'D', 'E', 'F', 'G']"], {'dtype': '"""O"""'}), "([np.nan, 'A', 'B', 'C', 'D', 'E', 'F', 'G'], dtype='O')\n", (1856, 1912), True, 'import numpy as np\n')]
|
import pickle
import numpy as np
# Load predicted and ground-truth keypoints from the pickled fixtures.
with open('data/fake.pkl', 'rb') as f:
    points, labels, scores, keys = pickle.load(f)
with open('data/fake_gt.pkl', 'rb') as f:
    gt_points, gt_bboxes, gt_labels, gt_areas, gt_crowdeds = pickle.load(f)

# Per keypoint array, columns 0-1 are the (y, x) coordinates and column 2
# is the validity flag; split them into parallel nested lists.
gt_points_yx = [[pnt[:, :2] for pnt in doc] for doc in gt_points]
gt_point_is_valids = [[pnt[:, 2] for pnt in doc] for doc in gt_points]
points_yx = [[pnt[:, :2] for pnt in doc] for doc in points]

# Dump the ground-truth side and the prediction side to separate archives.
np.savez('eval_point_coco_dataset_2019_02_18.npz',
         points=gt_points_yx,
         is_valids=gt_point_is_valids,
         bboxes=gt_bboxes,
         labels=gt_labels,
         areas=gt_areas,
         crowdeds=gt_crowdeds)
np.savez('eval_point_coco_result_2019_02_18.npz',
         points=points_yx,
         scores=scores,
         labels=labels,)
|
[
"numpy.savez",
"pickle.load"
] |
[((700, 884), 'numpy.savez', 'np.savez', (['"""eval_point_coco_dataset_2019_02_18.npz"""'], {'points': 'gt_points_yx', 'is_valids': 'gt_point_is_valids', 'bboxes': 'gt_bboxes', 'labels': 'gt_labels', 'areas': 'gt_areas', 'crowdeds': 'gt_crowdeds'}), "('eval_point_coco_dataset_2019_02_18.npz', points=gt_points_yx,\n is_valids=gt_point_is_valids, bboxes=gt_bboxes, labels=gt_labels, areas\n =gt_areas, crowdeds=gt_crowdeds)\n", (708, 884), True, 'import numpy as np\n'), ((930, 1032), 'numpy.savez', 'np.savez', (['"""eval_point_coco_result_2019_02_18.npz"""'], {'points': 'points_yx', 'scores': 'scores', 'labels': 'labels'}), "('eval_point_coco_result_2019_02_18.npz', points=points_yx, scores=\n scores, labels=labels)\n", (938, 1032), True, 'import numpy as np\n'), ((109, 123), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (120, 123), False, 'import pickle\n'), ((228, 242), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (239, 242), False, 'import pickle\n')]
|
import numpy as np
import time
from unityagents import UnityEnvironment
from agent_utils import env_initialize, env_reset, state_reward_done_unpack
from dqn_agent import DQN_Agent
from agent_utils import load_dqn
from agent_utils import load_params, load_weights
def demo_agent(env, agent, n_episodes, epsilon=0.05, seed=0, train_mode=False):
    """Run a (trained) agent in the environment and report per-episode scores.

    Parameters
    ----------
    env : Unity environment to run in.
    agent : agent exposing ``act(state, epsilon)`` and ``brain_name``.
    n_episodes : number of demo episodes to play.
    epsilon : exploration rate used while acting, by default 0.05.
    seed : unused here; kept for interface symmetry, by default 0.
    train_mode : whether to reset the env in train mode, by default False.

    Returns
    -------
    list of per-episode scores.
    """
    print(f'\r\nRunning demo of \'{agent.name}\' with epsilon={epsilon}')

    scores = []
    for episode in range(1, n_episodes + 1):
        state = env_reset(env, agent.brain_name, train_mode=train_mode)
        score = 0
        done = False
        while not done:
            action = int(agent.act(state, epsilon))
            env_info = env.step(action)[agent.brain_name]
            # unpack the env response and accumulate the reward
            state, reward, done = state_reward_done_unpack(env_info)
            score += reward
        scores.append(score)
        print(f'Episode {episode}\tScore: {score:.2f}')

    print('\r\nDemo complete! Scores:\tMin:{:.2f}\tMax:{:.2f}\tAvg:{:.3f}'.format(
        np.min(scores), np.max(scores), np.mean(scores)))
    return scores
def demo_saved_agent(env, agent_name, n_episodes=3, epsilon=0.05, seed=0,
                     train_mode=False, verbose=False):
    """Load a saved DQN agent by name and run a demo with it.

    Restores the agent's hyper-parameters and both network weight sets
    from disk, then delegates to ``demo_agent``.
    """
    # environment and scenario info
    brain, brain_name, state, action_size, state_size = env_initialize(env, train_mode=train_mode)

    # restore params/weights and rebuild the agent
    params, local_weights, target_weights = load_dqn(agent_name, verbose=verbose)
    agent = DQN_Agent(state_size, action_size, brain_name, seed, params=params)
    print(agent.display_params())

    # install the trained weights into both networks
    agent.qnetwork_local.load_state_dict(local_weights)
    agent.qnetwork_target.load_state_dict(target_weights)

    return demo_agent(env, agent,
                      n_episodes=n_episodes, epsilon=epsilon,
                      seed=seed, train_mode=train_mode)
def demo_random_agent_discrete(env, n_episodes=3, train_mode=False, verbose=False):
    """ Runs the environment using a uniform random action selection policy. """
    brain, brain_name, state, action_size, state_size = env_initialize(
        env, train_mode=train_mode, verbose=verbose)

    start_time = time.time()
    for n_episode in range(1, n_episodes + 1):
        state = env_reset(env, brain_name, train_mode=train_mode)

        # per-episode score and step counters
        score, steps = 0, 0
        done = False
        while not done:
            # uniform random action in the discrete action space
            action = np.random.randint(action_size)
            env_info = env.step(action)[brain_name]
            state, reward, done = state_reward_done_unpack(env_info)
            score += reward
            steps += 1
        print (f'Episode {n_episode} score: {score} in {steps} steps.')

    avg_episode_time = (time.time() - start_time) / n_episodes
    print (f'Random agent demo complete, avg episode duration: {avg_episode_time:.3f}s.')
|
[
"agent_utils.env_initialize",
"agent_utils.state_reward_done_unpack",
"agent_utils.load_dqn",
"dqn_agent.DQN_Agent",
"time.time",
"numpy.min",
"numpy.max",
"numpy.mean",
"numpy.random.randint",
"agent_utils.env_reset"
] |
[((1355, 1397), 'agent_utils.env_initialize', 'env_initialize', (['env'], {'train_mode': 'train_mode'}), '(env, train_mode=train_mode)\n', (1369, 1397), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((1496, 1533), 'agent_utils.load_dqn', 'load_dqn', (['agent_name'], {'verbose': 'verbose'}), '(agent_name, verbose=verbose)\n', (1504, 1533), False, 'from agent_utils import load_dqn\n'), ((1550, 1617), 'dqn_agent.DQN_Agent', 'DQN_Agent', (['state_size', 'action_size', 'brain_name', 'seed'], {'params': 'params'}), '(state_size, action_size, brain_name, seed, params=params)\n', (1559, 1617), False, 'from dqn_agent import DQN_Agent\n'), ((2243, 2302), 'agent_utils.env_initialize', 'env_initialize', (['env'], {'train_mode': 'train_mode', 'verbose': 'verbose'}), '(env, train_mode=train_mode, verbose=verbose)\n', (2257, 2302), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((2325, 2336), 'time.time', 'time.time', ([], {}), '()\n', (2334, 2336), False, 'import time\n'), ((3255, 3266), 'time.time', 'time.time', ([], {}), '()\n', (3264, 3266), False, 'import time\n'), ((508, 563), 'agent_utils.env_reset', 'env_reset', (['env', 'agent.brain_name'], {'train_mode': 'train_mode'}), '(env, agent.brain_name, train_mode=train_mode)\n', (517, 563), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((2450, 2499), 'agent_utils.env_reset', 'env_reset', (['env', 'brain_name'], {'train_mode': 'train_mode'}), '(env, brain_name, train_mode=train_mode)\n', (2459, 2499), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((733, 767), 'agent_utils.state_reward_done_unpack', 'state_reward_done_unpack', (['env_info'], {}), '(env_info)\n', (757, 767), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((1054, 1068), 'numpy.min', 'np.min', (['scores'], {}), '(scores)\n', (1060, 1068), True, 'import 
numpy as np\n'), ((1070, 1084), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (1076, 1084), True, 'import numpy as np\n'), ((1086, 1101), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1093, 1101), True, 'import numpy as np\n'), ((2693, 2723), 'numpy.random.randint', 'np.random.randint', (['action_size'], {}), '(action_size)\n', (2710, 2723), True, 'import numpy as np\n'), ((2890, 2924), 'agent_utils.state_reward_done_unpack', 'state_reward_done_unpack', (['env_info'], {}), '(env_info)\n', (2914, 2924), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n')]
|
from os import path
import numpy as np
from torch import nn
import torch
def get_embedding(embedding_path=None,
                  embedding_np=None,
                  num_embeddings=0, embedding_dim=0, freeze=True, **kargs):
    """Create an embedding layer from one of three sources, in priority order:

    1. a saved numpy array on disk (``embedding_path``, ``freeze``)
    2. an in-memory numpy array (``embedding_np``, ``freeze``)
    3. a randomly initialized layer (``num_embeddings``, ``embedding_dim``)
    """
    # A valid path on disk takes precedence and populates embedding_np.
    if isinstance(embedding_path, str) and path.exists(embedding_path):
        embedding_np = np.load(embedding_path)
    if embedding_np is None:
        # nothing pretrained available: fresh randomly-initialized layer
        return nn.Embedding(num_embeddings, embedding_dim, **kargs)
    return nn.Embedding.from_pretrained(torch.Tensor(embedding_np), freeze=freeze)
# extract last output in last time step
def extract_last_timestep(output, lengths, batch_first):
    """Select, for each batch element, the output at its last valid time step.

    output: seq_len x batch_size x dim if not batch_first,
            else batch_size x seq_len x dim.
    lengths: one-dimensional torch.LongTensor of sequence lengths.
    """
    time_dim = 1 if batch_first else 0
    # index tensor of shape (batch, dim): each row repeats length-1
    last_idx = (lengths - 1).view(-1, 1).expand(len(lengths), output.size(2))
    last_idx = last_idx.unsqueeze(time_dim)
    if output.is_cuda:
        last_idx = last_idx.cuda(output.data.get_device())
    # gather along the time axis, then drop that singleton axis
    return output.gather(time_dim, last_idx).squeeze(time_dim)
|
[
"torch.nn.Embedding",
"torch.Tensor",
"os.path.exists",
"numpy.load"
] |
[((665, 717), 'torch.nn.Embedding', 'nn.Embedding', (['num_embeddings', 'embedding_dim'], {}), '(num_embeddings, embedding_dim, **kargs)\n', (677, 717), False, 'from torch import nn\n'), ((458, 485), 'os.path.exists', 'path.exists', (['embedding_path'], {}), '(embedding_path)\n', (469, 485), False, 'from os import path\n'), ((510, 533), 'numpy.load', 'np.load', (['embedding_path'], {}), '(embedding_path)\n', (517, 533), True, 'import numpy as np\n'), ((611, 637), 'torch.Tensor', 'torch.Tensor', (['embedding_np'], {}), '(embedding_np)\n', (623, 637), False, 'import torch\n')]
|
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import numpy as np
import tensorflow as tf
from parlai.core.agents import Teacher
from . import utils
from .build import build
from ...utils import coreference_utils
class CoreferenceTeacher(Teacher):
    """Teacher for the coreference resolution task.

    Serves raw CoNLL documents to the agent via ``act`` and scores the
    agent's predictions with the external CoNLL scorer in ``report``.
    """

    @staticmethod
    def add_cmdline_args(argparser):
        """Register the teacher's command-line parameters and defaults."""
        group = argparser.add_argument_group('Coreference Teacher')
        group.add_argument('--language', type=str, default='ru')
        group.add_argument('--predictions_folder', type=str, default='predicts',
                           help='folder where to dump conll predictions, scorer will use this folder')
        group.add_argument('--scorer_path', type=str, default='scorer/reference-coreference-scorers/v8.01/scorer.pl',
                           help='path to CoNLL scorer perl script')
        group.add_argument('--valid_ratio', type=float,
                           default=0.2, help='valid_set ratio')
        group.add_argument('--test_ratio', type=float,
                           default=0.2, help='test_set ratio')
        group.add_argument('--teacher_seed', type=int, default=42, help='seed')
        group.add_argument('--raw-dataset-path', type=str, default=None,
                           help='Path to folder with two subfolders: dataset and scorer. '
                                'These two folders are extracted rucoref_29.10.2015.zip and '
                                'reference-coreference-scorers.v8.01.tar.gz')

    def __init__(self, opt, shared=None):
        """Initialize paths, seeds and document lists for CoreferenceTeacher.

        Raises:
            RuntimeError: if ``shared`` is given (batching is unsupported).
            ValueError: if ``opt['datatype']`` is not train/valid/test.
        """
        super().__init__(opt, shared)
        self.last_observation = None
        self.id = 'two-step-coref'

        # Seed every RNG in play so behaviour is reproducible.
        self.seed = opt['teacher_seed']
        np.random.seed(seed=self.seed)
        random.seed(a=self.seed)
        tf.set_random_seed(seed=self.seed)

        if shared:
            raise RuntimeError('Additional batching is not supported')

        build(opt)

        self.dt = opt['datatype'].split(':')[0]
        self.datapath = os.path.join(opt['datapath'], 'coreference_scorer_model', opt['language'])
        self.valid_path = None
        self.train_path = None
        self.predictions_folder = os.path.join(self.datapath, opt['predictions_folder'], self.dt)
        self.scorer_path = os.path.join(self.datapath, opt['scorer_path'])

        # in train mode we use train dataset to train model
        # and valid dataset to adjust threshold
        # in valid and test mode we use test dataset
        if self.dt == 'train':
            self.valid_path = os.path.join(self.datapath, 'valid')
            self.train_path = os.path.join(self.datapath, 'train')
        elif self.dt in ['test', 'valid']:
            self.valid_path = os.path.join(self.datapath, 'test')
        else:
            raise ValueError('Unknown mode: {}. Available modes: train, test, valid.'.format(self.dt))

        self.train_documents = [] if self.train_path is None else list(sorted(os.listdir(self.train_path)))
        self.valid_documents = [] if self.valid_path is None else list(sorted(os.listdir(self.valid_path)))
        self.len = 1
        self.epoch = 0
        self._epoch_done = False

    @staticmethod
    def _read_document(folder, file_name):
        """Read one CoNLL document as a list of lines, closing the handle."""
        with open(os.path.join(folder, file_name), 'r') as f:
            return f.readlines()

    def act(self):
        """Read all train/valid documents and return them in one message."""
        self._epoch_done = True
        # Fix: previously used open(...).readlines() without closing,
        # leaking one file handle per document.
        train_conll = [self._read_document(self.train_path, file) for file in self.train_documents]
        valid_conll = [self._read_document(self.valid_path, file) for file in self.valid_documents]
        return {'id': self.id, 'conll': train_conll, 'valid_conll': valid_conll}

    def observe(self, observation):
        """Save the agent's reply and count one epoch."""
        self.last_observation = observation
        self.epoch += 1

    def report(self):
        """Dump the last predictions and return the CoNLL scorer's F-1."""
        utils.save_observations(self.last_observation['valid_conll'], self.predictions_folder)
        res = coreference_utils.score(self.scorer_path, self.valid_path, self.predictions_folder)
        return {'f1': res['conll-F-1']}

    def reset(self):
        self._epoch_done = False

    def epoch_done(self):
        return self._epoch_done

    def __len__(self):
        return self.len
|
[
"numpy.random.seed",
"tensorflow.set_random_seed",
"random.seed",
"os.path.join",
"os.listdir"
] |
[((2425, 2455), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (2439, 2455), True, 'import numpy as np\n'), ((2464, 2488), 'random.seed', 'random.seed', ([], {'a': 'self.seed'}), '(a=self.seed)\n', (2475, 2488), False, 'import random\n'), ((2497, 2531), 'tensorflow.set_random_seed', 'tf.set_random_seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (2515, 2531), True, 'import tensorflow as tf\n'), ((2716, 2790), 'os.path.join', 'os.path.join', (["opt['datapath']", '"""coreference_scorer_model"""', "opt['language']"], {}), "(opt['datapath'], 'coreference_scorer_model', opt['language'])\n", (2728, 2790), False, 'import os\n'), ((2887, 2950), 'os.path.join', 'os.path.join', (['self.datapath', "opt['predictions_folder']", 'self.dt'], {}), "(self.datapath, opt['predictions_folder'], self.dt)\n", (2899, 2950), False, 'import os\n'), ((2978, 3025), 'os.path.join', 'os.path.join', (['self.datapath', "opt['scorer_path']"], {}), "(self.datapath, opt['scorer_path'])\n", (2990, 3025), False, 'import os\n'), ((3249, 3285), 'os.path.join', 'os.path.join', (['self.datapath', '"""valid"""'], {}), "(self.datapath, 'valid')\n", (3261, 3285), False, 'import os\n'), ((3316, 3352), 'os.path.join', 'os.path.join', (['self.datapath', '"""train"""'], {}), "(self.datapath, 'train')\n", (3328, 3352), False, 'import os\n'), ((3426, 3461), 'os.path.join', 'os.path.join', (['self.datapath', '"""test"""'], {}), "(self.datapath, 'test')\n", (3438, 3461), False, 'import os\n'), ((3658, 3685), 'os.listdir', 'os.listdir', (['self.train_path'], {}), '(self.train_path)\n', (3668, 3685), False, 'import os\n'), ((3766, 3793), 'os.listdir', 'os.listdir', (['self.valid_path'], {}), '(self.valid_path)\n', (3776, 3793), False, 'import os\n'), ((4004, 4039), 'os.path.join', 'os.path.join', (['self.train_path', 'file'], {}), '(self.train_path, file)\n', (4016, 4039), False, 'import os\n'), ((4120, 4155), 'os.path.join', 'os.path.join', (['self.valid_path', 
'file'], {}), '(self.valid_path, file)\n', (4132, 4155), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Project: neurohacking
File: clench.py.py
Author: wffirilat
"""
import numpy as np
import time
import sys
import plugin_interface as plugintypes
from open_bci_v3 import OpenBCISample
class PluginClench(plugintypes.IPluginExtended):
    """OpenBCI plugin that detects jaw clenches on one EEG channel.

    A timed calibration sequence (rest -> clench -> release) records the
    per-phase extrema of the mean-subtracted signal; afterwards each new
    sample is compared against those extrema in ``tick``.
    """

    def __init__(self):
        # If True, also gather release (unclench) extrema during calibration.
        self.release = True
        # Number of completed 256-sample packets seen so far; -1 = none yet.
        self.packetnum = -1
        # Clench threshold, computed at the end of calibration.
        self.threshold = None
        # Unclench threshold (only computed when self.release is True).
        self.uthreshold = None
        # Absolute sample index since the stream started.
        self.ticknum = None
        # Ring-buffer length in samples.
        self.storelength = 1024
        # Wall-clock time of the first sample; calibration phases key off it.
        self.starttime = None
        # Calibration state machine position.
        self.state = 'unstarted'
        # Channel index used for detection (see the `current` property).
        self.channel = 3
        self.restingmax, self.restingmin = 0, 0
        self.clenchmax, self.clenchmin = 0, 0
        self.unclenchmax, self.unclenchmin = 0, 0
        # Raw and mean-subtracted ring buffers, one row per channel.
        self.rawdata = np.zeros((8, self.storelength))
        self.data = np.zeros((8, self.storelength))

    def activate(self):
        print("clench activated")

    # called with each new sample
    def __call__(self, sample: OpenBCISample):
        """Store the sample (raw and mean-subtracted) and advance the state machine."""
        if sample.id == 0:
            if self.packetnum == -1:
                self.starttime = time.time()
            self.packetnum += 1
        self.ticknum = self.packetnum * 256 + sample.id
        self.rawdata[:, (sample.id + 256 * self.packetnum) % self.storelength] = sample.channel_data
        # Subtract each channel's ring-buffer mean to remove the DC offset.
        self.data[:, (sample.id + 256 * self.packetnum) % self.storelength] = [v - avg for avg, v in zip(
            [sum(self.rawdata[i, :]) / self.storelength for i in range(8)],
            sample.channel_data
        )]
        #print(np.median(self.rawdata[3,:])) #The reason this is here is because it might help our basis be better
        if self.state != 'calibrated':
            self.calibratetick()
        else:
            self.tick()

    def calibratetick(self):
        """One step of the timed calibration state machine.

        States advance on elapsed wall-clock time since the first sample:
        unstarted -> positioning (announce) -> resting (record rest extrema)
        -> clench (announce) -> clenching (record clench extrema)
        -> postclench (record release extrema) -> calibrated.
        """
        # print(self.data)
        dt = time.time() - self.starttime
        if self.state == "unstarted":
            print("Prepare to calibrate")
            self.state = "positioning"
        elif self.state == "positioning":
            if dt > 4:
                print('Calibrating')
                self.state = 'resting'
        elif self.state == 'resting':
            if dt > 6:
                print("Resting data gathered; Prepare to clench")
                self.state = 'clench'
                return
            # Track the resting-signal envelope.
            if self.current >= self.restingmax:
                self.restingmax = self.current
            if self.current <= self.restingmin:
                self.restingmin = self.current
        elif self.state == 'clench':
            if dt > 7:
                print("Clench NOW!")
                self.state = 'clenching'
                return
        elif self.state == 'clenching':
            if dt > 9:
                print('Unclench!!')
                self.state = 'postclench'
                return
            # Track the clench-signal envelope.
            if self.current > self.clenchmax:
                self.clenchmax = self.current
            if self.current < self.clenchmin:
                self.clenchmin = self.current
        elif self.state == 'postclench':
            if dt > 10:
                # Thresholds sit halfway between resting and clench extrema.
                self.threshold = self.restingmax + ((self.clenchmax - self.restingmax) / 2)
                if self.release:
                    self.uthreshold = self.restingmin + ((self.clenchmin - self.restingmin) / 2)
                self.state = 'calibrated'
                print ("Resting Max", self.restingmax, "Resting Min", self.restingmin, "\n")
                print ("Clench Max,", self.clenchmax, "Clench Min",self.clenchmin, "\n")
                if self.release:
                    print ("Unclench Max,", self.unclenchmax, "Unclench Min",self.unclenchmin, "\n")
                return
            if self.release:
                # Track the release-signal envelope.
                if self.current > self.unclenchmax:
                    self.unclenchmax = self.current
                if self.current < self.unclenchmin:
                    self.unclenchmin = self.current

    @property
    def current(self):
        # Latest mean-subtracted value on the detection channel.
        return self.data[self.channel, self.ticknum % self.storelength]

    def tick(self):
        """Post-calibration detection step for each incoming sample."""
        # NOTE(review): this compares against unclenchmax rather than the
        # calibrated self.threshold computed above -- confirm intentional.
        if self.current > self.unclenchmax-((self.current-self.unclenchmax)/5):#watch this work!
            print(f"    {self.current}: Clenched!!")
            ...
        #if self.release:
        #    if self.current < self.uthreshold:
        #        print(f"    {self.ticknum}: Unclenched!!")
|
[
"numpy.zeros",
"time.time"
] |
[((719, 750), 'numpy.zeros', 'np.zeros', (['(8, self.storelength)'], {}), '((8, self.storelength))\n', (727, 750), True, 'import numpy as np\n'), ((771, 802), 'numpy.zeros', 'np.zeros', (['(8, self.storelength)'], {}), '((8, self.storelength))\n', (779, 802), True, 'import numpy as np\n'), ((1766, 1777), 'time.time', 'time.time', ([], {}), '()\n', (1775, 1777), False, 'import time\n'), ((1042, 1053), 'time.time', 'time.time', ([], {}), '()\n', (1051, 1053), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
r"""Run the vacuum coefficients 3nu example shown in README.md.
Runs the three-neutrino example of coefficients for oscillations in
vacuum shown in README.md
References
----------
.. [1] <NAME>, "Exact neutrino oscillation probabilities:
a fast general-purpose computation method for two and three neutrino
flavors", arXiv:1904.XXXXX.
Created: 2019/04/29 23:48
Last modified: 2019/04/29 23:48
"""
from __future__ import print_function
__version__ = "1.0"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
sys.path.append('../src')
import numpy as np
import oscprob3nu
import hamiltonians3nu
from globaldefs import *
energy = 1.e9  # Neutrino energy [eV]
baseline = 1.3e3  # Baseline [km]

# Energy-independent part of the vacuum Hamiltonian, scaled by 1/E below.
h_vacuum_energy_indep = \
    hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent(
        S12_NO_BF, S23_NO_BF, S13_NO_BF, DCP_NO_BF, D21_NO_BF, D31_NO_BF)
h_vacuum = np.multiply(1./energy, h_vacuum_energy_indep)

# SU(3) expansion coefficients of the Hamiltonian.
h1, h2, h3, h4, h5, h6, h7, h8 = \
    oscprob3nu.hamiltonian_3nu_coefficients(h_vacuum)
for index, coeff in enumerate((h1, h2, h3, h4, h5, h6, h7, h8), start=1):
    print('h{}: {:.4e}'.format(index, coeff))
print()

# Coefficients of the evolution operator over the given baseline.
u0, u1, u2, u3, u4, u5, u6, u7, u8 = \
    oscprob3nu.evolution_operator_3nu_u_coefficients(
        h_vacuum,
        baseline*CONV_KM_TO_INV_EV)
for index, coeff in enumerate((u0, u1, u2, u3, u4, u5, u6, u7, u8)):
    print('u{}: {:.4f}'.format(index, coeff))
print()

# Full 3x3 evolution operator, printed with compact float formatting.
evol_operator = \
    oscprob3nu.evolution_operator_3nu(h_vacuum, baseline*CONV_KM_TO_INV_EV)
print('U3 = ')
with np.printoptions(precision=3, suppress=True):
    print(np.array(evol_operator))
|
[
"sys.path.append",
"oscprob3nu.evolution_operator_3nu",
"numpy.multiply",
"hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent",
"oscprob3nu.hamiltonian_3nu_coefficients",
"numpy.array",
"numpy.printoptions",
"oscprob3nu.evolution_operator_3nu_u_coefficients"
] |
[((549, 574), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (564, 574), False, 'import sys\n'), ((769, 896), 'hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent', 'hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent', (['S12_NO_BF', 'S23_NO_BF', 'S13_NO_BF', 'DCP_NO_BF', 'D21_NO_BF', 'D31_NO_BF'], {}), '(S12_NO_BF,\n S23_NO_BF, S13_NO_BF, DCP_NO_BF, D21_NO_BF, D31_NO_BF)\n', (826, 896), False, 'import hamiltonians3nu\n'), ((1226, 1274), 'numpy.multiply', 'np.multiply', (['(1.0 / energy)', 'h_vacuum_energy_indep'], {}), '(1.0 / energy, h_vacuum_energy_indep)\n', (1237, 1274), True, 'import numpy as np\n'), ((1312, 1361), 'oscprob3nu.hamiltonian_3nu_coefficients', 'oscprob3nu.hamiltonian_3nu_coefficients', (['h_vacuum'], {}), '(h_vacuum)\n', (1351, 1361), False, 'import oscprob3nu\n'), ((1662, 1754), 'oscprob3nu.evolution_operator_3nu_u_coefficients', 'oscprob3nu.evolution_operator_3nu_u_coefficients', (['h_vacuum', '(baseline * CONV_KM_TO_INV_EV)'], {}), '(h_vacuum, baseline *\n CONV_KM_TO_INV_EV)\n', (1710, 1754), False, 'import oscprob3nu\n'), ((2166, 2239), 'oscprob3nu.evolution_operator_3nu', 'oscprob3nu.evolution_operator_3nu', (['h_vacuum', '(baseline * CONV_KM_TO_INV_EV)'], {}), '(h_vacuum, baseline * CONV_KM_TO_INV_EV)\n', (2199, 2239), False, 'import oscprob3nu\n'), ((2258, 2301), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (2273, 2301), True, 'import numpy as np\n'), ((2313, 2336), 'numpy.array', 'np.array', (['evol_operator'], {}), '(evol_operator)\n', (2321, 2336), True, 'import numpy as np\n')]
|
"""
Convert ground truth latent classes into binary sensitive attributes
"""
def attr_fn_0(y):
    """Binarize latent column 0 at threshold 1."""
    return y[:, 0] >= 1


def attr_fn_1(y):
    """Binarize latent column 1 at threshold 1."""
    return y[:, 1] >= 1


def attr_fn_2(y):
    """Binarize latent column 2 at threshold 3."""
    return y[:, 2] >= 3


def attr_fn_3(y):
    """Binarize latent column 3 at threshold 20."""
    return y[:, 3] >= 20


def attr_fn_4(y):
    """Binarize latent column 4 at threshold 16."""
    return y[:, 4] >= 16


def attr_fn_5(y):
    """Binarize latent column 5 at threshold 16."""
    return y[:, 5] >= 16


# One binarizer per dSprites latent factor, in column order.
dsprites_attr_fns = [attr_fn_0, attr_fn_1, attr_fn_2, attr_fn_3, attr_fn_4, attr_fn_5]
# celeba stuff
def attr_fn_chubby(a):
    """True where annotation column 13 is positive."""
    return a[:, 13] > 0.


def attr_fn_eyeglasses(a):
    """True where annotation column 15 is positive."""
    return a[:, 15] > 0.


def attr_fn_male(a):
    """True where annotation column 20 is positive."""
    return a[:, 20] > 0.


def attr_fn_heavy_makeup(a):
    """True where annotation column 18 is positive."""
    return a[:, 18] > 0.
# Named CelebA subgroup predicates: each value maps an annotation matrix
# ``a`` (rows = images, columns = attributes) to a boolean / 0-1 mask.
# Conjunctions use elementwise * and negation via (1 - mask).
CELEBA_SUBGROUPS = {
    'H': attr_fn_heavy_makeup,
    'S': lambda a: a[:,31] > 0., # smiling
    'W': lambda a: a[:,36] > 0., # wears lipstick
    'A': lambda a: a[:,2] > 0., # column 2 -- original comment said 'wears lipstick', likely copy-pasted from 'W'; verify attribute name
    'C': attr_fn_chubby,
    'E': attr_fn_eyeglasses,
    'M': attr_fn_male,
    'C $\land$ E': lambda a: attr_fn_chubby(a) * attr_fn_eyeglasses(a),
    'C $\land$ M': lambda a: attr_fn_chubby(a) * attr_fn_male(a),
    'E $\land$ M': lambda a: attr_fn_eyeglasses(a) * attr_fn_male(a),
    'C $\land$ $\\neg$ E': lambda a: attr_fn_chubby(a) * (1 - attr_fn_eyeglasses(a)),
    'C $\land$ $\\neg$ M': lambda a: attr_fn_chubby(a) * (1 - attr_fn_male(a)),
    'E $\land$ $\\neg$ M': lambda a: attr_fn_eyeglasses(a) * (1 - attr_fn_male(a)),
    '$\\neg$ C $\land$ E': lambda a: (1 - attr_fn_chubby(a)) * attr_fn_eyeglasses(a),
    '$\\neg$ C $\land$ M': lambda a: (1 - attr_fn_chubby(a)) * attr_fn_male(a),
    '$\\neg$ E $\land$ M': lambda a: (1 - attr_fn_eyeglasses(a)) * attr_fn_male(a),
    '$\\neg$ C $\land$ $\\neg$ E': lambda a: (1 - attr_fn_chubby(a)) * (1 - attr_fn_eyeglasses(a)),
    '$\\neg$ C $\land$ $\\neg$ M': lambda a: (1 - attr_fn_chubby(a)) * (1 - attr_fn_male(a)),
    '$\\neg$ E $\land$ $\\neg$ M': lambda a: (1 - attr_fn_eyeglasses(a)) * (1 - attr_fn_male(a)),
} # cf. generate_celeba_audit_table.format_subgroups
# Annotation column indices each named subgroup depends on (negated or not).
CELEBA_SENS_IDX = {
    'C': [13],
    'E': [15],
    'M': [20],
    'C $\land$ E': [13, 15],
    'C $\land$ M': [13, 20],
    'E $\land$ M': [15, 20],
    'C $\land$ $\\neg$ E': [13, 15],
    'C $\land$ $\\neg$ M': [13, 20],
    'E $\land$ $\\neg$ M': [15, 20],
    '$\\neg$ C $\land$ E': [13, 15],
    '$\\neg$ C $\land$ M': [13, 20],
    '$\\neg$ E $\land$ M': [15, 20],
    '$\\neg$ C $\land$ $\\neg$ E': [13, 15],
    '$\\neg$ C $\land$ $\\neg$ M': [13, 20],
    '$\\neg$ E $\land$ $\\neg$ M': [15, 20],
} # maps named subgroups to the sensitive indices they depend on
# comcrime stuff
CC_ATTR_STRING = 'cc_attr_fn'


def create_cc_attr_fn(i):
    """Return an accessor selecting column *i* of a 2-D array.

    Columns are expected to be binarized already upstream.
    """
    def _column(y):
        # original note: values should already be binarized (>= 0.5)
        return y[:, i]
    return _column
# Bind cc_attr_fn_0 ... cc_attr_fn_18 as module-level names, one column
# accessor per Communities & Crime sensitive attribute.
for _col in range(19):
    globals()['cc_attr_fn_{}'.format(_col)] = create_cc_attr_fn(_col)
del _col
if __name__ == '__main__':
    # Smoke test: exercise a few accessors on an all-zero matrix.
    import numpy as np
    x = np.zeros((10, 10))
    # NOTE(review): nothing below actually prints a column -- the print
    # inside create_cc_attr_fn's inner function is commented out, so only
    # the literal string below appears; 'should print 5' looks stale.
    print('should print 5')
    cc_attr_fn_5(x)
    cc_attr_fn_6(x)
    cc_attr_fn_7(x)
|
[
"numpy.zeros"
] |
[((3653, 3671), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3661, 3671), True, 'import numpy as np\n')]
|
import argparse
import os
import random
import time
import warnings
from math import cos, pi
import cv2
import numpy as np
import torch
import torch.optim as optim
from DLBio.pt_train_printer import Printer
from DLBio.pytorch_helpers import get_lr
class ITrainInterface():
    """Abstract interface between a model and the Training loop.

    A TrainInterface owns the model's forward pass, the loss computation
    and any extra training metrics. Since these vary with dataset, model
    and task, they live in a separate module that is handed to Training.

    Subclasses must implement ``__init__`` and ``train_step``;
    ``val_step`` / ``test_step`` only need overriding when validation or
    testing differs from training.
    """

    def __init__(self, *args, **kwargs):
        """Subclasses set up the model, device, loss and metrics here."""
        raise NotImplementedError('Needs model and loss fcn and metrics')

    def train_step(self, *args, **kwargs):
        """Compute loss (and metrics) for one batch.

        Called by Training for each drawn batch as
        ``loss, metrics = self.train_interface.train_step(sample)``;
        the returned loss drives the weight update. May return any of
        (loss), (loss, metrics), (loss, metrics, counters) or
        (loss, metrics, counters, functions).
        """
        raise NotImplementedError('Implement to run training')

    def val_step(self, *args, **kwargs):
        """Validation step; defaults to the training computation."""
        return self.train_step(*args, **kwargs)

    def test_step(self, *args, **kwargs):
        """Test step; defaults to the validation computation."""
        return self.val_step(*args, **kwargs)

    def after_training_process(self, *args, **kwargs):
        """Optional model-dependent hook run once after training ends."""
        pass
class Training():
    """A Class that contains all necessary ingredients to train a pytorch
    model. To start training, simply call the instantiated object with the
    desired number of epochs, e.g.:
    TODO: 'add_do_not_update' boolean for SAM optimization
    training = Training(...)
    training(100) # train for 100 epochs
    """

    def __init__(
        self, optimizer, data_loader, train_interface,
        save_steps=-1, save_path=None,
        printer=None, scheduler=None, clip=None,
        retain_graph=False, val_data_loader=None, early_stopping=None,
        validation_only=False, save_state_dict=False,
        test_data_loader=None, batch_scheduler=None, start_epoch=0,
        time_log_printer=None, stop_conditions=None
    ):
        """Constructor
        Parameters
        ----------
        optimizer : pytorch optimizer
            Controls the weight updates, see get_optimizer for more information
        data_loader : pytorch dataloader
            When iterated over in a for loop, data are returned in batches.
            Note that the for loop is executed as
            'for sample in data_loader:'
            You need to specify what a sample actually is in the training-
            interface.
        train_interface : ITrainInterface
            Computes the loss of a batch, see method _train_step
        save_steps : int, optional
            Every 'save_steps' the model is saved to 'save_path'. If 0, the
            model is only saved on the end of the training. By default -1,
            which means the model is not saved at all (if early_stopping is
            None).
        save_path : str, optional
            Where to save the model, by default None. Needs to be specified if
            save_steps != 1. Note that the model is always overwritten, i.e.,
            there is only one '[model].pt' file after training at save_path.
        printer : Printer (pt_train_printer), optional
            Prints current training values to terminal and possibly a
            log.json file. By default None, nothing is logged.
        scheduler : pytorch scheduler, optional
            Updates the learning rate according to some schedule. By default
            None, no scheduling is used.
        clip : float, optional
            Gradient clipping, by default None, no gradient clipping
        retain_graph : bool, optional
            Needed for special backpropagation functions, see pytorch
            documentation for more information. By default False.
        val_data_loader : pytorch data_loader, optional
            Can be used to validate/test the network performance. These data
            are not used for training (but maybe early stopping). The model is
            in eval-mode, when those data are processed. The val_step of the
            TrainingInterface is applied to these data.
            By default None, no validation is done.
        early_stopping : EarlyStopping object, optional
            Save the model based on a specified metric, each time the best
            value of this metric is reached. By default None, no early stopping
        validation_only: bool
            When called, only the validation steps are computed. Note that, if
            the flag is set to true, the model is not trained.
        save_state_dict: save the model's state dict instead of the model
        test_data_loader : pytorch data_loader, optional
            Can be used to test the network performance. The model is
            in eval-mode, when those data are processed. The test_step of the
            TrainingInterface is applied to these data.
        batch_scheduler: BatchScheduler object
            For scheduling algorithms that adjust the learning
            rate within an epoch, instead each epoch's end.
        start_epoch: int
            Set to a value other than 0 if a previous training is resumed.
            In this case, start_epoch should be set to the last epoch the
            previous training stopped.
        time_log_printer: Printer (pt_train_printer)
            If not none, the time needed for different training steps
            is logged and written by this logger.
        stop_conditions: List of [IStopCondition], optional
            Similar to early stopping, stops the training based on a
            train phase metric (no val- or test metric). Use, for example, to
            quickly stop processes where the training does not converge.
            By default None, no stop conditions.
        Returns
        -------
        Training object
        """
        self.optimizer = optimizer
        self.data_loader = data_loader
        assert issubclass(train_interface.__class__, ITrainInterface)
        self.train_interface = train_interface
        self.scheduler = scheduler
        self.batch_scheduler = batch_scheduler
        self.early_stopping = early_stopping
        # FIX: the default used to be a shared mutable list ([]); use a None
        # sentinel so instances never share state.
        self.stop_conditions = [] if stop_conditions is None else stop_conditions
        if printer is None:
            self.printer = Printer(100, None)
        else:
            self.printer = printer
        self.time_log_printer = time_log_printer
        self.time_logger = TimeLogger(is_active=(time_log_printer is not None))
        assert isinstance(save_steps, int)
        if save_steps >= 0:
            assert save_path is not None
        self.do_save = save_steps >= 0 and save_path is not None
        self.save_steps = save_steps
        self.save_path = save_path
        self.save_state_dict = save_state_dict
        # (removed a leftover debug print of save_state_dict here)
        self.clip = clip
        self.retain_graph = retain_graph
        self.phases = ['train']
        if val_data_loader is not None:
            self.phases.append('validation')
        if test_data_loader is not None:
            self.phases.append('test')
        # there should be no instance with ['train', 'test']. For now ['train', 'val'] should be used instead
        # maybe this needs to be changed in the future
        if 'test' in self.phases:
            assert 'validation' in self.phases, 'No combination train and test allowed.'
        self.validation_only = validation_only
        if validation_only:
            assert 'test' not in self.phases
            self.phases = ['validation']
            print('Running in validation only mode.')
        self.data_loaders_ = {
            'train': data_loader,
            'validation': val_data_loader,
            'test': test_data_loader
        }
        if start_epoch > 0:
            self.start_ep = start_epoch + 1
        else:
            self.start_ep = 0
        if not torch.cuda.is_available():
            warnings.warn('No GPU detected. Training can be slow.')
        # check for right order of training phases
        if 'train' in self.phases and 'validation' in self.phases:
            assert self.phases.index('train') == 0
            assert self.phases.index('validation') == 1
        if 'validation' in self.phases and 'test' in self.phases:
            assert self.phases.index('validation') == 1
            assert self.phases.index('test') == 2

    def __call__(self, epochs_):
        """Train the model for a specified number of epochs
        Parameters
        ----------
        epochs_ : int
            how many epochs for training
        """
        self.printer.restart()
        do_stop = False
        if self.validation_only:
            num_batches = 0
        else:
            num_batches = len(self.data_loaders_['train'])
        if self.start_ep > 0:
            if self.batch_scheduler is not None:
                self._batch_schedule(
                    'train', self.start_ep, 0,
                    self.data_loaders_['train'].batch_size
                )
            if self.scheduler is not None:
                # TODO: if resume, compute the learning rate beforehand
                raise NotImplementedError
        print('STARTING TRAINING')
        for epoch in range(self.start_ep, epochs_):
            self.printer.learning_rate = get_lr(self.optimizer)
            for current_phase in self.phases:
                if current_phase == 'train':
                    self.train_interface.model.train()
                else:
                    self.train_interface.model.eval()
                self.time_logger.start(current_phase + '_load_data')
                for idx, sample in enumerate(self.data_loaders_[current_phase]):
                    self.time_logger.stop(current_phase + '_load_data')
                    self._batch_schedule(
                        current_phase, epoch, idx, num_batches
                    )
                    loss, metrics, counters, functions = self._iteration_step(
                        sample, current_phase)
                    self._update_printer(
                        epoch, loss, metrics, counters, functions, current_phase
                    )
                    if current_phase == 'train':
                        self._update_weights(loss)
                    self.time_logger.start(current_phase + '_load_data')
                # ----------- end of phase ----------------------------
                self.time_logger.stop(
                    current_phase + '_load_data', do_log=False
                )
                # do certain actions depending on which phase we are in
                if self.early_stopping is not None and current_phase == 'validation':
                    do_stop = self.early_stopping(
                        self.printer.get_metrics(),
                        self.train_interface.model,
                        self.save_path,
                        self.save_state_dict
                    )
                if self.stop_conditions and current_phase == 'train':
                    for sc in self.stop_conditions:
                        # FIX: accumulate with 'or' so a triggered condition
                        # cannot be overwritten by a later False result.
                        do_stop = do_stop or sc(epoch, self.printer.get_metrics())
                self.printer.on_epoch_end()
                self._schedule(current_phase)
                self._save(epoch, epochs_, current_phase)
            # compute statistics on time values that are collected during
            # the upper for-loop
            if self.time_log_printer is not None:
                self.time_log_printer.update(
                    torch.tensor([-1]), epoch, metrics=self.time_logger.get_data()
                )
                self.time_log_printer.on_epoch_end()
                self.time_logger.restart()
            if do_stop:
                return
        # -------------------end of epoch -------------------------------

    def _iteration_step(self, sample, current_phase):
        """Compute loss and metrics
        Parameters
        ----------
        sample : anything provided by the data loader
            typically the sample x and the corresponding label
        current_phase : str
            training, validation, or test
        Returns
        -------
        float, dict
            loss value that is used for gradient computation and dictionaries
            with metrics, counters, and functions
        """
        self.time_logger.start(current_phase + '_iteration_step')
        if current_phase == 'validation':
            with torch.no_grad():
                output = self.train_interface.val_step(sample)
        elif current_phase == 'test':
            with torch.no_grad():
                output = self.train_interface.test_step(sample)
        else:
            output = self.train_interface.train_step(sample)
        functions = None
        counters = None
        if len(output) == 2:
            loss, metrics = output[0], output[1]
        elif len(output) == 3:
            loss, metrics, counters = output[0], output[1], output[2]
        else:
            loss, metrics, counters = output[0], output[1], output[2]
            functions = output[3]
        self.time_logger.stop(current_phase + '_iteration_step')
        return loss, metrics, counters, functions

    def _update_weights(self, loss):
        """Compute gradient and apply backpropagation
        from:
        https://discuss.pytorch.org/t/what-step-backward-and-zero-grad-do/33301
        Hopefully, you use them in the other order - opt.zero_grad(), loss.backward(), opt.step().
        zero_grad clears old gradients from the last step (otherwise you'd just accumulate the gradients from all loss.backward() calls).
        loss.backward() computes the derivative of the loss w.r.t. the parameters (or anything requiring gradients) using backpropagation.
        opt.step() causes the optimizer to take a step based on the gradients of the parameters.
        Parameters
        ----------
        loss : float
            error function the weight update is based on
        """
        self.time_logger.start('update_weights')
        self.optimizer.zero_grad()
        self.time_logger.start('loss_backward')
        loss.backward(retain_graph=self.retain_graph)
        self.time_logger.stop('loss_backward')
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(
                self.train_interface.model.parameters(), self.clip
            )
        self.time_logger.start('opt_step')
        self.optimizer.step()
        self.time_logger.stop('opt_step')
        self.time_logger.stop('update_weights')

    def _update_printer(self, epoch, loss, metrics, counters, functions, current_phase):
        """Pass the necessary values to the printer
        Parameters
        ----------
        epoch : int
            Current epoch
        loss : float
            Current loss value
        metrics : dict
        current_phase : str
            If the current phase is validation, all metrics/losses/etc. are renamed
            from [name] to val_[name]. If the current phase is test, all they are renamed to test_[name].
        """
        self.time_logger.start(current_phase + '_update_printer')
        if current_phase == 'train':
            self.printer.update(loss, epoch, metrics, counters, functions)
        else:
            prefix = {'validation': 'val_', 'test': 'test_'}[current_phase]
            if metrics is not None:
                metrics = {prefix + k: v for (k, v) in metrics.items()}
            if counters is not None:
                counters = {prefix + k: v for (k, v) in counters.items()}
            if functions is not None:
                functions = {prefix + k: v for (k, v) in functions.items()}
            self.printer.update(
                loss, epoch, metrics,
                counters, functions, loss_key=prefix + 'loss'
            )
        self.time_logger.stop(current_phase + '_update_printer')
        self.printer.print_conditional()

    def _schedule(self, current_phase):
        """Update the scheduler after each training epoch.
        """
        if self.scheduler is not None and current_phase == 'train':
            self.time_logger.start('schedule')
            self.scheduler.step()
            self.time_logger.stop('schedule')

    def _batch_schedule(self, current_phase, epoch, iteration, num_batches):
        """Update the scheduler after each training batch.
        """
        if self.batch_scheduler is not None and current_phase == 'train':
            self.time_logger.start('batch_schedule')
            self.batch_scheduler.step(epoch, iteration, num_batches)
            self.time_logger.stop('batch_schedule')

    def _save(self, epoch, epochs_, current_phase):
        """save the model to model path every 'save_steps' epochs.
        Parameters
        ----------
        epoch : int
            current epoch
        epochs_ : int
            number of epochs for entire training
        current_phase: str
            is this function called after training, val or testing? Only after
            validation, the model is saved.
        """
        # only save after validation
        if current_phase != 'validation' and 'validation' in self.phases:
            return
        if self.do_save:
            self.time_logger.start('save')
            is_last_epoch = (epoch == epochs_ - 1)
            if self.save_steps > 0:
                is_save_intervall = epoch % self.save_steps == 0
            else:
                is_save_intervall = False
            if is_last_epoch or is_save_intervall:
                torch_save_model(
                    self.train_interface.model,
                    self.save_path,
                    self.save_state_dict
                )
            self.time_logger.stop('save')
def get_optimizer(opt_id, parameters, learning_rate, **kwargs):
    """ Simple getter function for a pytorch optimizer
    Parameters
    ----------
    opt_id : str
        Which optimizer: 'SGD', 'Adam', 'lamb', 'AdaDelta' or
        'RMSProb'/'RMSProp'
    parameters : model.parameters
        pytorch variables that shall be updated, usually model.parameters()
        is passed
    learning_rate : float
    Returns
    -------
    pytorch optimizer
    Raises
    ------
    ValueError
        if unknown opt_id
    """
    if opt_id == 'SGD':
        # warn whenever an implicit default is used
        if 'momentum' not in kwargs.keys():
            warnings.warn(f'Using default momentum for SGD: {.9}')
        if 'weight_decay' not in kwargs.keys():
            warnings.warn(f'Using default weight_decay for SGD {0.}')
        optimizer = optim.SGD(parameters,
                              lr=learning_rate,
                              momentum=kwargs.get('momentum', .9),
                              weight_decay=kwargs.get('weight_decay', 0.),
                              nesterov=kwargs.get('nesterov', False)
                              )
    elif opt_id == 'Adam':
        if 'weight_decay' not in kwargs.keys():
            # FIX: warning message used to say 'SGD'
            warnings.warn(f'Using default weight_decay for Adam {0.}')
        optimizer = optim.Adam(
            parameters,
            lr=learning_rate,
            weight_decay=kwargs.get('weight_decay', 0.)
        )
    elif opt_id == 'lamb':
        # third-party dependency, only needed for this branch
        from pytorch_lamb import Lamb
        if 'weight_decay' not in kwargs.keys():
            # FIX: warning message used to say 'SGD'
            warnings.warn(f'Using default weight_decay for lamb {0.001}')
        optimizer = Lamb(
            parameters,
            lr=learning_rate, weight_decay=kwargs.get('weight_decay', 0.001),
            betas=(kwargs.get('beta0', .9), kwargs.get('beta1', .999))
        )
    elif opt_id == 'AdaDelta':
        if 'weight_decay' not in kwargs.keys():
            # FIX: warning message used to say 'SGD'
            warnings.warn(f'Using default weight_decay for AdaDelta {0.}')
        optimizer = optim.Adadelta(
            parameters,
            lr=learning_rate,
            weight_decay=kwargs.get('weight_decay', 0.),
            rho=kwargs.get('rho', 0.9),
            eps=kwargs.get('eps', 1e-3)
        )
    elif opt_id in ('RMSProb', 'RMSProp'):
        # also accept the standard 'RMSProp' spelling (backward-compatible)
        if 'weight_decay' not in kwargs.keys():
            warnings.warn(f'Using default weight_decay for RMSprop {0.}')
        optimizer = optim.RMSprop(
            parameters,
            lr=learning_rate,
            alpha=kwargs.get('alpha', 0.99),
            eps=kwargs.get('eps', 1e-08),
            weight_decay=kwargs.get('weight_decay', 0.),
            momentum=kwargs.get('momentum', 0.),
            centered=kwargs.get('centered', False)
        )
    else:
        raise ValueError(f'Unknown opt value: {opt_id}')
    return optimizer
def get_scheduler(lr_steps, epochs, optimizer, gamma=.1, fixed_steps=None):
    """returns a pytorch scheduler
    Parameters
    ----------
    lr_steps : int
        the learning rate is altered in 'lr_steps' uniformly steps
    epochs : int
        number of epochs for the entire training
    optimizer : pytorch optimizer
    gamma : float, optional
        the learning rate is multiplied by gamma, by default .1
    fixed_steps : list, optional
        explicit milestone epochs (mutually exclusive with lr_steps)
    Returns
    -------
    pytorch scheduler (or None if no scheduling is requested)
    """
    if fixed_steps is not None:
        assert lr_steps == 0, 'no lr_steps if fixed steps is used'
        # argparse may deliver the milestones as strings -> force ints
        fixed_steps = [int(step) for step in fixed_steps]
        milestone_sched = optim.lr_scheduler.MultiStepLR(
            optimizer, fixed_steps,
            gamma=gamma
        )
        print(f'fixed rate scheduling at: {fixed_steps}')
        return milestone_sched

    if lr_steps < 1:
        return None

    assert lr_steps < epochs, f'Epochs must be greater than lr_steps but e:{epochs} < l:{lr_steps}'
    step_size = epochs // lr_steps
    print(f'Sched step size: {step_size}')
    return optim.lr_scheduler.StepLR(
        optimizer, step_size,
        gamma=gamma, last_epoch=-1
    )
def set_device(device=None, verbose=True):
    """Use if you have multiple GPUs, but you only want to use a subset.
    Use the command 'nvidia-smi' in the terminal for more information on your
    pc's gpu setup
    Parameters
    ----------
    device : int or list of int, optional
        masks all devices but 'device'. By default None, all devices are
        visible
    verbose : bool, optional
        print the resulting device mask, by default True
    """
    if device is None:
        return
    if isinstance(device, list):
        mask = ','.join(str(d) for d in device)
    else:
        mask = str(device)
    os.environ['CUDA_VISIBLE_DEVICES'] = mask
    if verbose:
        print(f'using device {mask}')
def set_random_seed(seed):
    """Sets a seed for all training related random functions. The seed is only
    identical on the same machine.
    Parameters
    ----------
    seed : int
    Returns
    -------
    callable
        a dataloader worker_init_fn that seeds numpy per worker
    """
    print(f'Setting seed: {seed}')
    # seed every RNG the training stack touches
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # trade GPU speed for reproducibility
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    cv2.setRNGSeed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)

    def _init_fn(worker_id):
        # each dataloader worker gets its own, deterministic numpy seed
        np.random.seed(seed + worker_id)

    return _init_fn
def loss_verification(train_interface, data_loader, printer):
    """Run through one epoch and print the corresponding loss.
    When using cross-entropy, the usual loss should be -ln(num_classes). If
    not, there might be something wrong with your code.
    Parameters
    ----------
    train_interface : ITrainInterface
    data_loader : pytorch data_loader
    printer : Printer (pt_train_printer.py)
    """
    print('Running loss verification')
    with torch.no_grad():
        running_mean = 0.0
        running_std = 0.0
        num_batches = 0.0
        for batch in data_loader:
            # track simple input statistics alongside the loss
            running_mean += batch['x'].mean()
            running_std += batch['x'].std()
            num_batches += 1.0
            loss, metrics = train_interface.train_step(batch)
            printer.update(loss, -1, metrics)
        printer.print()
        print(f'mean: {running_mean/num_batches:.3f} std: {running_std/num_batches:.3f}')
class EarlyStopping():
    """Save the best model depending on a specified metric on the validation
    set.
    Returns
    -------
    EarlyStopping
    """

    def __init__(self, metric_key, get_max=True, epoch_thres=np.inf):
        """Constructor. Specify which metric is observed and whether a higher
        or a lower value counts as an improvement.
        For example:
        EarlyStopping('val_acc', get_max=True, epoch_thres=10)
        tracks the validation accuracy; whenever the best value so far
        (starting from -inf) is exceeded, the value and the model are saved.
        If 10 epochs pass without a new best value, training is stopped.
        This object is used within the Training class.
        Parameters
        ----------
        metric_key : str
            Which metric is observed. Needs to be a metric that is present in
            the training_interface. val_[name] is also possible.
        get_max : bool, optional
            Save the model if the new observed metric is above the current
            best value (True) or below it (False). By default True.
        epoch_thres : int, optional
            if the model has not been saved for 'epoch_thres' epochs, the
            training is stopped. By default np.inf, i.e. the model is trained
            the full number of epochs.
        """
        self.key = metric_key
        self.get_max = get_max
        self.no_update_counter = 0.
        self.thres = epoch_thres
        if self.thres < np.inf:
            warnings.warn(
                f'Early stopping: training is stopped after {self.thres} unchanged epochs.')
        # best value so far: start at the worst possible value
        self.current_val = -np.inf if get_max else +np.inf

    def __call__(self, metrics, model, save_path, save_state_dict):
        observed = metrics[self.key]
        self.no_update_counter += 1
        if self.get_max:
            improved = observed > self.current_val
        else:
            improved = observed < self.current_val
        if improved:
            self._update(observed, model, save_path, save_state_dict)
        if self.no_update_counter > self.thres:
            return True
        else:
            return False

    def _update(self, value, model, save_path, save_state_dict):
        # new best value: reset the stagnation counter and persist the model
        self.no_update_counter = 0
        self.current_val = value
        torch_save_model(model, save_path, save_state_dict)
class IStopCondition():
    # Callable interface used by Training: receives the epoch index and the
    # train-phase metrics dict and returns True when training should stop.
    def __call__(self, epoch, metrics):
        raise NotImplementedError
def torch_save_model(model, save_path, save_state_dict):
    """Persist *model* to *save_path*, either as a whole object or, when
    *save_state_dict* is True, as its state dict only."""
    print(f'saving model: {save_path}')
    if save_state_dict:
        print('save as state dict')
        torch.save(model.state_dict(), save_path)
    else:
        torch.save(model, save_path)
    print('model saved.')
def get_printer(print_intervall, log_file=None):
    """Convenience function, to get a printer without import pt_train_printer.
    Note that only the basic keywords are passed on here!
    Parameters
    ----------
    print_intervall : int
        print to terminal after n batches, if -1: no printing
    log_file : str, optional
        path to a json file, by default None: no log-file is saved.
    Returns
    -------
    Printer
    """
    printer = Printer(print_intervall, log_file=log_file)
    return printer
# taken from https://github.com/d-li14/mobilenetv2.pytorch/blob/master/imagenet.py
class BatchScheduler():
def __init__(self, decay_type, optimizer, initial_learning_rate, warmup, num_epochs, gamma=.1):
self.optimizer = optimizer
self.lr = initial_learning_rate
self.warmup = warmup
self.num_epochs = num_epochs
self.decay_type = decay_type
self.gamma = gamma
def step(self, epoch, iteration, num_iter):
lr = self.optimizer.param_groups[0]['lr']
warmup_epoch = 5 if self.warmup else 0
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = self.num_epochs * num_iter
if self.decay_type == 'step':
lr = self.lr * \
(self.gamma ** ((current_iter - warmup_iter) // (max_iter - warmup_iter)))
elif self.decay_type == 'cos':
lr = self.lr * \
(1 + cos(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
elif self.decay_type == 'linear':
lr = self.lr * (1 - (current_iter - warmup_iter) /
(max_iter - warmup_iter))
else:
raise ValueError('Unknown lr mode {}'.format(self.decay_type))
if epoch < warmup_epoch:
lr = self.lr * current_iter / warmup_iter
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
class TimeLogger():
    """Collects wall-clock durations per key and summarizes them with
    several statistics; a disabled instance turns every call into a no-op."""

    def __init__(self, is_active):
        self.is_active = is_active
        self.data = dict()       # key -> list of recorded durations
        self.qu = dict()         # key -> start timestamp of a running measurement
        self.functions = {
            'mean': np.mean,
            'min': np.min,
            'max': np.max,
            'std': np.std,
            'median': np.median,
            'sum': np.sum
        }

    def restart(self):
        """Drop all collected durations."""
        if self.is_active:
            self.data = dict()

    def start(self, key):
        """Begin timing *key*; a measurement for *key* must not already run."""
        if self.is_active:
            assert key not in self.qu.keys()
            self.qu[key] = time.time()

    def stop(self, key, do_log=True):
        """Finish timing *key*; record the duration unless do_log is False."""
        if not self.is_active:
            return
        elapsed = time.time() - self.qu.pop(key)
        if do_log:
            self._update(key, elapsed)

    def _update(self, key, value):
        assert self.is_active
        self.data.setdefault(key, []).append(value)

    def get_data(self):
        """Return a flat dict {key_statname: value} over all recorded keys."""
        assert self.is_active
        summary = dict()
        for key, values in self.data.items():
            arr = np.array(values)
            for name, fcn in self.functions.items():
                summary[key + '_' + name] = float(fcn(arr))
        return summary
def get_train_arg_parser(config):
    # Deprecated function
    """Typical argument parser to train a neural network
    Parameters
    ----------
    config : module or object
        default values for your project
    Returns
    -------
    argument parser
    use like this:
    import config_module
    ...
    def get_options():
        parser = get_train_argparser(config_module)
        parser.add_argument(...)
        ...
        return parser.parse_args()
    """
    parser = argparse.ArgumentParser()
    # (flag, type, default-from-config) triples
    typed_args = [
        ('--lr', float, config.LEARNING_RATE),
        ('--wd', float, config.WEIGHT_DECAY),
        ('--mom', float, config.MOMENTUM),
        ('--opt', str, config.OPTIMIZER),
        ('--bs', int, config.BATCH_SIZE),
        ('--epochs', int, config.EPOCHS),
        ('--lr_steps', int, config.LR_STEPS),
        ('--nw', int, config.NUM_WORKERS),
        ('--sv_int', int, config.SAVE_INTERVALL),
        ('--model_type', str, config.MODEL_TYPE),
        ('--seed', int, config.SEED),
        ('--device', int, config.DEVICE),
        ('--folder', str, config.DEF_FOLDER),
        ('--model_name', str, config.MODEL_NAME),
        ('--in_dim', int, config.INPUT_DIM),
    ]
    for flag, arg_type, default in typed_args:
        parser.add_argument(flag, type=arg_type, default=default)
    parser.add_argument('--early_stopping', action='store_true')
    parser.add_argument('--es_metric', type=str, default=config.ES_METRIC)
    parser.add_argument('--num_classes', type=int, default=config.NUM_CLASSES)
    # may be unnecessary for your project
    parser.add_argument('--ds_len', type=int, default=config.DATASET_LENGTH)
    parser.add_argument('--crop_size', type=int, default=config.CROP_SIZE)
    return parser
|
[
"numpy.random.seed",
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"DLBio.pytorch_helpers.get_lr",
"torch.no_grad",
"random.seed",
"math.cos",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.is_available",
"DLBio.pt_train_printer.Printer",
"cv2.setRNGSeed",
"time.time",
"torch.save",
"torch.cuda.manual_seed_all",
"numpy.array",
"warnings.warn",
"torch.tensor",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((22861, 22936), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer', 'step_size'], {'gamma': 'gamma', 'last_epoch': '(-1)'}), '(optimizer, step_size, gamma=gamma, last_epoch=-1)\n', (22886, 22936), True, 'import torch.optim as optim\n'), ((23885, 23905), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (23899, 23905), True, 'import numpy as np\n'), ((23910, 23933), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (23927, 23933), False, 'import torch\n'), ((23938, 23955), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (23949, 23955), False, 'import random\n'), ((23964, 23989), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23987, 23989), False, 'import torch\n'), ((24171, 24191), 'cv2.setRNGSeed', 'cv2.setRNGSeed', (['seed'], {}), '(seed)\n', (24185, 24191), False, 'import cv2\n'), ((29030, 29073), 'DLBio.pt_train_printer.Printer', 'Printer', (['print_intervall'], {'log_file': 'log_file'}), '(print_intervall, log_file=log_file)\n', (29037, 29073), False, 'from DLBio.pt_train_printer import Printer\n'), ((32423, 32448), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (32446, 32448), False, 'import argparse\n'), ((22438, 22505), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer', 'fixed_steps'], {'gamma': 'gamma'}), '(optimizer, fixed_steps, gamma=gamma)\n', (22468, 22505), True, 'import torch.optim as optim\n'), ((23999, 24027), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (24021, 24027), False, 'import torch\n'), ((24036, 24068), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (24062, 24068), False, 'import torch\n'), ((24275, 24307), 'numpy.random.seed', 'np.random.seed', (['(seed + worker_id)'], {}), '(seed + worker_id)\n', (24289, 24307), True, 'import numpy as np\n'), ((25235, 25250), 'torch.no_grad', 'torch.no_grad', ([], 
{}), '()\n', (25248, 25250), False, 'import torch\n'), ((28466, 28496), 'torch.save', 'torch.save', (['to_save', 'save_path'], {}), '(to_save, save_path)\n', (28476, 28496), False, 'import torch\n'), ((28515, 28543), 'torch.save', 'torch.save', (['model', 'save_path'], {}), '(model, save_path)\n', (28525, 28543), False, 'import torch\n'), ((31119, 31130), 'time.time', 'time.time', ([], {}), '()\n', (31128, 31130), False, 'import time\n'), ((7442, 7460), 'DLBio.pt_train_printer.Printer', 'Printer', (['(100)', 'None'], {}), '(100, None)\n', (7449, 7460), False, 'from DLBio.pt_train_printer import Printer\n'), ((9034, 9059), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9057, 9059), False, 'import torch\n'), ((9073, 9128), 'warnings.warn', 'warnings.warn', (['"""No GPU detected. Training can be slow."""'], {}), "('No GPU detected. Training can be slow.')\n", (9086, 9128), False, 'import warnings\n'), ((10454, 10476), 'DLBio.pytorch_helpers.get_lr', 'get_lr', (['self.optimizer'], {}), '(self.optimizer)\n', (10460, 10476), False, 'from DLBio.pytorch_helpers import get_lr\n'), ((19540, 19595), 'warnings.warn', 'warnings.warn', (['f"""Using default momentum for SGD: {0.9}"""'], {}), "(f'Using default momentum for SGD: {0.9}')\n", (19553, 19595), False, 'import warnings\n'), ((19655, 19713), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.0}"""'], {}), "(f'Using default weight_decay for SGD {0.0}')\n", (19668, 19713), False, 'import warnings\n'), ((27232, 27332), 'warnings.warn', 'warnings.warn', (['f"""Early stopping: training is stopped after {self.thres} unchanged epochs."""'], {}), "(\n f'Early stopping: training is stopped after {self.thres} unchanged epochs.'\n )\n", (27245, 27332), False, 'import warnings\n'), ((31280, 31291), 'time.time', 'time.time', ([], {}), '()\n', (31289, 31291), False, 'import time\n'), ((31709, 31725), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (31717, 31725), True, 
'import numpy as np\n'), ((13655, 13670), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13668, 13670), False, 'import torch\n'), ((20134, 20192), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.0}"""'], {}), "(f'Using default weight_decay for SGD {0.0}')\n", (20147, 20192), False, 'import warnings\n'), ((13790, 13805), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13803, 13805), False, 'import torch\n'), ((20470, 20530), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.001}"""'], {}), "(f'Using default weight_decay for SGD {0.001}')\n", (20483, 20530), False, 'import warnings\n'), ((12705, 12723), 'torch.tensor', 'torch.tensor', (['[-1]'], {}), '([-1])\n', (12717, 12723), False, 'import torch\n'), ((20831, 20889), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.0}"""'], {}), "(f'Using default weight_decay for SGD {0.0}')\n", (20844, 20889), False, 'import warnings\n'), ((21216, 21278), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for RMSprop {0.0}"""'], {}), "(f'Using default weight_decay for RMSprop {0.0}')\n", (21229, 21278), False, 'import warnings\n'), ((30028, 30093), 'math.cos', 'cos', (['(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))'], {}), '(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))\n', (30031, 30093), False, 'from math import cos, pi\n')]
|
import pandas as pd
import numpy as np
import logging
# IF CHOPPINESS INDEX >= 61.8 - -> MARKET IS CONSOLIDATING
# IF CHOPPINESS INDEX <= 38.2 - -> MARKET IS TRENDING
# https://medium.com/codex/detecting-ranging-and-trending-markets-with-choppiness-index-in-python-1942e6450b58
class WyckoffAccumlationDistribution:
    """Detects a Wyckoff accumulation/distribution setup around a volume
    climax using the Choppiness Index (CI).

    Rule (see header comments):
    IF CHOPPINESS INDEX >= 61.8 --> MARKET IS CONSOLIDATING
    IF CHOPPINESS INDEX <= 38.2 --> MARKET IS TRENDING
    """

    def __init__(self):
        self.lookback = 10                 # CI rolling-window length
        self.barCountDistribution = 3      # bars checked for distribution before the climax
        self.barCountVolClimaxRebound = 2  # bars skipped right after the climax
        self.barCountAccumulation = 7      # bars checked for accumulation
        self.minVolumeClimax = 5.0  # minimum volume climax - 600%
        self.isConsolidating = 61.8  # CI consolidation threshold
        self.isTrending = 38.2       # CI trending threshold

    # IF CHOPPINESS INDEX >= 61.8 - -> MARKET IS CONSOLIDATING
    def isAccumulating(self, value):
        # FIX: documented rule is CI >= 61.8 (was a strict '>')
        return value >= self.isConsolidating

    def isDistributing(self, value):
        # FIX: documented rule is CI <= 38.2 (was a strict '<')
        return value <= self.isTrending

    # **** Tricky part ****
    # Because it uses previous data for choppiness, you cannot take an average of the chopiness.
    # The average is already built-in to the calculation. So evaluate any of the data falls
    # into consolidating or trending regions.
    #
    @staticmethod
    def get_ci(high, low, close, lookback):
        """Return the Choppiness Index series for the given OHLC Series
        over a rolling *lookback* window."""
        tr1 = pd.DataFrame(high - low).rename(columns={0: 'tr1'})
        tr2 = pd.DataFrame(abs(high - close.shift(1))).rename(columns={0: 'tr2'})
        tr3 = pd.DataFrame(abs(low - close.shift(1))).rename(columns={0: 'tr3'})
        frames = [tr1, tr2, tr3]
        # true range = max of the three candidate ranges per bar
        tr = pd.concat(frames, axis=1, join='inner').dropna().max(axis=1)
        atr = tr.rolling(1).mean()
        highh = high.rolling(lookback).max()
        lowl = low.rolling(lookback).min()
        ci = 100 * np.log10((atr.rolling(lookback).sum()) /
                             (highh - lowl)) / np.log10(lookback)
        return ci

    def trimIndexes(self, ci: list, startIndex: int, endIndex: int):
        """Clamp [startIndex, endIndex) to the valid bounds of *ci*,
        guaranteeing a non-empty range."""
        if startIndex < 0:
            startIndex = 0
        if endIndex > len(ci):
            endIndex = len(ci)
        if startIndex >= endIndex:
            startIndex = endIndex - 1
        return startIndex, endIndex

    def isDistributionPhase(self, ci: list, volClimaxIndex: int):
        """True if any CI value in the window just before the volume climax
        is in the trending (distribution) region."""
        startIndex = volClimaxIndex - self.barCountDistribution - 1
        endIndex = startIndex + self.barCountDistribution
        startIndex, endIndex = self.trimIndexes(ci, startIndex, endIndex)
        for i in range(startIndex, endIndex):
            if self.isDistributing(ci[i]):
                return True
        return False

    def isAccumulationValid(self, ci: list, volClimaxIndex: int):
        """True if any CI value in the accumulation window (after skipping
        the rebound bars) is in the consolidating region."""
        endIndex = volClimaxIndex - self.barCountVolClimaxRebound
        startIndex = endIndex - self.barCountAccumulation
        startIndex, endIndex = self.trimIndexes(ci, startIndex, endIndex)
        for value in ci[startIndex:endIndex]:
            if self.isAccumulating(value):
                return True
        return False

    def Run(self, symbol: str, df: pd.DataFrame, volClimax: float, volClimaxIndex: int):
        """Return True when a distribution phase followed by a valid
        accumulation is found around a sufficiently large volume climax;
        False otherwise (including on error)."""
        try:
            if volClimax > self.minVolumeClimax:
                data = WyckoffAccumlationDistribution.get_ci(
                    df['High'], df['Low'], df['Close'], self.lookback)
                data = data.dropna()
                # reverse so that index 0 is the most recent bar
                # (matches how volClimaxIndex is counted -- TODO confirm with caller)
                ci = data.to_numpy()[::-1]
                isDistribute = self.isDistributionPhase(ci, volClimaxIndex)
                isAccumulate = self.isAccumulationValid(ci, volClimaxIndex)
                return isDistribute and isAccumulate
            return False
        except Exception as e:
            logging.error(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')
            print(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')
            # FIX: previously fell through and returned None implicitly
            return False
return False
def RunWickoff(self, symbol:str, dataf:pd.DataFrame):
df = dataf[::-1]
df.reset_index()
data = WyckoffAccumlationDistribution.get_ci(
df['High'], df['Low'], df['Close'], self.lookback)
data = data.dropna()
|
[
"pandas.DataFrame",
"numpy.log10",
"logging.error",
"pandas.concat"
] |
[((1742, 1760), 'numpy.log10', 'np.log10', (['lookback'], {}), '(lookback)\n', (1750, 1760), True, 'import numpy as np\n'), ((1191, 1215), 'pandas.DataFrame', 'pd.DataFrame', (['(high - low)'], {}), '(high - low)\n', (1203, 1215), True, 'import pandas as pd\n'), ((3523, 3591), 'logging.error', 'logging.error', (['f"""WyckoffAccumlationDistribution.Run: {symbol} - {e}"""'], {}), "(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')\n", (3536, 3591), False, 'import logging\n'), ((1452, 1491), 'pandas.concat', 'pd.concat', (['frames'], {'axis': '(1)', 'join': '"""inner"""'}), "(frames, axis=1, join='inner')\n", (1461, 1491), True, 'import pandas as pd\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.